diff --git a/biome.json b/biome.json
index 41e8052..56db870 100644
--- a/biome.json
+++ b/biome.json
@@ -40,13 +40,7 @@
"organizeImports": {
"level": "on",
"options": {
- "groups": [
- [":BUN:", ":NODE:"],
- ":BLANK_LINE:",
- [":PACKAGE:"],
- ":BLANK_LINE:",
- ":PATH:"
- ]
+ "groups": [[":BUN:", ":NODE:"], ":BLANK_LINE:", [":PACKAGE:"], ":BLANK_LINE:", ":PATH:"]
}
}
}
diff --git a/bun.lock b/bun.lock
index 4f6acef..a06c9af 100644
--- a/bun.lock
+++ b/bun.lock
@@ -5,12 +5,14 @@
"": {
"name": "@vtemian/opencode-config",
"dependencies": {
- "@opencode-ai/plugin": "1.1.6",
+ "@opencode-ai/plugin": "1.1.23",
"bun-pty": "^0.4.5",
+ "valibot": "^1.2.0",
},
"devDependencies": {
"@biomejs/biome": "^2.3.10",
"bun-types": "latest",
+ "lefthook": "^2.0.13",
"typescript": "^5.7.3",
},
},
@@ -34,9 +36,9 @@
"@biomejs/cli-win32-x64": ["@biomejs/cli-win32-x64@2.3.10", "", { "os": "win32", "cpu": "x64" }, "sha512-pHEFgq7dUEsKnqG9mx9bXihxGI49X+ar+UBrEIj3Wqj3UCZp1rNgV+OoyjFgcXsjCWpuEAF4VJdkZr3TrWdCbQ=="],
- "@opencode-ai/plugin": ["@opencode-ai/plugin@1.1.6", "", { "dependencies": { "@opencode-ai/sdk": "1.1.6", "zod": "4.1.8" } }, "sha512-psGajIrj4V03gn85/7Xy5YXdPoCsRGwBsifruG5TfG63+7Jd1TENNufp+SxGb+xtlddDteDMGVHSnE98q9LbDw=="],
+ "@opencode-ai/plugin": ["@opencode-ai/plugin@1.1.23", "", { "dependencies": { "@opencode-ai/sdk": "1.1.23", "zod": "4.1.8" } }, "sha512-O/iLSKOUuzD95UWhj9y/tEuycPEBv36de0suHXXqeYLWZLZ16DAUSKR+YG7rvRjJS0sbn4biVMw+k7XXk/oxiQ=="],
- "@opencode-ai/sdk": ["@opencode-ai/sdk@1.1.6", "", {}, "sha512-7Tiso9BExVgxz86VY6F807McCyOgu/SCaQJ87wwxxVSN8GpPpmUIYN5h6LH38EBNJWKXDjokasn/y9EkKxOisQ=="],
+ "@opencode-ai/sdk": ["@opencode-ai/sdk@1.1.23", "", {}, "sha512-YjN9ogzkLol92s+/iARXRop9/5oFIezUkvWVay12u1IM6A/WJs50DeKl3oL0x4a68P1a5tI5gD98dLnk2+AlsA=="],
"@types/node": ["@types/node@25.0.3", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA=="],
@@ -44,10 +46,34 @@
"bun-types": ["bun-types@1.3.5", "", { "dependencies": { "@types/node": "*" } }, "sha512-inmAYe2PFLs0SUbFOWSVD24sg1jFlMPxOjOSSCYqUgn4Hsc3rDc7dFvfVYjFPNHtov6kgUeulV4SxbuIV/stPw=="],
+ "lefthook": ["lefthook@2.0.15", "", { "optionalDependencies": { "lefthook-darwin-arm64": "2.0.15", "lefthook-darwin-x64": "2.0.15", "lefthook-freebsd-arm64": "2.0.15", "lefthook-freebsd-x64": "2.0.15", "lefthook-linux-arm64": "2.0.15", "lefthook-linux-x64": "2.0.15", "lefthook-openbsd-arm64": "2.0.15", "lefthook-openbsd-x64": "2.0.15", "lefthook-windows-arm64": "2.0.15", "lefthook-windows-x64": "2.0.15" }, "bin": { "lefthook": "bin/index.js" } }, "sha512-sl5rePO6UUOLKp6Ci+MMKOc86zicBaPUCvSw2Cq4gCAgTmxpxhIjhz7LOu2ObYerVRPpTq3gvzPTjI71UotjnA=="],
+
+ "lefthook-darwin-arm64": ["lefthook-darwin-arm64@2.0.15", "", { "os": "darwin", "cpu": "arm64" }, "sha512-ygAqG/NzOgY9bEiqeQtiOmCRTtp9AmOd3eyrpEaSrRB9V9f3RHRgWDrWbde9BiHSsCzcbeY9/X2NuKZ69eUsNA=="],
+
+ "lefthook-darwin-x64": ["lefthook-darwin-x64@2.0.15", "", { "os": "darwin", "cpu": "x64" }, "sha512-3wA30CzdSL5MFKD6dk7v8BMq7ScWQivpLbmIn3Pv67AaBavN57N/hcdGqOFnDDFI5WazVwDY7UqDfMIk5HZjEA=="],
+
+ "lefthook-freebsd-arm64": ["lefthook-freebsd-arm64@2.0.15", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-FbYBBLVbX8BjdO+icN1t/pC3TOW3FAvTKv/zggBKNihv6jHNn/3s/0j2xIS0k0Pw9oOE7MVmEni3qp2j5vqHrQ=="],
+
+ "lefthook-freebsd-x64": ["lefthook-freebsd-x64@2.0.15", "", { "os": "freebsd", "cpu": "x64" }, "sha512-udHMjh1E8TfC0Z7Y249XZMATJOyj1Jxlj9JoEinkoBvAsePFKDEQg5teuXuTGhjsHYpqVekfSvLNNfHKUUbbjw=="],
+
+ "lefthook-linux-arm64": ["lefthook-linux-arm64@2.0.15", "", { "os": "linux", "cpu": "arm64" }, "sha512-1HAPmdYhfcOlubv63sTnWtW2rFuC+kT1MvC3JvdrS5V6zrOImbBSnYZMJX/Dd3w4pm0x2ZJb9T+uef8a0jUQkg=="],
+
+ "lefthook-linux-x64": ["lefthook-linux-x64@2.0.15", "", { "os": "linux", "cpu": "x64" }, "sha512-Pho87mlNFH47zc4fPKzQSp8q9sWfIFW/KMMZfx/HZNmX25aUUTOqMyRwaXxtdAo/hNJ9FX4JeuZWq9Y3iyM5VA=="],
+
+ "lefthook-openbsd-arm64": ["lefthook-openbsd-arm64@2.0.15", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-pet03Edlj1QeFUgxcIK1xu8CeZA+ejYplvPgdfe//69+vQFGSDaEx3H2mVx8RqzWfmMbijM2/WfkZXR2EVw3bw=="],
+
+ "lefthook-openbsd-x64": ["lefthook-openbsd-x64@2.0.15", "", { "os": "openbsd", "cpu": "x64" }, "sha512-i+a364CcSAeIO5wQzLMHsthHt/v6n3XwhKmRq/VBzPOUv9KutNeF55yCE/6lvuvzwxpdEfBjh6cXPERC0yp98w=="],
+
+ "lefthook-windows-arm64": ["lefthook-windows-arm64@2.0.15", "", { "os": "win32", "cpu": "arm64" }, "sha512-69u5GdVOT4QIxc2TK5ce0cTXLzwB55Pk9ZnnJNFf1XsyZTGcg9bUWYYTyD12CIIXbVTa0RVXIIrbU9UgP8O1AQ=="],
+
+ "lefthook-windows-x64": ["lefthook-windows-x64@2.0.15", "", { "os": "win32", "cpu": "x64" }, "sha512-/zYEndCUgj8XK+4wvLYLRk3AcfKU6zWf2GHx+tcZ4K2bLaQdej4m+OqmQsVpUlF8N2tN9hfwlj1D50uz75LUuQ=="],
+
"typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],
"undici-types": ["undici-types@7.16.0", "", {}, "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw=="],
+ "valibot": ["valibot@1.2.0", "", { "peerDependencies": { "typescript": ">=5" }, "optionalPeers": ["typescript"] }, "sha512-mm1rxUsmOxzrwnX5arGS+U4T25RdvpPjPN4yR0u9pUBov9+zGVtO84tif1eY4r6zWxVxu3KzIyknJy3rxfRZZg=="],
+
"zod": ["zod@4.1.8", "", {}, "sha512-5R1P+WwQqmmMIEACyzSvo4JXHY5WiAFHRMg+zBZKgKS+Q1viRa0C1hmUKtHltoIFKtIdki3pRxkmpP74jnNYHQ=="],
}
}
diff --git a/package.json b/package.json
index cf159af..60b0d1c 100644
--- a/package.json
+++ b/package.json
@@ -2,17 +2,18 @@
"name": "micode",
"version": "0.8.4",
"description": "OpenCode plugin with Brainstorm-Research-Plan-Implement workflow",
- "module": "src/index.ts",
- "main": "src/index.ts",
+ "module": "dist/index.js",
+ "main": "dist/index.js",
"types": "src/index.ts",
"type": "module",
"files": [
"src",
+ "dist",
"INSTALL_CLAUDE.md"
],
"scripts": {
"prepare": "lefthook install",
- "build": "tsc --noEmit",
+ "build": "bun build src/index.ts --outdir dist --target bun",
"typecheck": "tsc --noEmit",
"prepublishOnly": "bun run typecheck",
"test": "bun test",
@@ -39,7 +40,7 @@
"url": "https://github.com/vtemian/micode/issues"
},
"dependencies": {
- "@opencode-ai/plugin": "1.1.6",
+ "@opencode-ai/plugin": "1.1.23",
"bun-pty": "^0.4.5",
"valibot": "^1.2.0"
},
diff --git a/src/agents/artifact-searcher.ts b/src/agents/artifact-searcher.ts
index f51febd..52919cb 100644
--- a/src/agents/artifact-searcher.ts
+++ b/src/agents/artifact-searcher.ts
@@ -4,7 +4,6 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const artifactSearcherAgent: AgentConfig = {
description: "Searches past handoffs, plans, and ledgers for relevant precedent",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.3,
tools: {
edit: false,
diff --git a/src/agents/bootstrapper.ts b/src/agents/bootstrapper.ts
new file mode 100644
index 0000000..dd1e7d6
--- /dev/null
+++ b/src/agents/bootstrapper.ts
@@ -0,0 +1,164 @@
+// src/agents/bootstrapper.ts
+import type { AgentConfig } from "@opencode-ai/sdk";
+
+export const bootstrapperAgent: AgentConfig = {
+ description: "Analyzes a request and creates exploration branches with scopes for octto brainstorming",
+ mode: "subagent",
+ temperature: 0.5,
+ prompt: `
+Analyze the user's request and create 2-4 exploration branches.
+Each branch explores ONE specific aspect of the design.
+
+
+
+Return ONLY a JSON object. No markdown, no explanation.
+
+{
+ "branches": [
+ {
+ "id": "unique_snake_case_id",
+ "scope": "One sentence describing what this branch explores",
+ "initial_question": {
+ "type": "",
+ "config": { ... }
+ }
+ }
+ ]
+}
+
+
+
+Each branch explores ONE distinct aspect (not overlapping)
+Scope is a clear boundary - questions stay within scope
+2-4 branches total - don't over-decompose
+Branch IDs are short snake_case identifiers
+
+
+
+Request: "Add healthcheck endpoints to the API"
+
+{
+ "branches": [
+ {
+ "id": "services",
+ "scope": "Which services and dependencies need health monitoring",
+ "initial_question": {
+ "type": "pick_many",
+ "config": {
+ "question": "Which services should the healthcheck monitor?",
+ "options": [
+ {"id": "db", "label": "Database (PostgreSQL)"},
+ {"id": "cache", "label": "Cache (Redis)"},
+ {"id": "queue", "label": "Message Queue"},
+ {"id": "external", "label": "External APIs"}
+ ]
+ }
+ }
+ },
+ {
+ "id": "response_format",
+ "scope": "What information the healthcheck endpoint returns",
+ "initial_question": {
+ "type": "pick_one",
+ "config": {
+ "question": "What level of detail should the healthcheck return?",
+ "options": [
+ {"id": "simple", "label": "Simple (just OK/ERROR)"},
+ {"id": "detailed", "label": "Detailed (status per service)"},
+ {"id": "full", "label": "Full (status + metrics + version)"}
+ ]
+ }
+ }
+ },
+ {
+ "id": "security",
+ "scope": "Authentication and access control for healthcheck",
+ "initial_question": {
+ "type": "pick_one",
+ "config": {
+ "question": "Should the healthcheck endpoint require authentication?",
+ "options": [
+ {"id": "public", "label": "Public (no auth)"},
+ {"id": "internal", "label": "Internal only (IP whitelist)"},
+ {"id": "authenticated", "label": "Requires API key"}
+ ]
+ }
+ }
+ }
+ ]
+}
+
+
+
+
+Single choice. config: { question, options: [{id, label, description?}], recommended?, context? }
+
+
+
+Multiple choice. config: { question, options: [{id, label, description?}], recommended?: string[], min?, max?, context? }
+
+
+
+Yes/no. config: { question, context?, yesLabel?, noLabel?, allowCancel? }
+
+
+
+Free text. config: { question, placeholder?, context?, multiline? }
+
+
+
+Numeric range. config: { question, min, max, step?, defaultValue?, context? }
+
+
+
+Order items. config: { question, options: [{id, label, description?}], context? }
+
+
+
+Rate items (stars). config: { question, options: [{id, label, description?}], min?, max?, context? }
+
+
+
+Thumbs up/down. config: { question, context? }
+
+
+
+Options with pros/cons. config: { question, options: [{id, label, description?, pros?: string[], cons?: string[]}], recommended?, allowFeedback?, context? }
+
+
+
+Code diff review. config: { question, before, after, filePath?, language? }
+
+
+
+Code input. config: { question, language?, placeholder?, context? }
+
+
+
+Image upload. config: { question, multiple?, maxImages?, context? }
+
+
+
+File upload. config: { question, multiple?, maxFiles?, accept?: string[], context? }
+
+
+
+Emoji selection. config: { question, emojis?: string[], context? }
+
+
+
+Section review. config: { question, content, context? }
+
+
+
+Plan review. config: { question, sections: [{id, title, content}] }
+
+
+
+
+Never create more than 4 branches
+Never create overlapping scopes
+Never wrap output in markdown code blocks
+Never include text outside the JSON
+`,
+};
diff --git a/src/agents/brainstormer.ts b/src/agents/brainstormer.ts
index 9f963b2..6b43dc9 100644
--- a/src/agents/brainstormer.ts
+++ b/src/agents/brainstormer.ts
@@ -1,9 +1,8 @@
import type { AgentConfig } from "@opencode-ai/sdk";
export const brainstormerAgent: AgentConfig = {
- description: "Refines rough ideas into fully-formed designs through collaborative questioning",
+ description: "Refines rough ideas into fully-formed designs through decisive collaboration",
mode: "primary",
- model: "openai/gpt-5.2-codex",
temperature: 0.7,
tools: {
spawn_agent: false, // Primary agents use built-in Task tool, not spawn_agent
@@ -20,8 +19,69 @@ Turn ideas into fully formed designs through natural collaborative dialogue.
This is DESIGN ONLY. The planner agent handles detailed implementation plans.
+
+You are a SENIOR ENGINEER, not a junior seeking approval.
+- Make decisions. Don't ask "what do you think?" - state "I'm doing X because Y."
+- State assumptions and proceed. User will correct you if wrong. This is faster than asking.
+- When you see a problem, propose a solution. Don't present problems without solutions.
+- Trust your judgment. You have context. Use it to make calls.
+- Disagreement is good. If user pushes back, discuss briefly, then execute their choice.
+
+
+
+ Be a thoughtful colleague, not a formal document generator
+ Write like you're explaining to a smart peer over coffee
+ Show your thinking - "I'm leaning toward X because..." not just "X is the solution"
+ Use "we" and "our" - this is collaborative design
+ Be direct but warm - no corporate speak, no filler phrases
+
+
+
+ USE MARKDOWN FORMATTING - headers, bullets, bold, whitespace
+ NEVER write walls of text - break into digestible chunks
+ Each section gets a ## header
+ Use bullet points for lists of 3+ items
+ Use **bold** for key terms and important concepts
+ Add blank lines between sections for breathing room
+ Keep paragraphs to 2-3 sentences max
+
+
+## Architecture Overview
+
+The system treats **artifacts as first-class records** stored in SQLite, decoupled from files.
+
+**Key insight:** We're shifting from "file-backed" to "event-backed" artifacts. This means:
+- Artifacts survive even if source files are deleted
+- Search is always consistent with the database
+- We don't need to re-index when files move
+
+The milestone pipeline becomes the single source of truth.
+
+
+
+Architecture Overview
+The redesigned artifact system treats artifacts as first‑class records stored only in SQLite, decoupled from plan or ledger files. Artifacts are created at milestones (design approved, plan complete, execution done) using a classification agent that chooses exactly one type: feature, decision, or session. The agent scores the milestone content against the agreed criteria, selects the highest‑confidence type, and resolves ties using the deterministic priority order feature → decision → session. Each artifact record includes the complete metadata set you requested...
+
+
+
+## [Section Name]
+
+[1-2 sentence overview of what this section covers]
+
+**[Key concept 1]:** [Brief explanation]
+
+- [Detail point]
+- [Detail point]
+- [Detail point]
+
+[Optional: transition sentence to next section]
+
+
+
- ONE QUESTION AT A TIME: Ask exactly ONE question, then STOP and wait for the user's response. NEVER ask multiple questions in a single message. This is the most important rule.
+ BE PROACTIVE: When the user gives clear direction (e.g., "mark as solved", "fix this", "move to next"), EXECUTE IMMEDIATELY. Don't ask clarifying questions for clear instructions.
+ Gather requirements through STATEMENTS and PROPOSALS, not questions. "I'm assuming X" beats "What is X?"
+ CONTINUOUS WORKFLOW: When processing lists/items one-by-one, automatically move to the next item after completing each. Don't wait to be asked "what's next?"NO CODE: Never write code. Never provide code examples. Design only.TOOLS (grep, read, etc.): Do NOT use directly - use subagents instead.Use built-in Task tool to spawn subagents. NEVER use spawn_agent (that's for subagents only).
@@ -47,21 +107,21 @@ This is DESIGN ONLY. The planner agent handles detailed implementation plans.
Call multiple Task tools in ONE message for parallel execution.
Results are available immediately - no polling needed.
- Do NOT proceed to questions until you have codebase context
+ Gather codebase context BEFORE forming your approachpurpose, constraints, success criteriaPropose 2-3 different approaches with trade-offs
- Present options conversationally with your recommendation
- Lead with recommended option and explain WHY
+ Lead with YOUR CHOSEN approach and explain WHY you chose it
+ Present alternatives briefly as "I considered X but rejected it because..."effort estimate, risks, dependencies
- Wait for feedback before proceeding
+ MAKE THE DECISION. State what you're going to do, then do it.
+ Only pause if you genuinely cannot choose between equally valid options
- Break into sections of 200-300 words
- Ask after EACH section: "Does this look right so far?"
+ Present ALL sections in ONE message - do not pause between sectionsArchitecture overviewKey components and responsibilities
@@ -69,17 +129,14 @@ This is DESIGN ONLY. The planner agent handles detailed implementation plans.
Error handling strategyTesting approach
- Don't proceed to next section until current one is validated
+ After presenting, state: "I'm proceeding to create the design doc. Interrupt if you want changes."
+ Then IMMEDIATELY proceed to finalizing - don't wait for approval
-
+Write validated design to thoughts/shared/designs/YYYY-MM-DD-{topic}-design.mdCommit the design document to git
- Ask: "Ready for the planner to create a detailed implementation plan?"
-
-
-
- When user says yes/approved/ready, IMMEDIATELY spawn the planner:
+ IMMEDIATELY spawn planner - do NOT ask "Ready for planner?"
Task(
subagent_type="planner",
@@ -87,16 +144,11 @@ This is DESIGN ONLY. The planner agent handles detailed implementation plans.
description="Create implementation plan"
)
- Do NOT ask again - if user approved, spawn planner immediately
-
- Report: "Implementation plan created at thoughts/shared/plans/YYYY-MM-DD-{topic}.md"
- Ask user: "Ready to execute the plan?"
- Wait for user response before proceeding
-
-
- When user says yes/execute/go, spawn the executor:
+
+ Report: "Implementation plan created at thoughts/shared/plans/YYYY-MM-DD-{topic}.md"
+ IMMEDIATELY spawn executor - do NOT ask "Ready to execute?"
Task(
subagent_type="executor",
@@ -104,31 +156,86 @@ This is DESIGN ONLY. The planner agent handles detailed implementation plans.
description="Execute implementation plan"
)
-
- Report executor results to user
- YOUR JOB IS DONE. STOP HERE.
- Do NOT write any code yourself
-
+ User approved the workflow when they started brainstorming - proceed without asking
+
+
+
+ Report executor results to user
+ YOUR JOB IS DONE. STOP HERE.
+ Do NOT write any code yourself
+ When user gives direction, EXECUTE it. Don't ask for confirmation on clear instructions.
+ Propose solutions, make recommendations, drive the conversation forward. You're a helper, not a stenographer.
+ When processing lists, automatically continue to next item after completing one. No "ready for next?"NO CODE. Describe components, not implementations. Planner writes code.Use Task tool for subagents. They complete before you continue.Multiple Task calls in one message run in parallel
- Ask exactly ONE question per message. STOP after asking. Wait for user's answer before continuing. NEVER bundle multiple questions together.
+ During exploration, STATE your assumptions and proceed. User will correct if wrong.Remove unnecessary features from ALL designsALWAYS propose 2-3 approaches before settling
- Present in sections, validate each before proceeding
- When user approves design, IMMEDIATELY spawn planner - don't ask again
+ Present ALL design sections in ONE message, then proceed immediately
+ Execute entire workflow (design + plan + execute) without pausing for approval
+
+ You are a HELPER, not just a facilitator. Actively solve problems.
+ When user presents an issue, propose a concrete solution - don't just ask "what do you want to do?"
+ When reviewing items (bugs, comments, tasks), state your recommendation and execute it
+ Execute obvious actions without asking. "Mark as solved" = call the API. "Move to next" = show the next item.
+
+
+ Present current item with your analysis and recommendation
+ If user agrees or gives direction, EXECUTE immediately
+ After execution, AUTOMATICALLY present the next item - don't ask "ready for next?"
+ If user disagrees with your recommendation, discuss briefly then execute their choice
+ Track progress: "Done: 3/10. Moving to #4..."
+
+
+
+
+ ONLY pause for confirmation when there's a genuine decision to make
+ NEVER ask "Does this look right?" - present and proceed
+ NEVER ask "Ready for X?" when user already approved the workflow
+ NEVER ask "Should I proceed?" - if direction is clear, proceed
+
+
+ Multiple valid approaches with significant trade-offs - user must choose
+ Destructive actions (deleting, major rewrites)
+
+
+
+ Progress updates between sections
+ Next step in an approved workflow
+ Obvious follow-up actions
+ User gave clear direction - execute it
+ Moving to next item in a list
+ Marking items as done/resolved
+
+
+
+ Track what you've done to avoid repeating work
+ Before any action, check: "Have I already done this?"
+ If user says "you already did X" - acknowledge and move on
+
+
+
- NEVER ask multiple questions in one message - this breaks the collaborative flow
+ NEVER write walls of text - use headers, bullets, whitespace
+ NEVER skip markdown formatting - ## headers, **bold**, bullet lists
+ NEVER write paragraphs longer than 3 sentences
+ NEVER ask "Does this look right?" - present design and proceed
+ NEVER ask "Ready for X?" or "Should I proceed?" when workflow is approved or direction is clear
+ NEVER repeat work you've already done - check state firstNever write code snippets or examplesNever provide file paths with line numbersNever specify exact function signaturesNever jump to implementation details - stay at design level
+ NEVER be passive - if user needs help, HELP them. Don't just ask what they want.
+ NEVER wait to be asked "what's next?" when processing a list - continue automatically
+ NEVER ask "which comment number should we tackle next?" - just move to the next one
diff --git a/src/agents/codebase-analyzer.ts b/src/agents/codebase-analyzer.ts
index 1c37a06..dead595 100644
--- a/src/agents/codebase-analyzer.ts
+++ b/src/agents/codebase-analyzer.ts
@@ -3,7 +3,6 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const codebaseAnalyzerAgent: AgentConfig = {
description: "Explains HOW code works with precise file:line references",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.2,
tools: {
write: false,
diff --git a/src/agents/codebase-locator.ts b/src/agents/codebase-locator.ts
index f7cf5f1..de64b7d 100644
--- a/src/agents/codebase-locator.ts
+++ b/src/agents/codebase-locator.ts
@@ -3,7 +3,6 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const codebaseLocatorAgent: AgentConfig = {
description: "Finds WHERE files live in the codebase",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.1,
tools: {
write: false,
diff --git a/src/agents/commander.ts b/src/agents/commander.ts
index 8b16468..abfb1eb 100644
--- a/src/agents/commander.ts
+++ b/src/agents/commander.ts
@@ -136,12 +136,46 @@ Just do it - including obvious follow-up actions.
Use TodoWrite to track what you're doingNever discard tasks without explicit approvalUse journal for insights, failed approaches, preferences
-`;
+
+
+
+ ONLY pause for confirmation when there's a genuine decision to make
+ NEVER ask "Does this look right?" for progress updates
+ NEVER ask "Ready for X?" when workflow is already approved
+ NEVER ask "Should I proceed?" - if direction is clear, proceed
+
+
+ Multiple valid approaches exist and choice matters
+ Would delete or significantly restructure existing code
+ Requirements are ambiguous and need clarification
+ Plan needs approval before implementation begins
+
+
+
+ Next step in an approved workflow
+ Obvious follow-up actions
+ Progress updates - report, don't ask
+ Spawning subagents for approved work
+
+
+
+
+ Track what you've done to avoid repeating work
+ Before any action, check: "Have I already done this?"
+ If user says "you already did X" - acknowledge and move on, don't redo
+ Check if design/plan files exist before creating them
+
+
+
+ NEVER ask "Does this look right?" after each step - batch updates
+ NEVER ask "Ready for X?" when user approved the workflow
+ NEVER repeat work you've already done
+ NEVER ask for permission to do obvious follow-up actions
+`;
export const primaryAgent: AgentConfig = {
description: "Pragmatic orchestrator. Direct, honest, delegates to specialists.",
mode: "primary",
- model: "openai/gpt-5.2-codex",
temperature: 0.2,
thinking: {
type: "enabled",
diff --git a/src/agents/executor.ts b/src/agents/executor.ts
index 61911f3..f1f1a4e 100644
--- a/src/agents/executor.ts
+++ b/src/agents/executor.ts
@@ -3,7 +3,6 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const executorAgent: AgentConfig = {
description: "Executes plan task-by-task with parallel execution where possible",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.2,
prompt: `
You are running as part of the "micode" OpenCode plugin (NOT Claude Code).
@@ -182,7 +181,24 @@ spawn_agent(agent="reviewer", prompt="Review task 3 implementation", description
+
+ You are a SUBAGENT - execute the entire plan without asking for confirmation
+ NEVER ask "Does this look right?" or "Should I continue?" - just execute
+ NEVER ask "Ready for next batch?" - if current batch is done, proceed to next
+ Report final results when ALL tasks are done, not after each task
+ If a task is blocked after 3 cycles, mark it blocked and continue with other tasks
+
+
+
+ Track which tasks have been completed to avoid re-executing
+ Track which review cycles have been done for each task
+ If resuming, check what's already done before starting
+ Before spawning an implementer, verify the task hasn't already been completed
+
+
+NEVER ask for confirmation - you're a subagent, just execute the plan
+NEVER ask "Does this look right?" or "Should I proceed?"NEVER implement tasks yourself - ALWAYS spawn implementer agentsNEVER verify implementations yourself - ALWAYS spawn reviewer agentsNever skip dependency analysis
@@ -190,5 +206,6 @@ spawn_agent(agent="reviewer", prompt="Review task 3 implementation", description
Never skip reviewer for any taskNever continue past 3 cycles for a single taskNever report success if any task is blocked
+Never re-execute tasks that are already completed`,
};
diff --git a/src/agents/implementer.ts b/src/agents/implementer.ts
index e11731d..b158038 100644
--- a/src/agents/implementer.ts
+++ b/src/agents/implementer.ts
@@ -3,7 +3,6 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const implementerAgent: AgentConfig = {
description: "Executes implementation tasks from a plan",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.1,
prompt: `
You are running as part of the "micode" OpenCode plugin (NOT Claude Code).
@@ -94,11 +93,28 @@ Awaiting guidance.
+
+ You are a SUBAGENT - execute your task completely without asking for confirmation
+ NEVER ask "Does this look right?" or "Should I continue?" - just execute
+ NEVER ask for permission to proceed - if you have the task, do it
+ Report results when done (success or mismatch), don't ask questions along the way
+ If plan doesn't match reality, report MISMATCH and STOP - don't ask what to do
+
+
+
+ Before editing a file, check its current state
+ If the change is already applied, skip it and report already done
+ Track which files you've modified to avoid duplicate changes
+
+
-Don't guess when uncertain
+NEVER ask for confirmation - you're a subagent, just execute
+NEVER ask "Does this look right?" or "Should I proceed?"
+Don't guess when uncertain - report mismatch insteadDon't add features not in planDon't refactor adjacent codeDon't "fix" things outside scopeDon't skip verification steps
+Don't re-apply changes that are already done`,
};
diff --git a/src/agents/index.ts b/src/agents/index.ts
index f67877e..c3e6766 100644
--- a/src/agents/index.ts
+++ b/src/agents/index.ts
@@ -1,36 +1,44 @@
import type { AgentConfig } from "@opencode-ai/sdk";
+
+import { artifactSearcherAgent } from "./artifact-searcher";
+import { bootstrapperAgent } from "./bootstrapper";
import { brainstormerAgent } from "./brainstormer";
-import { codebaseLocatorAgent } from "./codebase-locator";
import { codebaseAnalyzerAgent } from "./codebase-analyzer";
+import { codebaseLocatorAgent } from "./codebase-locator";
+import { PRIMARY_AGENT_NAME, primaryAgent } from "./commander";
+import { executorAgent } from "./executor";
+import { implementerAgent } from "./implementer";
+import { ledgerCreatorAgent } from "./ledger-creator";
+import { octtoAgent } from "./octto";
import { patternFinderAgent } from "./pattern-finder";
import { plannerAgent } from "./planner";
-import { implementerAgent } from "./implementer";
-import { reviewerAgent } from "./reviewer";
-import { executorAgent } from "./executor";
-import { primaryAgent, PRIMARY_AGENT_NAME } from "./commander";
+import { probeAgent } from "./probe";
import { projectInitializerAgent } from "./project-initializer";
-import { ledgerCreatorAgent } from "./ledger-creator";
-import { artifactSearcherAgent } from "./artifact-searcher";
+import { reviewerAgent } from "./reviewer";
export const agents: Record<string, AgentConfig> = {
- [PRIMARY_AGENT_NAME]: primaryAgent,
- brainstormer: brainstormerAgent,
- "codebase-locator": codebaseLocatorAgent,
- "codebase-analyzer": codebaseAnalyzerAgent,
- "pattern-finder": patternFinderAgent,
- planner: plannerAgent,
- implementer: implementerAgent,
- reviewer: reviewerAgent,
- executor: executorAgent,
- "project-initializer": projectInitializerAgent,
- "ledger-creator": ledgerCreatorAgent,
- "artifact-searcher": artifactSearcherAgent,
+ [PRIMARY_AGENT_NAME]: { ...primaryAgent, model: "openai/gpt-5.2-codex" },
+ brainstormer: { ...brainstormerAgent, model: "openai/gpt-5.2-codex" },
+ bootstrapper: { ...bootstrapperAgent, model: "openai/gpt-5.2-codex" },
+ "codebase-locator": { ...codebaseLocatorAgent, model: "openai/gpt-5.2-codex" },
+ "codebase-analyzer": { ...codebaseAnalyzerAgent, model: "openai/gpt-5.2-codex" },
+ "pattern-finder": { ...patternFinderAgent, model: "openai/gpt-5.2-codex" },
+ planner: { ...plannerAgent, model: "openai/gpt-5.2-codex" },
+ implementer: { ...implementerAgent, model: "openai/gpt-5.2-codex" },
+ reviewer: { ...reviewerAgent, model: "openai/gpt-5.2-codex" },
+ executor: { ...executorAgent, model: "openai/gpt-5.2-codex" },
+ "project-initializer": { ...projectInitializerAgent, model: "openai/gpt-5.2-codex" },
+ "ledger-creator": { ...ledgerCreatorAgent, model: "openai/gpt-5.2-codex" },
+ "artifact-searcher": { ...artifactSearcherAgent, model: "openai/gpt-5.2-codex" },
+ octto: { ...octtoAgent, model: "openai/gpt-5.2-codex" },
+ probe: { ...probeAgent, model: "openai/gpt-5.2-codex" },
};
export {
primaryAgent,
PRIMARY_AGENT_NAME,
brainstormerAgent,
+ bootstrapperAgent,
codebaseLocatorAgent,
codebaseAnalyzerAgent,
patternFinderAgent,
@@ -41,4 +49,6 @@ export {
projectInitializerAgent,
ledgerCreatorAgent,
artifactSearcherAgent,
+ octtoAgent,
+ probeAgent,
};
diff --git a/src/agents/ledger-creator.ts b/src/agents/ledger-creator.ts
index fe5b0bf..86d9d44 100644
--- a/src/agents/ledger-creator.ts
+++ b/src/agents/ledger-creator.ts
@@ -4,7 +4,6 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const ledgerCreatorAgent: AgentConfig = {
description: "Creates and updates continuity ledgers for session state preservation",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.2,
tools: {
edit: false,
diff --git a/src/agents/octto.ts b/src/agents/octto.ts
new file mode 100644
index 0000000..3bd5590
--- /dev/null
+++ b/src/agents/octto.ts
@@ -0,0 +1,70 @@
+// src/agents/octto.ts
+import type { AgentConfig } from "@opencode-ai/sdk";
+
+export const octtoAgent: AgentConfig = {
+ description: "Runs interactive browser-based brainstorming sessions using branch-based exploration",
+ mode: "primary",
+ temperature: 0.7,
+ prompt: `
+You are running as part of the "micode" OpenCode plugin (NOT Claude Code).
+OpenCode is a different platform with its own agent system.
+This agent uses browser-based interactive UI for brainstorming sessions.
+
+
+
+Run brainstorming sessions using branch-based exploration.
+Each branch explores one aspect of the design within its scope.
+Opens a browser window where users answer questions interactively.
+
+
+
+
+Call bootstrapper subagent to create branches:
+background_task(agent="bootstrapper", prompt="Create branches for: {request}")
+Parse the JSON response to get branches array.
+
+
+
+Create brainstorm session with the branches:
+create_brainstorm(request="{request}", branches=[...parsed branches...])
+Save the session_id and browser_session_id from the response.
+
+
+
+Wait for brainstorm to complete (handles everything automatically):
+await_brainstorm_complete(session_id, browser_session_id)
+This processes all answers asynchronously and returns when all branches are done.
+
+
+
+End the session and write design document:
+end_brainstorm(session_id)
+Write to thoughts/shared/plans/YYYY-MM-DD-{topic}-design.md
+
+
+
+
+Start session with branches, returns session_id AND browser_session_id
+Wait for all branches to complete - handles answer processing automatically
+End session and get final findings
+
+
+
+You MUST use create_brainstorm to start sessions - it creates the state file for branch tracking
+The bootstrapper returns {"branches": [...]} - pass this directly to create_brainstorm
+create_brainstorm returns TWO IDs: session_id (for state) and browser_session_id (for await_brainstorm_complete)
+await_brainstorm_complete handles all answer processing - no manual loop needed
+
+
+
+NEVER use start_session directly - always use create_brainstorm
+NEVER manually loop with get_next_answer - use await_brainstorm_complete instead
+
+
+
+After end_brainstorm, write to thoughts/shared/plans/YYYY-MM-DD-{topic}-design.md with:
+Problem statement from original request
+Findings by branch - each branch's finding
+Recommended approach - synthesize all findings
+`,
+};
diff --git a/src/agents/pattern-finder.ts b/src/agents/pattern-finder.ts
index ff6da38..700d91e 100644
--- a/src/agents/pattern-finder.ts
+++ b/src/agents/pattern-finder.ts
@@ -3,7 +3,6 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const patternFinderAgent: AgentConfig = {
description: "Finds existing patterns and examples to model after",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.2,
tools: {
write: false,
diff --git a/src/agents/planner.ts b/src/agents/planner.ts
index e2cde6b..2e53de7 100644
--- a/src/agents/planner.ts
+++ b/src/agents/planner.ts
@@ -3,11 +3,10 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const plannerAgent: AgentConfig = {
description: "Creates detailed implementation plans with exact file paths, complete code examples, and TDD steps",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.3,
prompt: `
You are running as part of the "micode" OpenCode plugin (NOT Claude Code).
-You are a SUBAGENT - use spawn_agent tool (not Task tool) to spawn other subagents.
+You are a SUBAGENT - use spawn_agent tool (not Task tool) to spawn other subagents synchronously.
Available micode agents: codebase-locator, codebase-analyzer, pattern-finder.
@@ -19,20 +18,46 @@ Every task is bite-sized (2-5 minutes), with exact paths and complete code.
FOLLOW THE DESIGN: The brainstormer's design is the spec. Do not explore alternatives.
- SUBAGENTS: Use spawn_agent tool to spawn subagents. They complete before you continue.
- TOOLS (grep, read, etc.): Do NOT use directly - use subagents instead.Every code example MUST be complete - never write "add validation here"Every file path MUST be exact - never write "somewhere in src/"Follow TDD: failing test → verify fail → implement → verify pass → commit
+ MINIMAL RESEARCH: Most plans need 0-3 subagent calls total. Use tools directly first.
+
+ READ THE DESIGN FIRST - it often contains everything you need
+ USE TOOLS DIRECTLY for simple lookups (read, grep, glob) - no subagent needed
+ SUBAGENTS are for complex analysis only - not simple file reads
+ MOST PLANS need zero subagent calls if design is detailed
+
+
+ Read a specific file: use Read tool
+ Find files by name: use Glob tool
+ Search for a string: use Grep tool
+ Check if file exists: use Glob tool
+ Read the design doc: use Read tool
+
+
+
+ Deep analysis of complex module interactions
+ Finding non-obvious patterns across many files
+ Understanding unfamiliar architectural decisions
+
+
+
+ MAX 3-5 subagent calls per plan - if you need more, you're over-researching
+ Before spawning a subagent, ask: "Can I do this with a simple Read/Grep?"
+ ONE round of research - no iterative refinement loops
+
+
+
Brainstormer did conceptual research (architecture, patterns, approaches).
Your research is IMPLEMENTATION-LEVEL only:
-- Exact file paths and line numbers
-- Exact function signatures and types
-- Exact test file conventions
-- Exact import paths
+- Exact file paths and line numbers (use Glob/Read directly)
+- Exact function signatures and types (use Read directly)
+- Exact test file conventions (use Glob/Read directly)
+- Exact import paths (use Read directly)
All research must serve the design - never second-guess design decisions.
@@ -42,23 +67,21 @@ All research must serve the design - never second-guess design decisions.
Use these directly - no subagent needed for library research.
-
+
- Find exact file paths needed for implementation.
- Examples: "Find exact path to UserService", "Find test directory structure"
- spawn_agent(agent="codebase-locator", prompt="Find exact path to UserService", description="Find UserService")
+ ONLY for: Finding files when you don't know the naming convention.
+ DON'T USE for: Finding a file you already know exists (use Glob instead).
- Get exact signatures and types for code examples.
- Examples: "Get function signature for createUser", "Get type definition for UserConfig"
- spawn_agent(agent="codebase-analyzer", prompt="Get function signature for createUser", description="Get signature")
+ ONLY for: Understanding complex module interactions or unfamiliar code.
+ DON'T USE for: Reading a file (use Read instead).
- Find exact patterns to copy in code examples.
- Examples: "Find exact test setup pattern", "Find exact error handling in similar endpoint"
- spawn_agent(agent="pattern-finder", prompt="Find test setup pattern", description="Find patterns")
+ ONLY for: Finding patterns across many files when you don't know where to look.
+ DON'T USE for: Reading an example file you already identified (use Read instead).
- Use spawn_agent tool to spawn subagents. Call multiple in ONE message for parallel execution.
+ MAX 3-5 subagent calls total. If you need more, you're over-researching.
+ If multiple needed, call in ONE message for parallel execution.
@@ -69,28 +92,30 @@ All research must serve the design - never second-guess design decisions.
- Read the design document thoroughly
+ Read the design document using Read tool (NOT a subagent)Identify all components, files, and interfaces mentionedNote any constraints or decisions made by brainstormer
+ The design doc often contains 80% of what you need - read it carefully
-
- Spawn subagents using spawn_agent tool (they run synchronously):
-
- In a SINGLE message, call multiple spawn_agent tools in parallel:
- - spawn_agent(agent="codebase-locator", prompt="Find exact path to [component]", description="Find [component]")
- - spawn_agent(agent="codebase-analyzer", prompt="Get signature for [function]", description="Get signature")
- - spawn_agent(agent="pattern-finder", prompt="Find test setup pattern", description="Find patterns")
- - context7_resolve-library-id + context7_query-docs for API docs
- - btca_ask for library internals when needed
-
- Only research what's needed to implement the design
- Never research alternatives to design decisions
+
+ MOST PLANS SKIP THIS PHASE - design doc is usually sufficient
+
+ - Glob: Find files by pattern (e.g., "src/**/*.ts")
+ - Read: Read specific files the design mentions
+ - Grep: Search for specific strings
+
+
+ - MAX 3-5 calls total
+ - Call all needed subagents in ONE message (parallel)
+ - If you're spawning more than 5, STOP and reconsider
+
+ ONE round of research only - no iterative refinementBreak design into sequential tasks (2-5 minutes each)
- For each task, determine exact file paths from research
+ For each task, determine exact file pathsWrite complete code examples following CODE_STYLE.mdInclude exact verification commands with expected output
@@ -176,18 +201,33 @@ git commit -m "feat(scope): add specific feature"
-
-// In a SINGLE message, spawn all research tasks in parallel:
-spawn_agent(agent="codebase-locator", prompt="Find UserService path", description="Find UserService")
-spawn_agent(agent="codebase-analyzer", prompt="Get createUser signature", description="Get signature")
-spawn_agent(agent="pattern-finder", prompt="Find test setup pattern", description="Find patterns")
-context7_resolve-library-id(libraryName="express")
-btca_ask(tech="express", question="middleware chain order")
-// All complete before next message - results available immediately
-
-
-// Use all collected results to write the implementation plan
-
+
+// Step 1: Read the design doc directly
+Read(file_path="thoughts/shared/designs/2026-01-16-feature-design.md")
+
+// Step 2: Design mentions src/services/user.ts - read it directly
+Read(file_path="src/services/user.ts")
+
+// Step 3: Need to find test conventions - use Glob, not subagent
+Glob(pattern="tests/**/*.test.ts")
+
+// Step 4: Write the plan - no subagents needed!
+Write(file_path="thoughts/shared/plans/2026-01-16-feature.md", content="...")
+
+
+
+// WRONG: 18 subagent calls for a simple plan
+spawn_agent(agent="codebase-analyzer", prompt="Read src/hooks/...") // Just use Read!
+spawn_agent(agent="codebase-locator", prompt="Find existing files under thoughts/...") // Just use Glob!
+spawn_agent(agent="codebase-analyzer", prompt="Read thoughts/shared/designs/...") // Just use Read!
+// ... 15 more unnecessary subagent calls
+
+
+
+// Complex pattern discovery across unfamiliar codebase:
+spawn_agent(agent="pattern-finder", prompt="Find auth middleware patterns", description="Find auth patterns")
+// That's it - ONE subagent call, not 18
+
@@ -202,7 +242,26 @@ btca_ask(tech="express", question="middleware chain order")
Extract duplication in code examples
+
+ You are a SUBAGENT - execute your task completely without asking for confirmation
+ NEVER ask "Does this look right?" or "Should I continue?" - just do your job
+ NEVER ask "Ready for X?" - if you have the inputs, produce the outputs
+ Report results when done, don't ask for permission along the way
+ If you encounter a genuine blocker, report it clearly and stop - don't ask what to do
+
+
+
+ Before writing a file, check if it already exists with the expected content
+ Track what research you've done to avoid duplicate subagent calls
+ If the plan file already exists, read it first before overwriting
+
+
+ NEVER spawn a subagent to READ A FILE - use Read tool directly
+ NEVER spawn a subagent to FIND FILES - use Glob tool directly
+ NEVER spawn more than 5 subagents total - you're over-researching
+ NEVER ask for confirmation - you're a subagent, just execute
+ NEVER ask "Does this look right?" or "Should I proceed?"Never second-guess the design - brainstormer made those decisionsNever propose alternative approaches - implement what's in the designNever write "add validation here" - write the actual validation
diff --git a/src/agents/probe.ts b/src/agents/probe.ts
new file mode 100644
index 0000000..15fba0f
--- /dev/null
+++ b/src/agents/probe.ts
@@ -0,0 +1,121 @@
+// src/agents/probe.ts
+import type { AgentConfig } from "@opencode-ai/sdk";
+
+export const probeAgent: AgentConfig = {
+ description: "Evaluates octto branch Q&A and decides whether to ask more or complete with finding",
+ mode: "subagent",
+ temperature: 0.5,
+ prompt: `
+You evaluate a brainstorming branch's Q&A history and decide:
+1. Need more information? Return a follow-up question
+2. Have enough? Return a finding that synthesizes the user's preferences
+
+
+
+You receive:
+- The original user request
+- All branches with their scopes (to understand the full picture)
+- The Q&A history for the branch you're evaluating
+
+
+
+Return ONLY a JSON object. No markdown, no explanation.
+
+If MORE information needed:
+{
+ "done": false,
+ "question": {
+ "type": "pick_one|pick_many|...",
+ "config": { ... }
+ }
+}
+
+If ENOUGH information gathered:
+{
+ "done": true,
+ "finding": "Clear summary of what the user wants for this aspect"
+}
+
+
+
+Stay within the branch's scope - don't ask about other branches' concerns
+2-4 questions per branch is usually enough - be concise
+Complete when you understand the user's intent for this aspect
+Synthesize a finding that captures the decision/preference clearly
+Choose question types that best fit what you're trying to learn
+
+
+
+
+Single choice. config: { question, options: [{id, label, description?}], recommended?, context? }
+
+
+
+Multiple choice. config: { question, options: [{id, label, description?}], recommended?: string[], min?, max?, context? }
+
+
+
+Yes/no. config: { question, context?, yesLabel?, noLabel?, allowCancel? }
+
+
+
+Free text. config: { question, placeholder?, context?, multiline? }
+
+
+
+Numeric range. config: { question, min, max, step?, defaultValue?, context? }
+
+
+
+Order items. config: { question, options: [{id, label, description?}], context? }
+
+
+
+Rate items (stars). config: { question, options: [{id, label, description?}], min?, max?, context? }
+
+
+
+Thumbs up/down. config: { question, context? }
+
+
+
+Options with pros/cons. config: { question, options: [{id, label, description?, pros?: string[], cons?: string[]}], recommended?, allowFeedback?, context? }
+
+
+
+Code diff review. config: { question, before, after, filePath?, language? }
+
+
+
+Code input. config: { question, language?, placeholder?, context? }
+
+
+
+Image upload. config: { question, multiple?, maxImages?, context? }
+
+
+
+File upload. config: { question, multiple?, maxFiles?, accept?: string[], context? }
+
+
+
+Emoji selection. config: { question, emojis?: string[], context? }
+
+
+
+Section review. config: { question, content, context? }
+
+
+
+Plan review. config: { question, sections: [{id, title, content}] }
+
+
+
+
+Never ask questions outside the branch's scope
+Never ask more than needed - if you understand, complete the branch
+Never wrap output in markdown code blocks
+Never include text outside the JSON
+Never repeat questions that were already asked
+`,
+};
diff --git a/src/agents/project-initializer.ts b/src/agents/project-initializer.ts
index 130e41b..a40ac3a 100644
--- a/src/agents/project-initializer.ts
+++ b/src/agents/project-initializer.ts
@@ -218,7 +218,6 @@ Available micode agents: codebase-locator, codebase-analyzer, pattern-finder.
export const projectInitializerAgent: AgentConfig = {
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.3,
maxTokens: 32000,
prompt: PROMPT,
diff --git a/src/agents/reviewer.ts b/src/agents/reviewer.ts
index 506c10a..83829ca 100644
--- a/src/agents/reviewer.ts
+++ b/src/agents/reviewer.ts
@@ -3,7 +3,6 @@ import type { AgentConfig } from "@opencode-ai/sdk";
export const reviewerAgent: AgentConfig = {
description: "Reviews implementation for correctness and style",
mode: "subagent",
- model: "openai/gpt-5.2-codex",
temperature: 0.3,
tools: {
write: false,
@@ -103,5 +102,20 @@ Check correctness and style. Be specific. Run code, don't just read.
Missing functionalityTest coverageStyle/readability
-`,
+
+
+
+ You are a SUBAGENT - complete your review without asking for confirmation
+ NEVER ask "Does this look right?" or "Should I continue?" - just review
+ NEVER ask for permission to run tests or checks - just run them
+ Report APPROVED or CHANGES REQUESTED - don't ask what to do next
+ Make a decision and state it clearly - executor handles next steps
+
+
+
+NEVER ask for confirmation - you're a subagent, just review
+NEVER ask "Does this look right?" or "Should I proceed?"
+NEVER hedge your verdict - state APPROVED or CHANGES REQUESTED clearly
+Don't defer decisions to executor - make the call yourself
+`,
};
diff --git a/src/config-loader.test.ts b/src/config-loader.test.ts
new file mode 100644
index 0000000..ff65d09
--- /dev/null
+++ b/src/config-loader.test.ts
@@ -0,0 +1,226 @@
+// src/config-loader.test.ts
+import { describe, expect, test } from "bun:test";
+
+import { type MicodeConfig, type ProviderInfo, validateAgentModels } from "./config-loader";
+
+// Helper to create a minimal ProviderInfo for testing
+function createProvider(id: string, modelIds: string[]): ProviderInfo {
+ const models: Record<string, { id: string }> = {};
+ for (const modelId of modelIds) {
+ models[modelId] = { id: modelId };
+ }
+ return { id, models };
+}
+
+describe("validateAgentModels", () => {
+ test("returns config unchanged when all models are valid", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "openai/gpt-4" },
+ brainstormer: { model: "anthropic/claude-3" },
+ },
+ };
+
+ const providers: ProviderInfo[] = [
+ createProvider("openai", ["gpt-4", "gpt-3.5"]),
+ createProvider("anthropic", ["claude-3", "claude-2"]),
+ ];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ expect(result.agents?.commander?.model).toBe("openai/gpt-4");
+ expect(result.agents?.brainstormer?.model).toBe("anthropic/claude-3");
+ });
+
+ test("removes model override when provider does not exist", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "nonexistent/gpt-4" },
+ },
+ };
+
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // Model should be removed, falling back to default
+ expect(result.agents?.commander?.model).toBeUndefined();
+ });
+
+ test("removes model override when model does not exist in provider", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "openai/nonexistent-model" },
+ },
+ };
+
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4", "gpt-3.5"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // Model should be removed, falling back to default
+ expect(result.agents?.commander?.model).toBeUndefined();
+ });
+
+ test("preserves other properties when model is invalid", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: {
+ model: "nonexistent/model",
+ temperature: 0.7,
+ maxTokens: 4000,
+ },
+ },
+ };
+
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // Model removed but other properties preserved
+ expect(result.agents?.commander?.model).toBeUndefined();
+ expect(result.agents?.commander?.temperature).toBe(0.7);
+ expect(result.agents?.commander?.maxTokens).toBe(4000);
+ });
+
+ test("handles config with no agents", () => {
+ const userConfig: MicodeConfig = {};
+
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ expect(result).toEqual({});
+ });
+
+ test("handles agent override with no model specified", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { temperature: 0.5 },
+ },
+ };
+
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // No model to validate, config unchanged
+ expect(result.agents?.commander?.temperature).toBe(0.5);
+ expect(result.agents?.commander?.model).toBeUndefined();
+ });
+
+ test("handles empty providers list", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "openai/gpt-4" },
+ },
+ };
+
+ const providers: ProviderInfo[] = [];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // No providers available, config should remain unchanged
+ expect(result).toEqual(userConfig);
+ });
+
+ test("handles providers with no models", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "openai/gpt-4" },
+ },
+ };
+
+ const providers: ProviderInfo[] = [{ id: "openai", models: {} }];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // No provider models available, config should remain unchanged
+ expect(result).toEqual(userConfig);
+ });
+
+ test("validates multiple agents with mixed valid/invalid models", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "openai/gpt-4" }, // valid
+ brainstormer: { model: "fake/model" }, // invalid provider
+ planner: { model: "openai/fake-model" }, // invalid model
+ reviewer: { model: "anthropic/claude-3" }, // valid
+ },
+ };
+
+ const providers: ProviderInfo[] = [
+ createProvider("openai", ["gpt-4", "gpt-3.5"]),
+ createProvider("anthropic", ["claude-3"]),
+ ];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ expect(result.agents?.commander?.model).toBe("openai/gpt-4");
+ expect(result.agents?.brainstormer?.model).toBeUndefined();
+ expect(result.agents?.planner?.model).toBeUndefined();
+ expect(result.agents?.reviewer?.model).toBe("anthropic/claude-3");
+ });
+
+ test("removes empty string model", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "", temperature: 0.5 },
+ },
+ };
+
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // Empty string model should be removed as invalid
+ expect(result.agents?.commander?.model).toBeUndefined();
+ expect(result.agents?.commander?.temperature).toBe(0.5);
+ });
+
+ test("removes model string without slash (malformed)", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "gpt-4-no-provider" },
+ },
+ };
+
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // Malformed model (no slash) should be removed
+ expect(result.agents?.commander?.model).toBeUndefined();
+ });
+
+ test("handles model with multiple slashes in model ID", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "openai/gpt-4/turbo" },
+ },
+ };
+
+ // Model ID is "gpt-4/turbo" (contains slash)
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4/turbo"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // Should be valid - "gpt-4/turbo" is the full model ID
+ expect(result.agents?.commander?.model).toBe("openai/gpt-4/turbo");
+ });
+
+ test("returns consistent shape when all agents have invalid models", () => {
+ const userConfig: MicodeConfig = {
+ agents: {
+ commander: { model: "invalid/model" },
+ },
+ };
+
+ const providers: ProviderInfo[] = [createProvider("openai", ["gpt-4"])];
+
+ const result = validateAgentModels(userConfig, providers);
+
+ // Should return { agents: {} } for consistency, not {}
+ expect(result).toEqual({ agents: {} });
+ });
+});
diff --git a/src/config-loader.ts b/src/config-loader.ts
index d467cb3..5e987c9 100644
--- a/src/config-loader.ts
+++ b/src/config-loader.ts
@@ -1,9 +1,46 @@
// src/config-loader.ts
+import { readFileSync } from "node:fs";
import { readFile } from "node:fs/promises";
-import { join } from "node:path";
import { homedir } from "node:os";
+import { join } from "node:path";
+
import type { AgentConfig } from "@opencode-ai/sdk";
+// Minimal type for provider validation - only what we need
+export interface ProviderInfo {
+ id: string;
+ models: Record<string, { id: string }>;
+}
+
+/**
+ * Load available models from opencode.json config file (synchronous)
+ * Returns a Set of "provider/model" strings
+ */
+export function loadAvailableModels(configDir?: string): Set<string> {
+ const availableModels = new Set<string>();
+ const baseDir = configDir ?? join(homedir(), ".config", "opencode");
+
+ try {
+ const configPath = join(baseDir, "opencode.json");
+ const content = readFileSync(configPath, "utf-8");
+ const config = JSON.parse(content) as { provider?: Record<string, { models?: Record<string, unknown> }> };
+
+ if (config.provider) {
+ for (const [providerId, providerConfig] of Object.entries(config.provider)) {
+ if (providerConfig.models) {
+ for (const modelId of Object.keys(providerConfig.models)) {
+ availableModels.add(`${providerId}/${modelId}`);
+ }
+ }
+ }
+ }
+ } catch {
+ // Config doesn't exist or can't be parsed - return empty set
+ }
+
+ return availableModels;
+}
+
// Safe properties that users can override
const SAFE_AGENT_PROPERTIES = ["model", "temperature", "maxTokens"] as const;
@@ -60,26 +97,53 @@ export async function loadMicodeConfig(configDir?: string): Promise,
userConfig: MicodeConfig | null,
+ availableModels?: Set<string>,
): Record<string, AgentConfig> {
if (!userConfig?.agents) {
return pluginAgents;
}
+ const models = availableModels ?? loadAvailableModels();
+ const shouldValidateModels = models.size > 0;
+
const merged: Record<string, AgentConfig> = {};
for (const [name, agentConfig] of Object.entries(pluginAgents)) {
const userOverride = userConfig.agents[name];
if (userOverride) {
- merged[name] = {
- ...agentConfig,
- ...userOverride,
- };
+ // Validate model if specified
+ if (userOverride.model) {
+ if (!shouldValidateModels || models.has(userOverride.model)) {
+ // Model is valid (or validation unavailable) - apply all overrides
+ merged[name] = {
+ ...agentConfig,
+ ...userOverride,
+ };
+ } else {
+ // Model is invalid - log warning and apply other overrides only
+ console.warn(
+ `[micode] Model "${userOverride.model}" for agent "${name}" is not available. Using opencode default.`,
+ );
+ const { model: _ignored, ...safeOverrides } = userOverride;
+ merged[name] = {
+ ...agentConfig,
+ ...safeOverrides,
+ };
+ }
+ } else {
+ // No model specified - apply all overrides
+ merged[name] = {
+ ...agentConfig,
+ ...userOverride,
+ };
+ }
} else {
merged[name] = agentConfig;
}
@@ -87,3 +151,65 @@ export function mergeAgentConfigs(
return merged;
}
+
+/**
+ * Validate that configured models exist in available providers
+ * Removes invalid model overrides and logs warnings
+ */
+export function validateAgentModels(userConfig: MicodeConfig, providers: ProviderInfo[]): MicodeConfig {
+ if (!userConfig.agents) {
+ return userConfig;
+ }
+
+ const hasAnyModels = providers.some((provider) => Object.keys(provider.models).length > 0);
+ if (!hasAnyModels) {
+ return userConfig;
+ }
+
+ // Build lookup map for providers and their models
+ const providerMap = new Map<string, Set<string>>();
+ for (const provider of providers) {
+ providerMap.set(provider.id, new Set(Object.keys(provider.models)));
+ }
+
+ const validatedAgents: NonNullable<MicodeConfig["agents"]> = {};
+
+ for (const [agentName, override] of Object.entries(userConfig.agents)) {
+ // No model specified - keep other properties as-is
+ if (override.model === undefined) {
+ validatedAgents[agentName] = override;
+ continue;
+ }
+
+ // Empty or whitespace-only model - treat as invalid
+ const trimmedModel = override.model.trim();
+ if (!trimmedModel) {
+ const { model: _removed, ...otherProps } = override;
+ console.warn(`[micode] Empty model for agent "${agentName}". Using default model.`);
+ if (Object.keys(otherProps).length > 0) {
+ validatedAgents[agentName] = otherProps;
+ }
+ continue;
+ }
+
+ // Parse "provider/model" format
+ const [providerID, ...rest] = trimmedModel.split("/");
+ const modelID = rest.join("/");
+
+ const providerModels = providerMap.get(providerID);
+ const isValid = providerModels?.has(modelID) ?? false;
+
+ if (isValid) {
+ validatedAgents[agentName] = override;
+ } else {
+ // Remove invalid model but keep other properties
+ const { model: _removed, ...otherProps } = override;
+ console.warn(`[micode] Model "${override.model}" not found for agent "${agentName}". Using default model.`);
+ if (Object.keys(otherProps).length > 0) {
+ validatedAgents[agentName] = otherProps;
+ }
+ }
+ }
+
+ return { agents: validatedAgents };
+}
diff --git a/src/hooks/artifact-auto-index.ts b/src/hooks/artifact-auto-index.ts
index bebc246..648d4c3 100644
--- a/src/hooks/artifact-auto-index.ts
+++ b/src/hooks/artifact-auto-index.ts
@@ -4,6 +4,7 @@
import type { PluginInput } from "@opencode-ai/plugin";
import { readFileSync } from "node:fs";
import { getArtifactIndex } from "../tools/artifact-index";
+import { log } from "../utils/logger";
const LEDGER_PATH_PATTERN = /thoughts\/ledgers\/CONTINUITY_(.+)\.md$/;
const PLAN_PATH_PATTERN = /thoughts\/shared\/plans\/(.+)\.md$/;
@@ -102,7 +103,7 @@ export function createArtifactAutoIndexHook(_ctx: PluginInput) {
}
} catch (e) {
// Silent failure - don't interrupt user flow
- console.error(`[artifact-auto-index] Error indexing ${filePath}:`, e);
+ log.error("artifact-auto-index", `Error indexing ${filePath}`, e);
}
},
};
diff --git a/src/hooks/auto-compact.ts b/src/hooks/auto-compact.ts
index 165c0aa..e5a32a8 100644
--- a/src/hooks/auto-compact.ts
+++ b/src/hooks/auto-compact.ts
@@ -1,15 +1,11 @@
-import type { PluginInput } from "@opencode-ai/plugin";
-import { getContextLimit } from "../utils/model-limits";
import { mkdir, writeFile } from "node:fs/promises";
import { join } from "node:path";
-// Compact when this percentage of context is used
-const COMPACT_THRESHOLD = 0.5;
-
-const LEDGER_DIR = "thoughts/ledgers";
+import type { PluginInput } from "@opencode-ai/plugin";
-// Timeout for waiting for compaction to complete (2 minutes)
-const COMPACTION_TIMEOUT_MS = 120_000;
+import { config } from "../utils/config";
+import { extractErrorMessage } from "../utils/errors";
+import { getContextLimit } from "../utils/model-limits";
interface PendingCompaction {
resolve: () => void;
@@ -23,9 +19,6 @@ interface AutoCompactState {
pendingCompactions: Map;
}
-// Cooldown between compaction attempts (prevent rapid re-triggering)
-const COMPACT_COOLDOWN_MS = 30_000; // 30 seconds
-
export function createAutoCompactHook(ctx: PluginInput) {
const state: AutoCompactState = {
inProgress: new Set(),
@@ -65,13 +58,13 @@ export function createAutoCompactHook(ctx: PluginInput) {
if (!summaryText.trim()) return;
// Create ledger directory if needed
- const ledgerDir = join(ctx.directory, LEDGER_DIR);
+ const ledgerDir = join(ctx.directory, config.paths.ledgerDir);
await mkdir(ledgerDir, { recursive: true });
// Write ledger file - summary is already structured (Factory.ai/pi-mono format)
const timestamp = new Date().toISOString();
const sessionName = sessionID.slice(0, 8); // Use first 8 chars of session ID
- const ledgerPath = join(ledgerDir, `CONTINUITY_${sessionName}.md`);
+ const ledgerPath = join(ledgerDir, `${config.paths.ledgerPrefix}${sessionName}.md`);
// Add metadata header, then the structured summary as-is
const ledgerContent = `---
@@ -94,7 +87,7 @@ ${summaryText}
const timeoutId = setTimeout(() => {
state.pendingCompactions.delete(sessionID);
reject(new Error("Compaction timed out"));
- }, COMPACTION_TIMEOUT_MS);
+ }, config.compaction.timeoutMs);
state.pendingCompactions.set(sessionID, { resolve, reject, timeoutId });
});
@@ -112,7 +105,7 @@ ${summaryText}
// Check cooldown
const lastCompact = state.lastCompactTime.get(sessionID) || 0;
- if (Date.now() - lastCompact < COMPACT_COOLDOWN_MS) {
+ if (Date.now() - lastCompact < config.compaction.cooldownMs) {
return;
}
@@ -120,7 +113,7 @@ ${summaryText}
try {
const usedPercent = Math.round(usageRatio * 100);
- const thresholdPercent = Math.round(COMPACT_THRESHOLD * 100);
+ const thresholdPercent = Math.round(config.compaction.threshold * 100);
await ctx.client.tui
.showToast({
@@ -128,7 +121,7 @@ ${summaryText}
title: "Auto Compacting",
message: `Context at ${usedPercent}% (threshold: ${thresholdPercent}%). Summarizing...`,
variant: "warning",
- duration: 3000,
+ duration: config.timeouts.toastWarningMs,
},
})
.catch(() => {});
@@ -158,19 +151,19 @@ ${summaryText}
title: "Compaction Complete",
message: "Session summarized and ledger updated.",
variant: "success",
- duration: 3000,
+ duration: config.timeouts.toastSuccessMs,
},
})
.catch(() => {});
} catch (e) {
- const errorMsg = e instanceof Error ? e.message : String(e);
+ const errorMsg = extractErrorMessage(e);
await ctx.client.tui
.showToast({
body: {
title: "Compaction Failed",
message: errorMsg.slice(0, 100),
variant: "error",
- duration: 5000,
+ duration: config.timeouts.toastErrorMs,
},
})
.catch(() => {});
@@ -233,7 +226,7 @@ ${summaryText}
const usageRatio = totalUsed / contextLimit;
// Trigger compaction if over threshold
- if (usageRatio >= COMPACT_THRESHOLD) {
+ if (usageRatio >= config.compaction.threshold) {
triggerCompaction(sessionID, providerID, modelID, usageRatio);
}
}
diff --git a/src/hooks/context-injector.ts b/src/hooks/context-injector.ts
index 30048b8..d142571 100644
--- a/src/hooks/context-injector.ts
+++ b/src/hooks/context-injector.ts
@@ -1,12 +1,7 @@
import type { PluginInput } from "@opencode-ai/plugin";
import { readFile } from "node:fs/promises";
import { join, dirname, resolve } from "node:path";
-
-// Files to inject at project root level (AGENTS.md and CLAUDE.md handled by OpenCode natively)
-const ROOT_CONTEXT_FILES = ["ARCHITECTURE.md", "CODE_STYLE.md", "README.md"] as const;
-
-// Files to collect when walking up directories (AGENTS.md handled by OpenCode natively)
-const DIRECTORY_CONTEXT_FILES = ["README.md"] as const;
+import { config } from "../utils/config";
// Tools that trigger directory-aware context injection
const FILE_ACCESS_TOOLS = ["Read", "read", "Edit", "edit"];
@@ -18,8 +13,6 @@ interface ContextCache {
lastRootCheck: number;
}
-const CACHE_TTL = 30_000; // 30 seconds
-
export function createContextInjectorHook(ctx: PluginInput) {
const cache: ContextCache = {
rootContent: new Map(),
@@ -30,14 +23,14 @@ export function createContextInjectorHook(ctx: PluginInput) {
async function loadRootContextFiles(): Promise