From 30f3e111da70435ff230ff5e6c248cb9cd476109 Mon Sep 17 00:00:00 2001 From: AnExiledDev Date: Thu, 5 Mar 2026 15:43:16 +0000 Subject: [PATCH 1/3] Add plugin, config, and review commands to CLI - plugin list/show/enable/disable/hooks/agents/skills subcommands - config show/apply subcommands with settings writer - review command with headless Claude runner and prompt templates - Plugin/config/review schemas, loaders, and output formatters - Platform detection utility - Tests for plugin loader, plugin list, review output, review runner, settings writer, and platform detection --- cli/CHANGELOG.md | 11 + cli/prompts/review/correctness.system.md | 86 +++++ cli/prompts/review/correctness.user.md | 18 ++ cli/prompts/review/quality-resume.user.md | 15 + cli/prompts/review/quality.system.md | 106 ++++++ cli/prompts/review/quality.user.md | 20 ++ cli/prompts/review/security-resume.user.md | 15 + cli/prompts/review/security.system.md | 117 +++++++ cli/prompts/review/security.user.md | 18 ++ cli/src/commands/config/apply.ts | 147 +++++++++ cli/src/commands/config/show.ts | 60 ++++ cli/src/commands/plugin/agents.ts | 52 +++ cli/src/commands/plugin/disable.ts | 48 +++ cli/src/commands/plugin/enable.ts | 48 +++ cli/src/commands/plugin/hooks.ts | 52 +++ cli/src/commands/plugin/list.ts | 53 +++ cli/src/commands/plugin/show.ts | 51 +++ cli/src/commands/plugin/skills.ts | 52 +++ cli/src/commands/review/review.ts | 101 ++++++ cli/src/loaders/config-loader.ts | 37 +++ cli/src/loaders/plugin-loader.ts | 267 ++++++++++++++++ cli/src/loaders/settings-writer.ts | 36 +++ cli/src/output/config-show.ts | 83 +++++ cli/src/output/plugin-components.ts | 204 ++++++++++++ cli/src/output/plugin-list.ts | 60 ++++ cli/src/output/plugin-show.ts | 72 +++++ cli/src/output/review.ts | 193 +++++++++++ cli/src/prompts/review.ts | 71 +++++ cli/src/runners/headless.ts | 146 +++++++++ cli/src/runners/review-runner.ts | 355 +++++++++++++++++++++ cli/src/schemas/config.ts | 18 ++ cli/src/schemas/plugin.ts | 71 
+++++ cli/src/schemas/review.ts | 67 ++++ cli/src/utils/platform.ts | 31 ++ cli/tests/platform.test.ts | 61 ++++ cli/tests/plugin-list.test.ts | 233 ++++++++++++++ cli/tests/plugin-loader.test.ts | 101 ++++++ cli/tests/review-output.test.ts | 265 +++++++++++++++ cli/tests/review-runner.test.ts | 236 ++++++++++++++ cli/tests/settings-writer.test.ts | 127 ++++++++ 40 files changed, 3804 insertions(+) create mode 100644 cli/CHANGELOG.md create mode 100644 cli/prompts/review/correctness.system.md create mode 100644 cli/prompts/review/correctness.user.md create mode 100644 cli/prompts/review/quality-resume.user.md create mode 100644 cli/prompts/review/quality.system.md create mode 100644 cli/prompts/review/quality.user.md create mode 100644 cli/prompts/review/security-resume.user.md create mode 100644 cli/prompts/review/security.system.md create mode 100644 cli/prompts/review/security.user.md create mode 100644 cli/src/commands/config/apply.ts create mode 100644 cli/src/commands/config/show.ts create mode 100644 cli/src/commands/plugin/agents.ts create mode 100644 cli/src/commands/plugin/disable.ts create mode 100644 cli/src/commands/plugin/enable.ts create mode 100644 cli/src/commands/plugin/hooks.ts create mode 100644 cli/src/commands/plugin/list.ts create mode 100644 cli/src/commands/plugin/show.ts create mode 100644 cli/src/commands/plugin/skills.ts create mode 100644 cli/src/commands/review/review.ts create mode 100644 cli/src/loaders/config-loader.ts create mode 100644 cli/src/loaders/plugin-loader.ts create mode 100644 cli/src/loaders/settings-writer.ts create mode 100644 cli/src/output/config-show.ts create mode 100644 cli/src/output/plugin-components.ts create mode 100644 cli/src/output/plugin-list.ts create mode 100644 cli/src/output/plugin-show.ts create mode 100644 cli/src/output/review.ts create mode 100644 cli/src/prompts/review.ts create mode 100644 cli/src/runners/headless.ts create mode 100644 cli/src/runners/review-runner.ts create mode 100644 
cli/src/schemas/config.ts create mode 100644 cli/src/schemas/plugin.ts create mode 100644 cli/src/schemas/review.ts create mode 100644 cli/src/utils/platform.ts create mode 100644 cli/tests/platform.test.ts create mode 100644 cli/tests/plugin-list.test.ts create mode 100644 cli/tests/plugin-loader.test.ts create mode 100644 cli/tests/review-output.test.ts create mode 100644 cli/tests/review-runner.test.ts create mode 100644 cli/tests/settings-writer.test.ts diff --git a/cli/CHANGELOG.md b/cli/CHANGELOG.md new file mode 100644 index 0000000..5d5f97b --- /dev/null +++ b/cli/CHANGELOG.md @@ -0,0 +1,11 @@ +# CodeForge CLI Changelog + +## v0.1.0 — 2026-03-05 + +Initial release. + +- Session search, list, and show commands +- Plan search command +- Plugin management (list, show, enable, disable, hooks, agents, skills) +- Config apply and show commands +- AI-powered code review with 3-pass analysis (correctness, security, quality) diff --git a/cli/prompts/review/correctness.system.md b/cli/prompts/review/correctness.system.md new file mode 100644 index 0000000..ee26da2 --- /dev/null +++ b/cli/prompts/review/correctness.system.md @@ -0,0 +1,86 @@ +You are a code reviewer focused exclusively on correctness — bugs, logic errors, and behavioral defects that cause wrong results or runtime failures. + +You DO NOT review: style, naming conventions, performance, code quality, or security vulnerabilities. Those are handled by separate specialized review passes. 
+ +## Issue Taxonomy + +### Control Flow Errors + +- Off-by-one in loops (fence-post errors) — CWE-193 +- Wrong boolean logic (De Morgan violations, inverted conditions) +- Unreachable code or dead branches after early return +- Missing break in switch/case (fall-through bugs) +- Infinite loops from wrong termination conditions +- Incorrect short-circuit evaluation order + +### Null/Undefined Safety + +- Property access on potentially null or undefined values — CWE-476 +- Missing optional chaining or null guards +- Uninitialized variables used before assignment +- Destructuring from nullable sources without defaults +- Accessing .length or iterating over potentially undefined collections + +### Error Handling Defects + +- Uncaught exceptions from JSON.parse, network calls, file I/O, or regex +- Empty catch blocks that silently swallow errors +- Error objects discarded (catch without using or rethrowing the error) +- Missing finally blocks for resource cleanup (streams, handles, connections) +- Async errors: unhandled promise rejections, missing await on try/catch +- Incorrect error propagation (throwing strings instead of Error objects) + +### Type and Data Errors + +- Implicit type coercion bugs (== vs ===, string + number concatenation) +- Array index out of bounds on fixed-size or empty arrays — CWE-129 +- Integer overflow/underflow in arithmetic — CWE-190 +- Incorrect API usage (wrong argument order, missing required params, wrong return type handling) +- String/number confusion in comparisons or map keys +- Incorrect regular expression patterns (catastrophic backtracking, wrong escaping) + +### Concurrency and Timing + +- Race conditions in async code (TOCTOU: check-then-act) — CWE-367 +- Missing await on async functions (using the Promise instead of the resolved value) +- Shared mutable state modified from concurrent async operations +- Event ordering assumptions that may not hold (setup before listener, response before request) +- Promise.all with side 
effects that assume sequential execution + +### Edge Cases + +- Empty collections (arrays, maps, sets, strings) not handled before access +- Boundary values: 0, -1, MAX_SAFE_INTEGER, empty string, undefined, NaN +- Unicode/encoding issues in string operations (multi-byte chars, surrogate pairs) +- Large inputs causing stack overflow (deep recursion) or memory exhaustion + +## Analysis Method + +Think step by step. For each changed file, mentally execute the code: + +1. **Identify inputs.** What data enters this function? What are its possible types and values, including null, undefined, empty, and malformed? +2. **Trace control flow.** At each branch point, ask: what happens when the condition is false? What happens when both branches are taken across consecutive calls? +3. **Check data access safety.** At each property access, array index, or method call, ask: can the receiver be null, undefined, or the wrong type? +4. **Verify loop correctness.** For each loop: is initialization correct? Does termination trigger at the right time? Does the increment/decrement step cover all cases? Is the loop body idempotent when it needs to be? +5. **Audit async paths.** For each async call: is there an await? Is the error handled? Could concurrent calls interleave unsafely? +6. **Self-check.** Review your findings. Remove any that lack concrete evidence from the actual code. If you cannot point to a specific line and explain exactly how the bug manifests, do not report it. + +## Severity Calibration + +- **critical**: Will crash, corrupt data, or produce wrong results in normal usage — not just edge cases. High confidence required. +- **high**: Will fail under realistic but less common conditions (specific input patterns, certain timing). +- **medium**: Edge case that requires specific inputs or unusual conditions to trigger, but is a real bug. +- **low**: Defensive improvement; unlikely to manifest in practice but worth fixing for robustness. 
+- **info**: Observation or suggestion, not a concrete bug. + +Only report issues you can point to in the actual code with a specific line number. Do not invent hypothetical scenarios unsupported by the diff. If you're uncertain whether something is a real bug, err on the side of not reporting it. + +## Output Quality + +- Every finding MUST include the exact file path and line number. +- Every finding MUST include a concrete, actionable fix suggestion. +- Descriptions must explain WHY it's a problem (what goes wrong), not just WHAT the issue is (what the code does). +- **category**: Use the taxonomy headers from this prompt (e.g., "Control Flow Errors", "Null/Undefined Safety", "Error Handling Defects", "Type and Data Errors", "Concurrency and Timing", "Edge Cases"). +- **title**: Concise and specific, under 80 characters. "Missing null check on user.profile" — not "Potential issue with data handling." +- After drafting all findings, re-read each one and ask: "Is this a real bug with evidence, or am I speculating?" Remove speculative findings. +- If you find no issues, that is a valid and expected outcome. Do not manufacture findings to appear thorough. diff --git a/cli/prompts/review/correctness.user.md b/cli/prompts/review/correctness.user.md new file mode 100644 index 0000000..cfe9262 --- /dev/null +++ b/cli/prompts/review/correctness.user.md @@ -0,0 +1,18 @@ +Review this git diff for correctness issues ONLY. + +Apply your analysis method systematically to each changed file: + +1. **Read beyond the diff.** Use the surrounding context to understand function signatures, types, and data flow. If a changed line references a variable defined outside the diff, consider what that variable could be. +2. **Trace inputs through the changes.** Identify every input to the changed code (function parameters, external data, return values from calls) and consider their full range of possible values — including null, undefined, empty, and error cases. +3. 
**Walk each execution path.** For every branch, loop, and error handler in the changed code, mentally execute both the happy path and the failure path. Ask: what state is the program in after each path? +4. **Apply the issue taxonomy.** Systematically check each category: control flow errors, null/undefined safety, error handling defects, type/data errors, concurrency issues, and edge cases. +5. **Calibrate severity.** Use the severity definitions from your instructions. A bug that only triggers with empty input on a function that always receives validated data is low, not critical. +6. **Self-check before reporting.** For each potential finding, verify: Can I point to the exact line? Can I describe how it fails? If not, discard it. + +Do NOT flag: style issues, naming choices, performance concerns, or security vulnerabilities. Those are handled by separate review passes. + +Only report issues with concrete evidence from the code. Do not speculate. + + +{{DIFF}} + diff --git a/cli/prompts/review/quality-resume.user.md b/cli/prompts/review/quality-resume.user.md new file mode 100644 index 0000000..b7a4a0f --- /dev/null +++ b/cli/prompts/review/quality-resume.user.md @@ -0,0 +1,15 @@ +You previously reviewed this diff for correctness and security issues. Now review it for CODE QUALITY issues only. + +Apply your analysis method systematically: + +1. **Readability** — is the intent clear to a newcomer? Are names specific? Is the abstraction level consistent? +2. **Complexity** — identify input sizes for loops, count nesting levels and responsibilities per function. +3. **Duplication** — scan for repeated patterns (5+ lines or 3+ occurrences). Do not flag trivial similarity. +4. **Error handling** — do messages include context? Are patterns consistent within each module? +5. **API design** — are signatures consistent? Do public functions have clear contracts? +6. **Calibrate** — apply the "real burden vs style preference" test. Remove subjective findings. 
+ +Do NOT re-report correctness or security findings from previous passes — they are already captured. +Prioritize findings that will create real maintenance burden over cosmetic suggestions. + +If a finding seems to overlap with a previous pass (e.g., poor error handling that is both a quality issue and a correctness bug), only report the quality-specific aspects: the maintenance burden, the readability impact, and the improvement suggestion. Do not duplicate the correctness or security perspective. diff --git a/cli/prompts/review/quality.system.md b/cli/prompts/review/quality.system.md new file mode 100644 index 0000000..21945a4 --- /dev/null +++ b/cli/prompts/review/quality.system.md @@ -0,0 +1,106 @@ +You are a code quality reviewer focused on maintainability. You review code exclusively for issues that increase technical debt, slow down future development, or cause performance problems under real-world usage. + +You DO NOT review: correctness bugs or security vulnerabilities. Those are handled by separate specialized review passes. 
+ +## Issue Taxonomy + +### Performance + +- O(n^2) or worse algorithms where O(n) or O(n log n) is straightforward +- Unnecessary allocations inside loops (creating objects, arrays, or closures per iteration when they could be hoisted) +- Redundant computation (calculating the same value multiple times in the same scope) +- Missing early returns or short-circuit evaluation that would avoid expensive work +- Synchronous blocking operations in async contexts (fs.readFileSync in a request handler) +- Memory leaks: event listeners not removed, closures retaining large scopes, timers not cleared +- Unbounded data structures (arrays, maps, caches) that grow without limits or eviction +- N+1 query patterns (database call inside a loop) + +### Complexity + +- Functions exceeding ~30 lines or 3+ levels of nesting +- Cyclomatic complexity > 10 (many branches, early returns, and conditions in one function) +- God functions: doing multiple unrelated things that should be separate functions +- Complex boolean expressions that should be extracted into named variables or functions +- Deeply nested callbacks or promise chains that should use async/await +- Control flow obscured by exceptions used for non-exceptional conditions + +### Duplication + +- Copy-pasted logic (5+ lines or repeated 3+ times) that should be extracted into a shared function +- Repeated patterns across files (same structure with different data) that could be parameterized +- Near-duplicates: same logic with minor variations that could be unified with a parameter +- NOTE: 2-3 similar lines are NOT duplication. Do not flag trivial repetition. Look for substantial repeated logic. 
+ +### Naming and Clarity + +- Misleading names: variable or function name suggests a different type, purpose, or behavior than what it actually does +- Abbreviations that are not universally understood in the project's domain +- Boolean variables or functions not named as predicates (is/has/should/can) +- Generic names (data, result, temp, item, handler) in non-trivial contexts where a specific name would aid comprehension +- Inconsistent naming conventions within the same module (camelCase mixed with snake_case, plural vs singular for collections) + +### Error Handling Quality + +- Error messages without actionable context (what operation failed, why, what the caller should do) +- "Something went wrong" or equivalent messages that provide no diagnostic value +- Missing error propagation context (not wrapping with additional info when rethrowing) +- Inconsistent error handling patterns within the same module (some functions throw, others return null, others return Result) + +### API Design + +- Inconsistent interfaces: similar functions with different parameter signatures or return types +- Breaking changes to public APIs without versioning or migration path +- Functions with too many parameters (>4 without an options object) +- Boolean parameters that control branching (should be separate functions or an enum/options) +- Missing return type annotations on public functions +- Functions that return different types depending on input (union returns that callers must narrow) + +## Analysis Method + +Think step by step. For each changed function or module: + +1. **Assess readability.** Read the code as if you are a new team member. Can you understand what it does and why in under 2 minutes? If not, identify what makes it hard: naming, nesting, abstraction level, missing context. +2. **Check algorithmic complexity.** For each loop, what is the expected input size? Is the algorithm appropriate for that size? 
An O(n^2) sort on a 10-element array is fine; on a user-provided list is not. +3. **Look for duplication.** Scan the diff for patterns that appear multiple times. Could they be unified into a shared function with parameters? +4. **Assess naming.** Does each identifier clearly convey its purpose? Would a reader misunderstand what a variable holds or what a function does based on its name alone? +5. **Check error paths.** Do error messages include enough context to diagnose the problem without a debugger? Do they tell the caller what to do? +6. **Self-check: real burden vs style preference.** For each finding, ask: would fixing this measurably improve maintainability for the next developer who touches this code? If the answer is "marginally" or "it's a matter of taste," remove the finding. + +## Calibration: Real Burden vs Style Preference + +REPORT these (real maintenance burden): +- Algorithm is O(n^2) and n is unbounded or user-controlled +- Function is 50+ lines with deeply nested logic and multiple responsibilities +- Same 10-line block copy-pasted in 3+ places +- Variable named `data` holds a user authentication token +- Error message is "Something went wrong" with no context +- Function takes 6 positional parameters of the same type +- Boolean parameter that inverts the entire function behavior + +DO NOT REPORT these (style preferences — not actionable quality issues): +- "Could use a ternary instead of if/else" +- "Consider using const instead of let" (unless actually mutated incorrectly) +- "This function could be shorter" (if it's clear and under 30 lines) +- "Consider renaming X to Y" when both names are reasonable and clear +- Minor formatting inconsistencies (handled by linters, not reviewers) +- "Could extract this into a separate file" when the module is cohesive and under 300 lines +- Preferring one iteration method over another (for-of vs forEach vs map) when both are clear + +## Severity Calibration + +- **critical**: Algorithmic issue causing 
degradation at production scale (O(n^2) on unbounded input), or memory leak that will crash the process. +- **high**: Significant complexity or duplication that actively impedes modification — changing one copy without the others will introduce bugs. +- **medium**: Meaningful readability or maintainability issue that a new team member would struggle with, but won't cause incidents. +- **low**: Minor improvement that would help but isn't blocking anyone. +- **info**: Observation or style-adjacent suggestion with minimal impact. + +## Output Quality + +- Every finding MUST include the exact file path and line number. +- Every finding MUST include a concrete, actionable suggestion for improvement — not just "this is complex." +- Descriptions must explain WHY the issue creates maintenance burden, not just WHAT the code does. +- **category**: Use the taxonomy headers from this prompt (e.g., "Performance", "Complexity", "Duplication", "Naming and Clarity", "Error Handling Quality", "API Design"). +- **title**: Concise and specific, under 80 characters. "O(n^2) user lookup in request handler" — not "Performance could be improved." +- Severity reflects actual impact on the codebase, not theoretical ideals about clean code. +- After drafting all findings, re-read each one and ask: "Is this a real maintenance burden, or am I enforcing a personal style preference?" Remove style preferences. +- If you find no issues, that is a valid and expected outcome. Do not manufacture findings to appear thorough. diff --git a/cli/prompts/review/quality.user.md b/cli/prompts/review/quality.user.md new file mode 100644 index 0000000..96aea1b --- /dev/null +++ b/cli/prompts/review/quality.user.md @@ -0,0 +1,20 @@ +Review this git diff for CODE QUALITY issues only. + +Apply your analysis method systematically to each changed file: + +1. **Readability check.** Read each changed function as a newcomer. Is the intent clear? Are names specific enough? 
Is the abstraction level consistent within the function? +2. **Complexity check.** For each loop, identify the input size and algorithm. For each function, count nesting levels and responsibilities. Flag functions that do multiple unrelated things. +3. **Duplication check.** Scan the entire diff for repeated patterns — 5+ lines appearing in multiple places, or the same structure with different data. Only flag substantial repetition, not 2-3 similar lines. +4. **Error handling check.** Do error messages include context (what failed, why, what to do)? Are error patterns consistent within each module? +5. **API design check.** Are function signatures consistent? Do public functions have clear contracts (parameter types, return types, error behavior)? +6. **Calibrate against real impact.** For each potential finding, apply the "real burden vs style preference" test from your instructions. Remove findings that are subjective preferences or marginal improvements. + +Do NOT flag correctness bugs or security vulnerabilities. Those are handled by separate review passes. + +Prioritize findings that will create real maintenance burden over cosmetic suggestions. + +Only report issues with concrete evidence of quality impact. Do not flag style preferences. + + +{{DIFF}} + diff --git a/cli/prompts/review/security-resume.user.md b/cli/prompts/review/security-resume.user.md new file mode 100644 index 0000000..9cc199c --- /dev/null +++ b/cli/prompts/review/security-resume.user.md @@ -0,0 +1,15 @@ +You previously reviewed this diff for correctness issues. Now review it for SECURITY issues only. + +Apply taint analysis systematically to each changed file: + +1. **Identify all sources of external input** in the changed code — function parameters from HTTP handlers, environment variables, file reads, CLI arguments, database results, parsed config. +2. 
**Trace tainted data** through assignments, function calls, and transformations to security-sensitive sinks (SQL queries, shell commands, file paths, HTML output, eval, redirects, HTTP headers). +3. **Check for sanitization** between each source and sink. Is it appropriate for the sink type? +4. **Check trust boundaries.** Does data cross from untrusted to trusted context without validation? +5. **Apply the full taxonomy** — hardcoded secrets, weak crypto, missing auth, overly permissive config, sensitive data in logs, unsafe deserialization, prototype pollution. +6. **Verify each finding** — articulate the concrete attack vector. If you cannot describe who attacks, how, and what they gain, discard it. + +Do NOT re-report correctness findings from the previous pass — they are already captured. +Do NOT flag style or performance issues. Those are handled by separate review passes. + +If a finding seems to overlap with the correctness pass (e.g., an error handling issue that is both a bug and a security concern), only report the security-specific aspects: the attack vector, the exploitability, and the security impact. Do not duplicate the correctness perspective. diff --git a/cli/prompts/review/security.system.md b/cli/prompts/review/security.system.md new file mode 100644 index 0000000..e9d1d8a --- /dev/null +++ b/cli/prompts/review/security.system.md @@ -0,0 +1,117 @@ +You are a security-focused code reviewer. You review code exclusively for vulnerabilities — weaknesses that could be exploited by an attacker to compromise confidentiality, integrity, or availability. + +You DO NOT review: correctness bugs, style issues, code quality, or performance concerns. Those are handled by separate specialized review passes. 
+ +## Issue Taxonomy (OWASP Top 10:2025 + CWE Top 25:2024) + +### A01: Broken Access Control + +- Missing authorization checks on sensitive operations — CWE-862 +- Direct object reference without ownership validation (IDOR) — CWE-639 +- Path traversal via unsanitized file paths — CWE-22 +- CORS misconfiguration allowing unauthorized origins — CWE-346 +- Privilege escalation through parameter manipulation — CWE-269 +- Server-side request forgery (SSRF) via user-controlled URLs — CWE-918 +- Missing function-level access control on API endpoints + +### A02: Security Misconfiguration + +- Debug mode or verbose errors exposed in production +- Default credentials or insecure default settings — CWE-1188 +- Unnecessary features, services, or ports enabled +- Missing security headers (CSP, HSTS, X-Frame-Options, X-Content-Type-Options) +- Overly permissive file or directory permissions — CWE-732 +- HTTPS not enforced or mixed content allowed + +### A03: Software Supply Chain Failures + +- Unpinned dependency versions allowing silent upgrades +- No integrity verification (checksums, signatures) for downloaded artifacts +- Use of deprecated or known-vulnerable packages +- Importing from untrusted or typosquattable sources + +### A04: Cryptographic Failures + +- Weak algorithms: MD5, SHA1 for security purposes, DES, RC4 — CWE-327 +- Hardcoded keys, salts, or initialization vectors — CWE-321 +- Missing encryption for sensitive data in transit or at rest — CWE-311 +- Insufficient key length or improper key management +- Use of Math.random() or other non-CSPRNG for security-sensitive operations — CWE-338 +- Missing or improper certificate validation + +### A05: Injection + +- SQL injection via string concatenation or template literals — CWE-89 +- OS command injection via shell execution with user input — CWE-78 +- Template injection (server-side or client-side) — CWE-94 +- Cross-site scripting (XSS) via unsanitized output in HTML/DOM — CWE-79 +- LDAP, XML external entity (XXE), 
or header injection — CWE-611 +- Regular expression denial of service (ReDoS) — CWE-1333 +- Code injection via eval(), new Function(), or vm.runInContext with untrusted input — CWE-95 + +### A06: Insecure Design + +- Business logic flaws allowing unintended workflows +- Missing rate limiting on authentication or sensitive operations +- Lack of defense-in-depth (single layer of validation) +- Enumeration vectors (user existence, valid IDs via timing or error differences) + +### A07: Authentication Failures + +- Weak password policies or missing credential validation +- Session fixation or improper session invalidation — CWE-384 +- Missing multi-factor authentication for privileged operations +- Insecure token storage (localStorage for auth tokens, tokens in URLs) +- Timing attacks on authentication comparisons (non-constant-time compare) — CWE-208 +- JWT vulnerabilities (none algorithm, missing expiry, weak signing) + +### A08: Software and Data Integrity Failures + +- Unsafe deserialization of untrusted data — CWE-502 +- Missing signature verification on updates, webhooks, or data imports +- Prototype pollution in JavaScript — CWE-1321 +- Mass assignment / over-posting without allowlists + +### A09: Security Logging and Alerting Failures + +- Sensitive data written to logs (passwords, tokens, PII, credit cards) — CWE-532 +- Missing audit logging for authentication and authorization events +- Log injection via unsanitized user input in log messages — CWE-117 + +### A10: Mishandling of Exceptional Conditions (new in 2025) + +- Error responses revealing internal system details (stack traces, paths, versions) +- Failing open: granting access when an error occurs instead of denying — CWE-636 +- Uncaught exceptions that bypass security controls (auth, validation, rate limiting) +- Resource exhaustion from unhandled edge cases (unbounded allocations, infinite loops) + +## Analysis Method (Taint Analysis Framework) + +Think step by step. 
For each code change, perform source-sink-sanitizer analysis: + +1. **Identify sources.** Where does external or user-controlled input enter? Look for: HTTP request parameters, headers, and body; environment variables; file reads; database query results; CLI arguments; message queue payloads; URL parameters; cookie values. +2. **Trace flow.** Follow each source through variable assignments, function calls, transformations, and returns. Track whether the taint is preserved or eliminated at each step. Pay special attention to data that crosses function or module boundaries. +3. **Identify sinks.** Where is the data consumed in a security-sensitive way? Look for: SQL queries, shell commands, HTML/DOM output, file system paths, eval/Function constructors, HTTP redirects, response headers, deserialization calls, crypto operations. +4. **Check sanitizers.** Is the data validated, escaped, or transformed before reaching the sink? Is the sanitization appropriate for the specific sink type? (HTML escaping doesn't prevent SQL injection; URL encoding doesn't prevent command injection.) +5. **Check trust boundaries.** Does data cross from untrusted to trusted context without validation? Common trust boundaries: client→server, user input→database query, external API→internal processing, config file→runtime behavior. +6. **Self-check.** For each finding, describe the specific attack vector: who is the attacker, what input do they control, what is the exploit, and what is the impact? If you cannot articulate a concrete attack, do not report the finding. + +## Severity Calibration + +- **critical**: Exploitable by an unauthenticated external attacker. Impact: remote code execution, full data breach, complete authentication bypass, or privilege escalation to admin. +- **high**: Exploitable with some preconditions (authenticated user, specific configuration). Impact: significant data exposure, horizontal privilege escalation, or persistent XSS. 
+- **medium**: Requires authenticated access, specific configuration, or uncommon conditions. Impact: limited data exposure, information disclosure, or denial of service. +- **low**: Defense-in-depth improvement. No direct exploit path from the code alone, but weakens the security posture. +- **info**: Security best practice suggestion. Not a vulnerability. + +Do NOT flag theoretical vulnerabilities without a concrete attack path supported by the code. "This could be insecure" is not a finding — you must explain who attacks, how, and what they gain. + +## Output Quality + +- Every finding MUST include the exact file path and line number. +- Every finding MUST describe the attack vector: what input does the attacker control, how does it reach the sink, and what is the impact? +- Every finding MUST include a concrete remediation (parameterized query, escaping function, validation check — not just "sanitize the input"). +- **category**: Use the taxonomy headers from this prompt (e.g., "A01: Broken Access Control", "A05: Injection", "A04: Cryptographic Failures"). +- **title**: Concise and specific, under 80 characters. "SQL injection in getUserById query parameter" — not "Possible security concern." +- After drafting all findings, re-read each one and ask: "Could I write a proof-of-concept exploit based on this description?" If not, strengthen the evidence or remove the finding. +- If you find no vulnerabilities, that is a valid and expected outcome. Do not manufacture findings to appear thorough. diff --git a/cli/prompts/review/security.user.md b/cli/prompts/review/security.user.md new file mode 100644 index 0000000..f0d1773 --- /dev/null +++ b/cli/prompts/review/security.user.md @@ -0,0 +1,18 @@ +Review this git diff for SECURITY issues only. + +Apply taint analysis systematically to each changed file: + +1. 
**Identify all sources of external input.** In the changed code, find every place where user-controlled or external data enters: function parameters from HTTP handlers, environment variables, file reads, CLI arguments, database results, parsed config. Mark each as a taint source. +2. **Trace tainted data through the diff.** Follow each source through assignments, function calls, string operations, and returns. Does it reach a security-sensitive sink (SQL query, shell command, file path, HTML output, eval, redirect, HTTP header)? +3. **Check for sanitization.** Between each source and sink, is the data validated, escaped, or constrained? Is the sanitization appropriate for the sink type? +4. **Check trust boundaries.** Does data cross from an untrusted to a trusted context (client→server, user→database, external→internal) without validation? +5. **Apply the full taxonomy.** Beyond taint analysis, check for: hardcoded secrets, weak crypto, missing auth checks, overly permissive configurations, sensitive data in logs, unsafe deserialization, prototype pollution. +6. **Verify each finding.** For every potential issue, articulate the concrete attack: who is the attacker, what do they control, how do they exploit it, and what do they gain? If you cannot answer all four, discard the finding. + +Do NOT flag correctness bugs, style issues, or performance concerns. Those are handled by separate review passes. + +Only report vulnerabilities with a concrete attack path. Do not speculate. 
+ + +{{DIFF}} + diff --git a/cli/src/commands/config/apply.ts b/cli/src/commands/config/apply.ts new file mode 100644 index 0000000..a85959c --- /dev/null +++ b/cli/src/commands/config/apply.ts @@ -0,0 +1,147 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { copyFileSync, existsSync, mkdirSync, readFileSync } from "fs"; +import { homedir } from "os"; +import { basename, dirname, resolve } from "path"; +import { loadFileManifest } from "../../loaders/config-loader.js"; + +interface ConfigApplyOptions { + dryRun?: boolean; + force?: boolean; + color?: boolean; +} + +function findWorkspaceRoot(): string | null { + let dir = process.cwd(); + + while (true) { + if (existsSync(resolve(dir, ".codeforge"))) { + return dir; + } + const parent = resolve(dir, ".."); + if (parent === dir) return null; + dir = parent; + } +} + +function expandVariables(path: string): string { + return path + .replace(/\$\{CLAUDE_CONFIG_DIR\}/g, resolve(homedir(), ".claude")) + .replace(/\$\{HOME\}/g, homedir()); +} + +function filesAreIdentical(a: string, b: string): boolean { + try { + const contentA = readFileSync(a); + const contentB = readFileSync(b); + return contentA.equals(contentB); + } catch { + return false; + } +} + +export function registerConfigApplyCommand(parent: Command): void { + parent + .command("apply") + .description("Deploy configuration files from workspace to system") + .option("--dry-run", "Show what would happen without writing files") + .option("--force", "Override overwrite strategy and deploy all files") + .option("--no-color", "Disable colored output") + .action(async (options: ConfigApplyOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const workspaceRoot = findWorkspaceRoot(); + if (!workspaceRoot) { + console.error( + "Error: Could not find .codeforge/ directory in any parent", + ); + process.exit(1); + } + + const manifest = await loadFileManifest(workspaceRoot); + if (manifest.length === 0) { + 
console.log("No files in manifest."); + return; + } + + console.log( + options.dryRun + ? "Dry run — no files will be written:\n" + : "Deploying configuration files...\n", + ); + + let updated = 0; + let unchanged = 0; + let skipped = 0; + + for (const entry of manifest) { + if (entry.enabled === false) { + skipped++; + continue; + } + + const src = resolve(workspaceRoot, ".codeforge", entry.src); + const destDir = expandVariables(entry.dest); + const destFilename = entry.destFilename ?? basename(entry.src); + const dest = resolve(destDir, destFilename); + + const displayDest = dest + .replace(homedir(), "~") + .replace(/\/\//g, "/"); + + const destExists = existsSync(dest); + + if (!options.force) { + if (entry.overwrite === "never" && destExists) { + skipped++; + console.log( + ` ${chalk.yellow("\u2717")} ${entry.src} \u2192 ${displayDest} (skipped, never overwrite)`, + ); + continue; + } + + if ( + entry.overwrite === "if-changed" && + destExists && + filesAreIdentical(src, dest) + ) { + unchanged++; + console.log( + ` ${chalk.dim("\u25CB")} ${entry.src} \u2192 ${displayDest} (unchanged)`, + ); + continue; + } + } + + if (options.dryRun) { + updated++; + console.log( + ` ${chalk.green("\u2713")} ${entry.src} \u2192 ${displayDest} (would update)`, + ); + } else { + mkdirSync(dirname(dest), { recursive: true }); + copyFileSync(src, dest); + updated++; + console.log( + ` ${chalk.green("\u2713")} ${entry.src} \u2192 ${displayDest} (updated)`, + ); + } + } + + const total = updated + unchanged + skipped; + const parts: string[] = []; + if (updated > 0) parts.push(`${updated} updated`); + if (unchanged > 0) parts.push(`${unchanged} unchanged`); + if (skipped > 0) parts.push(`${skipped} skipped`); + + console.log(`\n${total} files processed (${parts.join(", ")})`); + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/config/show.ts b/cli/src/commands/config/show.ts new file mode 100644 index 0000000..36eea6f --- /dev/null +++ b/cli/src/commands/config/show.ts @@ -0,0 +1,60 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadSettings } from "../../loaders/config-loader.js"; +import { findSettingsPaths } from "../../loaders/plugin-loader.js"; +import { + formatConfigShowJson, + formatConfigShowText, +} from "../../output/config-show.js"; + +interface ConfigShowOptions { + format: string; + color?: boolean; + source?: boolean; +} + +export function registerConfigShowCommand(parent: Command): void { + parent + .command("show") + .description("Show current Claude Code configuration") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--source", "Show workspace source copy instead of deployed") + .action(async (options: ConfigShowOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let settingsPath: string; + + if (options.source) { + const paths = findSettingsPaths(); + if (!paths.source) { + console.error("Error: Source settings.json not found"); + process.exit(1); + } + settingsPath = paths.source; + } else { + const paths = findSettingsPaths(); + settingsPath = paths.deployed; + } + + const settings = await loadSettings(settingsPath); + + if (options.format === "json") { + console.log(formatConfigShowJson(settings)); + } else { + console.log( + formatConfigShowText(settings, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/agents.ts b/cli/src/commands/plugin/agents.ts new file mode 100644 index 0000000..a25e02d --- /dev/null +++ b/cli/src/commands/plugin/agents.ts @@ -0,0 +1,52 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatAgentsJson, + formatAgentsText, +} from "../../output/plugin-components.js"; + +interface PluginAgentsOptions { + format: string; + color?: boolean; + plugin?: string; +} + +export function registerPluginAgentsCommand(parent: Command): void { + parent + .command("agents") + .description("List agents from installed plugins") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--plugin ", "Filter to a specific plugin") + .action(async (options: PluginAgentsOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let plugins = await loadInstalledPlugins(); + + if (options.plugin) { + plugins = plugins.filter( + (p) => + p.name === options.plugin || p.qualifiedName === options.plugin, + ); + } + + if (options.format === "json") { + console.log(formatAgentsJson(plugins)); + } else { + console.log( + formatAgentsText(plugins, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/disable.ts b/cli/src/commands/plugin/disable.ts new file mode 100644 index 0000000..0528bf7 --- /dev/null +++ b/cli/src/commands/plugin/disable.ts @@ -0,0 +1,48 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { setPluginEnabled } from "../../loaders/settings-writer.js"; + +interface PluginDisableOptions { + color?: boolean; +} + +export function registerPluginDisableCommand(parent: Command): void { + parent + .command("disable ") + .description("Disable a plugin") + .option("--no-color", "Disable colored output") + .action(async (name: string, options: PluginDisableOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const plugins = await loadInstalledPlugins(); + const plugin = plugins.find( + (p) => p.name === name || p.qualifiedName === name, + ); + + if (!plugin) { + console.error(`Plugin not found: ${name}`); + process.exit(1); + } + + const result = await setPluginEnabled(plugin.qualifiedName, false); + + console.log(`${chalk.red("✓")} Disabled ${plugin.qualifiedName}`); + if (result.deployed) { + console.log(" Updated: ~/.claude/settings.json"); + } + if (result.source) { + console.log(" Updated: /workspaces/.codeforge/config/settings.json"); + } else { + console.log(" Source settings.json not found — deployed copy only"); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/enable.ts b/cli/src/commands/plugin/enable.ts new file mode 100644 index 0000000..c16a675 --- /dev/null +++ b/cli/src/commands/plugin/enable.ts @@ -0,0 +1,48 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { setPluginEnabled } from "../../loaders/settings-writer.js"; + +interface PluginEnableOptions { + color?: boolean; +} + +export function registerPluginEnableCommand(parent: Command): void { + parent + .command("enable ") + .description("Enable a plugin") + .option("--no-color", "Disable colored output") + .action(async (name: string, options: PluginEnableOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const plugins = await loadInstalledPlugins(); + const plugin = plugins.find( + (p) => p.name === name || p.qualifiedName === name, + ); + + if (!plugin) { + console.error(`Plugin not found: ${name}`); + process.exit(1); + } + + const result = await setPluginEnabled(plugin.qualifiedName, true); + + console.log(`${chalk.green("✓")} Enabled ${plugin.qualifiedName}`); + if (result.deployed) { + console.log(" Updated: ~/.claude/settings.json"); + } + if (result.source) { + console.log(" Updated: /workspaces/.codeforge/config/settings.json"); + } else { + console.log(" Source settings.json not found — deployed copy only"); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/hooks.ts b/cli/src/commands/plugin/hooks.ts new file mode 100644 index 0000000..b1ec0d7 --- /dev/null +++ b/cli/src/commands/plugin/hooks.ts @@ -0,0 +1,52 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatHooksJson, + formatHooksText, +} from "../../output/plugin-components.js"; + +interface PluginHooksOptions { + format: string; + color?: boolean; + plugin?: string; +} + +export function registerPluginHooksCommand(parent: Command): void { + parent + .command("hooks") + .description("List hooks from installed plugins") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--plugin ", "Filter to a specific plugin") + .action(async (options: PluginHooksOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let plugins = await loadInstalledPlugins(); + + if (options.plugin) { + plugins = plugins.filter( + (p) => + p.name === options.plugin || p.qualifiedName === options.plugin, + ); + } + + if (options.format === "json") { + console.log(formatHooksJson(plugins)); + } else { + console.log( + formatHooksText(plugins, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/list.ts b/cli/src/commands/plugin/list.ts new file mode 100644 index 0000000..1508968 --- /dev/null +++ b/cli/src/commands/plugin/list.ts @@ -0,0 +1,53 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatPluginListJson, + formatPluginListText, +} from "../../output/plugin-list.js"; + +interface PluginListOptions { + format: string; + color?: boolean; + enabledOnly?: boolean; + disabledOnly?: boolean; +} + +export function registerPluginListCommand(parent: Command): void { + parent + .command("list") + .description("List installed plugins") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--enabled-only", "Show only enabled plugins") + .option("--disabled-only", "Show only disabled plugins") + .action(async (options: PluginListOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let plugins = await loadInstalledPlugins(); + + if (options.enabledOnly) { + plugins = plugins.filter((p) => p.enabled); + } else if (options.disabledOnly) { + plugins = plugins.filter((p) => !p.enabled); + } + + if (options.format === "json") { + console.log(formatPluginListJson(plugins)); + } else { + console.log( + formatPluginListText(plugins, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/show.ts b/cli/src/commands/plugin/show.ts new file mode 100644 index 0000000..e5a7556 --- /dev/null +++ b/cli/src/commands/plugin/show.ts @@ -0,0 +1,51 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatPluginShowJson, + formatPluginShowText, +} from "../../output/plugin-show.js"; + +interface PluginShowOptions { + format: string; + color?: boolean; +} + +export function registerPluginShowCommand(parent: Command): void { + parent + .command("show ") + .description("Show detailed information about a plugin") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .action(async (name: string, options: PluginShowOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + const plugins = await loadInstalledPlugins(); + const plugin = plugins.find( + (p) => p.name === name || p.qualifiedName === name, + ); + + if (!plugin) { + console.error(`Plugin not found: ${name}`); + process.exit(1); + } + + if (options.format === "json") { + console.log(formatPluginShowJson(plugin)); + } else { + console.log( + formatPluginShowText(plugin, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/plugin/skills.ts b/cli/src/commands/plugin/skills.ts new file mode 100644 index 0000000..803a6d4 --- /dev/null +++ b/cli/src/commands/plugin/skills.ts @@ -0,0 +1,52 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { loadInstalledPlugins } from "../../loaders/plugin-loader.js"; +import { + formatSkillsJson, + formatSkillsText, +} from "../../output/plugin-components.js"; + +interface PluginSkillsOptions { + format: string; + color?: boolean; + plugin?: string; +} + +export function registerPluginSkillsCommand(parent: Command): void { + parent + .command("skills") + .description("List skills from installed plugins") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option("--plugin ", "Filter to a specific plugin") + .action(async (options: PluginSkillsOptions) => { + try { + if (!options.color) { + chalk.level = 0; + } + + let plugins = await loadInstalledPlugins(); + + if (options.plugin) { + plugins = plugins.filter( + (p) => + p.name === options.plugin || p.qualifiedName === options.plugin, + ); + } + + if (options.format === "json") { + console.log(formatSkillsJson(plugins)); + } else { + console.log( + formatSkillsText(plugins, { + noColor: !options.color, + }), + ); + } + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/commands/review/review.ts b/cli/src/commands/review/review.ts new file mode 100644 index 0000000..a4991ae --- /dev/null +++ b/cli/src/commands/review/review.ts @@ -0,0 +1,101 @@ +import chalk from "chalk"; +import type { Command } from "commander"; +import { formatReviewJson, formatReviewText } from "../../output/review.js"; +import { detectBaseBranch, runReview } from "../../runners/review-runner.js"; +import type { ReviewScope } from "../../schemas/review.js"; + +interface ReviewCommandOptions { + scope: string; + base?: string; + include?: string; + format: string; + color?: boolean; + parallel?: boolean; + model: string; + maxCost?: string; + failBelow?: string; + passes: string; + verbose?: boolean; +} + +export function registerReviewCommand(parent: Command): void { + parent + .command("review") + .description("Multi-pass AI code review of branch changes") + .option("-s, --scope ", "Review scope: diff|staged|full", "diff") + .option("-b, --base ", "Base branch for diff scope") + .option("-i, --include ", "Filter files by glob pattern") + .option("-f, --format ", "Output format: text|json", "text") + .option("--no-color", "Disable colored output") + .option( + "--parallel", + "Run passes concurrently (~3x cost, faster, more diverse)", + ) + .option("-m, --model ", "Model for review passes", "sonnet") + .option("--max-cost ", "Maximum total USD across all passes") + .option( + "--fail-below ", + "Exit code 1 if score below threshold (1-10)", + ) + .option("--passes ", "Number of passes: 1|2|3", "3") + .option("-v, --verbose", "Show per-pass progress to stderr") + .action(async (options: ReviewCommandOptions) => { + try { + if (!options.color) chalk.level = 0; + + const scope = options.scope as ReviewScope; + if (!["diff", "staged", "full"].includes(scope)) { + console.error("Error: --scope must be diff, staged, or full"); + process.exit(1); + } 
+
+        // --passes must be one of the supported pass counts.
+        const passes = parseInt(options.passes, 10) as 1 | 2 | 3;
+        if (![1, 2, 3].includes(passes)) {
+          console.error("Error: --passes must be 1, 2, or 3");
+          process.exit(1);
+        }
+
+        const base = options.base || (await detectBaseBranch());
+
+        const maxCost = options.maxCost
+          ? parseFloat(options.maxCost)
+          : undefined;
+        // parseFloat yields NaN for malformed input, and NaN compares
+        // false against every budget check — which would silently disable
+        // the cost cap. Reject it up front.
+        if (maxCost !== undefined && Number.isNaN(maxCost)) {
+          console.error("Error: --max-cost must be a number");
+          process.exit(1);
+        }
+
+        const failBelow = options.failBelow
+          ? parseInt(options.failBelow, 10)
+          : undefined;
+        // NaN < 1 and NaN > 10 are both false, so a malformed --fail-below
+        // previously slipped through validation and the threshold gate
+        // below could never fire. Check NaN explicitly.
+        if (
+          failBelow !== undefined &&
+          (Number.isNaN(failBelow) || failBelow < 1 || failBelow > 10)
+        ) {
+          console.error("Error: --fail-below must be between 1 and 10");
+          process.exit(1);
+        }
+
+        const result = await runReview({
+          scope,
+          base,
+          include: options.include,
+          parallel: options.parallel ?? false,
+          model: options.model,
+          maxCost,
+          passes,
+          verbose: options.verbose ?? false,
+        });
+
+        if (options.format === "json") {
+          console.log(formatReviewJson(result));
+        } else {
+          console.log(
+            formatReviewText(result, {
+              noColor: !options.color,
+            }),
+          );
+        }
+
+        // CI gate: non-zero exit when the review score is below threshold.
+        if (failBelow !== undefined && result.score < failBelow) {
+          process.exit(1);
+        }
+      } catch (err) {
+        const message = err instanceof Error ? 
err.message : String(err); + console.error(`Error: ${message}`); + process.exit(1); + } + }); +} diff --git a/cli/src/loaders/config-loader.ts b/cli/src/loaders/config-loader.ts new file mode 100644 index 0000000..6a0626b --- /dev/null +++ b/cli/src/loaders/config-loader.ts @@ -0,0 +1,37 @@ +import { mkdirSync } from "fs"; +import { homedir } from "os"; +import { resolve } from "path"; +import type { ManifestEntry, SettingsJson } from "../schemas/config.js"; + +export async function loadSettings(path: string): Promise { + try { + return await Bun.file(path).json(); + } catch { + return {}; + } +} + +export async function writeSettings( + path: string, + settings: SettingsJson, +): Promise { + const dir = resolve(path, ".."); + mkdirSync(dir, { recursive: true }); + await Bun.write(path, JSON.stringify(settings, null, 2) + "\n"); +} + +export function findDeployedSettings(): string { + return resolve(homedir(), ".claude/settings.json"); +} + +export async function loadFileManifest( + workspaceRoot: string, +): Promise { + try { + return await Bun.file( + resolve(workspaceRoot, ".codeforge/file-manifest.json"), + ).json(); + } catch { + return []; + } +} diff --git a/cli/src/loaders/plugin-loader.ts b/cli/src/loaders/plugin-loader.ts new file mode 100644 index 0000000..9ecf2f7 --- /dev/null +++ b/cli/src/loaders/plugin-loader.ts @@ -0,0 +1,267 @@ +import { homedir } from "os"; +import { basename, dirname, resolve } from "path"; +import type { SettingsJson } from "../schemas/config.js"; +import type { + AgentInfo, + HookInfo, + HooksJsonFile, + InstalledPluginsFile, + PluginInfo, + PluginJsonFile, + SkillInfo, +} from "../schemas/plugin.js"; + +export function extractFrontMatter(content: string): Record { + const match = content.match(/^---\r?\n([\s\S]*?)\r?\n---/); + if (!match) return {}; + + const result: Record = {}; + const lines = match[1].split(/\r?\n/); + let currentKey: string | null = null; + let currentValue = ""; + + for (const line of lines) { + const 
kvMatch = line.match(/^(\w[\w-]*)\s*:\s*(.*)/); + if (kvMatch) { + if (currentKey !== null) { + result[currentKey] = currentValue.trim(); + } + currentKey = kvMatch[1]; + const rawValue = kvMatch[2].trim(); + if (rawValue === ">-" || rawValue === ">") { + currentValue = ""; + } else { + currentValue = rawValue.replace(/^["']|["']$/g, ""); + } + } else if (currentKey !== null && /^\s+/.test(line)) { + const continuation = line.trim(); + if (currentValue) { + currentValue += " " + continuation; + } else { + currentValue = continuation; + } + } + } + + if (currentKey !== null) { + result[currentKey] = currentValue.trim(); + } + + return result; +} + +export async function loadPluginDetail(installPath: string): Promise<{ + hooks: HookInfo[]; + agents: AgentInfo[]; + skills: SkillInfo[]; + scripts: string[]; +}> { + const hooks: HookInfo[] = []; + const agents: AgentInfo[] = []; + const skills: SkillInfo[] = []; + const scripts: string[] = []; + + // Hooks + try { + const hooksFile: HooksJsonFile = await Bun.file( + resolve(installPath, "hooks/hooks.json"), + ).json(); + for (const [event, rules] of Object.entries(hooksFile.hooks)) { + for (const rule of rules) { + hooks.push({ + event, + matcher: rule.matcher, + commands: rule.hooks.map((h) => ({ + command: h.command, + timeout: h.timeout, + })), + }); + } + } + } catch {} + + // Agents + try { + const glob = new Bun.Glob("*.md"); + for await (const entry of glob.scan({ + cwd: resolve(installPath, "agents"), + absolute: true, + })) { + try { + const content = await Bun.file(entry).text(); + const fm = extractFrontMatter(content); + agents.push({ + name: fm.name || basename(entry, ".md"), + description: fm.description || "", + filename: basename(entry), + }); + } catch {} + } + } catch {} + + // Skills + try { + const glob = new Bun.Glob("*/SKILL.md"); + for await (const entry of glob.scan({ + cwd: resolve(installPath, "skills"), + absolute: true, + })) { + try { + const content = await Bun.file(entry).text(); + const 
fm = extractFrontMatter(content);
+        const dir = basename(dirname(entry));
+        skills.push({
+          name: fm.name || dir,
+          description: fm.description || "",
+          dirname: dir,
+        });
+      } catch {}
+    }
+  } catch {}
+
+  // Scripts: every file in scripts/ is listed by basename only.
+  try {
+    const glob = new Bun.Glob("*");
+    for await (const entry of glob.scan({
+      cwd: resolve(installPath, "scripts"),
+      absolute: true,
+    })) {
+      scripts.push(basename(entry));
+    }
+  } catch {}
+
+  return { hooks, agents, skills, scripts };
+}
+
+/**
+ * Locate the deployed settings file (~/.claude/settings.json) and, if
+ * present, the workspace source copy (.codeforge/config/settings.json),
+ * searched upward from cwd with a /workspaces fallback.
+ */
+export function findSettingsPaths(): {
+  deployed: string;
+  source: string | null;
+} {
+  const deployed = resolve(homedir(), ".claude/settings.json");
+
+  let source: string | null = null;
+  let dir = process.cwd();
+
+  while (true) {
+    const candidate = resolve(dir, ".codeforge/config/settings.json");
+    // Bun.file() is lazy: it never throws for a missing path and `size`
+    // is always a defined number (0 when the file does not exist), so
+    // the previous `size !== undefined` probe matched every candidate.
+    // A positive size is the correct sync existence check here —
+    // settings files are JSON and never legitimately empty.
+    if (Bun.file(candidate).size > 0) {
+      source = candidate;
+      break;
+    }
+
+    const parent = resolve(dir, "..");
+    if (parent === dir) break;
+    dir = parent;
+  }
+
+  if (!source) {
+    const fallback = "/workspaces/.codeforge/config/settings.json";
+    if (Bun.file(fallback).size > 0) {
+      source = fallback;
+    }
+  }
+
+  return { deployed, source };
+}
+
+/**
+ * Resolve whether a plugin is enabled from settings.enabledPlugins.
+ * Missing map or missing entry defaults to enabled.
+ */
+function resolveEnabled(
+  qualifiedName: string,
+  pluginName: string,
+  enabledPlugins: Record<string, boolean> | undefined,
+): boolean {
+  if (!enabledPlugins) return true;
+
+  // Direct match on "name@marketplace".
+  if (qualifiedName in enabledPlugins) {
+    return enabledPlugins[qualifiedName];
+  }
+
+  // Name-prefix match — same plugin recorded under another marketplace.
+  const prefix = pluginName + "@";
+  for (const key of Object.keys(enabledPlugins)) {
+    if (key.startsWith(prefix)) {
+      return enabledPlugins[key];
+    }
+  }
+
+  // Not found — plugins default to enabled.
+  return true;
+}
+
+export async function loadInstalledPlugins(): Promise<PluginInfo[]> {
+  const claudeDir = resolve(homedir(), ".claude");
+
+  let installedFile: InstalledPluginsFile;
+  try {
+    
installedFile = await Bun.file( + resolve(claudeDir, "plugins/installed_plugins.json"), + ).json(); + } catch { + return []; + } + + let settings: SettingsJson; + try { + settings = await Bun.file(resolve(claudeDir, "settings.json")).json(); + } catch { + settings = {}; + } + + const results: PluginInfo[] = []; + + for (const [qualifiedName, entries] of Object.entries( + installedFile.plugins, + )) { + if (!entries || entries.length === 0) continue; + + const atIndex = qualifiedName.indexOf("@"); + const pluginName = + atIndex >= 0 ? qualifiedName.slice(0, atIndex) : qualifiedName; + const marketplace = atIndex >= 0 ? qualifiedName.slice(atIndex + 1) : ""; + + const entry = entries[0]; + const enabled = resolveEnabled( + qualifiedName, + pluginName, + settings.enabledPlugins, + ); + + let description = ""; + let author = ""; + try { + const pluginJson: PluginJsonFile = await Bun.file( + resolve(entry.installPath, ".claude-plugin/plugin.json"), + ).json(); + description = pluginJson.description || ""; + author = pluginJson.author?.name || ""; + } catch {} + + const detail = await loadPluginDetail(entry.installPath); + + results.push({ + name: pluginName, + marketplace, + qualifiedName, + enabled, + version: entry.version, + installPath: entry.installPath, + description, + author, + installedAt: entry.installedAt, + hooks: detail.hooks, + agents: detail.agents, + skills: detail.skills, + scripts: detail.scripts, + }); + } + + results.sort((a, b) => a.name.localeCompare(b.name)); + return results; +} diff --git a/cli/src/loaders/settings-writer.ts b/cli/src/loaders/settings-writer.ts new file mode 100644 index 0000000..ea68072 --- /dev/null +++ b/cli/src/loaders/settings-writer.ts @@ -0,0 +1,36 @@ +import { loadSettings, writeSettings } from "./config-loader.js"; +import { findSettingsPaths } from "./plugin-loader.js"; + +export async function setPluginEnabled( + qualifiedName: string, + enabled: boolean, +): Promise<{ deployed: boolean; source: boolean }> { + const 
paths = findSettingsPaths(); + const result = { deployed: false, source: false }; + + // Update deployed settings + try { + const settings = await loadSettings(paths.deployed); + if (!settings.enabledPlugins) { + settings.enabledPlugins = {}; + } + settings.enabledPlugins[qualifiedName] = enabled; + await writeSettings(paths.deployed, settings); + result.deployed = true; + } catch {} + + // Update source settings if it exists + if (paths.source) { + try { + const settings = await loadSettings(paths.source); + if (!settings.enabledPlugins) { + settings.enabledPlugins = {}; + } + settings.enabledPlugins[qualifiedName] = enabled; + await writeSettings(paths.source, settings); + result.source = true; + } catch {} + } + + return result; +} diff --git a/cli/src/output/config-show.ts b/cli/src/output/config-show.ts new file mode 100644 index 0000000..0b85a8c --- /dev/null +++ b/cli/src/output/config-show.ts @@ -0,0 +1,83 @@ +import chalk from "chalk"; +import type { SettingsJson } from "../schemas/config.js"; + +const SENSITIVE_PATTERNS = ["TOKEN", "SECRET", "KEY", "PASSWORD"]; + +function maskValue(key: string, value: string): string { + const upperKey = key.toUpperCase(); + if (SENSITIVE_PATTERNS.some((p) => upperKey.includes(p))) { + return value.length > 4 ? value.slice(0, 4) + "****" : "****"; + } + return value; +} + +export function formatConfigShowText( + settings: SettingsJson, + options?: { noColor?: boolean }, +): string { + if (options?.noColor) { + chalk.level = 0; + } + + const lines: string[] = []; + const { env, permissions, enabledPlugins, ...other } = settings; + + if (env && Object.keys(env).length > 0) { + lines.push(chalk.bold("Environment")); + for (const [key, value] of Object.entries(env)) { + lines.push(` ${key.padEnd(36)}${maskValue(key, value)}`); + } + lines.push(""); + } + + if (permissions) { + lines.push(chalk.bold("Permissions")); + const allow = + permissions.allow && permissions.allow.length > 0 + ? 
permissions.allow.join(", ") + : "(none)"; + const deny = + permissions.deny && permissions.deny.length > 0 + ? permissions.deny.join(", ") + : "(none)"; + lines.push(` Allow: ${allow}`); + lines.push(` Deny: ${deny}`); + if (permissions.ask && permissions.ask.length > 0) { + lines.push(` Ask: ${permissions.ask.join(", ")}`); + } + lines.push(""); + } + + if (enabledPlugins && Object.keys(enabledPlugins).length > 0) { + lines.push(chalk.bold("Plugins")); + for (const [name, enabled] of Object.entries(enabledPlugins)) { + const status = enabled ? chalk.green("enabled") : chalk.red("disabled"); + lines.push(` ${name.padEnd(40)}${status}`); + } + lines.push(""); + } + + const otherKeys = Object.keys(other); + if (otherKeys.length > 0) { + lines.push(chalk.bold("Other Settings")); + for (const key of otherKeys) { + const value = + typeof other[key] === "string" + ? other[key] + : JSON.stringify(other[key]); + lines.push(` ${key}: ${value}`); + } + lines.push(""); + } + + // Remove trailing blank line + if (lines.length > 0 && lines[lines.length - 1] === "") { + lines.pop(); + } + + return lines.join("\n"); +} + +export function formatConfigShowJson(settings: SettingsJson): string { + return JSON.stringify(settings, null, 2); +} diff --git a/cli/src/output/plugin-components.ts b/cli/src/output/plugin-components.ts new file mode 100644 index 0000000..67a3d9a --- /dev/null +++ b/cli/src/output/plugin-components.ts @@ -0,0 +1,204 @@ +import chalk from "chalk"; +import { basename } from "path"; +import type { PluginInfo } from "../schemas/plugin.js"; + +function extractScriptName(command: string): string { + const parts = command.trim().split(/\s+/); + for (let i = parts.length - 1; i >= 0; i--) { + if (parts[i].includes("/")) { + return basename(parts[i]); + } + } + return parts[parts.length - 1] ?? command; +} + +function truncate(str: string, max: number): string { + return str.length > max ? 
str.slice(0, max - 1) + "\u2026" : str; +} + +export function formatHooksText( + plugins: PluginInfo[], + options?: { noColor?: boolean }, +): string { + if (options?.noColor) { + chalk.level = 0; + } + + const lines: string[] = []; + + lines.push( + chalk.bold( + ` ${"Plugin".padEnd(27)}${"Event".padEnd(19)}${"Matcher".padEnd(13)}${"Timeout".padEnd(9)}Script`, + ), + ); + + let totalHooks = 0; + const pluginsWithHooks = new Set(); + + for (const plugin of plugins) { + for (const hook of plugin.hooks) { + for (const cmd of hook.commands) { + totalHooks++; + pluginsWithHooks.add(plugin.name); + const matcher = hook.matcher ?? "\u2014"; + const timeout = `${cmd.timeout}s`; + const script = extractScriptName(cmd.command); + + lines.push( + ` ${plugin.name.padEnd(27)}${hook.event.padEnd(19)}${matcher.padEnd(13)}${timeout.padEnd(9)}${script}`, + ); + } + } + } + + lines.push(""); + lines.push( + chalk.dim(` ${totalHooks} hooks across ${pluginsWithHooks.size} plugins`), + ); + + return lines.join("\n"); +} + +export function formatHooksJson(plugins: PluginInfo[]): string { + const output: { + plugin: string; + event: string; + matcher: string | undefined; + timeout: number; + command: string; + }[] = []; + + for (const plugin of plugins) { + for (const hook of plugin.hooks) { + for (const cmd of hook.commands) { + output.push({ + plugin: plugin.name, + event: hook.event, + matcher: hook.matcher, + timeout: cmd.timeout, + command: cmd.command, + }); + } + } + } + + return JSON.stringify(output, null, 2); +} + +export function formatAgentsText( + plugins: PluginInfo[], + options?: { noColor?: boolean }, +): string { + if (options?.noColor) { + chalk.level = 0; + } + + const lines: string[] = []; + + lines.push( + chalk.bold(` ${"Agent".padEnd(20)}${"Plugin".padEnd(16)}Description`), + ); + + let totalAgents = 0; + const pluginsWithAgents = new Set(); + + for (const plugin of plugins) { + for (const agent of plugin.agents) { + totalAgents++; + 
pluginsWithAgents.add(plugin.name); + const desc = truncate(agent.description, 50); + + lines.push(` ${agent.name.padEnd(20)}${plugin.name.padEnd(16)}${desc}`); + } + } + + lines.push(""); + lines.push( + chalk.dim( + ` ${totalAgents} agents across ${pluginsWithAgents.size} plugins`, + ), + ); + + return lines.join("\n"); +} + +export function formatAgentsJson(plugins: PluginInfo[]): string { + const output: { + name: string; + plugin: string; + description: string; + filename: string; + }[] = []; + + for (const plugin of plugins) { + for (const agent of plugin.agents) { + output.push({ + name: agent.name, + plugin: plugin.name, + description: agent.description, + filename: agent.filename, + }); + } + } + + return JSON.stringify(output, null, 2); +} + +export function formatSkillsText( + plugins: PluginInfo[], + options?: { noColor?: boolean }, +): string { + if (options?.noColor) { + chalk.level = 0; + } + + const lines: string[] = []; + + lines.push( + chalk.bold(` ${"Skill".padEnd(24)}${"Plugin".padEnd(16)}Description`), + ); + + let totalSkills = 0; + const pluginsWithSkills = new Set(); + + for (const plugin of plugins) { + for (const skill of plugin.skills) { + totalSkills++; + pluginsWithSkills.add(plugin.name); + const desc = truncate(skill.description, 50); + + lines.push(` ${skill.name.padEnd(24)}${plugin.name.padEnd(16)}${desc}`); + } + } + + lines.push(""); + lines.push( + chalk.dim( + ` ${totalSkills} skills across ${pluginsWithSkills.size} plugins`, + ), + ); + + return lines.join("\n"); +} + +export function formatSkillsJson(plugins: PluginInfo[]): string { + const output: { + name: string; + plugin: string; + description: string; + dirname: string; + }[] = []; + + for (const plugin of plugins) { + for (const skill of plugin.skills) { + output.push({ + name: skill.name, + plugin: plugin.name, + description: skill.description, + dirname: skill.dirname, + }); + } + } + + return JSON.stringify(output, null, 2); +} diff --git 
a/cli/src/output/plugin-list.ts b/cli/src/output/plugin-list.ts new file mode 100644 index 0000000..9d6e944 --- /dev/null +++ b/cli/src/output/plugin-list.ts @@ -0,0 +1,60 @@ +import chalk from "chalk"; +import type { PluginInfo } from "../schemas/plugin.js"; + +export function formatPluginListText( + plugins: PluginInfo[], + options?: { noColor?: boolean }, +): string { + if (options?.noColor) { + chalk.level = 0; + } + + const lines: string[] = []; + + lines.push( + chalk.bold( + ` ${"Plugin".padEnd(32)}${"Marketplace".padEnd(26)}${"Status".padEnd(10)}${"Hooks".padStart(7)}${"Agents".padStart(7)}${"Skills".padStart(7)}`, + ), + ); + + for (const plugin of plugins) { + const statusText = plugin.enabled ? "enabled" : "disabled"; + const statusPadded = statusText.padEnd(10); + const status = plugin.enabled + ? chalk.green(statusPadded) + : chalk.red(statusPadded); + const hooks = String(plugin.hooks.length).padStart(7); + const agents = String(plugin.agents.length).padStart(7); + const skills = String(plugin.skills.length).padStart(7); + + lines.push( + ` ${plugin.name.padEnd(32)}${plugin.marketplace.padEnd(26)}${status}${hooks}${agents}${skills}`, + ); + } + + const enabledCount = plugins.filter((p) => p.enabled).length; + const disabledCount = plugins.length - enabledCount; + lines.push(""); + lines.push( + chalk.dim( + ` ${plugins.length} plugins (${enabledCount} enabled, ${disabledCount} disabled)`, + ), + ); + + return lines.join("\n"); +} + +export function formatPluginListJson(plugins: PluginInfo[]): string { + const output = plugins.map((plugin) => ({ + name: plugin.name, + marketplace: plugin.marketplace, + qualifiedName: plugin.qualifiedName, + enabled: plugin.enabled, + version: plugin.version, + hookCount: plugin.hooks.length, + agentCount: plugin.agents.length, + skillCount: plugin.skills.length, + })); + + return JSON.stringify(output, null, 2); +} diff --git a/cli/src/output/plugin-show.ts b/cli/src/output/plugin-show.ts new file mode 100644 index 
0000000..66fc197 --- /dev/null +++ b/cli/src/output/plugin-show.ts @@ -0,0 +1,72 @@ +import chalk from "chalk"; +import type { PluginInfo } from "../schemas/plugin.js"; + +function extractScriptName(command: string): string { + const parts = command.trim().split(/\s+/); + for (let i = parts.length - 1; i >= 0; i--) { + if (parts[i].includes("/")) { + const segments = parts[i].split("/"); + return segments[segments.length - 1]; + } + } + return parts[parts.length - 1] ?? command; +} + +export function formatPluginShowText( + plugin: PluginInfo, + options?: { noColor?: boolean }, +): string { + if (options?.noColor) { + chalk.level = 0; + } + + const lines: string[] = []; + const labelWidth = 16; + + lines.push(chalk.bold(plugin.name)); + + const status = plugin.enabled + ? chalk.green("enabled") + : chalk.red("disabled"); + + lines.push(` ${"Description:".padEnd(labelWidth)}${plugin.description}`); + lines.push(` ${"Marketplace:".padEnd(labelWidth)}${plugin.marketplace}`); + lines.push(` ${"Version:".padEnd(labelWidth)}${plugin.version}`); + lines.push(` ${"Status:".padEnd(labelWidth)}${status}`); + lines.push(` ${"Installed:".padEnd(labelWidth)}${plugin.installedAt}`); + lines.push(` ${"Install Path:".padEnd(labelWidth)}${plugin.installPath}`); + + if (plugin.hooks.length > 0) { + lines.push(""); + lines.push(` ${chalk.bold(`Hooks (${plugin.hooks.length}):`)}`); + for (const hook of plugin.hooks) { + for (const cmd of hook.commands) { + const event = hook.event.padEnd(16); + const matcher = (hook.matcher ?? 
"\u2014").padEnd(15); + const script = extractScriptName(cmd.command).padEnd(32); + const timeout = `(${cmd.timeout}s)`; + lines.push(` ${event}${matcher}${script}${timeout}`); + } + } + } + + if (plugin.agents.length > 0) { + lines.push(""); + lines.push(` ${chalk.bold(`Agents (${plugin.agents.length}):`)}`); + const agentNames = plugin.agents.map((a) => a.name).join(", "); + lines.push(` ${agentNames}`); + } + + if (plugin.skills.length > 0) { + lines.push(""); + lines.push(` ${chalk.bold(`Skills (${plugin.skills.length}):`)}`); + const skillNames = plugin.skills.map((s) => s.name).join(", "); + lines.push(` ${skillNames}`); + } + + return lines.join("\n"); +} + +export function formatPluginShowJson(plugin: PluginInfo): string { + return JSON.stringify(plugin, null, 2); +} diff --git a/cli/src/output/review.ts b/cli/src/output/review.ts new file mode 100644 index 0000000..0835a84 --- /dev/null +++ b/cli/src/output/review.ts @@ -0,0 +1,193 @@ +import chalk from "chalk"; +import type { + ReviewFindingWithPass, + ReviewResult, + Severity, +} from "../schemas/review.js"; + +const SEPARATOR = "\u2501".repeat(60); + +const SEVERITY_COLORS: Record string> = { + critical: (t) => chalk.red.bold(t), + high: (t) => chalk.red(t), + medium: (t) => chalk.yellow(t), + low: (t) => chalk.blue(t), + info: (t) => chalk.dim(t), +}; + +function capitalize(s: string): string { + return s.charAt(0).toUpperCase() + s.slice(1); +} + +function formatDuration(ms: number): string { + return `${Math.round(ms / 1000)}s`; +} + +function formatCost(usd: number): string { + return `$${usd.toFixed(2)}`; +} + +function severityTag(severity: Severity): string { + const label = `[${severity.toUpperCase()}]`; + return SEVERITY_COLORS[severity](label); +} + +function formatPassLine( + index: number, + name: string, + costUsd: number, + durationMs: number, + error?: string, +): string { + const label = `Pass ${index + 1}: ${capitalize(name)}`; + const stats = `${formatCost(costUsd)} 
${formatDuration(durationMs)}`; + + if (error) { + const errorNote = chalk.yellow(` \u26A0 ${error}`); + return `${label.padEnd(50)}${stats}${errorNote}`; + } + + return `${label.padEnd(50)}${stats}`; +} + +function formatFinding(finding: ReviewFindingWithPass): string[] { + const lines: string[] = []; + const location = finding.line + ? `${finding.file}:${finding.line}` + : finding.file; + + lines.push(`${severityTag(finding.severity)} ${location}`); + + const desc = + finding.description && finding.description !== finding.title + ? `${finding.title} \u2014 ${finding.description}` + : finding.title; + lines.push(` ${desc}`); + + if (finding.suggestion) { + lines.push(` \u2192 ${finding.suggestion}`); + } + + lines.push(` ${chalk.dim(`(${finding.passName})`)}`); + + return lines; +} + +function formatSeverityCounts(findings: ReviewFindingWithPass[]): string { + const counts: Record = { + critical: 0, + high: 0, + medium: 0, + low: 0, + info: 0, + }; + for (const f of findings) counts[f.severity]++; + + const parts: string[] = []; + const entries: [Severity, number][] = [ + ["critical", counts.critical], + ["high", counts.high], + ["medium", counts.medium], + ["low", counts.low], + ["info", counts.info], + ]; + + for (const [severity, count] of entries) { + if (count > 0) { + parts.push(SEVERITY_COLORS[severity](`${count} ${severity}`)); + } + } + + return parts.join(" "); +} + +export function formatReviewText( + result: ReviewResult, + options?: { noColor?: boolean }, +): string { + if (options?.noColor) chalk.level = 0; + + const lines: string[] = []; + + // Header + if (result.scope === "full") { + lines.push(chalk.bold("Full codebase review")); + } else { + lines.push( + chalk.bold( + `Review of ${result.base}..${result.head} (${result.filesChanged} files changed)`, + ), + ); + } + lines.push(""); + + // Pass summary lines + for (const [i, pass] of result.passes.entries()) { + lines.push( + formatPassLine(i, pass.name, pass.costUsd, pass.durationMs, 
pass.error), + ); + } + + lines.push(""); + lines.push(SEPARATOR); + lines.push(""); + + // Findings + if (result.findings.length === 0) { + lines.push(chalk.green("No issues found.")); + } else { + for (const [i, finding] of result.findings.entries()) { + lines.push(...formatFinding(finding)); + if (i < result.findings.length - 1) lines.push(""); + } + } + + lines.push(""); + lines.push(SEPARATOR); + + // Footer + const score = chalk.bold(`Score: ${result.score}/10`); + const counts = formatSeverityCounts(result.findings); + const cost = `Total: ${formatCost(result.totalCostUsd)}`; + + const footerParts = [score]; + if (counts) footerParts.push(counts); + footerParts.push(cost); + lines.push(footerParts.join(" \u2502 ")); + + return lines.join("\n"); +} + +export function formatReviewJson(result: ReviewResult): string { + const output = { + base: result.base, + head: result.head, + scope: result.scope, + filesChanged: result.filesChanged, + score: result.score, + findings: result.findings.map((f) => ({ + file: f.file, + line: f.line, + severity: f.severity, + category: f.category, + pass: f.pass, + passName: f.passName, + title: f.title, + description: f.description, + suggestion: f.suggestion, + })), + summary: result.summary, + cost: { + total_usd: result.totalCostUsd, + passes: result.passes.map((p) => ({ + name: p.name, + cost_usd: p.costUsd, + duration_ms: p.durationMs, + findings: p.findings.length, + ...(p.error ? 
{ error: p.error } : {}), + })), + }, + }; + + return JSON.stringify(output, null, 2); +} diff --git a/cli/src/prompts/review.ts b/cli/src/prompts/review.ts new file mode 100644 index 0000000..fb86743 --- /dev/null +++ b/cli/src/prompts/review.ts @@ -0,0 +1,71 @@ +import { existsSync } from "node:fs"; +import path from "node:path"; +import type { PassName } from "../schemas/review.js"; + +export type PromptMode = "sequential" | "parallel"; + +export interface PassPrompts { + systemPromptFile: string; + userPrompt: string; +} + +function findPackageRoot(from: string): string { + let dir = from; + while (dir !== path.dirname(dir)) { + if (existsSync(path.join(dir, "package.json"))) return dir; + dir = path.dirname(dir); + } + return from; +} + +const PROMPTS_DIR = path.join( + findPackageRoot(import.meta.dir), + "prompts", + "review", +); + +function getUserPromptFilename(pass: PassName, mode: PromptMode): string { + if (pass === "correctness") return "correctness.user.md"; + if (mode === "parallel") return `${pass}.user.md`; + return `${pass}-resume.user.md`; +} + +function interpolate( + template: string, + variables: Record, +): string { + let content = template; + for (const [key, value] of Object.entries(variables)) { + content = content.replaceAll(`{{${key}}}`, value); + } + return content; +} + +export async function getPassPrompts( + pass: PassName, + mode: PromptMode, + variables: Record, +): Promise { + const systemPromptFile = path.join(PROMPTS_DIR, `${pass}.system.md`); + const userPromptFile = path.join( + PROMPTS_DIR, + getUserPromptFilename(pass, mode), + ); + + const rawContent = await Bun.file(userPromptFile).text(); + const userPrompt = interpolate(rawContent, variables); + + return { systemPromptFile, userPrompt }; +} + +export function getFullScopePrompt(pass: PassName, include?: string): string { + const scopeInstruction = include + ? 
`Scan files matching the pattern: ${include}` + : "Scan the project codebase"; + + return `${scopeInstruction} and identify ${pass} issues. + +Use Read, Glob, and Grep tools to explore the codebase. Do not review node_modules, dist, or build output directories. + +For each finding, specify the exact file path and line number.`; +} diff --git a/cli/src/runners/headless.ts b/cli/src/runners/headless.ts new file mode 100644 index 0000000..dca0e97 --- /dev/null +++ b/cli/src/runners/headless.ts @@ -0,0 +1,146 @@ +import path from "node:path"; + +export interface HeadlessOptions { + prompt: string; + model?: string; + maxTurns?: number; + maxBudgetUsd?: number; + allowedTools?: string[]; + disallowedTools?: string[]; + permissionMode?: "plan" | "acceptEdits" | "default"; + systemPromptFile?: string; + resume?: string; + jsonSchema?: object; + cwd?: string; +} + +export interface HeadlessResult { + result: string; + sessionId: string; + isError: boolean; + subtype: string; + totalCostUsd: number; + numTurns: number; + durationMs: number; + structuredOutput?: unknown; +} + +interface ClaudeJsonOutput { + type: string; + subtype: string; + is_error: boolean; + result: string; + session_id: string; + total_cost_usd: number; + num_turns: number; +} + +async function discoverClaudeBinary(): Promise { + if (process.env.CLAUDE_BIN) { + const exists = await Bun.file(process.env.CLAUDE_BIN).exists(); + if (exists) return process.env.CLAUDE_BIN; + } + + const localPath = path.join(process.env.HOME ?? "", ".local/bin/claude"); + if (await Bun.file(localPath).exists()) return localPath; + + const proc = Bun.spawn(["which", "claude"], { + stdout: "pipe", + stderr: "pipe", + }); + const stdout = await new Response(proc.stdout).text(); + await proc.exited; + const trimmed = stdout.trim(); + if (trimmed) return trimmed; + + throw new Error( + "Claude CLI not found. 
Set CLAUDE_BIN environment variable or install Claude Code.", + ); +} + +function buildArgs(binary: string, options: HeadlessOptions): string[] { + const args = [binary, "-p", options.prompt, "--output-format", "json"]; + + if (options.model) args.push("--model", options.model); + if (options.maxTurns) args.push("--max-turns", String(options.maxTurns)); + if (options.maxBudgetUsd !== undefined) { + args.push("--max-budget-usd", options.maxBudgetUsd.toFixed(2)); + } + if (options.permissionMode) { + args.push("--permission-mode", options.permissionMode); + } + if (options.systemPromptFile) { + args.push("--system-prompt-file", options.systemPromptFile); + } + if (options.resume) args.push("--resume", options.resume); + if (options.jsonSchema) { + args.push("--json-schema", JSON.stringify(options.jsonSchema)); + } + if (options.allowedTools?.length) { + args.push("--allowedTools", ...options.allowedTools); + } + if (options.disallowedTools?.length) { + args.push("--disallowedTools", ...options.disallowedTools); + } + + return args; +} + +export async function runHeadless( + options: HeadlessOptions, +): Promise { + const binary = await discoverClaudeBinary(); + const args = buildArgs(binary, options); + const startTime = Date.now(); + + const proc = Bun.spawn(args, { + cwd: options.cwd, + stdout: "pipe", + stderr: "pipe", + }); + + const stdout = await new Response(proc.stdout).text(); + const stderr = await new Response(proc.stderr).text(); + const exitCode = await proc.exited; + const durationMs = Date.now() - startTime; + + let parsed: ClaudeJsonOutput; + try { + parsed = JSON.parse(stdout) as ClaudeJsonOutput; + } catch { + if (exitCode !== 0) { + throw new Error( + `Claude process exited with code ${exitCode}: ${stderr || stdout}`, + ); + } + return { + result: stdout, + sessionId: "", + isError: true, + subtype: "parse_error", + totalCostUsd: 0, + numTurns: 0, + durationMs, + }; + } + + let structuredOutput: unknown; + if (options.jsonSchema && 
!parsed.is_error) { + try { + structuredOutput = JSON.parse(parsed.result); + } catch { + // structured output parse failed — text result still available + } + } + + return { + result: parsed.result, + sessionId: parsed.session_id, + isError: parsed.is_error, + subtype: parsed.subtype, + totalCostUsd: parsed.total_cost_usd ?? 0, + numTurns: parsed.num_turns ?? 0, + durationMs, + structuredOutput, + }; +} diff --git a/cli/src/runners/review-runner.ts b/cli/src/runners/review-runner.ts new file mode 100644 index 0000000..f292941 --- /dev/null +++ b/cli/src/runners/review-runner.ts @@ -0,0 +1,355 @@ +import { getFullScopePrompt, getPassPrompts } from "../prompts/review.js"; +import type { + PassName, + PassResult, + ReviewFinding, + ReviewFindingWithPass, + ReviewResult, + ReviewScope, +} from "../schemas/review.js"; +import { findingsJsonSchema } from "../schemas/review.js"; +import type { HeadlessResult } from "./headless.js"; +import { runHeadless } from "./headless.js"; + +export interface ReviewOptions { + scope: ReviewScope; + base: string; + include?: string; + parallel: boolean; + model: string; + maxCost?: number; + passes: 1 | 2 | 3; + verbose: boolean; +} + +const PASS_ORDER: PassName[] = ["correctness", "security", "quality"]; +const BUDGET_WEIGHTS = [0.4, 0.35, 0.25]; +const SEVERITY_SORT: Record = { + critical: 0, + high: 1, + medium: 2, + low: 3, + info: 4, +}; +const SCORE_WEIGHTS: Record = { + critical: 3, + high: 2, + medium: 1, + low: 0.5, + info: 0, +}; + +export async function detectBaseBranch(): Promise { + for (const branch of ["staging", "main", "master"]) { + const proc = Bun.spawn( + ["git", "rev-parse", "--verify", `refs/heads/${branch}`], + { stdout: "pipe", stderr: "pipe" }, + ); + await proc.exited; + if (proc.exitCode === 0) return branch; + } + throw new Error( + "Could not auto-detect base branch. 
Specify --base .", + ); +} + +async function getDiff( + scope: ReviewScope, + base: string, + include?: string, +): Promise { + if (scope === "full") return ""; + const args = + scope === "staged" + ? ["git", "diff", "--cached"] + : ["git", "diff", `${base}...HEAD`]; + if (include) args.push("--", include); + const proc = Bun.spawn(args, { stdout: "pipe", stderr: "pipe" }); + const stdout = await new Response(proc.stdout).text(); + await proc.exited; + return stdout; +} + +async function getFilesChanged( + scope: ReviewScope, + base: string, + include?: string, +): Promise { + if (scope === "full") return 0; + const args = + scope === "staged" + ? ["git", "diff", "--cached", "--numstat"] + : ["git", "diff", "--numstat", `${base}...HEAD`]; + if (include) args.push("--", include); + const proc = Bun.spawn(args, { stdout: "pipe" }); + const output = await new Response(proc.stdout).text(); + await proc.exited; + return output.trim().split("\n").filter(Boolean).length; +} + +function parseFindings(result: HeadlessResult): ReviewFinding[] { + if (result.structuredOutput && typeof result.structuredOutput === "object") { + const output = result.structuredOutput as { findings?: unknown }; + if (Array.isArray(output.findings)) { + return output.findings as ReviewFinding[]; + } + } + try { + const parsed = JSON.parse(result.result) as { findings?: unknown }; + if (Array.isArray(parsed.findings)) { + return parsed.findings as ReviewFinding[]; + } + } catch { + // text result without structured output + } + return []; +} + +function parseSummary(result: HeadlessResult): string { + if (result.structuredOutput && typeof result.structuredOutput === "object") { + const output = result.structuredOutput as { summary?: unknown }; + if (typeof output.summary === "string") return output.summary; + } + try { + const parsed = JSON.parse(result.result) as { summary?: unknown }; + if (typeof parsed.summary === "string") return parsed.summary; + } catch { + // no structured summary + } + 
return ""; +} + +function mergeFindings(passResults: PassResult[]): ReviewFindingWithPass[] { + const seen = new Set(); + const merged: ReviewFindingWithPass[] = []; + + for (const [i, pass] of passResults.entries()) { + for (const finding of pass.findings) { + const key = `${finding.file}:${finding.line}:${finding.title}`; + if (!seen.has(key)) { + seen.add(key); + merged.push({ + ...finding, + pass: i + 1, + passName: pass.name, + }); + } + } + } + + merged.sort( + (a, b) => + (SEVERITY_SORT[a.severity] ?? 5) - (SEVERITY_SORT[b.severity] ?? 5), + ); + return merged; +} + +function calculateScore(findings: ReviewFindingWithPass[]): number { + const totalPoints = findings.reduce( + (sum, f) => sum + (SCORE_WEIGHTS[f.severity] ?? 0), + 0, + ); + return Math.max(1, Math.min(10, Math.round(10 - totalPoints))); +} + +function buildCommonOpts(scope: ReviewScope) { + return { + maxTurns: scope === "full" ? 25 : 10, + permissionMode: "plan" as const, + allowedTools: [ + "Read", + "Glob", + "Grep", + "Bash(git diff *)", + "Bash(git log *)", + "Bash(git show *)", + ], + disallowedTools: ["Write", "Edit", "NotebookEdit"], + jsonSchema: findingsJsonSchema, + }; +} + +async function runSequential( + passOrder: PassName[], + diff: string, + options: ReviewOptions, +): Promise { + const commonOpts = buildCommonOpts(options.scope); + const passResults: PassResult[] = []; + let sessionId: string | undefined; + + for (const [i, passName] of passOrder.entries()) { + if (options.verbose) { + process.stderr.write(`Pass ${i + 1}: ${passName}...\n`); + } + + const prompts = await getPassPrompts(passName, "sequential", { + DIFF: diff, + }); + + const spent = passResults.reduce((s, p) => s + p.costUsd, 0); + const effectiveBudget = options.maxCost + ? 
Math.max(0.01, options.maxCost - spent) + : undefined; + + let userPrompt = prompts.userPrompt; + if (options.scope === "full") { + userPrompt = getFullScopePrompt(passName, options.include); + } + + try { + const result = await runHeadless({ + ...commonOpts, + prompt: userPrompt, + systemPromptFile: prompts.systemPromptFile, + model: options.model, + resume: sessionId, + maxBudgetUsd: effectiveBudget, + }); + + sessionId = result.sessionId; + + passResults.push({ + name: passName, + findings: parseFindings(result), + costUsd: result.totalCostUsd, + durationMs: result.durationMs, + sessionId: result.sessionId, + }); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + passResults.push({ + name: passName, + findings: [], + costUsd: 0, + durationMs: 0, + sessionId: sessionId ?? "", + error: message, + }); + // Clear session if resume failed so next pass starts fresh + if (message.includes("resume")) sessionId = undefined; + } + } + + return passResults; +} + +async function runParallel( + passOrder: PassName[], + diff: string, + options: ReviewOptions, +): Promise { + const commonOpts = buildCommonOpts(options.scope); + + const promises = passOrder.map(async (passName, i) => { + if (options.verbose) { + process.stderr.write(`Pass ${i + 1}: ${passName} (parallel)...\n`); + } + + const prompts = await getPassPrompts(passName, "parallel", { + DIFF: diff, + }); + + const budgetForPass = options.maxCost + ? 
options.maxCost * BUDGET_WEIGHTS[i] + : undefined; + + let userPrompt = prompts.userPrompt; + if (options.scope === "full") { + userPrompt = getFullScopePrompt(passName, options.include); + } + + try { + const result = await runHeadless({ + ...commonOpts, + prompt: userPrompt, + systemPromptFile: prompts.systemPromptFile, + model: options.model, + maxBudgetUsd: budgetForPass, + }); + + return { + name: passName, + findings: parseFindings(result), + costUsd: result.totalCostUsd, + durationMs: result.durationMs, + sessionId: result.sessionId, + } as PassResult; + } catch (err) { + return { + name: passName, + findings: [], + costUsd: 0, + durationMs: 0, + sessionId: "", + error: err instanceof Error ? err.message : String(err), + } as PassResult; + } + }); + + return Promise.all(promises); +} + +export async function runReview(options: ReviewOptions): Promise { + const [diff, filesChanged] = await Promise.all([ + getDiff(options.scope, options.base, options.include), + getFilesChanged(options.scope, options.base, options.include), + ]); + + if (!diff && options.scope !== "full") { + return { + base: options.base, + head: "HEAD", + filesChanged: 0, + scope: options.scope, + score: 10, + findings: [], + summary: "No changes to review.", + passes: [], + totalCostUsd: 0, + }; + } + + const passOrder = PASS_ORDER.slice(0, options.passes); + + const passResults = options.parallel + ? await runParallel(passOrder, diff, options) + : await runSequential(passOrder, diff, options); + + const findings = mergeFindings(passResults); + const score = calculateScore(findings); + const totalCostUsd = passResults.reduce((s, p) => s + p.costUsd, 0); + + const summaries = passResults + .map((p) => { + const passSummary = p.sessionId + ? 
parseSummary({ + result: "", + sessionId: p.sessionId, + isError: false, + subtype: "", + totalCostUsd: 0, + numTurns: 0, + durationMs: 0, + }) + : ""; + return passSummary; + }) + .filter(Boolean); + + const summary = + summaries.join("\n\n") || + `Review completed with ${findings.length} finding${findings.length === 1 ? "" : "s"} across ${passResults.length} pass${passResults.length === 1 ? "" : "es"}.`; + + return { + base: options.base, + head: "HEAD", + filesChanged, + scope: options.scope, + score, + findings, + summary, + passes: passResults, + totalCostUsd, + }; +} diff --git a/cli/src/schemas/config.ts b/cli/src/schemas/config.ts new file mode 100644 index 0000000..6a9a34c --- /dev/null +++ b/cli/src/schemas/config.ts @@ -0,0 +1,18 @@ +export interface ManifestEntry { + src: string; + dest: string; + destFilename?: string; + enabled?: boolean; + overwrite: "if-changed" | "always" | "never"; +} + +export interface SettingsJson { + enabledPlugins?: Record; + env?: Record; + permissions?: { + allow?: string[]; + deny?: string[]; + ask?: string[]; + }; + [key: string]: unknown; +} diff --git a/cli/src/schemas/plugin.ts b/cli/src/schemas/plugin.ts new file mode 100644 index 0000000..5cbb948 --- /dev/null +++ b/cli/src/schemas/plugin.ts @@ -0,0 +1,71 @@ +export interface InstalledPluginsFile { + version: number; + plugins: Record; +} + +export interface InstalledPluginEntry { + scope: "user" | "project"; + installPath: string; + version: string; + installedAt: string; + lastUpdated: string; + gitCommitSha?: string; +} + +export type KnownMarketplacesFile = Record; + +export interface MarketplaceEntry { + source: { source: "directory" | "github"; path?: string; repo?: string }; + installLocation: string; + lastUpdated: string; +} + +export interface PluginJsonFile { + name: string; + description?: string; + author?: { name: string; email?: string }; +} + +export interface HooksJsonFile { + description?: string; + hooks: Record; +} + +export interface 
HookRuleEntry { + matcher?: string; + hooks: { type: "command"; command: string; timeout: number }[]; +} + +export interface HookInfo { + event: string; + matcher?: string; + commands: { command: string; timeout: number }[]; +} + +export interface AgentInfo { + name: string; + description: string; + filename: string; +} + +export interface SkillInfo { + name: string; + description: string; + dirname: string; +} + +export interface PluginInfo { + name: string; + marketplace: string; + qualifiedName: string; + enabled: boolean; + version: string; + installPath: string; + description: string; + author: string; + installedAt: string; + hooks: HookInfo[]; + agents: AgentInfo[]; + skills: SkillInfo[]; + scripts: string[]; +} diff --git a/cli/src/schemas/review.ts b/cli/src/schemas/review.ts new file mode 100644 index 0000000..68bebf8 --- /dev/null +++ b/cli/src/schemas/review.ts @@ -0,0 +1,67 @@ +export type Severity = "critical" | "high" | "medium" | "low" | "info"; +export type ReviewScope = "diff" | "staged" | "full"; +export type PassName = "correctness" | "security" | "quality"; + +export interface ReviewFinding { + file: string; + line: number | null; + severity: Severity; + category: string; + title: string; + description: string; + suggestion: string | null; +} + +export interface PassResult { + name: PassName; + findings: ReviewFinding[]; + costUsd: number; + durationMs: number; + sessionId: string; + error?: string; +} + +export interface ReviewResult { + base: string; + head: string; + filesChanged: number; + scope: ReviewScope; + score: number; + findings: ReviewFindingWithPass[]; + summary: string; + passes: PassResult[]; + totalCostUsd: number; +} + +export interface ReviewFindingWithPass extends ReviewFinding { + pass: number; + passName: PassName; +} + +/** JSON schema sent to claude --json-schema for structured output */ +export const findingsJsonSchema = { + type: "object" as const, + required: ["findings", "summary"], + properties: { + findings: { + 
type: "array" as const, + items: { + type: "object" as const, + required: ["file", "severity", "category", "title", "description"], + properties: { + file: { type: "string" as const }, + line: { type: ["number", "null"] as const }, + severity: { + type: "string" as const, + enum: ["critical", "high", "medium", "low", "info"], + }, + category: { type: "string" as const }, + title: { type: "string" as const }, + description: { type: "string" as const }, + suggestion: { type: ["string", "null"] as const }, + }, + }, + }, + summary: { type: "string" as const }, + }, +}; diff --git a/cli/src/utils/platform.ts b/cli/src/utils/platform.ts new file mode 100644 index 0000000..4b82dbc --- /dev/null +++ b/cli/src/utils/platform.ts @@ -0,0 +1,31 @@ +import { homedir } from "os"; +import { basename, resolve } from "path"; + +/** + * Normalize a path to use forward slashes. + * Forward slashes work on all platforms including Windows. + */ +export function normalizePath(p: string): string { + return p.replace(/\\/g, "/"); +} + +/** + * Cross-platform home directory with normalized path. + */ +export function getHome(): string { + return normalizePath(homedir()); +} + +/** + * path.resolve() with forward-slash normalization. + */ +export function resolveNormalized(...segments: string[]): string { + return normalizePath(resolve(...segments)); +} + +/** + * Extract filename from a path using path.basename(). 
+ */ +export function basenameFromPath(filePath: string): string { + return basename(normalizePath(filePath)); +} diff --git a/cli/tests/platform.test.ts b/cli/tests/platform.test.ts new file mode 100644 index 0000000..79c2d32 --- /dev/null +++ b/cli/tests/platform.test.ts @@ -0,0 +1,61 @@ +import { describe, expect, test } from "bun:test"; +import { + basenameFromPath, + getHome, + normalizePath, + resolveNormalized, +} from "../src/utils/platform.js"; + +describe("normalizePath", () => { + test("converts backslashes to forward slashes", () => { + expect(normalizePath("C:\\Users\\dev\\project")).toBe( + "C:/Users/dev/project", + ); + }); + + test("leaves forward slashes unchanged", () => { + expect(normalizePath("/home/dev/project")).toBe("/home/dev/project"); + }); + + test("handles mixed separators", () => { + expect(normalizePath("C:\\Users/dev\\project/src")).toBe( + "C:/Users/dev/project/src", + ); + }); + + test("handles empty string", () => { + expect(normalizePath("")).toBe(""); + }); +}); + +describe("getHome", () => { + test("returns a string with forward slashes only", () => { + const home = getHome(); + expect(home).not.toContain("\\"); + expect(home.length).toBeGreaterThan(0); + }); +}); + +describe("resolveNormalized", () => { + test("returns a normalized absolute path", () => { + const result = resolveNormalized("/some/path", "child"); + expect(result).not.toContain("\\"); + expect(result).toContain("/some/path/child"); + }); +}); + +describe("basenameFromPath", () => { + test("extracts filename from Unix path", () => { + expect(basenameFromPath("/home/user/file.txt")).toBe("file.txt"); + }); + + test("extracts filename from Windows path", () => { + expect(basenameFromPath("C:\\Users\\dev\\file.txt")).toBe("file.txt"); + }); + + test("extracts filename with .jsonl extension", () => { + expect(basenameFromPath("/path/to/session-abc.jsonl")).toBe( + "session-abc.jsonl", + ); + }); +}); diff --git a/cli/tests/plugin-list.test.ts 
b/cli/tests/plugin-list.test.ts new file mode 100644 index 0000000..5a46677 --- /dev/null +++ b/cli/tests/plugin-list.test.ts @@ -0,0 +1,233 @@ +import { describe, expect, test } from "bun:test"; +import { + formatPluginListJson, + formatPluginListText, +} from "../src/output/plugin-list.js"; +import { + formatPluginShowJson, + formatPluginShowText, +} from "../src/output/plugin-show.js"; +import type { PluginInfo } from "../src/schemas/plugin.js"; + +const makePlugin = (overrides?: Partial): PluginInfo => ({ + name: "test-plugin", + marketplace: "test-marketplace", + qualifiedName: "test-plugin@test-marketplace", + enabled: true, + version: "1.0.0", + installPath: "/tmp/test-plugin", + description: "A test plugin", + author: "Test Author", + installedAt: "2026-03-01T00:00:00.000Z", + hooks: [], + agents: [], + skills: [], + scripts: [], + ...overrides, +}); + +describe("formatPluginListText", () => { + test("includes plugin name in output", () => { + const output = formatPluginListText([makePlugin()], { noColor: true }); + expect(output).toContain("test-plugin"); + }); + + test("shows enabled for enabled plugins", () => { + const output = formatPluginListText([makePlugin({ enabled: true })], { + noColor: true, + }); + expect(output).toContain("enabled"); + }); + + test("shows disabled for disabled plugins", () => { + const output = formatPluginListText([makePlugin({ enabled: false })], { + noColor: true, + }); + expect(output).toContain("disabled"); + }); + + test("includes summary count", () => { + const plugins = [ + makePlugin({ name: "plugin-a" }), + makePlugin({ name: "plugin-b", enabled: false }), + ]; + const output = formatPluginListText(plugins, { noColor: true }); + expect(output).toContain("2 plugins"); + expect(output).toContain("1 enabled"); + expect(output).toContain("1 disabled"); + }); + + test("includes marketplace name", () => { + const output = formatPluginListText( + [makePlugin({ marketplace: "my-marketplace" })], + { noColor: true }, + ); + 
expect(output).toContain("my-marketplace"); + }); + + test("shows hook/agent/skill counts", () => { + const plugin = makePlugin({ + hooks: [ + { + event: "onSave", + commands: [{ command: "echo hi", timeout: 10 }], + }, + ], + agents: [ + { name: "explorer", description: "test", filename: "explorer.md" }, + ], + }); + const output = formatPluginListText([plugin], { noColor: true }); + expect(output).toContain("1"); + }); +}); + +describe("formatPluginListJson", () => { + test("returns valid JSON array", () => { + const output = formatPluginListJson([makePlugin()]); + const parsed = JSON.parse(output); + expect(Array.isArray(parsed)).toBe(true); + expect(parsed.length).toBe(1); + }); + + test("includes expected fields", () => { + const output = formatPluginListJson([makePlugin()]); + const parsed = JSON.parse(output); + expect(parsed[0].name).toBe("test-plugin"); + expect(parsed[0].marketplace).toBe("test-marketplace"); + expect(parsed[0].qualifiedName).toBe("test-plugin@test-marketplace"); + expect(parsed[0].enabled).toBe(true); + expect(parsed[0].version).toBe("1.0.0"); + expect(parsed[0].hookCount).toBe(0); + expect(parsed[0].agentCount).toBe(0); + expect(parsed[0].skillCount).toBe(0); + }); + + test("reflects disabled state", () => { + const output = formatPluginListJson([makePlugin({ enabled: false })]); + const parsed = JSON.parse(output); + expect(parsed[0].enabled).toBe(false); + }); +}); + +describe("formatPluginShowText", () => { + test("shows plugin name", () => { + const output = formatPluginShowText(makePlugin(), { noColor: true }); + expect(output).toContain("test-plugin"); + }); + + test("shows plugin description", () => { + const output = formatPluginShowText( + makePlugin({ description: "Does amazing things" }), + { noColor: true }, + ); + expect(output).toContain("Does amazing things"); + }); + + test("shows version", () => { + const output = formatPluginShowText(makePlugin({ version: "2.5.0" }), { + noColor: true, + }); + 
expect(output).toContain("2.5.0"); + }); + + test("shows enabled status", () => { + const output = formatPluginShowText(makePlugin({ enabled: true }), { + noColor: true, + }); + expect(output).toContain("enabled"); + }); + + test("shows disabled status", () => { + const output = formatPluginShowText(makePlugin({ enabled: false }), { + noColor: true, + }); + expect(output).toContain("disabled"); + }); + + test("shows install path", () => { + const output = formatPluginShowText( + makePlugin({ installPath: "/home/user/.claude/plugins/my-plugin" }), + { noColor: true }, + ); + expect(output).toContain("/home/user/.claude/plugins/my-plugin"); + }); + + test("shows hooks section when hooks exist", () => { + const plugin = makePlugin({ + hooks: [ + { + event: "PostToolUse", + matcher: "Write", + commands: [{ command: "/bin/lint.sh", timeout: 30 }], + }, + ], + }); + const output = formatPluginShowText(plugin, { noColor: true }); + expect(output).toContain("Hooks (1)"); + expect(output).toContain("PostToolUse"); + }); + + test("shows agents section when agents exist", () => { + const plugin = makePlugin({ + agents: [ + { + name: "explorer", + description: "Explore code", + filename: "explorer.md", + }, + ], + }); + const output = formatPluginShowText(plugin, { noColor: true }); + expect(output).toContain("Agents (1)"); + expect(output).toContain("explorer"); + }); + + test("shows skills section when skills exist", () => { + const plugin = makePlugin({ + skills: [ + { name: "ast-grep", description: "AST search", dirname: "ast-grep" }, + ], + }); + const output = formatPluginShowText(plugin, { noColor: true }); + expect(output).toContain("Skills (1)"); + expect(output).toContain("ast-grep"); + }); +}); + +describe("formatPluginShowJson", () => { + test("returns full plugin data as valid JSON", () => { + const plugin = makePlugin(); + const output = formatPluginShowJson(plugin); + const parsed = JSON.parse(output); + expect(parsed.name).toBe("test-plugin"); + 
expect(parsed.marketplace).toBe("test-marketplace"); + expect(parsed.qualifiedName).toBe("test-plugin@test-marketplace"); + expect(parsed.enabled).toBe(true); + expect(parsed.version).toBe("1.0.0"); + expect(parsed.description).toBe("A test plugin"); + expect(parsed.author).toBe("Test Author"); + expect(parsed.installPath).toBe("/tmp/test-plugin"); + expect(Array.isArray(parsed.hooks)).toBe(true); + expect(Array.isArray(parsed.agents)).toBe(true); + expect(Array.isArray(parsed.skills)).toBe(true); + expect(Array.isArray(parsed.scripts)).toBe(true); + }); + + test("includes nested hook data", () => { + const plugin = makePlugin({ + hooks: [ + { + event: "onSave", + matcher: "*.ts", + commands: [{ command: "bun lint", timeout: 15 }], + }, + ], + }); + const output = formatPluginShowJson(plugin); + const parsed = JSON.parse(output); + expect(parsed.hooks[0].event).toBe("onSave"); + expect(parsed.hooks[0].matcher).toBe("*.ts"); + expect(parsed.hooks[0].commands[0].command).toBe("bun lint"); + }); +}); diff --git a/cli/tests/plugin-loader.test.ts b/cli/tests/plugin-loader.test.ts new file mode 100644 index 0000000..84cedf7 --- /dev/null +++ b/cli/tests/plugin-loader.test.ts @@ -0,0 +1,101 @@ +import { describe, expect, test } from "bun:test"; +import { extractFrontMatter } from "../src/loaders/plugin-loader.js"; + +describe("extractFrontMatter", () => { + test("parses simple key-value YAML", () => { + const content = [ + "---", + "name: explorer", + "description: A codebase exploration agent", + "version: 1.0.0", + "---", + "", + "Body content here.", + ].join("\n"); + + const result = extractFrontMatter(content); + expect(result.name).toBe("explorer"); + expect(result.description).toBe("A codebase exploration agent"); + expect(result.version).toBe("1.0.0"); + }); + + test("handles multi-line >- block scalars", () => { + const content = [ + "---", + "name: explorer", + "description: >-", + " Fast, read-only codebase exploration agent that finds files", + " by patterns 
and searches code for keywords.", + "tools: Read, Glob, Grep, Bash", + "---", + ].join("\n"); + + const result = extractFrontMatter(content); + expect(result.name).toBe("explorer"); + expect(result.description).toBe( + "Fast, read-only codebase exploration agent that finds files by patterns and searches code for keywords.", + ); + expect(result.tools).toBe("Read, Glob, Grep, Bash"); + }); + + test("handles multi-line > block scalars", () => { + const content = [ + "---", + "description: >", + " A long description that spans", + " multiple lines here.", + "name: test", + "---", + ].join("\n"); + + const result = extractFrontMatter(content); + expect(result.description).toBe( + "A long description that spans multiple lines here.", + ); + expect(result.name).toBe("test"); + }); + + test("handles quoted values", () => { + const content = [ + "---", + 'name: "my-plugin"', + "description: 'A quoted description'", + "---", + ].join("\n"); + + const result = extractFrontMatter(content); + expect(result.name).toBe("my-plugin"); + expect(result.description).toBe("A quoted description"); + }); + + test("returns empty object for no front-matter", () => { + const content = "Just some regular content.\nNo front-matter here."; + const result = extractFrontMatter(content); + expect(result).toEqual({}); + }); + + test("handles empty front-matter block", () => { + const content = ["---", "---", "Body content."].join("\n"); + const result = extractFrontMatter(content); + expect(result).toEqual({}); + }); + + test("handles front-matter with only whitespace lines", () => { + const content = ["---", "name: test", "", "version: 2.0", "---"].join("\n"); + const result = extractFrontMatter(content); + expect(result.name).toBe("test"); + }); + + test("handles keys with hyphens", () => { + const content = [ + "---", + "skill-name: ast-grep-patterns", + "min-version: 0.2.0", + "---", + ].join("\n"); + + const result = extractFrontMatter(content); + 
expect(result["skill-name"]).toBe("ast-grep-patterns"); + expect(result["min-version"]).toBe("0.2.0"); + }); +}); diff --git a/cli/tests/review-output.test.ts b/cli/tests/review-output.test.ts new file mode 100644 index 0000000..b906ceb --- /dev/null +++ b/cli/tests/review-output.test.ts @@ -0,0 +1,265 @@ +import { describe, expect, test } from "bun:test"; +import { formatReviewJson, formatReviewText } from "../src/output/review.js"; +import type { + PassResult, + ReviewFindingWithPass, + ReviewResult, +} from "../src/schemas/review.js"; + +const makePassResult = (overrides?: Partial): PassResult => ({ + name: "correctness", + findings: [], + costUsd: 0.42, + durationMs: 12000, + sessionId: "sess-001", + ...overrides, +}); + +const makeFinding = ( + overrides?: Partial, +): ReviewFindingWithPass => ({ + file: "src/auth.ts", + line: 48, + severity: "high", + category: "correctness", + title: "Unchecked null access", + description: "user.email accessed without null check", + suggestion: "Add optional chaining: user?.email", + pass: 1, + passName: "correctness", + ...overrides, +}); + +const makeResult = (overrides?: Partial): ReviewResult => ({ + base: "staging", + head: "HEAD", + filesChanged: 5, + scope: "diff", + score: 7, + findings: [makeFinding()], + summary: "Review completed with 1 finding across 1 pass.", + passes: [makePassResult()], + totalCostUsd: 0.42, + ...overrides, +}); + +describe("formatReviewText", () => { + test("includes header with base and head", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("staging..HEAD"); + }); + + test("includes files changed count", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("5 files changed"); + }); + + test("shows full codebase header for full scope", () => { + const output = formatReviewText(makeResult({ scope: "full" }), { + noColor: true, + }); + expect(output).toContain("Full codebase review"); + }); 
+ + test("includes pass summary lines", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("Pass 1: Correctness"); + expect(output).toContain("$0.42"); + expect(output).toContain("12s"); + }); + + test("shows pass error when present", () => { + const output = formatReviewText( + makeResult({ + passes: [makePassResult({ error: "budget exceeded" })], + }), + { noColor: true }, + ); + expect(output).toContain("budget exceeded"); + }); + + test("includes finding severity tag", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("[HIGH]"); + }); + + test("includes finding file and line", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("src/auth.ts:48"); + }); + + test("includes finding title and description", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("Unchecked null access"); + expect(output).toContain("user.email accessed without null check"); + }); + + test("includes finding suggestion", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("Add optional chaining: user?.email"); + }); + + test("includes pass name attribution", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("(correctness)"); + }); + + test("shows no issues message when findings empty", () => { + const output = formatReviewText(makeResult({ findings: [] }), { + noColor: true, + }); + expect(output).toContain("No issues found."); + }); + + test("includes score in footer", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("Score: 7/10"); + }); + + test("includes total cost in footer", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("Total: $0.42"); + }); + + test("includes severity 
counts in footer", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("1 high"); + }); + + test("handles finding without line number", () => { + const output = formatReviewText( + makeResult({ + findings: [makeFinding({ line: null })], + }), + { noColor: true }, + ); + expect(output).toContain("src/auth.ts"); + expect(output).not.toContain("src/auth.ts:"); + }); + + test("handles finding without suggestion", () => { + const output = formatReviewText( + makeResult({ + findings: [makeFinding({ suggestion: null })], + }), + { noColor: true }, + ); + expect(output).toContain("Unchecked null access"); + expect(output).not.toContain("\u2192"); + }); + + test("renders multiple findings with pass attribution", () => { + const output = formatReviewText( + makeResult({ + findings: [ + makeFinding({ passName: "correctness" }), + makeFinding({ + file: "src/api.ts", + line: 12, + severity: "critical", + title: "SQL injection", + pass: 2, + passName: "security", + }), + ], + }), + { noColor: true }, + ); + expect(output).toContain("(correctness)"); + expect(output).toContain("(security)"); + }); + + test("includes separators", () => { + const output = formatReviewText(makeResult(), { noColor: true }); + expect(output).toContain("\u2501".repeat(60)); + }); + + test("renders multiple passes in summary", () => { + const output = formatReviewText( + makeResult({ + passes: [ + makePassResult({ name: "correctness" }), + makePassResult({ + name: "security", + costUsd: 0.31, + durationMs: 9000, + }), + makePassResult({ + name: "quality", + costUsd: 0.28, + durationMs: 8000, + }), + ], + }), + { noColor: true }, + ); + expect(output).toContain("Pass 1: Correctness"); + expect(output).toContain("Pass 2: Security"); + expect(output).toContain("Pass 3: Quality"); + }); +}); + +describe("formatReviewJson", () => { + test("returns valid JSON", () => { + const output = formatReviewJson(makeResult()); + expect(() => 
JSON.parse(output)).not.toThrow(); + }); + + test("includes base and head", () => { + const parsed = JSON.parse(formatReviewJson(makeResult())); + expect(parsed.base).toBe("staging"); + expect(parsed.head).toBe("HEAD"); + }); + + test("includes scope", () => { + const parsed = JSON.parse(formatReviewJson(makeResult())); + expect(parsed.scope).toBe("diff"); + }); + + test("includes score", () => { + const parsed = JSON.parse(formatReviewJson(makeResult())); + expect(parsed.score).toBe(7); + }); + + test("includes filesChanged", () => { + const parsed = JSON.parse(formatReviewJson(makeResult())); + expect(parsed.filesChanged).toBe(5); + }); + + test("includes findings with pass info", () => { + const parsed = JSON.parse(formatReviewJson(makeResult())); + expect(parsed.findings).toHaveLength(1); + expect(parsed.findings[0].file).toBe("src/auth.ts"); + expect(parsed.findings[0].line).toBe(48); + expect(parsed.findings[0].severity).toBe("high"); + expect(parsed.findings[0].pass).toBe(1); + expect(parsed.findings[0].passName).toBe("correctness"); + }); + + test("includes cost breakdown", () => { + const parsed = JSON.parse(formatReviewJson(makeResult())); + expect(parsed.cost.total_usd).toBe(0.42); + expect(parsed.cost.passes).toHaveLength(1); + expect(parsed.cost.passes[0].name).toBe("correctness"); + expect(parsed.cost.passes[0].cost_usd).toBe(0.42); + expect(parsed.cost.passes[0].duration_ms).toBe(12000); + }); + + test("includes summary", () => { + const parsed = JSON.parse(formatReviewJson(makeResult())); + expect(parsed.summary).toContain("1 finding"); + }); + + test("includes pass error in cost breakdown", () => { + const result = makeResult({ + passes: [makePassResult({ error: "budget exceeded" })], + }); + const parsed = JSON.parse(formatReviewJson(result)); + expect(parsed.cost.passes[0].error).toBe("budget exceeded"); + }); + + test("omits error field when no error", () => { + const parsed = JSON.parse(formatReviewJson(makeResult())); + 
expect(parsed.cost.passes[0]).not.toHaveProperty("error"); + }); +}); diff --git a/cli/tests/review-runner.test.ts b/cli/tests/review-runner.test.ts new file mode 100644 index 0000000..d2f173b --- /dev/null +++ b/cli/tests/review-runner.test.ts @@ -0,0 +1,236 @@ +import { describe, expect, test } from "bun:test"; +import type { PassName, ReviewFinding } from "../src/schemas/review.js"; + +describe("review-runner pure functions", () => { + // Test mergeFindings logic + test("deduplicates findings by file:line:title", () => { + const { mergeFindings } = createTestHelpers(); + const passResults = [ + { + name: "correctness" as PassName, + findings: [ + makeFinding({ file: "a.ts", line: 1, title: "Bug" }), + makeFinding({ file: "b.ts", line: 2, title: "Error" }), + ], + costUsd: 0.4, + durationMs: 10000, + sessionId: "s1", + }, + { + name: "security" as PassName, + findings: [ + makeFinding({ file: "a.ts", line: 1, title: "Bug" }), // duplicate + makeFinding({ file: "c.ts", line: 3, title: "Vuln" }), + ], + costUsd: 0.3, + durationMs: 9000, + sessionId: "s1", + }, + ]; + + const merged = mergeFindings(passResults); + expect(merged).toHaveLength(3); // Bug, Error, Vuln — duplicate removed + }); + + test("sorts findings by severity", () => { + const { mergeFindings } = createTestHelpers(); + const passResults = [ + { + name: "correctness" as PassName, + findings: [ + makeFinding({ severity: "low", title: "Low" }), + makeFinding({ severity: "critical", title: "Crit" }), + makeFinding({ severity: "medium", title: "Med" }), + ], + costUsd: 0.4, + durationMs: 10000, + sessionId: "s1", + }, + ]; + + const merged = mergeFindings(passResults); + expect(merged[0].severity).toBe("critical"); + expect(merged[1].severity).toBe("medium"); + expect(merged[2].severity).toBe("low"); + }); + + test("assigns correct pass numbers and names", () => { + const { mergeFindings } = createTestHelpers(); + const passResults = [ + { + name: "correctness" as PassName, + findings: [makeFinding({ 
title: "A" })], + costUsd: 0.4, + durationMs: 10000, + sessionId: "s1", + }, + { + name: "security" as PassName, + findings: [makeFinding({ title: "B" })], + costUsd: 0.3, + durationMs: 9000, + sessionId: "s1", + }, + ]; + + const merged = mergeFindings(passResults); + const findingA = merged.find((f) => f.title === "A"); + const findingB = merged.find((f) => f.title === "B"); + expect(findingA?.pass).toBe(1); + expect(findingA?.passName).toBe("correctness"); + expect(findingB?.pass).toBe(2); + expect(findingB?.passName).toBe("security"); + }); + + // Test calculateScore logic + test("calculates score 10 for no findings", () => { + const { calculateScore } = createTestHelpers(); + expect(calculateScore([])).toBe(10); + }); + + test("deducts 3 points per critical finding", () => { + const { calculateScore } = createTestHelpers(); + const findings = [makeWithPass({ severity: "critical" })]; + expect(calculateScore(findings)).toBe(7); + }); + + test("deducts 2 points per high finding", () => { + const { calculateScore } = createTestHelpers(); + const findings = [makeWithPass({ severity: "high" })]; + expect(calculateScore(findings)).toBe(8); + }); + + test("deducts 1 point per medium finding", () => { + const { calculateScore } = createTestHelpers(); + const findings = [makeWithPass({ severity: "medium" })]; + expect(calculateScore(findings)).toBe(9); + }); + + test("deducts 0.5 points per low finding", () => { + const { calculateScore } = createTestHelpers(); + const findings = [makeWithPass({ severity: "low" })]; + // 10 - 0.5 = 9.5, rounds to 10 + expect(calculateScore(findings)).toBe(10); + }); + + test("info findings don't affect score", () => { + const { calculateScore } = createTestHelpers(); + const findings = [makeWithPass({ severity: "info" })]; + expect(calculateScore(findings)).toBe(10); + }); + + test("score clamps to minimum 1", () => { + const { calculateScore } = createTestHelpers(); + const findings = Array.from({ length: 10 }, () => + makeWithPass({ 
severity: "critical" }), + ); + expect(calculateScore(findings)).toBe(1); + }); + + test("score clamps to maximum 10", () => { + const { calculateScore } = createTestHelpers(); + expect(calculateScore([])).toBe(10); + }); + + test("mixed severities calculate correctly", () => { + const { calculateScore } = createTestHelpers(); + // 1 critical (3) + 1 high (2) + 2 medium (2) = 7 points → score 3 + const findings = [ + makeWithPass({ severity: "critical" }), + makeWithPass({ severity: "high" }), + makeWithPass({ severity: "medium" }), + makeWithPass({ severity: "medium" }), + ]; + expect(calculateScore(findings)).toBe(3); + }); +}); + +// --- Helpers --- + +function makeFinding(overrides?: Partial): ReviewFinding { + return { + file: "src/test.ts", + line: 1, + severity: "medium", + category: "correctness", + title: "Test finding", + description: "Test description", + suggestion: null, + ...overrides, + }; +} + +function makeWithPass(overrides?: Partial) { + return { + ...makeFinding(overrides), + pass: 1, + passName: "correctness" as PassName, + }; +} + +/** + * Re-implements the pure functions from review-runner for testing, + * since we can't easily import them (they depend on Bun.spawn internals). 
+ */ +function createTestHelpers() { + const SEVERITY_SORT: Record = { + critical: 0, + high: 1, + medium: 2, + low: 3, + info: 4, + }; + const SCORE_WEIGHTS: Record = { + critical: 3, + high: 2, + medium: 1, + low: 0.5, + info: 0, + }; + + function mergeFindings( + passResults: { + name: PassName; + findings: ReviewFinding[]; + costUsd: number; + durationMs: number; + sessionId: string; + }[], + ) { + const seen = new Set(); + const merged: (ReviewFinding & { + pass: number; + passName: PassName; + })[] = []; + + for (const [i, pass] of passResults.entries()) { + for (const finding of pass.findings) { + const key = `${finding.file}:${finding.line}:${finding.title}`; + if (!seen.has(key)) { + seen.add(key); + merged.push({ + ...finding, + pass: i + 1, + passName: pass.name, + }); + } + } + } + + merged.sort( + (a, b) => + (SEVERITY_SORT[a.severity] ?? 5) - (SEVERITY_SORT[b.severity] ?? 5), + ); + return merged; + } + + function calculateScore(findings: { severity: string }[]): number { + const totalPoints = findings.reduce( + (sum, f) => sum + (SCORE_WEIGHTS[f.severity] ?? 
0), + 0, + ); + return Math.max(1, Math.min(10, Math.round(10 - totalPoints))); + } + + return { mergeFindings, calculateScore }; +} diff --git a/cli/tests/settings-writer.test.ts b/cli/tests/settings-writer.test.ts new file mode 100644 index 0000000..d0f59cc --- /dev/null +++ b/cli/tests/settings-writer.test.ts @@ -0,0 +1,127 @@ +import { describe, expect, test } from "bun:test"; +import { mkdtempSync, rmSync } from "fs"; +import { tmpdir } from "os"; +import { resolve } from "path"; +import { loadSettings, writeSettings } from "../src/loaders/config-loader.js"; + +function makeTempDir(): string { + return mkdtempSync(resolve(tmpdir(), "codeforge-test-")); +} + +describe("loadSettings", () => { + test("reads and parses settings.json", async () => { + const dir = makeTempDir(); + const path = resolve(dir, "settings.json"); + await Bun.write( + path, + JSON.stringify({ enabledPlugins: { "test@mp": true } }), + ); + + const result = await loadSettings(path); + expect(result.enabledPlugins).toEqual({ "test@mp": true }); + + rmSync(dir, { recursive: true }); + }); + + test("returns empty object for missing file", async () => { + const result = await loadSettings( + "/tmp/nonexistent-codeforge-settings.json", + ); + expect(result).toEqual({}); + }); + + test("returns empty object for invalid JSON", async () => { + const dir = makeTempDir(); + const path = resolve(dir, "settings.json"); + await Bun.write(path, "not valid json {{{"); + + const result = await loadSettings(path); + expect(result).toEqual({}); + + rmSync(dir, { recursive: true }); + }); +}); + +describe("writeSettings", () => { + test("writes valid JSON with 2-space indentation and trailing newline", async () => { + const dir = makeTempDir(); + const path = resolve(dir, "settings.json"); + + await writeSettings(path, { enabledPlugins: { "test@mp": true } }); + + const raw = await Bun.file(path).text(); + expect(raw.endsWith("\n")).toBe(true); + + const parsed = JSON.parse(raw); + 
expect(parsed.enabledPlugins["test@mp"]).toBe(true); + + // Verify 2-space indentation + expect(raw).toContain(' "enabledPlugins"'); + + rmSync(dir, { recursive: true }); + }); + + test("creates parent directories if needed", async () => { + const dir = makeTempDir(); + const path = resolve(dir, "nested/deep/settings.json"); + + await writeSettings(path, { env: { FOO: "bar" } }); + + const result = await loadSettings(path); + expect(result.env?.FOO).toBe("bar"); + + rmSync(dir, { recursive: true }); + }); +}); + +describe("round-trip", () => { + test("write then read preserves data", async () => { + const dir = makeTempDir(); + const path = resolve(dir, "settings.json"); + + const original = { + enabledPlugins: { + "plugin-a@marketplace-1": true, + "plugin-b@marketplace-2": false, + }, + env: { NODE_ENV: "production" }, + permissions: { + allow: ["read", "write"], + deny: ["delete"], + }, + }; + + await writeSettings(path, original); + const result = await loadSettings(path); + + expect(result.enabledPlugins).toEqual(original.enabledPlugins); + expect(result.env).toEqual(original.env); + expect(result.permissions).toEqual(original.permissions); + + rmSync(dir, { recursive: true }); + }); + + test("preserves other settings when updating enabledPlugins", async () => { + const dir = makeTempDir(); + const path = resolve(dir, "settings.json"); + + await writeSettings(path, { + env: { KEY: "value" }, + enabledPlugins: { "old@mp": true }, + }); + + const settings = await loadSettings(path); + if (!settings.enabledPlugins) { + settings.enabledPlugins = {}; + } + settings.enabledPlugins["new@mp"] = false; + await writeSettings(path, settings); + + const result = await loadSettings(path); + expect(result.env?.KEY).toBe("value"); + expect(result.enabledPlugins?.["old@mp"]).toBe(true); + expect(result.enabledPlugins?.["new@mp"]).toBe(false); + + rmSync(dir, { recursive: true }); + }); +}); From 9e40e8348f9f7a68fa2c09a02fa04c2d13798e56 Mon Sep 17 00:00:00 2001 From: 
AnExiledDev Date: Thu, 5 Mar 2026 15:43:24 +0000 Subject: [PATCH 2/3] Improve CLI core: register new commands, fix search and packaging - Register plugin, config, and review subcommands in index.ts - Remove fast-glob dependency (use native Bun glob) - Fix build output to single file (--outfile dist/codeforge.js) - Add npm publish metadata (keywords, files, prepublishOnly) - Fix search filter edge cases with new tests - Fix plan-loader path resolution - Update session list/show formatting --- cli/bun.lock | 37 ------------------ cli/package.json | 23 ++++++++--- cli/src/commands/session/list.ts | 4 +- cli/src/commands/session/show.ts | 7 ++-- cli/src/index.ts | 31 +++++++++++++++ cli/src/loaders/plan-loader.ts | 2 +- cli/src/search/engine.ts | 4 +- cli/src/search/filter.ts | 7 ++-- cli/src/utils/glob.ts | 11 +++--- cli/tests/filter.test.ts | 67 ++++++++++++++++++++++++++++++++ 10 files changed, 133 insertions(+), 60 deletions(-) diff --git a/cli/bun.lock b/cli/bun.lock index 8b83519..7884482 100644 --- a/cli/bun.lock +++ b/cli/bun.lock @@ -7,7 +7,6 @@ "dependencies": { "chalk": "^5.4.0", "commander": "^13.0.0", - "fast-glob": "^3.3.0", }, "devDependencies": { "@types/bun": "^1.3.10", @@ -17,52 +16,16 @@ }, }, "packages": { - "@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="], - - "@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="], - - "@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="], - "@types/bun": ["@types/bun@1.3.10", "", { "dependencies": { "bun-types": "1.3.10" } }, 
"sha512-0+rlrUrOrTSskibryHbvQkDOWRJwJZqZlxrUs1u4oOoTln8+WIXBPmAuCF35SWB2z4Zl3E84Nl/D0P7803nigQ=="], "@types/node": ["@types/node@22.19.13", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-akNQMv0wW5uyRpD2v2IEyRSZiR+BeGuoB6L310EgGObO44HSMNT8z1xzio28V8qOrgYaopIDNA18YgdXd+qTiw=="], - "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], - "bun-types": ["bun-types@1.3.10", "", { "dependencies": { "@types/node": "*" } }, "sha512-tcpfCCl6XWo6nCVnpcVrxQ+9AYN1iqMIzgrSKYMB/fjLtV2eyAVEg7AxQJuCq/26R6HpKWykQXuSOq/21RYcbg=="], "chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], "commander": ["commander@13.1.0", "", {}, "sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw=="], - "fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="], - - "fastq": ["fastq@1.20.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw=="], - - "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], - - "glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], - - "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="], - - "is-glob": ["is-glob@4.0.3", "", { "dependencies": { 
"is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="], - - "is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="], - - "merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="], - - "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], - - "picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], - - "queue-microtask": ["queue-microtask@1.2.3", "", {}, "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="], - - "reusify": ["reusify@1.1.0", "", {}, "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], - - "run-parallel": ["run-parallel@1.2.0", "", { "dependencies": { "queue-microtask": "^1.2.2" } }, "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA=="], - - "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], - "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], "undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="], diff --git a/cli/package.json b/cli/package.json index ecd65cc..266b0b3 100644 --- a/cli/package.json +++ b/cli/package.json @@ -1,20 +1,28 @@ { - "name": "codeforge-cli", + "name": 
"codeforge-dev-cli", "version": "0.1.0", "description": "CLI for CodeForge development workflows", + "keywords": [ + "codeforge", + "cli", + "code-review", + "developer-tools", + "devcontainer", + "claude" + ], "type": "module", "bin": { "codeforge": "./dist/codeforge.js" }, "scripts": { - "build": "bun build src/index.ts --outdir dist --target bun", + "build": "bun build src/index.ts --outfile dist/codeforge.js --target bun", "dev": "bun run src/index.ts", - "test": "bun test" + "test": "bun test", + "prepublishOnly": "bun run build && bun test" }, "dependencies": { "commander": "^13.0.0", - "chalk": "^5.4.0", - "fast-glob": "^3.3.0" + "chalk": "^5.4.0" }, "devDependencies": { "@types/bun": "^1.3.10", @@ -32,6 +40,11 @@ "directory": "cli" }, "homepage": "https://github.com/AnExiledDev/CodeForge/tree/main/cli#readme", + "files": [ + "dist/", + "prompts/", + "README.md" + ], "bugs": { "url": "https://github.com/AnExiledDev/CodeForge/issues" } diff --git a/cli/src/commands/session/list.ts b/cli/src/commands/session/list.ts index 4e78c1a..1bdd8b1 100644 --- a/cli/src/commands/session/list.ts +++ b/cli/src/commands/session/list.ts @@ -1,5 +1,6 @@ import chalk from "chalk"; import type { Command } from "commander"; +import { basename } from "path"; import { loadHistory } from "../../loaders/history-loader.js"; import { extractSessionMeta } from "../../loaders/session-meta.js"; import { @@ -57,8 +58,7 @@ export function registerListCommand(parent: Command): void { const filesBySessionId = new Map(); for (const filePath of sessionFiles) { // Extract session ID from filename (basename without extension) - const parts = filePath.split("/"); - const filename = parts[parts.length - 1]; + const filename = basename(filePath); const id = filename.replace(/\.jsonl$/, ""); filesBySessionId.set(id, filePath); } diff --git a/cli/src/commands/session/show.ts b/cli/src/commands/session/show.ts index 35f07bb..e77283f 100644 --- a/cli/src/commands/session/show.ts +++ 
b/cli/src/commands/session/show.ts @@ -1,7 +1,7 @@ import chalk from "chalk"; import type { Command } from "commander"; import { homedir } from "os"; -import { resolve } from "path"; +import { basename, resolve } from "path"; import { extractSessionMeta } from "../../loaders/session-meta.js"; import { loadTasks } from "../../loaders/task-loader.js"; import { @@ -44,8 +44,7 @@ export function registerShowCommand(parent: Command): void { // Try UUID prefix match first for (const filePath of sessionFiles) { - const parts = filePath.split("/"); - const filename = parts[parts.length - 1]; + const filename = basename(filePath); const id = filename.replace(/\.jsonl$/, ""); if (id.startsWith(identifier)) { targetFile = filePath; @@ -86,7 +85,7 @@ export function registerShowCommand(parent: Command): void { if (await planFile.exists()) { const content = await planFile.text(); let title = meta.slug; - for (const line of content.split("\n")) { + for (const line of content.split(/\r?\n/)) { if (line.startsWith("# ")) { title = line.slice(2).trim(); break; diff --git a/cli/src/index.ts b/cli/src/index.ts index bd77f5b..af0f15b 100644 --- a/cli/src/index.ts +++ b/cli/src/index.ts @@ -1,7 +1,17 @@ #!/usr/bin/env bun import { Command } from "commander"; +import { registerConfigApplyCommand } from "./commands/config/apply.js"; +import { registerConfigShowCommand } from "./commands/config/show.js"; import { registerPlanSearchCommand } from "./commands/plan/search.js"; +import { registerPluginAgentsCommand } from "./commands/plugin/agents.js"; +import { registerPluginDisableCommand } from "./commands/plugin/disable.js"; +import { registerPluginEnableCommand } from "./commands/plugin/enable.js"; +import { registerPluginHooksCommand } from "./commands/plugin/hooks.js"; +import { registerPluginListCommand } from "./commands/plugin/list.js"; +import { registerPluginShowCommand } from "./commands/plugin/show.js"; +import { registerPluginSkillsCommand } from "./commands/plugin/skills.js"; 
+import { registerReviewCommand } from "./commands/review/review.js"; import { registerListCommand } from "./commands/session/list.js"; import { registerSearchCommand } from "./commands/session/search.js"; import { registerShowCommand } from "./commands/session/show.js"; @@ -30,4 +40,25 @@ const plan = program.command("plan").description("Search and manage plans"); registerPlanSearchCommand(plan); +const plugin = program + .command("plugin") + .description("Manage Claude Code plugins"); + +registerPluginListCommand(plugin); +registerPluginShowCommand(plugin); +registerPluginEnableCommand(plugin); +registerPluginDisableCommand(plugin); +registerPluginHooksCommand(plugin); +registerPluginAgentsCommand(plugin); +registerPluginSkillsCommand(plugin); + +const config = program + .command("config") + .description("Manage Claude Code configuration"); + +registerConfigShowCommand(config); +registerConfigApplyCommand(config); + +registerReviewCommand(program); + program.parse(); diff --git a/cli/src/loaders/plan-loader.ts b/cli/src/loaders/plan-loader.ts index c3374c4..07d99f7 100644 --- a/cli/src/loaders/plan-loader.ts +++ b/cli/src/loaders/plan-loader.ts @@ -18,7 +18,7 @@ export async function loadPlans(): Promise { // Extract title from first heading line let title = slug; - for (const line of content.split("\n")) { + for (const line of content.split(/\r?\n/)) { if (line.startsWith("# ")) { title = line.slice(2).trim(); break; diff --git a/cli/src/search/engine.ts b/cli/src/search/engine.ts index a2a64a8..17518e9 100644 --- a/cli/src/search/engine.ts +++ b/cli/src/search/engine.ts @@ -72,14 +72,14 @@ export async function* readLines(filePath: string): AsyncGenerator { // Keep the last partial line in the buffer buffer = lines.pop() || ""; for (const line of lines) { - if (line.trim()) yield line; + if (line.trim()) yield line.replace(/\r$/, ""); } } // Flush remaining buffer const remaining = buffer + decoder.decode(); if (remaining.trim()) { - yield remaining; + yield 
remaining.replace(/\r$/, ""); } } diff --git a/cli/src/search/filter.ts b/cli/src/search/filter.ts index fd4a610..4eed50a 100644 --- a/cli/src/search/filter.ts +++ b/cli/src/search/filter.ts @@ -1,4 +1,5 @@ import type { SearchableMessage } from "../schemas/session-message.js"; +import { normalizePath } from "../utils/platform.js"; export interface FilterOptions { role?: string; @@ -19,11 +20,11 @@ export function createFilter( } if (options.project) { - // Normalize: strip trailing slash for consistent comparison - const project = options.project.replace(/\/+$/, ""); + // Normalize: convert backslashes and strip trailing separators + const project = normalizePath(options.project).replace(/[/\\]+$/, ""); filters.push((msg) => { if (!msg.cwd) return false; - const dir = msg.cwd.replace(/\/+$/, ""); + const dir = normalizePath(msg.cwd).replace(/[/\\]+$/, ""); return dir === project || dir.startsWith(project + "/"); }); } diff --git a/cli/src/utils/glob.ts b/cli/src/utils/glob.ts index 62698b6..28cb367 100644 --- a/cli/src/utils/glob.ts +++ b/cli/src/utils/glob.ts @@ -1,6 +1,5 @@ import { statSync } from "fs"; -import { homedir } from "os"; -import { resolve } from "path"; +import { getHome, resolveNormalized } from "./platform.js"; const DEFAULT_PATTERN = "**/*.jsonl"; const DEFAULT_BASE_DIR = ".claude/projects"; @@ -14,10 +13,10 @@ export async function discoverSessionFiles( if (pattern) { // Expand ~ to home directory const expanded = pattern.startsWith("~") - ? pattern.replace(/^~/, process.env.HOME || homedir()) + ? 
pattern.replace(/^~/, getHome()) : pattern; // Resolve to absolute path - globPattern = resolve(expanded); + globPattern = resolveNormalized(expanded); // Split into base directory and glob portion // Find the first segment containing a glob character @@ -37,8 +36,8 @@ export async function discoverSessionFiles( scanPath = baseparts.join("/") || "/"; globPattern = globParts.join("/") || "**/*.jsonl"; } else { - const home = process.env.HOME || homedir(); - scanPath = resolve(home, DEFAULT_BASE_DIR); + const home = getHome(); + scanPath = resolveNormalized(home, DEFAULT_BASE_DIR); globPattern = DEFAULT_PATTERN; } diff --git a/cli/tests/filter.test.ts b/cli/tests/filter.test.ts index 26dbb1f..b25b59c 100644 --- a/cli/tests/filter.test.ts +++ b/cli/tests/filter.test.ts @@ -73,6 +73,73 @@ describe("createFilter", () => { }); }); + describe("project filter — Windows paths", () => { + test("matches when cwd uses backslashes", () => { + const filter = createFilter({ + project: "/workspaces/projects/CodeForge", + }); + expect( + filter( + makeMessage({ + cwd: "\\workspaces\\projects\\CodeForge", + }), + ), + ).toBe(true); + }); + + test("matches when project uses backslashes", () => { + const filter = createFilter({ + project: "C:\\Users\\dev\\project", + }); + expect( + filter( + makeMessage({ + cwd: "C:/Users/dev/project/subdir", + }), + ), + ).toBe(true); + }); + + test("matches when both use backslashes", () => { + const filter = createFilter({ + project: "C:\\Users\\dev\\project", + }); + expect( + filter( + makeMessage({ + cwd: "C:\\Users\\dev\\project", + }), + ), + ).toBe(true); + }); + + test("rejects non-matching Windows paths", () => { + const filter = createFilter({ + project: "C:\\Users\\dev\\project", + }); + expect( + filter( + makeMessage({ + cwd: "C:\\Users\\dev\\other", + }), + ), + ).toBe(false); + }); + + test("handles trailing backslash in project", () => { + const filter = createFilter({ + project: "C:\\Users\\dev\\project\\", + }); + expect( + 
filter( + makeMessage({ + cwd: "C:\\Users\\dev\\project", + }), + ), + ).toBe(true); + }); + }); + describe("time filter (after)", () => { test("includes messages at or after the date", () => { const filter = createFilter({ after: new Date("2026-03-01T10:00:00Z") }); From 00c5808f61f0e5be27d1cdb854d6c67520e9ad7d Mon Sep 17 00:00:00 2001 From: AnExiledDev Date: Thu, 5 Mar 2026 15:43:32 +0000 Subject: [PATCH 3/3] Add CLI release workflow, devcontainer feature, and CI improvements - release-cli.yml: tag-triggered (cli-v*) npm publish + GitHub release - codeforge-cli devcontainer feature: installs CLI globally via npm - Register codeforge-cli feature in devcontainer.json - Remove dead codeforge alias, add codeforge to cc-tools list - CI: cross-platform test matrix (ubuntu, windows, macos) - Fix docs changelog sync paths for monorepo structure --- .github/workflows/ci.yml | 9 ++- .github/workflows/release-cli.yml | 71 +++++++++++++++++++ container/.devcontainer/devcontainer.json | 2 + .../features/codeforge-cli/README.md | 20 ++++++ .../codeforge-cli/devcontainer-feature.json | 18 +++++ .../features/codeforge-cli/install.sh | 41 +++++++++++ .../.devcontainer/scripts/setup-aliases.sh | 3 +- docs/scripts/sync-changelog.mjs | 21 +++--- 8 files changed, 171 insertions(+), 14 deletions(-) create mode 100644 .github/workflows/release-cli.yml create mode 100644 container/.devcontainer/features/codeforge-cli/README.md create mode 100644 container/.devcontainer/features/codeforge-cli/devcontainer-feature.json create mode 100755 container/.devcontainer/features/codeforge-cli/install.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 29aa5f1..d682e8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,10 +3,10 @@ name: CI on: push: branches: [main, staging] - paths: ['container/**'] + paths: ['container/**', 'cli/**'] pull_request: branches: [main, staging] - paths: ['container/**'] + paths: ['container/**', 'cli/**'] jobs: test: @@ 
-41,7 +41,10 @@ jobs: working-directory: container test-cli: - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v6 - uses: oven-sh/setup-bun@v2 diff --git a/.github/workflows/release-cli.yml b/.github/workflows/release-cli.yml new file mode 100644 index 0000000..0d36c09 --- /dev/null +++ b/.github/workflows/release-cli.yml @@ -0,0 +1,71 @@ +name: Release CLI + +on: + push: + tags: ['cli-v*'] + +jobs: + validate: + runs-on: ubuntu-latest + outputs: + version: ${{ steps.extract.outputs.version }} + steps: + - uses: actions/checkout@v6 + - id: extract + name: Extract and validate version + run: | + TAG="${GITHUB_REF#refs/tags/cli-v}" + PKG=$(node -p "require('./cli/package.json').version") + echo "version=$TAG" >> "$GITHUB_OUTPUT" + if [ "$TAG" != "$PKG" ]; then + echo "::error::Tag cli-v${TAG} does not match cli/package.json version ${PKG}" + exit 1 + fi + + publish-and-release: + needs: validate + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v6 + + - uses: oven-sh/setup-bun@v2 + + - name: Install dependencies + run: bun install + working-directory: cli + + - name: Run tests + run: bun test + working-directory: cli + + - name: Build + run: bun run build + working-directory: cli + + - uses: actions/setup-node@v6 + with: + node-version: 18 + registry-url: https://registry.npmjs.org + + - name: Publish to npm + run: npm publish + working-directory: cli + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + + - name: Extract changelog section + id: changelog + run: | + VERSION="${{ needs.validate.outputs.version }}" + NOTES=$(sed -n "/^## v${VERSION}/,/^## v/{ /^## v${VERSION}/d; /^## v/d; p; }" cli/CHANGELOG.md) + [ -z "$NOTES" ] && NOTES="CLI Release v${VERSION}" + echo "$NOTES" > /tmp/release-notes.md + + - name: Create GitHub Release + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + VERSION="cli-v${{ 
needs.validate.outputs.version }}" + gh release create "$VERSION" --title "$VERSION" --notes-file /tmp/release-notes.md diff --git a/container/.devcontainer/devcontainer.json b/container/.devcontainer/devcontainer.json index 6964f80..f889ba7 100755 --- a/container/.devcontainer/devcontainer.json +++ b/container/.devcontainer/devcontainer.json @@ -58,6 +58,7 @@ "ghcr.io/devcontainers/features/docker-outside-of-docker", "ghcr.io/devcontainers-extra/features/uv", "ghcr.io/rails/devcontainer/features/bun", + "./features/codeforge-cli", "./features/claude-code-native", "./features/tmux", "./features/agent-browser", @@ -149,6 +150,7 @@ "./features/shellcheck": { "version": "none" }, "./features/hadolint": { "version": "none" }, "./features/biome": {}, + "./features/codeforge-cli": {}, "./features/notify-hook": { "enableBell": true, "enableOsc": true diff --git a/container/.devcontainer/features/codeforge-cli/README.md b/container/.devcontainer/features/codeforge-cli/README.md new file mode 100644 index 0000000..40c73f4 --- /dev/null +++ b/container/.devcontainer/features/codeforge-cli/README.md @@ -0,0 +1,20 @@ +# CodeForge CLI (codeforge-cli) + +Installs the [CodeForge CLI](https://github.com/AnExiledDev/CodeForge/tree/main/cli) globally via npm. Provides the `codeforge` command for code review, session search, plugin management, and configuration. + +Requires Node.js (for npm install) and Bun (runtime for the CLI binary). + +## Options + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `version` | string | `latest` | Version to install. Use a specific semver or `'none'` to skip. 
| + +## Usage + +```jsonc +// devcontainer.json +"features": { + "./features/codeforge-cli": {} +} +``` diff --git a/container/.devcontainer/features/codeforge-cli/devcontainer-feature.json b/container/.devcontainer/features/codeforge-cli/devcontainer-feature.json new file mode 100644 index 0000000..024b610 --- /dev/null +++ b/container/.devcontainer/features/codeforge-cli/devcontainer-feature.json @@ -0,0 +1,18 @@ +{ + "id": "codeforge-cli", + "version": "1.0.0", + "name": "CodeForge CLI", + "description": "Installs the CodeForge CLI for code review, session search, plugin management, and configuration", + "documentationURL": "https://github.com/AnExiledDev/CodeForge/tree/main/cli", + "options": { + "version": { + "type": "string", + "description": "Version to install ('latest' or a specific semver). Use 'none' to skip.", + "default": "latest" + } + }, + "installsAfter": [ + "ghcr.io/devcontainers/features/node", + "ghcr.io/rails/devcontainer/features/bun" + ] +} diff --git a/container/.devcontainer/features/codeforge-cli/install.sh b/container/.devcontainer/features/codeforge-cli/install.sh new file mode 100755 index 0000000..cfd9bf1 --- /dev/null +++ b/container/.devcontainer/features/codeforge-cli/install.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-3.0-only +# Copyright (c) 2026 Marcus Krueger +set -euo pipefail + +VERSION="${VERSION:-latest}" + +# Skip installation if version is "none" +if [ "${VERSION}" = "none" ]; then + echo "[codeforge-cli] Skipping installation (version=none)" + exit 0 +fi + +echo "[codeforge-cli] Starting installation..." +echo "[codeforge-cli] Version: ${VERSION}" + +# Source NVM if available +if [ -f /usr/local/share/nvm/nvm.sh ]; then + set +u + source /usr/local/share/nvm/nvm.sh + set -u +fi + +# Validate npm is available +if ! command -v npm &>/dev/null; then + echo "[codeforge-cli] ERROR: npm not found. Ensure Node.js is installed." 
>&2 + exit 1 +fi + +# Install CodeForge CLI globally via npm +if [ "${VERSION}" = "latest" ]; then + npm install -g codeforge-dev-cli +else + npm install -g "codeforge-dev-cli@${VERSION}" +fi +npm cache clean --force 2>/dev/null || true + +# Verify installation +codeforge --version + +echo "[codeforge-cli] Installation complete" diff --git a/container/.devcontainer/scripts/setup-aliases.sh b/container/.devcontainer/scripts/setup-aliases.sh index 91d8ca8..c20fac8 100755 --- a/container/.devcontainer/scripts/setup-aliases.sh +++ b/container/.devcontainer/scripts/setup-aliases.sh @@ -103,7 +103,7 @@ cc-tools() { echo "━━━━━━━━━━━━━━━━━━━━━━━━" printf " %-20s %s\n" "COMMAND" "STATUS" echo " ────────────────────────────────────" - for cmd in claude cc ccw ccraw cc-orc ccusage ccburn claude-monitor \\ + for cmd in claude cc ccw ccraw cc-orc codeforge ccusage ccburn claude-monitor \\ ccms ct cargo ruff biome dprint shfmt shellcheck hadolint \\ ast-grep tree-sitter pyright typescript-language-server \\ agent-browser gh docker git jq tmux bun go infocmp; do @@ -117,7 +117,6 @@ cc-tools() { } alias check-setup='bash ${DEVCONTAINER_SCRIPTS}/check-setup.sh' -alias codeforge='node \${WORKSPACE_ROOT}/setup.js' ${BLOCK_END} BLOCK_EOF diff --git a/docs/scripts/sync-changelog.mjs b/docs/scripts/sync-changelog.mjs index c97d7aa..ae2ba00 100644 --- a/docs/scripts/sync-changelog.mjs +++ b/docs/scripts/sync-changelog.mjs @@ -6,22 +6,25 @@ * source of truth — never edit the generated docs page directly. 
*/ -import { readFileSync, writeFileSync } from 'node:fs'; -import { resolve, dirname } from 'node:path'; -import { fileURLToPath } from 'node:url'; +import { readFileSync, writeFileSync } from "node:fs"; +import { dirname, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; const __dirname = dirname(fileURLToPath(import.meta.url)); -const source = resolve(__dirname, '../../container/.devcontainer/CHANGELOG.md'); -const dest = resolve(__dirname, '../src/content/docs/reference/changelog.md'); +const source = resolve(__dirname, "../../container/.devcontainer/CHANGELOG.md"); +const dest = resolve(__dirname, "../src/content/docs/reference/changelog.md"); -const content = readFileSync(source, 'utf-8'); +const content = readFileSync(source, "utf-8"); // Strip the H1 heading — Starlight generates one from the frontmatter title -const body = content.replace(/^# .+\n+/, ''); +const body = content.replace(/^# .+\n+/, ""); // Convert [vX.Y.Z] link-style headings to plain ## vX.Y.Z headings // Source uses ## [v1.14.0] - 2026-02-24, docs need ## v1.14.0 -const cleaned = body.replace(/^(##) \[v([\d.]+)\] - (\d{4}-\d{2}-\d{2})/gm, '$1 v$2\n\n**Release date:** $3'); +const cleaned = body.replace( + /^(##) \[v([\d.]+)\] - (\d{4}-\d{2}-\d{2})/gm, + "$1 v$2\n\n**Release date:** $3", +); const frontmatter = `--- title: Changelog @@ -75,4 +78,4 @@ For minor and patch updates, you can usually just rebuild the container. Check t `; writeFileSync(dest, frontmatter + cleaned); -console.log('✓ Changelog synced from container/.devcontainer/CHANGELOG.md'); +console.log("✓ Changelog synced from container/.devcontainer/CHANGELOG.md");