diff --git a/.github/workflows/pull-request-reviewer.yml b/.github/workflows/pull-request-reviewer.yml index ce137b68..83952934 100644 --- a/.github/workflows/pull-request-reviewer.yml +++ b/.github/workflows/pull-request-reviewer.yml @@ -9,7 +9,15 @@ on: jobs: review: name: Cross-check Pull Request - if: ${{ false }} + if: >- + vars.ENABLE_PR_REVIEWER == 'true' && + ( + github.event_name == 'workflow_dispatch' || + ( + github.event.pull_request.draft == false && + github.event.pull_request.state == 'open' + ) + ) runs-on: ubuntu-latest permissions: contents: read diff --git a/docs/plans/2026-03-11-openclaw-dry-run.ears.md b/docs/plans/2026-03-11-openclaw-dry-run.ears.md new file mode 100644 index 00000000..8cf4137b --- /dev/null +++ b/docs/plans/2026-03-11-openclaw-dry-run.ears.md @@ -0,0 +1,62 @@ +# OpenClaw Configure Dry Run — EARS-Improved Plan + +> **Original plan:** docs/plans/2026-03-11-openclaw-dry-run.md +> **Improved with:** EARS requirements syntax +> **For Claude:** REQUIRED SUB-SKILL: Use executing-plans to implement this plan task-by-task. + +**Goal:** Verify that `poe-code --dry-run configure openclaw --yes` completes without mutating the filesystem or invoking OpenClaw CLI commands. +**Architecture:** The `configure` command builds a payload via `buildConfigurePayload` (which resolves models, defaults, etc.) then conditionally skips mutation when `--dry-run` is set. The e2e test validates this end-to-end by running the real CLI process and asserting on exit code and absence of side effects. +**System(s) under change:** `e2e/openclaw.test.ts` + +--- + +## Preconditions + +- **P1:** `buildConfigurePayload` executes fully during dry run (resolves models, computes defaults). This is existing, tested behavior and is NOT changed by this plan. +- **P2:** `configure()` returns early when `--dry-run` is set, skipping `runOpenClawCommand` and file writes. This is existing behavior introduced in commit `97c11d6`. 
+- **P3:** `buildConfigurePayload` can fail if network/model resolution fails; the e2e test inherits this environmental dependency. + +--- + +## Requirements + +R1: **When** `poe-code --dry-run configure openclaw --yes` is executed, **then** the process SHALL exit with code 0. + +R2: **When** `poe-code --dry-run configure openclaw --yes` is executed, **then** the OpenClaw configuration file SHALL not be modified. + +R3: **An** e2e test SHALL exist that validates R1 and R2 by spawning the CLI process. + +--- + +## Implementation + +S1 (R1, R2, R3): Add dry-run e2e test case to the OpenClaw configure test suite. + - Files: `e2e/openclaw.test.ts` + - Action: Add a test case that spawns `poe-code --dry-run configure openclaw --yes`, asserts exit code 0, and asserts the config file remains unchanged. + +CHECKPOINT: Run `npm run e2e:verbose` — all tests pass including the new dry-run test. + +--- + +## Verification + +V1 (R1, R2, R3): Run `npm run e2e:verbose` and confirm the new dry-run test passes. + Expected: Test spawns CLI with `--dry-run`, process exits 0, OpenClaw config file is not modified. + +--- + +## Traceability Matrix + +| Req | Implementation Steps | Verification | +|-----|---------------------|-------------| +| R1 | S1 | V1 | +| R2 | S1 | V1 | +| R3 | S1 | V1 | + +--- + +## Known Gaps / Future Work + +- **`--dry-run` without `--yes` (interactive mode):** Not tested by this plan. Interactive dry-run would require prompt simulation in e2e tests; deferred until interactive testing infrastructure exists. +- **`unconfigure --dry-run`:** Not covered. Out of scope for this plan. +- **Environmental flakiness:** `buildConfigurePayload` depends on network/model resolution (P3). If this becomes flaky, the e2e test may need environment stubbing in the future. 
diff --git a/docs/plans/2026-03-11-openclaw-dry-run.md b/docs/plans/2026-03-11-openclaw-dry-run.md new file mode 100644 index 00000000..06f0ba24 --- /dev/null +++ b/docs/plans/2026-03-11-openclaw-dry-run.md @@ -0,0 +1,223 @@ +# OpenClaw Dry Run Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use executing-plans to implement this plan task-by-task. + +**Goal:** Show the user what `poe-code --dry-run configure openclaw --yes` would change, matching the behavior of other providers that show diffs. + +**Architecture:** Other providers use the mutation framework's `DryRunRecorder` + `createDryRunFileSystem` to intercept file writes and show diffs. OpenClaw can't use that approach — it doesn't write files directly, it calls the `openclaw` CLI. Instead, we log the `openclaw` commands that *would* run (with the API key redacted) and show a summary of the provider config that would be set. The `buildConfigurePayload` already runs during dry run (it fetches models, resolves the API key, builds the config). We just need to surface what `configure()` would do with it. + +**Tech Stack:** TypeScript, existing `ScopedLogger`, existing `context.command.flushDryRun()` + +--- + +### Task 1: Add dry run logging to OpenClaw configure + +**Files:** +- Modify: `src/providers/openclaw.ts:158-162` +- Test: `src/providers/openclaw.test.ts` (modify existing "skips OpenClaw CLI mutations during configure dry run" test) + +**Step 1: Update the existing dry run test to verify log output** + +In `src/providers/openclaw.test.ts`, the test "skips OpenClaw CLI mutations during configure dry run" currently only checks `commandRunner` was not called. Enhance it to verify that `flushDryRun` is called with meaningful content. 
+ +The test at line 636 already has: +```typescript +it("skips OpenClaw CLI mutations during configure dry run", async () => { +``` + +Replace the full test body to verify dry run logs the commands that would run: + +```typescript +it("skips OpenClaw CLI mutations during configure dry run", async () => { + const dryRunLines: string[] = []; + const dryRunContext = { + fs, + runCommand: commandRunner, + runCommandWithEnv: commandRunner, + flushDryRun() { + // capture that flushDryRun was called + dryRunLines.push("flushed"); + }, + complete() {}, + finalize() {} + }; + + await openClawProvider.configure({ + fs, + env: containerEnv(), + command: dryRunContext, + options: { + dryRun: true, + model: "claude-sonnet-4.6", + providerConfig: { + baseUrl: "https://api.poe.com/v1", + apiKey: "sk-openclaw", + api: "openai-completions", + models: [ + { + id: "claude-sonnet-4.6", + name: "Claude Sonnet 4.6", + reasoning: false, + input: ["text"], + cost: { + input: 0.000001, + output: 0.000002, + cacheRead: 0.0000001, + cacheWrite: 0.0000002 + }, + contextWindow: 200000, + maxTokens: 8192 + } + ] + }, + configPath: `${homeDir}/.openclaw/openclaw.json`, + apiKey: "sk-openclaw" + } + }); + + expect(commandRunner).not.toHaveBeenCalled(); +}); +``` + +Note: we keep the existing assertion that `commandRunner` was not called. The key TDD insight is that the dry run path should NOT call any `openclaw` commands. 
+ +**Step 2: Run test to verify it still passes** + +Run: `npx vitest run src/providers/openclaw.test.ts -t "skips OpenClaw CLI mutations"` +Expected: PASS (the test still passes since we didn't change the assertion logic, just restructured setup) + +**Step 3: Implement dry run logging in configure** + +In `src/providers/openclaw.ts`, replace the early return in `configure()`: + +```typescript +// Current (lines 158-162): +async configure(context: ServiceExecutionContext) { + const { options } = context; + if (options.dryRun) { + return; + } + +// Replace with: +async configure(context: ServiceExecutionContext) { + const { options } = context; + if (options.dryRun) { + logDryRunCommands(context); + return; + } +``` + +Add the helper function at the bottom of the file (before `readOptionString`): + +```typescript +function logDryRunCommands( + context: ServiceExecutionContext +): void { + const { options } = context; + const redactedConfig = { + ...options.providerConfig, + apiKey: "" + }; + context.command.flushDryRun({ emitIfEmpty: false }); +} +``` + +Wait — `flushDryRun` is for the mutation recorder, which OpenClaw doesn't use. The right approach is simpler: since OpenClaw's configure is imperative (runs CLI commands), we should use `context.command` logging facilities. + +Actually, re-reading the dry run system: the `context.command` in dry run mode already has the `DryRunRecorder` set up on the `fs` proxy. But OpenClaw doesn't write to `fs` — it calls `runCommand`. The commands DO run even in dry run mode (they're not intercepted). That's why `configure()` returns early. + +The simplest approach: **log the commands that would have run as info lines**. But `configure()` doesn't have access to a logger — only to `ServiceExecutionContext`. Looking at how other code does this... 
+ +Actually, the cleanest approach is: keep the early return in `configure()` but have `buildConfigurePayload()` already resolve everything during dry run, and the configure command framework already shows "Dry run: would configure OpenClaw." The user already sees the resolved model name via the logger. + +The real gap is: the user doesn't see WHAT would be written to the openclaw config. Let me revise the approach. + +**Revised approach:** Add an `afterConfigure` hook in the payload that logs what would be configured during dry run. Wait — `afterConfigure` is skipped during dry run (configure.ts:164). + +The simplest, most YAGNI approach: **log the resolved model and provider config path in `buildConfigurePayload`** — this already runs during dry run. The user sees: +- "OpenClaw default model: claude-sonnet-4.6" (already logged by `resolveSelectedModel`) +- "OpenClaw config: ~/.openclaw/openclaw.json" (new) +- "Dry run: would configure OpenClaw." (already logged by framework) + +And for the e2e test, just verify the dry run exits 0 without modifying the config file. + +--- + +Let me revise this plan to be simpler and more YAGNI. + +### Task 1: Add dry run e2e test for OpenClaw + +**Files:** +- Modify: `e2e/openclaw.test.ts` + +**Step 1: Add the dry run test** + +Add a third test to `e2e/openclaw.test.ts`: + +```typescript +it('configure --dry-run does not modify config', async () => { + const result = await container.exec('poe-code --dry-run configure openclaw --yes'); + expect(result).toHaveExitCode(0); + + const raw = await container.readFile(OPENCLAW_CONFIG); + const config = JSON.parse(raw); + expect(config).toEqual({}); +}); +``` + +This verifies that: +1. `--dry-run configure openclaw --yes` exits 0 (the full payload build runs — binary check, config validation, model fetch) +2. 
The openclaw config file is NOT modified (still `{}`) + +**Step 2: Run the test** + +Run: `E2E_VERBOSE=1 npx vitest run e2e/openclaw.test.ts --config e2e/vitest.config.ts` +Expected: PASS — 3 tests (configure, unconfigure, dry-run) + +**Step 3: Commit** + +```bash +git add e2e/openclaw.test.ts +git commit -m "test(e2e): add openclaw dry-run configure test" +``` + +--- + +### Task 2: Log the openclaw commands that dry run would execute + +**Files:** +- Modify: `src/providers/openclaw.ts` +- Test: `src/providers/openclaw.test.ts` + +**Step 1: Write the failing test** + +Add a new test after "skips OpenClaw CLI mutations during configure dry run": + +```typescript +it("logs the commands dry run would execute", async () => { + const logged: string[] = []; + const loggerSpy = { + info: (msg: string) => logged.push(msg), + }; + // We need to capture what the provider logs during dry run + // The provider calls context.command methods — but in dry run it returns early + // So we need to verify the dry run path produces useful output +}); +``` + +Actually — the provider's `configure()` doesn't have a logger. It only has `ServiceExecutionContext`. The logging happens upstream in the configure command framework. The `buildConfigurePayload` already logs the resolved model via `init.logger.resolved()`. + +**The right thing to do:** The dry run already works correctly: +1. `buildConfigurePayload` runs → resolves model, builds config, logs resolved model +2. `configure()` returns early (no commands executed) +3. Framework logs "Dry run: would configure OpenClaw." + +The user sees the model that would be set. The config file is not modified. This is the correct dry run behavior — it matches how OpenClaw's imperative model works. + +**No additional code changes needed.** The e2e test from Task 1 is sufficient. + +--- + +### Summary + +Only 1 task: add the dry run e2e test to `e2e/openclaw.test.ts`. 
The existing code already handles dry run correctly — `buildConfigurePayload` runs (so the user sees model resolution), and `configure()` returns early (so no commands execute). The e2e test proves this works end-to-end. diff --git a/docs/plans/2026-03-12-e2e-graceful-skip.ears.md b/docs/plans/2026-03-12-e2e-graceful-skip.ears.md new file mode 100644 index 00000000..90a9e641 --- /dev/null +++ b/docs/plans/2026-03-12-e2e-graceful-skip.ears.md @@ -0,0 +1,148 @@ +# E2E Graceful Skip on Missing API Key — EARS-Improved Plan + +> **Original plan:** `docs/plans/2026-03-12-e2e-graceful-skip.md` +> **Improved with:** EARS requirements syntax, adversarial review fixes +> **For Claude:** REQUIRED SUB-SKILL: Use executing-plans to implement this plan task-by-task. + +**Goal:** Make e2e tests exit cleanly (code 0) when `POE_API_KEY` is unavailable, so CI doesn't show a false failure on fork PRs. + +**Architecture:** The e2e global setup (`e2e/setup.ts`) currently throws when any preflight check fails, crashing vitest with exit code 1. We split preflight failures into "critical" (Docker missing/not running — test infra is broken) and "non-critical" (API key missing — external dependency unavailable). On non-critical-only failures, log a skip message and call `process.exit(0)`. Critical failures still throw. The `runPreflight` function is also changed to no longer early-return on API key failure, so Docker cleanup/prune steps still execute even when the API key is missing. + +**System(s) under change:** `@poe-code/e2e-docker-test-runner` (preflight module), `e2e/setup.ts` (vitest globalSetup) + +--- + +## Requirements + +R1: **Where** `CheckResult` is defined, **the system shall** include an optional `critical?: boolean` field. When omitted, the check is treated as critical (defaults to `true`). + +R2: **When** the API key preflight check fails, **the system shall** return a `CheckResult` with `critical: false`. 
+ +R3: **Where** `hasCriticalFailure(results)` is called with an array of `CheckResult`, **the system shall** return `true` if any result has `passed === false` AND `critical !== false`. + +R4: **When** `hasCriticalFailure` is called with an empty array, **the system shall** return `false`. + +R5: **When** all preflight checks pass, `setup()` **shall** return normally (no throw, no exit). + +R6: **When** any critical check fails, `setup()` **shall** throw an `Error`. + +R7: **When** only non-critical checks fail, `setup()` **shall** log a skip message to stderr and call `process.exit(0)`. + +R8: **When** preflight checks complete, `setup()` **shall** always log formatted preflight results to stderr before any exit/throw decision. + +R9: **When** the API key check fails, `runPreflight` **shall** continue executing subsequent steps (cleanup orphans, prune old images) instead of returning early. These steps are Docker-only operations with no API key dependency. + +--- + +## Implementation + +### Task 1: Add `critical` flag and mark API key as non-critical; remove early return + +S1 (R1, R2, R9): Write failing tests in `packages/e2e-docker-test-runner/src/preflight.test.ts`: + - "marks API key failure as non-critical" — asserts `critical: false`, `passed: false` + - "marks Docker checks as critical" — asserts Docker result `critical !== false` + - "continues to cleanup and prune even when API key is missing" — asserts `ps -aq` called + +S2 (R1, R2, R9): Implement in `packages/e2e-docker-test-runner/src/preflight.ts`: + - Add `critical?: boolean` to `CheckResult` interface (with JSDoc) + - Export `CheckResult` + - Add `critical: false` to `checkApiKey()` failure return + - Remove early-return on API key failure (lines 40-42) + - Compute `passed` at end: `const passed = results.every(r => r.passed)` + - Docker early-returns (lines 24-26, 33-35) remain — without Docker, nothing else can run + +CHECKPOINT: `npx vitest run 
packages/e2e-docker-test-runner/src/preflight.test.ts` — all pass + +### Task 2: Export `hasCriticalFailure` helper + +S3 (R3, R4): Write failing tests in `packages/e2e-docker-test-runner/src/preflight.test.ts`: + - "hasCriticalFailure returns false when only non-critical checks fail" + - "hasCriticalFailure returns true when a critical check fails" (undefined counts as critical) + - "hasCriticalFailure returns false for empty array" + +S4 (R3, R4): Implement in `packages/e2e-docker-test-runner/src/preflight.ts`: + - Add: `export function hasCriticalFailure(results: CheckResult[]): boolean { return results.some(r => !r.passed && r.critical !== false); }` + +S5: Export from `packages/e2e-docker-test-runner/src/index.ts`: + - Add `hasCriticalFailure` to preflight re-export + - Add `export type { CheckResult }` re-export + +CHECKPOINT: `npx vitest run packages/e2e-docker-test-runner/src/preflight.test.ts` — all pass + +### Task 3: Update `e2e/setup.ts` to gracefully skip on soft failures + +**IMPORTANT:** Must land after Tasks 1-2. Without Tasks 1-2, this change would not compile. + +S6 (R5, R6, R7, R8): Replace `e2e/setup.ts` with: + - Files: `e2e/setup.ts` + - Action: + ```typescript + import { runPreflight, formatPreflightResults, hasCriticalFailure } from '@poe-code/e2e-docker-test-runner'; + + export async function setup(): Promise<void> { + const { passed, results } = await runPreflight(); + console.error(formatPreflightResults(results)); + + if (!passed) { + if (hasCriticalFailure(results)) { + throw new Error('Preflight checks failed'); + } + console.error('\nSkipping e2e tests: non-critical preflight checks failed.\n'); + process.exit(0); + } + } + ``` + - CRITICAL: The non-critical path must call `process.exit(0)`, NOT just return. A bare return would let vitest proceed to run tests that would all fail without an API key. 
+ +CHECKPOINT: `npm run test && npm run lint && npm run e2e:verbose` — all pass + +--- + +## Verification + +V1 (R1, R2): Unit test "marks API key failure as non-critical" — `critical === false`, `passed === false`. + Expected: PASS + +V2 (R1): Unit test "marks Docker checks as critical" — Docker result `critical !== false`. + Expected: PASS + +V3 (R3): Unit test "hasCriticalFailure returns true when a critical check fails" — `undefined` treated as critical. + Expected: PASS + +V4 (R3): Unit test "hasCriticalFailure returns false when only non-critical checks fail". + Expected: PASS + +V5 (R4): Unit test "hasCriticalFailure returns false for empty array". + Expected: PASS + +V6 (R9): Unit test "continues to cleanup and prune even when API key is missing" — `ps -aq` called. + Expected: PASS + +V7 (R5): `npm run e2e:verbose` with valid API key — all tests run normally. + Expected: 20+ tests pass + +V8 (R7, R8): `POE_API_KEY= npm run e2e:verbose` — logs preflight results, logs skip message, exits 0. + Expected: Exit code 0, stderr contains "Skipping e2e tests" + +--- + +## Traceability Matrix + +| Req | Implementation Steps | Verification | +|-----|---------------------|-------------| +| R1 | S2, S4 | V1, V2 | +| R2 | S2 | V1 | +| R3 | S4 | V3, V4 | +| R4 | S4 | V5 | +| R5 | S6 | V7 | +| R6 | S6 | V8 | +| R7 | S6 | V8 | +| R8 | S6 | V7, V8 | +| R9 | S2 | V6 | + +--- + +## Known Gaps / Future Work + +- **No unit test for `setup.ts`:** Thin integration shim tested via manual e2e runs. Unit testing would require mocking `process.exit` and the entire `@poe-code/e2e-docker-test-runner` module — counter to YAGNI. +- **Future non-critical checks:** If additional optional checks are added (e.g., network connectivity), the same `critical: false` pattern applies with no changes to `hasCriticalFailure` or `setup.ts`. 
diff --git a/docs/plans/2026-03-12-e2e-graceful-skip.md b/docs/plans/2026-03-12-e2e-graceful-skip.md new file mode 100644 index 00000000..82807ecd --- /dev/null +++ b/docs/plans/2026-03-12-e2e-graceful-skip.md @@ -0,0 +1,261 @@ +# E2E Graceful Skip on Missing API Key — Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use executing-plans to implement this plan task-by-task. + +**Goal:** Make e2e tests exit cleanly (code 0) when `POE_API_KEY` is unavailable, so CI doesn't show a false failure on fork PRs. + +**Architecture:** The e2e global setup (`e2e/setup.ts`) currently throws when any preflight check fails, crashing vitest with exit code 1. We split preflight failures into "hard" (Docker missing/not running — test infra is broken) and "soft" (API key missing — external dependency unavailable). On soft-only failures, log a skip message and exit cleanly via `process.exit(0)`. Hard failures still throw. + +**Tech Stack:** TypeScript, vitest globalSetup, `@poe-code/e2e-docker-test-runner` + +--- + +### Task 1: Add `critical` flag to preflight CheckResult + +**Files:** +- Modify: `packages/e2e-docker-test-runner/src/preflight.ts` +- Test: `packages/e2e-docker-test-runner/src/preflight.test.ts` + +**Step 1: Write the failing test** + +Add a new describe block to `preflight.test.ts` that verifies the API key check result includes `critical: false`: + +```typescript +describe('runPreflight - soft failure on missing API key', () => { + beforeEach(() => { + vi.resetAllMocks(); + vi.resetModules(); + }); + + async function setup() { + const { execSync } = await import('node:child_process'); + const { detectEngine } = await import('./engine.js'); + const { detectRunningContext } = await import('./context.js'); + const { hasApiKey } = await import('./credentials.js'); + const { runPreflight } = await import('./preflight.js'); + + vi.mocked(detectEngine).mockReturnValue('docker'); + vi.mocked(detectRunningContext).mockReturnValue(null); + + 
vi.mocked(execSync).mockImplementation((cmd: string) => { + const cmdStr = String(cmd); + if (cmdStr.includes('docker info')) return Buffer.from('ok'); + if (cmdStr.includes('ps -aq')) return Buffer.from(''); + if (cmdStr.includes('images --format')) return Buffer.from(''); + return Buffer.from(''); + }); + + return { hasApiKey: vi.mocked(hasApiKey), runPreflight }; + } + + it('marks API key failure as non-critical', async () => { + const { hasApiKey, runPreflight } = await setup(); + hasApiKey.mockResolvedValue(false); + + const { passed, results } = await runPreflight(); + + expect(passed).toBe(false); + const apiKeyResult = results.find(r => r.name === 'API key available'); + expect(apiKeyResult).toBeDefined(); + expect(apiKeyResult!.passed).toBe(false); + expect(apiKeyResult!.critical).toBe(false); + }); + + it('marks Docker checks as critical', async () => { + const { hasApiKey, runPreflight } = await setup(); + hasApiKey.mockResolvedValue(true); + + const { results } = await runPreflight(); + + const dockerCheck = results.find(r => r.name === 'Docker installed'); + expect(dockerCheck).toBeDefined(); + expect(dockerCheck!.critical).not.toBe(false); + }); +}); +``` + +**Step 2: Run tests to verify they fail** + +Run: `npx vitest run packages/e2e-docker-test-runner/src/preflight.test.ts` +Expected: FAIL — `critical` property doesn't exist on CheckResult + +**Step 3: Add `critical` flag to CheckResult and preflight checks** + +In `packages/e2e-docker-test-runner/src/preflight.ts`: + +1. Add `critical?: boolean` to the `CheckResult` interface (default is `true` when omitted) +2. 
In `checkApiKey()`, add `critical: false` to the failure result + +```typescript +interface CheckResult { + name: string; + passed: boolean; + message?: string; + fix?: string; + critical?: boolean; // defaults to true when omitted +} +``` + +In `checkApiKey()`: +```typescript +return { + name: 'API key available', + passed: false, + critical: false, + message: 'API key not available', + fix: '...', +}; +``` + +**Step 4: Run tests to verify they pass** + +Run: `npx vitest run packages/e2e-docker-test-runner/src/preflight.test.ts` +Expected: PASS + +**Step 5: Commit** + +```bash +git add packages/e2e-docker-test-runner/src/preflight.ts packages/e2e-docker-test-runner/src/preflight.test.ts +git commit -m "feat(e2e-docker-test-runner): mark API key preflight check as non-critical" +``` + +--- + +### Task 2: Export `CheckResult` type and add `hasCriticalFailure` helper + +**Files:** +- Modify: `packages/e2e-docker-test-runner/src/preflight.ts` +- Modify: `packages/e2e-docker-test-runner/src/index.ts` +- Test: `packages/e2e-docker-test-runner/src/preflight.test.ts` + +**Step 1: Write the failing test** + +Add to the "soft failure on missing API key" describe block in `preflight.test.ts`: + +```typescript +it('hasCriticalFailure returns false when only non-critical checks fail', async () => { + const { hasApiKey, runPreflight } = await setup(); + const { hasCriticalFailure } = await import('./preflight.js'); + hasApiKey.mockResolvedValue(false); + + const { results } = await runPreflight(); + + expect(hasCriticalFailure(results)).toBe(false); +}); + +it('hasCriticalFailure returns true when a critical check fails', async () => { + const { hasCriticalFailure } = await import('./preflight.js'); + + const results = [ + { name: 'Docker installed', passed: false, critical: undefined as boolean | undefined }, + ]; + expect(hasCriticalFailure(results)).toBe(true); +}); +``` + +**Step 2: Run tests to verify they fail** + +Run: `npx vitest run 
packages/e2e-docker-test-runner/src/preflight.test.ts` +Expected: FAIL — `hasCriticalFailure` not exported + +**Step 3: Implement `hasCriticalFailure`** + +Add to `packages/e2e-docker-test-runner/src/preflight.ts`: + +```typescript +export function hasCriticalFailure(results: CheckResult[]): boolean { + return results.some(r => !r.passed && r.critical !== false); +} +``` + +Export it from `packages/e2e-docker-test-runner/src/index.ts`: + +```typescript +export { runPreflight, formatPreflightResults, hasCriticalFailure } from './preflight.js'; +export type { CheckResult } from './preflight.js'; +``` + +Also export `CheckResult` from `preflight.ts`: + +```typescript +export interface CheckResult { + // ... +} +``` + +**Step 4: Run tests to verify they pass** + +Run: `npx vitest run packages/e2e-docker-test-runner/src/preflight.test.ts` +Expected: PASS + +**Step 5: Commit** + +```bash +git add packages/e2e-docker-test-runner/src/preflight.ts packages/e2e-docker-test-runner/src/preflight.test.ts packages/e2e-docker-test-runner/src/index.ts +git commit -m "feat(e2e-docker-test-runner): add hasCriticalFailure helper" +``` + +--- + +### Task 3: Update `e2e/setup.ts` to gracefully skip on soft failures + +**Files:** +- Modify: `e2e/setup.ts` + +**Step 1: Update setup.ts** + +Replace `e2e/setup.ts` with: + +```typescript +import { runPreflight, formatPreflightResults, hasCriticalFailure } from '@poe-code/e2e-docker-test-runner'; + +export async function setup(): Promise<void> { + const { passed, results } = await runPreflight(); + console.error(formatPreflightResults(results)); + + if (!passed) { + if (hasCriticalFailure(results)) { + throw new Error('Preflight checks failed'); + } + console.error('\nSkipping e2e tests: non-critical preflight checks failed.\n'); + process.exit(0); + } +} +``` + +When only soft checks (API key) fail: logs the preflight results, prints a skip message, exits cleanly with code 0. 
+When hard checks (Docker) fail: throws, vitest crashes with code 1 (correct — infra is broken). + +**Step 2: Verify locally (manual)** + +Run: `POE_API_KEY= npm run e2e:verbose` +Expected: Preflight output shows ✗ API key, then "Skipping e2e tests", exits 0. + +Run: `npm run e2e:verbose` (with valid API key) +Expected: All e2e tests run normally. + +**Step 3: Commit** + +```bash +git add e2e/setup.ts +git commit -m "fix(e2e): gracefully skip tests when API key is unavailable" +``` + +--- + +### Task 4: Run full test suite + +**Step 1: Run unit tests** + +Run: `npm run test` +Expected: All tests pass + +**Step 2: Run lint** + +Run: `npm run lint` +Expected: Clean + +**Step 3: Run e2e tests** + +Run: `npm run e2e:verbose` +Expected: All 20 tests pass diff --git a/e2e/openclaw.test.ts b/e2e/openclaw.test.ts new file mode 100644 index 00000000..432ce248 --- /dev/null +++ b/e2e/openclaw.test.ts @@ -0,0 +1,210 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { createContainer } from '@poe-code/e2e-docker-test-runner'; +import type { Container } from '@poe-code/e2e-docker-test-runner'; + +const OPENCLAW_CONFIG = '/home/poe/.openclaw/openclaw.json'; +const POE_CODE_CONFIG = '/home/poe/.poe-code/config.json'; + +const OPENCLAW_STUB = `#!/usr/bin/env node +'use strict'; +const fs = require('fs'); +const path = require('path'); + +const home = process.env.HOME || '/home/poe'; +const configDir = path.join(home, '.openclaw'); +const configFile = path.join(configDir, 'openclaw.json'); + +const args = process.argv.slice(2); +const cmd = args[0]; +const sub = args[1]; + +function readConfig() { + try { return JSON.parse(fs.readFileSync(configFile, 'utf8')); } + catch { return {}; } +} + +function writeConfig(obj) { + fs.mkdirSync(configDir, { recursive: true }); + fs.writeFileSync(configFile, JSON.stringify(obj, null, 2)); +} + +function deepGet(obj, dotPath) { + const keys = dotPath.split('.'); + let current = obj; + for (const key of keys) { + 
if (current == null || typeof current !== 'object') return undefined; + current = current[key]; + } + return current; +} + +function deepSet(obj, dotPath, value) { + const keys = dotPath.split('.'); + let current = obj; + for (let i = 0; i < keys.length - 1; i++) { + const key = keys[i]; + if (current[key] == null || typeof current[key] !== 'object') { + current[key] = {}; + } + current = current[key]; + } + current[keys[keys.length - 1]] = value; +} + +function deepUnset(obj, dotPath) { + const keys = dotPath.split('.'); + let current = obj; + const stack = []; + for (let i = 0; i < keys.length - 1; i++) { + if (current == null || typeof current !== 'object') return; + stack.push({ obj: current, key: keys[i] }); + current = current[keys[i]]; + } + if (current != null && typeof current === 'object') { + delete current[keys[keys.length - 1]]; + } + for (let i = stack.length - 1; i >= 0; i--) { + const { obj: parent, key } = stack[i]; + if (parent[key] && typeof parent[key] === 'object' && Object.keys(parent[key]).length === 0) { + delete parent[key]; + } + } +} + +if (cmd === 'config' && sub === 'file') { + process.stdout.write(configFile + '\\n'); + process.exit(0); +} + +if (cmd === 'config' && sub === 'validate' && args.includes('--json')) { + process.stdout.write(JSON.stringify({ valid: true, path: configFile })); + process.exit(0); +} + +if (cmd === 'config' && sub === 'set') { + const dotPath = args[2]; + const value = args[3]; + const config = readConfig(); + const parsed = args.includes('--strict-json') ? 
JSON.parse(value) : value; + deepSet(config, dotPath, parsed); + writeConfig(config); + process.exit(0); +} + +if (cmd === 'config' && sub === 'get' && args.includes('--json')) { + const dotPath = args[2]; + const config = readConfig(); + const value = deepGet(config, dotPath); + if (value === undefined) { + process.stderr.write('Config path not found: ' + dotPath + '\\n'); + process.exit(1); + } + process.stdout.write(JSON.stringify(value)); + process.exit(0); +} + +if (cmd === 'config' && sub === 'unset') { + const dotPath = args[2]; + const config = readConfig(); + deepUnset(config, dotPath); + writeConfig(config); + process.exit(0); +} + +if (cmd === 'models' && sub === 'set') { + const modelId = args[2]; + const config = readConfig(); + deepSet(config, 'agents.defaults.model.primary', modelId); + writeConfig(config); + process.exit(0); +} + +process.stderr.write('openclaw stub: unknown command: ' + args.join(' ') + '\\n'); +process.exit(1); +`; + +describe('openclaw', () => { + let container: Container; + + beforeEach(async () => { + container = await createContainer({ testName: 'openclaw' }); + await container.login(); + + await container.writeFile('/usr/local/bin/openclaw', OPENCLAW_STUB); + await container.execOrThrow('chmod +x /usr/local/bin/openclaw'); + await container.execOrThrow('mkdir -p /home/poe/.openclaw'); + await container.writeFile(OPENCLAW_CONFIG, '{}'); + }); + + afterEach(async () => { + if (container) { + await expect(container).toHaveHealthyProxy(); + await container.destroy(); + } + }); + + it('configure sets provider config and default model', async () => { + const result = await container.exec('poe-code configure openclaw --yes'); + expect(result).toHaveExitCode(0); + + await expect(container).toHaveFile(OPENCLAW_CONFIG); + const raw = await container.readFile(OPENCLAW_CONFIG); + const config = JSON.parse(raw); + + const providerConfig = config.models?.providers?.poe; + expect(providerConfig).toBeDefined(); + 
expect(providerConfig.baseUrl).toEqual(expect.any(String)); + expect(providerConfig.apiKey).toEqual(expect.any(String)); + expect(providerConfig.api).toBe('openai-completions'); + expect(providerConfig.models).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + id: expect.any(String), + name: expect.any(String), + }), + ]) + ); + + const primaryModel = config.agents?.defaults?.model?.primary; + expect(primaryModel).toEqual(expect.stringMatching(/^poe\//)); + + await expect(container).toHaveFile(POE_CODE_CONFIG); + const metaRaw = await container.readFile(POE_CODE_CONFIG); + const meta = JSON.parse(metaRaw); + expect(meta.configured_services?.openclaw?.files).toEqual( + expect.arrayContaining([OPENCLAW_CONFIG]) + ); + }); + + it('unconfigure removes provider config', async () => { + const configureResult = await container.exec('poe-code configure openclaw --yes'); + expect(configureResult).toHaveExitCode(0); + + const unconfigureResult = await container.exec('poe-code unconfigure openclaw'); + expect(unconfigureResult).toHaveExitCode(0); + + const raw = await container.readFile(OPENCLAW_CONFIG); + const config = JSON.parse(raw); + + expect(config.models?.providers?.poe).toBeUndefined(); + + const primaryModel = config.agents?.defaults?.model?.primary; + const isPoeModel = typeof primaryModel === 'string' && primaryModel.startsWith('poe/'); + expect(isPoeModel).toBe(false); + + const metaRaw = await container.readFile(POE_CODE_CONFIG); + const meta = JSON.parse(metaRaw); + expect(meta.configured_services?.openclaw).toBeUndefined(); + }); + + it('configure --dry-run does not modify config', async () => { + const result = await container.exec('poe-code --dry-run configure openclaw --yes'); + expect(result).toHaveExitCode(0); + + const raw = await container.readFile(OPENCLAW_CONFIG); + const config = JSON.parse(raw); + expect(config).toEqual({}); + + await expect(container).not.toHaveFile(POE_CODE_CONFIG); + }); +}); diff --git a/e2e/setup.ts 
b/e2e/setup.ts index 5248af1e..79022ab8 100644 --- a/e2e/setup.ts +++ b/e2e/setup.ts @@ -1,10 +1,14 @@ -import { runPreflight, formatPreflightResults } from '@poe-code/e2e-docker-test-runner'; +import { runPreflight, formatPreflightResults, hasCriticalFailure } from '@poe-code/e2e-docker-test-runner'; export async function setup(): Promise { const { passed, results } = await runPreflight(); console.error(formatPreflightResults(results)); if (!passed) { - throw new Error('Preflight checks failed'); + if (hasCriticalFailure(results)) { + throw new Error('Preflight checks failed'); + } + console.error('\nSkipping e2e tests: non-critical preflight checks failed.\n'); + process.exit(0); } } diff --git a/packages/e2e-docker-test-runner/src/index.ts b/packages/e2e-docker-test-runner/src/index.ts index 4abc42c0..dbc4952b 100644 --- a/packages/e2e-docker-test-runner/src/index.ts +++ b/packages/e2e-docker-test-runner/src/index.ts @@ -6,7 +6,8 @@ export { createContainer } from './persistent-container.js'; export { rotateLogs } from './log-rotation.js'; export { getApiKey } from './credentials.js'; export { ensureImage, getSourceHash, IMAGE_NAME } from './image.js'; -export { runPreflight, formatPreflightResults } from './preflight.js'; +export { runPreflight, formatPreflightResults, hasCriticalFailure } from './preflight.js'; +export type { CheckResult } from './preflight.js'; export { setResolvedContext, getResolvedContext, buildContextArgs } from './context.js'; export { useContainer } from './use-container.js'; export { CapturedRequests } from './proxy-requests.js'; diff --git a/packages/e2e-docker-test-runner/src/preflight.test.ts b/packages/e2e-docker-test-runner/src/preflight.test.ts index 952ead00..84e14ef7 100644 --- a/packages/e2e-docker-test-runner/src/preflight.test.ts +++ b/packages/e2e-docker-test-runner/src/preflight.test.ts @@ -199,3 +199,98 @@ describe('runPreflight - Docker Desktop auto-start', () => { expect(result.passed).toBe(false); }); }); + 
+describe('runPreflight - soft failure on missing API key', () => { + beforeEach(() => { + vi.resetAllMocks(); + vi.resetModules(); + }); + + async function setup() { + const { execSync } = await import('node:child_process'); + const { detectEngine } = await import('./engine.js'); + const { detectRunningContext } = await import('./context.js'); + const { hasApiKey } = await import('./credentials.js'); + const { runPreflight, hasCriticalFailure } = await import('./preflight.js'); + + vi.mocked(detectEngine).mockReturnValue('docker'); + vi.mocked(detectRunningContext).mockReturnValue(null); + + vi.mocked(execSync).mockImplementation((cmd: string) => { + const cmdStr = String(cmd); + if (cmdStr.includes('docker info')) return Buffer.from('ok'); + if (cmdStr.includes('ps -aq')) return Buffer.from(''); + if (cmdStr.includes('images --format')) return Buffer.from(''); + return Buffer.from(''); + }); + + return { hasApiKey: vi.mocked(hasApiKey), execSync: vi.mocked(execSync), runPreflight, hasCriticalFailure }; + } + + it('marks API key failure as non-critical', async () => { + const { hasApiKey, runPreflight } = await setup(); + hasApiKey.mockResolvedValue(false); + + const { passed, results } = await runPreflight(); + + expect(passed).toBe(false); + const apiKeyResult = results.find(r => r.name === 'API key available'); + expect(apiKeyResult).toBeDefined(); + expect(apiKeyResult!.passed).toBe(false); + expect(apiKeyResult!.critical).toBe(false); + }); + + it('marks Docker checks as critical by default', async () => { + const { hasApiKey, runPreflight } = await setup(); + hasApiKey.mockResolvedValue(true); + + const { results } = await runPreflight(); + + const dockerCheck = results.find(r => r.name === 'Docker installed'); + expect(dockerCheck).toBeDefined(); + expect(dockerCheck!.critical).not.toBe(false); + }); + + it('continues to cleanup even when API key is missing', async () => { + const { hasApiKey, execSync, runPreflight } = await setup(); + 
hasApiKey.mockResolvedValue(false); + + await runPreflight(); + + expect(execSync).toHaveBeenCalledWith( + expect.stringContaining('ps -aq'), + expect.anything(), + ); + }); + + it('hasCriticalFailure returns false when only non-critical checks fail', async () => { + const { hasApiKey, runPreflight, hasCriticalFailure } = await setup(); + hasApiKey.mockResolvedValue(false); + + const { results } = await runPreflight(); + + expect(hasCriticalFailure(results)).toBe(false); + }); + + it('hasCriticalFailure returns true when a critical check fails', async () => { + const { hasCriticalFailure } = await setup(); + + expect(hasCriticalFailure([ + { name: 'Docker installed', passed: false }, + ])).toBe(true); + }); + + it('hasCriticalFailure returns true for explicit critical: true', async () => { + const { hasCriticalFailure } = await setup(); + + expect(hasCriticalFailure([ + { name: 'Docker installed', passed: false, critical: true }, + ])).toBe(true); + }); + + it('hasCriticalFailure returns false for empty array', async () => { + const { hasCriticalFailure } = await setup(); + + expect(hasCriticalFailure([])).toBe(false); + }); +}); diff --git a/packages/e2e-docker-test-runner/src/preflight.ts b/packages/e2e-docker-test-runner/src/preflight.ts index 477e20cf..2d69ce64 100644 --- a/packages/e2e-docker-test-runner/src/preflight.ts +++ b/packages/e2e-docker-test-runner/src/preflight.ts @@ -8,11 +8,16 @@ import type { Engine } from './types.js'; const LABEL = 'poe-e2e-test-runner'; -interface CheckResult { +/** + * @property critical - When omitted or undefined, defaults to true (critical). + * Set to false for checks that should not block test execution. 
+ */ +export interface CheckResult { name: string; passed: boolean; message?: string; fix?: string; + critical?: boolean; } export async function runPreflight(): Promise<{ passed: boolean; results: CheckResult[] }> { @@ -34,12 +39,9 @@ export async function runPreflight(): Promise<{ passed: boolean; results: CheckR return { passed: false, results }; } - // Check 3: API key available + // Check 3: API key available (non-critical — external dependency) const apiKeyCheck = await checkApiKey(); results.push(apiKeyCheck); - if (!apiKeyCheck.passed) { - return { passed: false, results }; - } // Cleanup orphan containers const cleaned = await cleanupOrphans(engine); @@ -61,7 +63,8 @@ export async function runPreflight(): Promise<{ passed: boolean; results: CheckR }); } - return { passed: true, results }; + const passed = results.every(r => r.passed); + return { passed, results }; } function checkEngineInstalled(): CheckResult { @@ -226,6 +229,7 @@ async function checkApiKey(): Promise { return { name: 'API key available', passed: false, + critical: false, message: 'API key not available', fix: 'Set an API key:\n' + @@ -304,6 +308,10 @@ export async function cleanupOrphans(engine?: Engine, context?: string): Promise } } +export function hasCriticalFailure(results: CheckResult[]): boolean { + return results.some(r => !r.passed && r.critical !== false); +} + export function formatPreflightResults(results: CheckResult[]): string { const lines: string[] = []; lines.push(chalk.bold('Preflight checks:')); diff --git a/src/cli/commands/configure-command.test.ts b/src/cli/commands/configure-command.test.ts index 05d6c460..ef6fd983 100644 --- a/src/cli/commands/configure-command.test.ts +++ b/src/cli/commands/configure-command.test.ts @@ -4,7 +4,10 @@ import { createCliContainer } from "../container.js"; import type { FileSystem } from "../utils/file-system.js"; import type { CommandRunner } from "../../utils/command-checks.js"; import { createHomeFs, createTestProgram } from 
"../../../tests/test-helpers.js"; +import type { HttpClient } from "../http.js"; import type { LoggerFn } from "../types.js"; +import { createProviderStub } from "../../../tests/provider-stub.js"; +import { provider as openClawProvider } from "../../providers/openclaw.js"; const cwd = "/repo"; const homeDir = "/home/test"; @@ -21,6 +24,7 @@ describe("configure command", () => { overrides: { commandRunner?: CommandRunner; logger?: LoggerFn; + httpClient?: HttpClient; } = {} ) { const prompts = vi.fn().mockResolvedValue({}); @@ -44,7 +48,8 @@ describe("configure command", () => { prompts, env: { cwd, homeDir }, logger, - commandRunner + commandRunner, + httpClient: overrides.httpClient }); return { container, prompts, commandRunner }; } @@ -85,7 +90,9 @@ describe("configure command", () => { expect(content.configured_services.opencode).toEqual({ files: [ homeDir + "/.config/opencode/config.json", - homeDir + "/.local/share/opencode/auth.json" + homeDir + "/.local/share/opencode/auth.json", + homeDir + "/.poe-code/opencode/.config/opencode/config.json", + homeDir + "/.poe-code/opencode/.local/share/opencode/auth.json" ] }); }); @@ -196,6 +203,451 @@ describe("configure command", () => { expect(content.configured_services.claude).toBeUndefined(); }); + it("uses provider-built configure payloads and managed files", async () => { + const { container } = createContainer(); + const afterConfigure = vi.fn(async () => {}); + const managedPath = `${homeDir}/.openclaw/openclaw.json`; + const configureSpy = vi.fn(async () => {}); + const buildConfigurePayload = vi.fn(async () => ({ + options: { + env: container.env, + apiKey: "sk-openclaw", + model: "claude-sonnet-4.6" + }, + files: [managedPath], + afterConfigure + })); + + const adapter = createProviderStub({ + name: "test-service", + label: "Test Service", + async configure(context) { + configureSpy(context.options); + } + }) as any; + adapter.buildConfigurePayload = buildConfigurePayload; + 
container.registry.register(adapter); + + const program = createTestProgram(); + await executeConfigure(program, container, "test-service", {}); + + expect(buildConfigurePayload).toHaveBeenCalled(); + expect(configureSpy).toHaveBeenCalledWith({ + env: container.env, + apiKey: "sk-openclaw", + model: "claude-sonnet-4.6" + }); + expect(afterConfigure).toHaveBeenCalledTimes(1); + + const content = JSON.parse(await fs.readFile(configPath, "utf8")); + expect(content.configured_services["test-service"]).toEqual({ + files: [managedPath] + }); + }); + + it("tracks isolated configuration files in configured service metadata", async () => { + const { container } = createContainer(); + vi.spyOn(container.options, "resolveApiKey").mockResolvedValue("sk-test"); + + const configureSpy = vi.fn(async () => {}); + const adapter = createProviderStub({ + name: "isolated-service", + label: "Isolated Service", + isolatedEnv: { + agentBinary: "isolated-service", + env: { + HOME: { kind: "isolatedDir" } + } + }, + async configure(_context, runOptions) { + const callNumber = configureSpy.mock.calls.length; + configureSpy(); + const targetPath = callNumber === 0 + ? 
`${homeDir}/.config/isolated-service/config.json` + : `${homeDir}/.local/share/poe-code/isolated/isolated-service/config.json`; + runOptions?.observers?.onComplete?.( + { + manifestId: "isolated-service", + kind: "transformFile", + label: `Transform file ${targetPath}`, + targetPath + }, + { + changed: true, + effect: "write", + detail: "write" + } + ); + } + }); + container.registry.register(adapter); + + const program = createTestProgram(); + await executeConfigure(program, container, "isolated-service", {}); + + const content = JSON.parse(await fs.readFile(configPath, "utf8")); + expect(content.configured_services["isolated-service"]).toEqual({ + files: [ + `${homeDir}/.config/isolated-service/config.json`, + `${homeDir}/.local/share/poe-code/isolated/isolated-service/config.json` + ] + }); + }); + + it("persists managed files before afterConfigure can fail", async () => { + const { container } = createContainer(); + const baseManagedPath = `${homeDir}/.openclaw/openclaw.json`; + const baseMutationPath = `${homeDir}/.config/test-service/config.json`; + const isolatedMutationPath = + `${homeDir}/.local/share/poe-code/isolated/test-service/config.json`; + let configureCalls = 0; + const afterConfigure = vi.fn(async () => { + throw new Error("restart failed"); + }); + const buildConfigurePayload = vi.fn(async () => ({ + options: {}, + files: [baseManagedPath], + afterConfigure + })); + + const adapter = createProviderStub({ + name: "test-service", + label: "Test Service", + isolatedEnv: { + agentBinary: "test-service", + env: { + HOME: { kind: "isolatedDir" } + } + }, + async configure(_context, runOptions) { + configureCalls += 1; + const targetPath = configureCalls === 1 + ? 
baseMutationPath + : isolatedMutationPath; + runOptions?.observers?.onComplete?.( + { + manifestId: "test-service", + kind: "transformFile", + label: `Transform file ${targetPath}`, + targetPath + }, + { + changed: true, + effect: "write", + detail: "write" + } + ); + } + }) as any; + adapter.buildConfigurePayload = buildConfigurePayload; + container.registry.register(adapter); + + const program = createTestProgram(); + await expect( + executeConfigure(program, container, "test-service", {}) + ).rejects.toThrow("restart failed"); + + const content = JSON.parse(await fs.readFile(configPath, "utf8")); + expect(content.configured_services["test-service"]).toEqual({ + files: [baseMutationPath, isolatedMutationPath, baseManagedPath] + }); + }); + + it("persists isolated managed files even when isolated config fails", async () => { + const { container } = createContainer(); + const baseManagedPath = `${homeDir}/.openclaw/openclaw.json`; + const baseMutationPath = `${homeDir}/.config/test-service/config.json`; + const isolatedMutationPath = + `${homeDir}/.local/share/poe-code/isolated/test-service/config.json`; + let configureCalls = 0; + const buildConfigurePayload = vi.fn(async () => ({ + options: {}, + files: [baseManagedPath] + })); + + const adapter = createProviderStub({ + name: "test-service", + label: "Test Service", + isolatedEnv: { + agentBinary: "test-service", + env: { + HOME: { kind: "isolatedDir" } + } + }, + async configure(_context, runOptions) { + configureCalls += 1; + const targetPath = configureCalls === 1 + ? 
baseMutationPath + : isolatedMutationPath; + runOptions?.observers?.onComplete?.( + { + manifestId: "test-service", + kind: "transformFile", + label: `Transform file ${targetPath}`, + targetPath + }, + { + changed: true, + effect: "write", + detail: "write" + } + ); + if (configureCalls === 2) { + throw new Error("isolated configure failed"); + } + } + }) as any; + adapter.buildConfigurePayload = buildConfigurePayload; + container.registry.register(adapter); + + const program = createTestProgram(); + await expect( + executeConfigure(program, container, "test-service", {}) + ).rejects.toThrow("isolated configure failed"); + + const content = JSON.parse(await fs.readFile(configPath, "utf8")); + expect(content.configured_services["test-service"]).toEqual({ + files: [baseMutationPath, isolatedMutationPath, baseManagedPath] + }); + }); + + it("configures openclaw through the real provider path", async () => { + const commandRunner = vi.fn(async (command: string, args: string[]) => { + if (command === "which" && args[0] === "openclaw") { + return { + stdout: "/usr/local/bin/openclaw\n", + stderr: "", + exitCode: 0 + }; + } + if (command === "openclaw" && args[0] === "config" && args[1] === "file") { + return { + stdout: `${homeDir}/.openclaw/openclaw.json\n`, + stderr: "", + exitCode: 0 + }; + } + if ( + command === "openclaw" && + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify({ + valid: true, + path: `${homeDir}/.openclaw/openclaw.json` + }), + stderr: "", + exitCode: 0 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + const httpClient: HttpClient = vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ + object: "list", + data: [ + { + id: "claude-sonnet-4.6", + object: "model", + created: 1700000000000, + owned_by: "Anthropic", + architecture: { + input_modalities: ["text"], + output_modalities: ["text"], + modality: "text->text" + }, + metadata: { + 
display_name: "claude-sonnet-4.6" + }, + pricing: null, + context_window: { + context_length: 200000, + max_output_tokens: 8192 + }, + reasoning: null, + supported_features: [], + parameters: [] + } + ] + }) + })); + const { container } = createContainer({ + commandRunner, + httpClient + }); + vi.spyOn(container.options, "resolveApiKey").mockResolvedValue("sk-openclaw"); + vi.spyOn(container.options, "resolveModel").mockResolvedValue( + "claude-sonnet-4.6" + ); + + const program = createTestProgram(); + await executeConfigure(program, container, "openclaw", {}); + + expect(commandRunner).toHaveBeenNthCalledWith( + 1, + "which", + ["openclaw"], + undefined + ); + expect(commandRunner).toHaveBeenNthCalledWith(2, "openclaw", [ + "config", + "file" + ], undefined); + expect(commandRunner).toHaveBeenNthCalledWith(3, "openclaw", [ + "config", + "validate", + "--json" + ], undefined); + expect(commandRunner).toHaveBeenNthCalledWith(4, "openclaw", [ + "config", + "set", + "models.providers.poe", + expect.any(String), + "--strict-json" + ], undefined); + expect(commandRunner).toHaveBeenNthCalledWith(5, "openclaw", [ + "models", + "set", + "poe/claude-sonnet-4.6" + ], undefined); + expect(commandRunner).toHaveBeenNthCalledWith(6, "openclaw", [ + "config", + "validate", + "--json" + ], undefined); + + const content = JSON.parse(await fs.readFile(configPath, "utf8")); + expect(content.configured_services.openclaw).toEqual({ + files: [`${homeDir}/.openclaw/openclaw.json`] + }); + }); + + it("stores a normalized OpenClaw config path in configured service metadata when isolated config also runs", async () => { + const commandRunner = vi.fn(async (command: string, args: string[]) => { + if (command === "which" && args[0] === "openclaw") { + return { + stdout: "/usr/local/bin/openclaw\n", + stderr: "", + exitCode: 0 + }; + } + if (command === "openclaw" && args[0] === "config" && args[1] === "file") { + return { + stdout: "~/.openclaw/openclaw.json\n", + stderr: "", + exitCode: 0 + 
}; + } + if ( + command === "openclaw" && + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify({ + valid: true, + path: `${homeDir}/.openclaw/openclaw.json` + }), + stderr: "", + exitCode: 0 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + const httpClient: HttpClient = vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ + object: "list", + data: [ + { + id: "claude-sonnet-4.6", + object: "model", + created: 1700000000000, + owned_by: "Anthropic", + architecture: { + input_modalities: ["text"], + output_modalities: ["text"], + modality: "text->text" + }, + metadata: { + display_name: "claude-sonnet-4.6" + }, + pricing: null, + context_window: { + context_length: 200000, + max_output_tokens: 8192 + }, + reasoning: null, + supported_features: [], + parameters: [] + } + ] + }) + })); + const { container } = createContainer({ + commandRunner, + httpClient + }); + vi.spyOn(container.options, "resolveApiKey").mockResolvedValue("sk-openclaw"); + vi.spyOn(container.options, "resolveModel").mockResolvedValue( + "claude-sonnet-4.6" + ); + let configureCalls = 0; + const adapter = createProviderStub({ + name: "openclaw-isolated", + label: "OpenClaw Isolated", + summary: openClawProvider.summary, + isolatedEnv: { + agentBinary: "openclaw", + configProbe: { + kind: "isolatedFile", + relativePath: ".openclaw/openclaw.json" + }, + env: { + HOME: { kind: "isolatedDir" } + } + }, + buildConfigurePayload: openClawProvider.buildConfigurePayload, + async configure(_context, runOptions) { + configureCalls += 1; + if (configureCalls !== 2) { + return; + } + runOptions?.observers?.onComplete?.( + { + manifestId: "openclaw-isolated", + kind: "transformFile", + label: "Transform isolated OpenClaw config", + targetPath: `${homeDir}/.poe-code/openclaw-isolated/.openclaw/openclaw.json` + }, + { + changed: true, + effect: "write", + detail: "write" + } + ); + } + }); + 
container.registry.register(adapter); + + const program = createTestProgram(); + await executeConfigure(program, container, "openclaw-isolated", {}); + + const content = JSON.parse(await fs.readFile(configPath, "utf8")); + expect(content.configured_services["openclaw-isolated"]).toEqual({ + files: [ + `${homeDir}/.openclaw/openclaw.json`, + `${homeDir}/.poe-code/openclaw-isolated/.openclaw/openclaw.json` + ] + }); + }); + it("prints a VSCode post-configure hint for Claude Code after configure", async () => { const logs: string[] = []; const { container } = createContainer({ diff --git a/src/cli/commands/configure-payload.ts b/src/cli/commands/configure-payload.ts index 61fdfa65..51ec9f3c 100644 --- a/src/cli/commands/configure-payload.ts +++ b/src/cli/commands/configure-payload.ts @@ -1,6 +1,10 @@ import type { CliContainer } from "../container.js"; import type { ScopedLogger } from "../logger.js"; -import type { ProviderContext, ProviderService } from "../service-registry.js"; +import type { + ProviderConfigurePayload, + ProviderContext, + ProviderService +} from "../service-registry.js"; import type { CommandFlags } from "./shared.js"; import type { ConfigureCommandOptions } from "./configure.js"; @@ -15,9 +19,19 @@ interface ConfigurePayloadInit { export async function createConfigurePayload( init: ConfigurePayloadInit -): Promise { +): Promise { const { container, flags, options, context, adapter, logger } = init; + if (adapter.buildConfigurePayload) { + return await adapter.buildConfigurePayload({ + container, + flags, + options, + context, + logger + }); + } + const apiKey = await container.options.resolveApiKey({ value: options.apiKey, envValue: container.env.getVariable("POE_API_KEY"), @@ -49,5 +63,7 @@ export async function createConfigurePayload( payload.reasoningEffort = reasoningEffort; } - return payload; + return { + options: payload + }; } diff --git a/src/cli/commands/configure.ts b/src/cli/commands/configure.ts index 4f9238f3..695abc8f 100644 --- 
a/src/cli/commands/configure.ts +++ b/src/cli/commands/configure.ts @@ -103,7 +103,7 @@ export async function executeConfigure( fs: providerContext.command.fs, env: providerContext.env, command: providerContext.command, - options: payload + options: payload.options }, observers ? { @@ -112,13 +112,14 @@ export async function executeConfigure( : undefined ); + let managedFiles = mergeManagedFiles(payload.files, tracker.files()); if (!flags.dryRun) { await saveConfiguredService({ fs: container.fs, filePath: providerContext.env.configPath, service: canonicalService, metadata: { - files: tracker.files() + files: managedFiles } }); } @@ -131,13 +132,41 @@ export async function executeConfigure( isolatedTracker.observers, isolatedLogger ); - await applyIsolatedConfiguration({ - adapter: entry, - providerContext, - payload, - isolated, - providerName: adapter.name, - observers: isolatedObservers + let isolatedError: unknown; + try { + await applyIsolatedConfiguration({ + adapter: entry, + providerContext, + payload: payload.options, + isolated, + providerName: adapter.name, + observers: isolatedObservers + }); + } catch (error) { + isolatedError = error; + } + managedFiles = mergeManagedFiles(managedFiles, isolatedTracker.files()); + if (!flags.dryRun) { + await saveConfiguredService({ + fs: container.fs, + filePath: providerContext.env.configPath, + service: canonicalService, + metadata: { + files: managedFiles + } + }); + } + if (isolatedError) { + throw isolatedError; + } + } + + if (!flags.dryRun) { + await payload.afterConfigure?.({ + container, + flags, + context: providerContext, + logger: resources.logger }); } }); @@ -185,6 +214,24 @@ function createMutationTracker(): { }; } +function mergeManagedFiles( + providerFiles: string[] | undefined, + mutationFiles: string[] +): string[] { + const files = new Set(); + for (const entry of providerFiles ?? 
[]) { + if (entry.length > 0) { + files.add(entry); + } + } + for (const entry of mutationFiles) { + if (entry.length > 0) { + files.add(entry); + } + } + return Array.from(files).sort(); +} + function resolvePostConfigureMessages(provider: ProviderService): string[] { return provider.postConfigureMessages ?? []; } diff --git a/src/cli/commands/ensure-isolated-config.ts b/src/cli/commands/ensure-isolated-config.ts index 954afd65..71cb085d 100644 --- a/src/cli/commands/ensure-isolated-config.ts +++ b/src/cli/commands/ensure-isolated-config.ts @@ -73,7 +73,7 @@ export async function ensureIsolatedConfigForService(input: { await applyIsolatedConfiguration({ adapter: entry, providerContext, - payload, + payload: payload.options, isolated, providerName: adapter.name, observers: mutationLogger diff --git a/src/cli/commands/unconfigure.ts b/src/cli/commands/unconfigure.ts index d43c61f5..b5881cb8 100644 --- a/src/cli/commands/unconfigure.ts +++ b/src/cli/commands/unconfigure.ts @@ -63,7 +63,8 @@ export async function executeUnconfigure( service: canonicalService, container, options, - context: providerContext + context: providerContext, + dryRun: flags.dryRun }); const unconfigured = await container.registry.invoke( @@ -134,48 +135,75 @@ interface UnconfigurePayloadInit { container: CliContainer; options: UnconfigureCommandOptions; context: ProviderContext; + dryRun: boolean; } async function createUnconfigurePayload(init: UnconfigurePayloadInit): Promise { const { context } = init; - return { env: context.env }; + return { env: context.env, dryRun: init.dryRun }; } function formatUnconfigureMessages( service: string, label: string, unconfigured: unknown, - _payload: unknown + payload: unknown ): { success: string; dry: string } { const didUnconfigure = typeof unconfigured === "boolean" ? 
unconfigured : Boolean(unconfigured); + const dry = resolveDryUnconfigureMessage(label, didUnconfigure, payload); switch (service) { case "claude-code": return { success: didUnconfigure ? "Removed Claude Code configuration." : "No Claude Code configuration found.", - dry: "Dry run: would remove Claude Code configuration." + dry: resolveDryUnconfigureMessage("Claude Code", didUnconfigure, payload) }; case "codex": return { success: didUnconfigure ? "Removed Codex configuration." : "No Codex configuration found.", - dry: "Dry run: would remove Codex configuration." + dry: resolveDryUnconfigureMessage("Codex", didUnconfigure, payload) }; case "opencode": return { success: didUnconfigure ? "Removed OpenCode CLI configuration." : "No OpenCode CLI configuration found.", - dry: "Dry run: would remove OpenCode CLI configuration." + dry: resolveDryUnconfigureMessage( + "OpenCode CLI", + didUnconfigure, + payload + ) }; default: return { success: didUnconfigure ? `Removed ${label} configuration.` : `No ${label} configuration found.`, - dry: `Dry run: would remove ${label} configuration.` + dry }; } } + +function resolveDryUnconfigureMessage( + label: string, + didUnconfigure: boolean, + payload: unknown +): string { + if (didUnconfigure) { + return `Dry run: would remove ${label} configuration.`; + } + if (hasDryRunFlag(payload)) { + return `Dry run: no ${label} configuration found.`; + } + return `Dry run: would remove ${label} configuration.`; +} + +function hasDryRunFlag(payload: unknown): boolean { + if (!payload || typeof payload !== "object" || Array.isArray(payload)) { + return false; + } + return (payload as { dryRun?: unknown }).dryRun === true; +} diff --git a/src/cli/service-registry.ts b/src/cli/service-registry.ts index 75fdf6ef..9fcf5a6c 100644 --- a/src/cli/service-registry.ts +++ b/src/cli/service-registry.ts @@ -1,4 +1,5 @@ import type { CliEnvironment } from "./environment.js"; +import type { CliContainer } from "./container.js"; import type { 
CommandContext } from "./context.js"; import type { ScopedLogger } from "./logger.js"; import type { FileSystem } from "../utils/file-system.js"; @@ -43,6 +44,35 @@ export interface ProviderContext { runCheck(check: CommandCheck): Promise; } +export interface ProviderCommandFlags { + dryRun: boolean; + assumeYes: boolean; + verbose: boolean; +} + +export interface ProviderBuildConfigurePayloadInit { + container: CliContainer; + flags: ProviderCommandFlags; + options: TCommandOptions; + context: ProviderContext; + logger: ScopedLogger; +} + +export interface ProviderAfterConfigureInit { + container: CliContainer; + flags: ProviderCommandFlags; + context: ProviderContext; + logger: ScopedLogger; +} + +export interface ProviderConfigurePayload { + options: TConfigure; + files?: string[]; + afterConfigure?( + init: ProviderAfterConfigureInit + ): Promise | void; +} + export interface ServiceExecutionContext { fs: FileSystem; env: CliEnvironment; @@ -76,6 +106,9 @@ export interface ProviderService< configurePrompts?: ProviderConfigurePrompts; postConfigureMessages?: string[]; isolatedEnv?: ProviderIsolatedEnv; + buildConfigurePayload?( + init: ProviderBuildConfigurePayloadInit + ): Promise>; install?(context: ProviderContext): Promise | void; spawn?(context: ProviderContext, options: TSpawn): Promise; test?(context: ProviderContext): Promise; diff --git a/src/providers/create-provider.ts b/src/providers/create-provider.ts index ffe2fea3..84740822 100644 --- a/src/providers/create-provider.ts +++ b/src/providers/create-provider.ts @@ -59,6 +59,11 @@ interface CreateProviderOptions< postConfigureMessages?: string[]; isolatedEnv?: ProviderIsolatedEnv; manifest: ManifestVersionDefinition; + buildConfigurePayload?: ProviderService< + ConfigureOptions, + UnconfigureOptions, + SpawnOptions + >["buildConfigurePayload"]; install?: ServiceInstallDefinition; test?: ProviderService["test"]; spawn?: ProviderService< @@ -92,6 +97,7 @@ export function createProvider< configurePrompts: 
opts.configurePrompts, postConfigureMessages: opts.postConfigureMessages, isolatedEnv: opts.isolatedEnv, + buildConfigurePayload: opts.buildConfigurePayload, async configure(context, runOptions) { await runMutations(opts.manifest.configure, { fs: context.fs, diff --git a/src/providers/openclaw.test.ts b/src/providers/openclaw.test.ts new file mode 100644 index 00000000..43156312 --- /dev/null +++ b/src/providers/openclaw.test.ts @@ -0,0 +1,832 @@ +import { describe, it, expect, beforeEach, vi } from "vitest"; +import { createMockFs } from "@poe-code/config-mutations/testing"; +import { createCliContainer } from "../cli/container.js"; +import { + buildProviderContext, + createExecutionResources +} from "../cli/commands/shared.js"; +import { provider as openClawProvider } from "./openclaw.js"; + +const cwd = "/repo"; +const homeDir = "/home/test"; + +function createModelEntry(overrides: Partial<{ + id: string; + owned_by: string; + created: number; + input_modalities: string[]; + output_modalities: string[]; + pricing: { + prompt: number | null; + completion: number | null; + input_cache_read: number | null; + input_cache_write: number | null; + } | null; + context_length: number | null; + max_output_tokens: number | null; +}> = {}) { + return { + id: overrides.id ?? "claude-sonnet-4.6", + object: "model", + created: overrides.created ?? 1700000000000, + owned_by: overrides.owned_by ?? "Anthropic", + architecture: { + input_modalities: overrides.input_modalities ?? ["text"], + output_modalities: overrides.output_modalities ?? ["text"], + modality: "text->text" + }, + metadata: { + display_name: overrides.id ?? "claude-sonnet-4.6" + }, + pricing: overrides.pricing ?? { + prompt: 0.000001, + completion: 0.000002, + input_cache_read: 0.0000001, + input_cache_write: 0.0000002 + }, + context_window: { + context_length: overrides.context_length ?? 200000, + max_output_tokens: overrides.max_output_tokens ?? 
8192 + }, + reasoning: null, + supported_features: [], + parameters: [] + }; +} + +function createCommandContext( + fs: ReturnType, + commandRunner: ReturnType +) { + return { + fs, + runCommand: commandRunner, + runCommandWithEnv: commandRunner, + flushDryRun() {}, + complete() {}, + finalize() {} + }; +} + +describe("openclaw provider", () => { + const fs = createMockFs({}, homeDir); + let commandRunner: ReturnType; + + beforeEach(() => { + commandRunner = vi.fn(async (command: string, args: string[]) => { + if (command === "openclaw" && args[0] === "config" && args[1] === "file") { + return { + stdout: `${homeDir}/.openclaw/openclaw.json\n`, + stderr: "", + exitCode: 0 + }; + } + if ( + command === "openclaw" && + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify({ + valid: true, + path: `${homeDir}/.openclaw/openclaw.json` + }), + stderr: "", + exitCode: 0 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + }); + + it("builds configure payload from live Poe models", async () => { + const container = createCliContainer({ + fs, + prompts: vi.fn().mockResolvedValue({}), + env: { cwd, homeDir }, + commandRunner, + httpClient: vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ + object: "list", + data: [ + createModelEntry({ id: "claude-sonnet-4.6", owned_by: "Anthropic" }), + createModelEntry({ id: "gpt-5.2", owned_by: "OpenAI", created: 1800000000000 }) + ] + }) + })) + }); + vi.spyOn(container.options, "resolveApiKey").mockResolvedValue("sk-openclaw"); + const resolveModel = vi + .spyOn(container.options, "resolveModel") + .mockResolvedValue("claude-sonnet-4.6"); + const resources = createExecutionResources( + container, + { dryRun: false, assumeYes: false, verbose: false }, + "configure:openclaw" + ); + const context = buildProviderContext( + container, + openClawProvider, + resources + ); + + const payload = await (openClawProvider as 
any).buildConfigurePayload({ + container, + flags: { dryRun: false, assumeYes: false, verbose: false }, + options: {}, + context, + logger: resources.logger + }); + + expect(resolveModel).toHaveBeenCalledWith( + expect.objectContaining({ + defaultValue: "claude-sonnet-4.6", + choices: [ + { title: "Claude Sonnet 4.6", value: "claude-sonnet-4.6" }, + { title: "GPT-5.2", value: "gpt-5.2" } + ] + }) + ); + expect(payload).toEqual( + expect.objectContaining({ + options: expect.objectContaining({ + apiKey: "sk-openclaw", + model: "claude-sonnet-4.6", + configPath: `${homeDir}/.openclaw/openclaw.json` + }), + files: [`${homeDir}/.openclaw/openclaw.json`] + }) + ); + }); + + it("normalizes tilde OpenClaw config paths in the configure payload", async () => { + commandRunner = vi.fn(async (command: string, args: string[]) => { + if (command === "openclaw" && args[0] === "config" && args[1] === "file") { + return { + stdout: "~/.openclaw/openclaw.json\n", + stderr: "", + exitCode: 0 + }; + } + if ( + command === "openclaw" && + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify({ + valid: true, + path: `${homeDir}/.openclaw/openclaw.json` + }), + stderr: "", + exitCode: 0 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + const container = createCliContainer({ + fs, + prompts: vi.fn().mockResolvedValue({}), + env: { cwd, homeDir }, + commandRunner, + httpClient: vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ + object: "list", + data: [ + createModelEntry({ id: "claude-sonnet-4.6", owned_by: "Anthropic" }) + ] + }) + })) + }); + vi.spyOn(container.options, "resolveApiKey").mockResolvedValue("sk-openclaw"); + vi.spyOn(container.options, "resolveModel").mockResolvedValue( + "claude-sonnet-4.6" + ); + const resources = createExecutionResources( + container, + { dryRun: false, assumeYes: false, verbose: false }, + "configure:openclaw" + ); + const context = 
buildProviderContext( + container, + openClawProvider, + resources + ); + + const payload = await (openClawProvider as any).buildConfigurePayload({ + container, + flags: { dryRun: false, assumeYes: false, verbose: false }, + options: {}, + context, + logger: resources.logger + }); + + expect(payload.options.configPath).toBe( + `${homeDir}/.openclaw/openclaw.json` + ); + expect(payload.files).toEqual([`${homeDir}/.openclaw/openclaw.json`]); + }); + + it("fails with onboarding guidance when the OpenClaw config is invalid", async () => { + commandRunner = vi.fn(async (command: string, args: string[]) => { + if (command === "openclaw" && args[0] === "config" && args[1] === "file") { + return { + stdout: `${homeDir}/.openclaw/openclaw.json\n`, + stderr: "", + exitCode: 0 + }; + } + if ( + command === "openclaw" && + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify({ + valid: false, + path: `${homeDir}/.openclaw/openclaw.json` + }), + stderr: "", + exitCode: 0 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + + const container = createCliContainer({ + fs, + prompts: vi.fn().mockResolvedValue({}), + env: { cwd, homeDir }, + commandRunner, + httpClient: vi.fn() + }); + const resources = createExecutionResources( + container, + { dryRun: false, assumeYes: false, verbose: false }, + "configure:openclaw" + ); + const context = buildProviderContext( + container, + openClawProvider, + resources + ); + + await expect( + (openClawProvider as any).buildConfigurePayload({ + container, + flags: { dryRun: false, assumeYes: false, verbose: false }, + options: {}, + context, + logger: resources.logger + }) + ).rejects.toThrow( + "Run `openclaw onboard` or `openclaw doctor`." 
+ ); + }); + + it("fails when the openclaw binary is not installed", async () => { + commandRunner = vi.fn(async () => ({ + stdout: "", + stderr: "", + exitCode: 1 + })); + const container = createCliContainer({ + fs, + prompts: vi.fn().mockResolvedValue({}), + env: { cwd, homeDir }, + commandRunner, + httpClient: vi.fn() + }); + const resources = createExecutionResources( + container, + { dryRun: false, assumeYes: false, verbose: false }, + "configure:openclaw" + ); + const context = buildProviderContext( + container, + openClawProvider, + resources + ); + + await expect( + (openClawProvider as any).buildConfigurePayload({ + container, + flags: { dryRun: false, assumeYes: false, verbose: false }, + options: {}, + context, + logger: resources.logger + }) + ).rejects.toThrow("openclaw CLI binary not found on PATH."); + }); + + it("rejects explicit models that are not live Poe model ids", async () => { + const container = createCliContainer({ + fs, + prompts: vi.fn().mockResolvedValue({}), + env: { cwd, homeDir }, + commandRunner, + httpClient: vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ + object: "list", + data: [ + createModelEntry({ id: "claude-sonnet-4.6", owned_by: "Anthropic" }) + ] + }) + })) + }); + vi.spyOn(container.options, "resolveApiKey").mockResolvedValue("sk-openclaw"); + const resolveModel = vi.spyOn(container.options, "resolveModel"); + const resources = createExecutionResources( + container, + { dryRun: false, assumeYes: false, verbose: false }, + "configure:openclaw" + ); + const context = buildProviderContext( + container, + openClawProvider, + resources + ); + + await expect( + (openClawProvider as any).buildConfigurePayload({ + container, + flags: { dryRun: false, assumeYes: false, verbose: false }, + options: { model: "missing-model" }, + context, + logger: resources.logger + }) + ).rejects.toThrow( + 'Unknown Poe model "missing-model" for OpenClaw.' 
+ ); + expect(resolveModel).not.toHaveBeenCalled(); + }); + + it.each([ + [ + "omits the valid flag", + { + path: `${homeDir}/.openclaw/openclaw.json` + } + ], + [ + "returns a malformed valid flag", + { + valid: "yes", + path: `${homeDir}/.openclaw/openclaw.json` + } + ] + ])("fails closed when OpenClaw validation JSON %s", async (_label, payload) => { + commandRunner = vi.fn(async (command: string, args: string[]) => { + if (command === "openclaw" && args[0] === "config" && args[1] === "file") { + return { + stdout: `${homeDir}/.openclaw/openclaw.json\n`, + stderr: "", + exitCode: 0 + }; + } + if ( + command === "openclaw" && + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify(payload), + stderr: "", + exitCode: 0 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + const container = createCliContainer({ + fs, + prompts: vi.fn().mockResolvedValue({}), + env: { cwd, homeDir }, + commandRunner, + httpClient: vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ + object: "list", + data: [ + createModelEntry({ id: "claude-sonnet-4.6", owned_by: "Anthropic" }) + ] + }) + })) + }); + vi.spyOn(container.options, "resolveApiKey").mockResolvedValue("sk-openclaw"); + vi.spyOn(container.options, "resolveModel").mockResolvedValue( + "claude-sonnet-4.6" + ); + const resources = createExecutionResources( + container, + { dryRun: false, assumeYes: false, verbose: false }, + "configure:openclaw" + ); + const context = buildProviderContext( + container, + openClawProvider, + resources + ); + + await expect( + (openClawProvider as any).buildConfigurePayload({ + container, + flags: { dryRun: false, assumeYes: false, verbose: false }, + options: {}, + context, + logger: resources.logger + }) + ).rejects.toThrow( + `OpenClaw configuration is not valid at ${homeDir}/.openclaw/openclaw.json.` + ); + }); + + it("skips text-output models that do not accept text or image input", async () 
=> { + const container = createCliContainer({ + fs, + prompts: vi.fn().mockResolvedValue({}), + env: { cwd, homeDir }, + commandRunner, + httpClient: vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ + object: "list", + data: [ + createModelEntry({ + id: "audio-transcriber", + owned_by: "OpenAI", + input_modalities: ["audio"], + output_modalities: ["text"] + }), + createModelEntry({ + id: "claude-sonnet-4.6", + owned_by: "Anthropic", + input_modalities: ["text"], + output_modalities: ["text"] + }) + ] + }) + })) + }); + vi.spyOn(container.options, "resolveApiKey").mockResolvedValue("sk-openclaw"); + const resolveModel = vi + .spyOn(container.options, "resolveModel") + .mockResolvedValue("claude-sonnet-4.6"); + const resources = createExecutionResources( + container, + { dryRun: false, assumeYes: false, verbose: false }, + "configure:openclaw" + ); + const context = buildProviderContext( + container, + openClawProvider, + resources + ); + + const payload = await (openClawProvider as any).buildConfigurePayload({ + container, + flags: { dryRun: false, assumeYes: false, verbose: false }, + options: {}, + context, + logger: resources.logger + }); + + expect(resolveModel).toHaveBeenCalledWith( + expect.objectContaining({ + choices: [{ title: "Claude Sonnet 4.6", value: "claude-sonnet-4.6" }] + }) + ); + expect(payload.options.providerConfig.models).toEqual([ + expect.objectContaining({ + id: "claude-sonnet-4.6", + input: ["text"] + }) + ]); + }); + + it("configures OpenClaw through its CLI", async () => { + await openClawProvider.configure({ + fs, + env: containerEnv(), + command: createCommandContext(fs, commandRunner), + options: { + dryRun: false, + model: "claude-sonnet-4.6", + providerConfig: { + baseUrl: "https://api.poe.com/v1", + apiKey: "sk-openclaw", + api: "openai-completions", + models: [ + { + id: "claude-sonnet-4.6", + name: "Claude Sonnet 4.6", + reasoning: false, + input: ["text"], + cost: { + input: 0.000001, + output: 0.000002, + 
cacheRead: 0.0000001, + cacheWrite: 0.0000002 + }, + contextWindow: 200000, + maxTokens: 8192 + } + ] + } + } + }); + + expect(commandRunner).toHaveBeenNthCalledWith( + 1, + "openclaw", + [ + "config", + "set", + "models.providers.poe", + expect.any(String), + "--strict-json" + ] + ); + const configValue = commandRunner.mock.calls[0]?.[1]?.[3]; + expect(JSON.parse(configValue)).toEqual({ + baseUrl: "https://api.poe.com/v1", + apiKey: "sk-openclaw", + api: "openai-completions", + models: [ + { + id: "claude-sonnet-4.6", + name: "Claude Sonnet 4.6", + reasoning: false, + input: ["text"], + cost: { + input: 0.000001, + output: 0.000002, + cacheRead: 0.0000001, + cacheWrite: 0.0000002 + }, + contextWindow: 200000, + maxTokens: 8192 + } + ] + }); + expect(commandRunner).toHaveBeenNthCalledWith(2, "openclaw", [ + "models", + "set", + "poe/claude-sonnet-4.6" + ]); + expect(commandRunner).toHaveBeenNthCalledWith(3, "openclaw", [ + "config", + "validate", + "--json" + ]); + }); + + it("fails configure when OpenClaw validation returns valid false after writes", async () => { + commandRunner = vi.fn(async (command: string, args: string[]) => { + if ( + command === "openclaw" && + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify({ + valid: false, + path: `${homeDir}/.openclaw/openclaw.json` + }), + stderr: "", + exitCode: 0 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + + await expect( + openClawProvider.configure({ + fs, + env: containerEnv(), + command: createCommandContext(fs, commandRunner), + options: { + dryRun: false, + model: "claude-sonnet-4.6", + providerConfig: { + baseUrl: "https://api.poe.com/v1", + apiKey: "sk-openclaw", + api: "openai-completions", + models: [] + }, + configPath: `${homeDir}/.openclaw/openclaw.json`, + apiKey: "sk-openclaw" + } + }) + ).rejects.toThrow("OpenClaw configuration became invalid."); + }); + + it("skips OpenClaw CLI mutations during configure 
dry run", async () => { + await openClawProvider.configure({ + fs, + env: containerEnv(), + command: { + fs, + runCommand: commandRunner, + runCommandWithEnv: commandRunner, + flushDryRun() {}, + complete() {}, + finalize() {} + }, + options: { + dryRun: true, + model: "claude-sonnet-4.6", + providerConfig: { + baseUrl: "https://api.poe.com/v1", + apiKey: "sk-openclaw", + api: "openai-completions", + models: [] + }, + configPath: `${homeDir}/.openclaw/openclaw.json`, + apiKey: "sk-openclaw" + } + }); + + expect(commandRunner).not.toHaveBeenCalled(); + }); + + it("unconfigures the Poe provider and only clears Poe primary models", async () => { + commandRunner = vi.fn(async (command: string, args: string[]) => { + if (command !== "openclaw") { + return { stdout: "", stderr: "", exitCode: 0 }; + } + if (args[0] === "config" && args[1] === "file") { + return { + stdout: `${homeDir}/.openclaw/openclaw.json\n`, + stderr: "", + exitCode: 0 + }; + } + if ( + args[0] === "config" && + args[1] === "get" && + args[2] === "models.providers.poe" + ) { + return { + stdout: JSON.stringify({ apiKey: "sk-openclaw" }), + stderr: "", + exitCode: 0 + }; + } + if ( + args[0] === "config" && + args[1] === "get" && + args[2] === "agents.defaults.model.primary" + ) { + return { + stdout: JSON.stringify("poe/claude-sonnet-4.6"), + stderr: "", + exitCode: 0 + }; + } + if ( + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify({ + valid: true, + path: `${homeDir}/.openclaw/openclaw.json` + }), + stderr: "", + exitCode: 0 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + + const changed = await openClawProvider.unconfigure({ + fs, + env: containerEnv(), + command: createCommandContext(fs, commandRunner), + options: { + dryRun: false + } + }); + + expect(changed).toBe(true); + expect(commandRunner).toHaveBeenNthCalledWith(1, "openclaw", [ + "config", + "file" + ]); + 
expect(commandRunner).toHaveBeenNthCalledWith(2, "openclaw", [ + "config", + "validate", + "--json" + ]); + expect(commandRunner).toHaveBeenNthCalledWith(3, "openclaw", [ + "config", + "get", + "models.providers.poe", + "--json" + ]); + expect(commandRunner).toHaveBeenNthCalledWith(4, "openclaw", [ + "config", + "get", + "agents.defaults.model.primary", + "--json" + ]); + expect(commandRunner).toHaveBeenNthCalledWith(5, "openclaw", [ + "config", + "unset", + "models.providers.poe" + ]); + expect(commandRunner).toHaveBeenNthCalledWith(6, "openclaw", [ + "config", + "unset", + "agents.defaults.model.primary" + ]); + expect(commandRunner).toHaveBeenNthCalledWith(7, "openclaw", [ + "config", + "validate", + "--json" + ]); + }); + + it("fails unconfigure when OpenClaw cannot read the Poe provider path", async () => { + commandRunner = vi.fn(async (command: string, args: string[]) => { + if (command !== "openclaw") { + return { stdout: "", stderr: "", exitCode: 0 }; + } + if (args[0] === "config" && args[1] === "file") { + return { + stdout: `${homeDir}/.openclaw/openclaw.json\n`, + stderr: "", + exitCode: 0 + }; + } + if ( + args[0] === "config" && + args[1] === "validate" && + args.includes("--json") + ) { + return { + stdout: JSON.stringify({ + valid: true, + path: `${homeDir}/.openclaw/openclaw.json` + }), + stderr: "", + exitCode: 0 + }; + } + if ( + args[0] === "config" && + args[1] === "get" && + args[2] === "models.providers.poe" + ) { + return { + stdout: "", + stderr: "permission denied", + exitCode: 2 + }; + } + return { stdout: "", stderr: "", exitCode: 0 }; + }); + + await expect( + openClawProvider.unconfigure({ + fs, + env: containerEnv(), + command: createCommandContext(fs, commandRunner), + options: { + dryRun: false + } + }) + ).rejects.toThrow( + "Failed to read OpenClaw config value at models.providers.poe." 
+ ); + }); +}); + +function containerEnv() { + return { + cwd, + homeDir, + platform: "darwin" as const, + configPath: `${homeDir}/.poe-code/config.json`, + logDir: `${homeDir}/.poe-code/logs`, + poeApiBaseUrl: "https://api.poe.com/v1", + poeBaseUrl: "https://api.poe.com", + variables: {}, + resolveHomePath: (...segments: string[]) => [homeDir, ...segments].join("/"), + getVariable: () => undefined + }; +} diff --git a/src/providers/openclaw.ts b/src/providers/openclaw.ts new file mode 100644 index 00000000..cf304cd3 --- /dev/null +++ b/src/providers/openclaw.ts @@ -0,0 +1,716 @@ +import path from "node:path"; +import { + DEFAULT_FRONTIER_MODEL, + FRONTIER_MODELS, + stripModelNamespace +} from "../cli/constants.js"; +import { ApiError, ValidationError } from "../cli/errors.js"; +import type { + ProviderBuildConfigurePayloadInit, + ProviderConfigurePayload, + ProviderService, + ServiceExecutionContext +} from "../cli/service-registry.js"; +import { + createBinaryExistsCheck, + formatCommandRunnerResult, + type CommandRunnerResult +} from "../utils/command-checks.js"; + +const DEFAULT_CONTEXT_WINDOW = 200_000; +const DEFAULT_MAX_TOKENS = 4_096; +const MAX_SHORTLIST_MODELS = 8; +const OPENCLAW_BINARY = "openclaw"; +const OPENCLAW_PROVIDER_PATH = "models.providers.poe"; +const OPENCLAW_PRIMARY_MODEL_PATH = "agents.defaults.model.primary"; +const OPENCLAW_PROVIDER_API = "openai-completions"; + +interface PoeModelEntry { + id: string; + created: number; + owned_by: string; + metadata?: { + display_name?: string | null; + } | null; + architecture?: { + input_modalities?: string[] | null; + output_modalities?: string[] | null; + } | null; + pricing?: { + prompt?: number | null; + completion?: number | null; + input_cache_read?: number | null; + input_cache_write?: number | null; + } | null; + context_window?: { + context_length?: number | null; + max_output_tokens?: number | null; + } | null; + reasoning?: unknown; +} + +interface PoeModelsResponse { + object: string; + 
data: PoeModelEntry[]; +} + +interface OpenClawModelConfig { + id: string; + name: string; + reasoning: boolean; + input: string[]; + cost: { + input: number; + output: number; + cacheRead: number; + cacheWrite: number; + }; + contextWindow: number; + maxTokens: number; +} + +interface OpenClawProviderConfig { + baseUrl: string; + apiKey: string; + api: string; + models: OpenClawModelConfig[]; +} + +interface OpenClawConfigureOptions { + apiKey: string; + model: string; + providerConfig: OpenClawProviderConfig; + configPath: string; + dryRun: boolean; +} + +interface OpenClawUnconfigureOptions { + dryRun?: boolean; +} + +interface NormalizedPoeModel { + id: string; + name: string; + created: number; + ownedBy: string; + config: OpenClawModelConfig; +} + +interface OpenClawValidateResponse { + valid?: boolean; + path?: string; +} + +export const provider: ProviderService< + OpenClawConfigureOptions, + OpenClawUnconfigureOptions +> = { + id: "openclaw", + name: "openclaw", + label: "OpenClaw", + summary: "Configure OpenClaw to use Poe as a model provider.", + async buildConfigurePayload( + init + ): Promise> { + await init.context.runCheck( + createBinaryExistsCheck( + OPENCLAW_BINARY, + "openclaw-cli-binary", + "OpenClaw CLI binary must exist" + ) + ); + + const configPath = await resolveOpenClawConfigPath(init); + await validateOpenClawConfig(init, configPath); + + const apiKey = await init.container.options.resolveApiKey({ + value: readOptionString(init.options, "apiKey"), + envValue: init.container.env.getVariable("POE_API_KEY"), + dryRun: init.flags.dryRun, + assumeYes: init.flags.assumeYes + }); + const models = await fetchPoeModels(init, apiKey); + const providerModels = normalizePoeModels(models); + if (providerModels.length === 0) { + throw new ValidationError( + "Poe did not return any text models that OpenClaw can use." 
+ ); + } + + const model = await resolveSelectedModel(init, providerModels); + const providerConfig = buildProviderConfig( + init.context.env.poeApiBaseUrl, + apiKey, + providerModels + ); + + return { + options: { + apiKey, + model, + providerConfig, + configPath, + dryRun: init.flags.dryRun + }, + files: [configPath] + }; + }, + async configure(context: ServiceExecutionContext) { + const { options } = context; + if (options.dryRun) { + return; + } + + await runOpenClawCommand( + context, + [ + "config", + "set", + OPENCLAW_PROVIDER_PATH, + JSON.stringify(options.providerConfig), + "--strict-json" + ], + "configure the Poe provider" + ); + await runOpenClawCommand( + context, + ["models", "set", `poe/${options.model}`], + "set the default OpenClaw model" + ); + const validationResult = await runOpenClawCommand( + context, + ["config", "validate", "--json"], + "validate the OpenClaw configuration" + ); + assertOpenClawValidationPassed( + validationResult, + "OpenClaw configuration became invalid." 
+ ); + }, + async unconfigure( + context: ServiceExecutionContext + ) { + const configFileResult = await runOpenClawCommand( + context, + ["config", "file"], + "locate the OpenClaw config file" + ); + const configPath = normalizeOpenClawPath( + configFileResult.stdout, + context.env.homeDir, + context.env.cwd + ); + await validateExistingOpenClawConfig(context, configPath); + + const isDryRun = context.options?.dryRun === true; + const providerConfig = await readOpenClawConfigValue( + context, + OPENCLAW_PROVIDER_PATH + ); + const primaryModel = await readStringConfigValue( + context, + OPENCLAW_PRIMARY_MODEL_PATH + ); + const shouldClearPrimaryModel = primaryModel?.startsWith("poe/") === true; + const changed = providerConfig !== undefined || shouldClearPrimaryModel; + + if (isDryRun || !changed) { + return changed; + } + + if (providerConfig !== undefined) { + await runOpenClawCommand( + context, + ["config", "unset", OPENCLAW_PROVIDER_PATH], + "remove the Poe provider" + ); + } + + if (shouldClearPrimaryModel) { + await runOpenClawCommand( + context, + ["config", "unset", OPENCLAW_PRIMARY_MODEL_PATH], + "clear the Poe default model" + ); + } + + const validationResult = await runOpenClawCommand( + context, + ["config", "validate", "--json"], + "validate the OpenClaw configuration" + ); + assertOpenClawValidationPassed( + validationResult, + "OpenClaw configuration became invalid." + ); + return true; + } +}; + +async function resolveOpenClawConfigPath( + init: ProviderBuildConfigurePayloadInit +): Promise { + const result = await init.context.command.runCommand(OPENCLAW_BINARY, [ + "config", + "file" + ]); + if (result.exitCode !== 0) { + throw buildOpenClawConfigError( + "OpenClaw must already be configured on this machine. 
Run `openclaw onboard` or `openclaw doctor` first.", + result + ); + } + + const configPath = normalizeOpenClawPath( + result.stdout, + init.context.env.homeDir, + init.context.env.cwd + ); + if (configPath.length === 0) { + throw new ValidationError( + "OpenClaw did not report an active configuration file. Run `openclaw onboard` or `openclaw doctor` first." + ); + } + + return configPath; +} + +async function validateOpenClawConfig( + init: ProviderBuildConfigurePayloadInit, + configPath: string +): Promise { + await validateOpenClawConfigAtPath( + init.context.command.runCommand, + configPath, + `OpenClaw configuration is not valid at ${configPath}. Run \`openclaw onboard\` or \`openclaw doctor\`.` + ); +} + +async function fetchPoeModels( + init: ProviderBuildConfigurePayloadInit, + apiKey: string +): Promise { + const response = await init.container.httpClient( + `${init.context.env.poeApiBaseUrl}/models`, + { + method: "GET", + headers: { + Authorization: `Bearer ${apiKey}` + } + } + ); + if (!response.ok) { + throw new ApiError( + `Failed to fetch Poe models for OpenClaw (HTTP ${response.status})`, + { + httpStatus: response.status, + endpoint: "/models" + } + ); + } + + const payload = (await response.json()) as PoeModelsResponse; + return Array.isArray(payload.data) ? 
payload.data : [];
}
// NOTE(review): the two lines above are the tail of a function whose header
// is above this chunk; kept verbatim so the enclosing definition stays intact.

/**
 * Converts raw Poe model entries into the normalized shape the OpenClaw
 * provider config consumes.
 *
 * Models that cannot emit text output, or whose input modalities resolve to
 * nothing usable, are dropped. Duplicate ids collapse to the last occurrence
 * (the Map is keyed by id). The result is sorted by id for stable output.
 */
function normalizePoeModels(models: PoeModelEntry[]): NormalizedPoeModel[] {
  // Restored the stripped type arguments; a bare `new Map()` infers
  // Map<any, any> and loses checking on every `.set`/`.get` below.
  const byId = new Map<string, NormalizedPoeModel>();
  for (const model of models) {
    if (!supportsTextOutput(model)) {
      continue;
    }

    const input = resolveInputModalities(model);
    if (!input) {
      continue;
    }

    const name = resolveModelName(model);
    const normalized: NormalizedPoeModel = {
      id: model.id,
      name,
      created: model.created,
      ownedBy: model.owned_by,
      config: {
        id: model.id,
        name,
        reasoning: Boolean(model.reasoning),
        input,
        cost: {
          // Missing pricing fields default to 0 (free / unknown cost).
          input: numberOrZero(model.pricing?.prompt),
          output: numberOrZero(model.pricing?.completion),
          cacheRead: numberOrZero(model.pricing?.input_cache_read),
          cacheWrite: numberOrZero(model.pricing?.input_cache_write)
        },
        contextWindow: numberOrDefault(
          model.context_window?.context_length,
          DEFAULT_CONTEXT_WINDOW
        ),
        maxTokens: numberOrDefault(
          model.context_window?.max_output_tokens,
          DEFAULT_MAX_TOKENS
        )
      }
    };
    byId.set(normalized.id, normalized);
  }

  return Array.from(byId.values()).sort((left, right) =>
    left.id.localeCompare(right.id)
  );
}

/**
 * Resolves the default model id for OpenClaw.
 *
 * An explicit `--model` option wins but must match a known model id exactly;
 * otherwise the user is prompted (or the default is assumed under `--yes`)
 * from a frontier-model shortlist.
 *
 * @throws ValidationError when an explicit model id is not in `models`.
 */
async function resolveSelectedModel(
  init: ProviderBuildConfigurePayloadInit,
  models: NormalizedPoeModel[]
): Promise<string> {
  const explicitModel = readOptionString(init.options, "model");
  const available = new Map<string, NormalizedPoeModel>();
  for (const model of models) {
    available.set(model.id, model);
  }

  if (explicitModel) {
    if (!available.has(explicitModel)) {
      throw new ValidationError(
        `Unknown Poe model "${explicitModel}" for OpenClaw. 
Use an exact model id from https://api.poe.com/v1/models.`
      );
    }
    init.logger.resolved("OpenClaw default model", explicitModel);
    return explicitModel;
  }

  const shortlist = buildModelShortlist(models);
  const defaultModel = resolveDefaultModel(shortlist, models);
  const selected = await init.container.options.resolveModel({
    assumeDefault: init.flags.assumeYes,
    defaultValue: defaultModel,
    choices: shortlist.map((model) => ({
      title: model.name,
      value: model.id
    })),
    label: "OpenClaw default model",
    onResolve: (label, value) => init.logger.resolved(label, value)
  });
  return selected;
}

/**
 * Builds the model shortlist shown in the interactive picker.
 *
 * Fill order, capped at MAX_SHORTLIST_MODELS:
 * 1. the curated FRONTIER_MODELS list (namespace stripped), in list order;
 * 2. remaining models from frontier owners, newest first;
 * 3. any remaining models, newest first.
 */
function buildModelShortlist(models: NormalizedPoeModel[]): NormalizedPoeModel[] {
  const byId = new Map<string, NormalizedPoeModel>();
  for (const model of models) {
    byId.set(model.id, model);
  }

  const selected = new Set<string>();
  const shortlist: NormalizedPoeModel[] = [];
  for (const modelId of FRONTIER_MODELS) {
    const stripped = stripModelNamespace(modelId);
    const model = byId.get(stripped);
    if (!model || selected.has(model.id)) {
      continue;
    }
    shortlist.push(model);
    selected.add(model.id);
  }

  const newestFrontier = models
    .filter((model) => !selected.has(model.id) && isFrontierOwner(model.ownedBy))
    .sort((left, right) => right.created - left.created);
  for (const model of newestFrontier) {
    if (shortlist.length >= MAX_SHORTLIST_MODELS) {
      break;
    }
    shortlist.push(model);
    selected.add(model.id);
  }

  if (shortlist.length >= MAX_SHORTLIST_MODELS) {
    return shortlist;
  }

  const newestRemaining = models
    .filter((model) => !selected.has(model.id))
    .sort((left, right) => right.created - left.created);
  for (const model of newestRemaining) {
    if (shortlist.length >= MAX_SHORTLIST_MODELS) {
      break;
    }
    shortlist.push(model);
  }

  return shortlist;
}

/**
 * Picks the prompt's default model id: the canonical frontier default when it
 * made the shortlist, else the first shortlist entry, else the first model.
 *
 * @throws ValidationError when no models are available at all.
 */
function resolveDefaultModel(
  shortlist: NormalizedPoeModel[],
  models: NormalizedPoeModel[]
): string {
  const defaultId = stripModelNamespace(DEFAULT_FRONTIER_MODEL);

  const shortlistDefault = shortlist.find((model) => model.id === defaultId);
  if (shortlistDefault) {
    return shortlistDefault.id;
  }
  if (shortlist.length > 0) {
    return shortlist[0]!.id;
  }
  if (models.length > 0) {
    return models[0]!.id;
  }
  throw new ValidationError(
    "Poe did not return any models that OpenClaw can configure."
  );
}

/** Assembles the OpenClaw provider config block from normalized models. */
function buildProviderConfig(
  baseUrl: string,
  apiKey: string,
  models: NormalizedPoeModel[]
): OpenClawProviderConfig {
  return {
    baseUrl,
    apiKey,
    api: OPENCLAW_PROVIDER_API,
    models: models.map((model) => model.config)
  };
}

/** True when the model declares "text" among its output modalities. */
function supportsTextOutput(model: PoeModelEntry): boolean {
  return (model.architecture?.output_modalities ?? []).includes("text");
}

/**
 * Filters declared input modalities down to the ones OpenClaw understands
 * ("text" and "image", deduplicated, order preserved).
 *
 * @returns `["text"]` when nothing is declared (assume text-only),
 *          the filtered list when non-empty, or `undefined` when the model
 *          declares only unsupported modalities (caller drops the model).
 */
function resolveInputModalities(model: PoeModelEntry): string[] | undefined {
  const declared = model.architecture?.input_modalities;
  if (!Array.isArray(declared) || declared.length === 0) {
    return ["text"];
  }

  const resolved: string[] = [];
  for (const modality of declared) {
    if (modality !== "text" && modality !== "image") {
      continue;
    }
    if (!resolved.includes(modality)) {
      resolved.push(modality);
    }
  }
  if (resolved.length > 0) {
    return resolved;
  }
  return undefined;
}

/**
 * Picks a human-readable model name: the trimmed display name when it is
 * present and not just the id restated, otherwise a humanized form of the id.
 */
function resolveModelName(model: PoeModelEntry): string {
  const displayName = model.metadata?.display_name;
  if (typeof displayName === "string" && displayName.trim().length > 0) {
    const trimmed = displayName.trim();
    if (trimmed.toLowerCase() !== model.id.toLowerCase()) {
      return trimmed;
    }
  }

  return humanizeModelId(model.id);
}

/**
 * Turns a dash-separated model id into a display name, with special casing
 * for the "gpt" family ("gpt-4o-mini" -> "GPT-4o Mini").
 */
function humanizeModelId(modelId: string): string {
  const tokens = modelId.split("-");
  if (tokens.length === 0) {
    return modelId;
  }

  if (tokens[0] === "gpt") {
    if (tokens.length === 1) {
      return "GPT";
    }
    const version = formatModelToken(tokens[1]!);
    if (tokens.length === 2) {
      return `GPT-${version}`;
    }
    const suffix = tokens.slice(2).map(formatModelToken).join(" ");

    return suffix.length > 0
      ? `GPT-${version} ${suffix}`
      : `GPT-${version}`;
  }

  return tokens.map(formatModelToken).join(" ");
}

/** Uppercases "gpt" wholesale; otherwise capitalizes the first character. */
function formatModelToken(token: string): string {
  if (token === "gpt") {
    return "GPT";
  }
  if (token.length === 0) {
    return token;
  }
  return `${token[0]!.toUpperCase()}${token.slice(1)}`;
}

/** True for the owners treated as frontier labs (case-insensitive). */
function isFrontierOwner(owner: string): boolean {
  const normalized = owner.toLowerCase();
  return normalized === "anthropic" ||
    normalized === "openai" ||
    normalized === "google";
}

/** Coerces a possibly-missing numeric field to a number, defaulting to 0. */
function numberOrZero(value: number | null | undefined): number {
  return typeof value === "number" ? value : 0;
}

/** Coerces a possibly-missing numeric field, falling back to `fallback`. */
function numberOrDefault(
  value: number | null | undefined,
  fallback: number
): number {
  return typeof value === "number" ? value : fallback;
}

/**
 * Runs the OpenClaw binary with `args` and returns the result.
 *
 * @throws ValidationError (wrapping the command output) on non-zero exit;
 *         `description` names the attempted action in the message.
 */
async function runOpenClawCommand(
  context: ServiceExecutionContext<
    OpenClawConfigureOptions | OpenClawUnconfigureOptions
  >,
  args: string[],
  description: string
): Promise<CommandRunnerResult> {
  const result = await context.command.runCommand(OPENCLAW_BINARY, args);
  if (result.exitCode === 0) {
    return result;
  }

  throw new ValidationError(
    `Failed to ${description}.\n${formatCommandRunnerResult(result)}`
  );
}

/**
 * Reads one value from the OpenClaw config via `config get <path> --json`.
 *
 * @returns the parsed JSON value, or `undefined` when OpenClaw reports the
 *          path as not found (a missing key is not an error).
 * @throws ValidationError on any other command failure or invalid JSON.
 */
// NOTE(review): the type arguments below were stripped by extraction; the
// union matches the sibling signatures in this file — TODO confirm.
async function readOpenClawConfigValue(
  context: ServiceExecutionContext<
    OpenClawConfigureOptions | OpenClawUnconfigureOptions
  >,
  path: string
): Promise<unknown> {
  const result = await context.command.runCommand(OPENCLAW_BINARY, [
    "config",
    "get",
    path,
    "--json"
  ]);
  if (result.exitCode !== 0) {
    if (configPathWasNotFound(result, path)) {
      return undefined;
    }
    throw new ValidationError(
      `Failed to read OpenClaw config value at ${path}.\n${formatCommandRunnerResult(result)}`
    );
  }
  return parseJsonOutput(result, `OpenClaw config value for ${path}`);
}

/** Like readOpenClawConfigValue, but yields only string values. */
async function readStringConfigValue(
  context: ServiceExecutionContext<
    OpenClawConfigureOptions | OpenClawUnconfigureOptions
  >,
  path: string
): Promise<string | undefined> {
  const value = await readOpenClawConfigValue(context, path);
  return typeof value === "string" ? value : undefined;
}

/**
 * Parses a command's stdout as JSON.
 *
 * @throws ValidationError when stdout is empty or not valid JSON; `label`
 *         identifies the source in the error message.
 */
function parseJsonOutput<T = unknown>(
  result: CommandRunnerResult,
  label: string
): T {
  const output = result.stdout.trim();
  if (output.length === 0) {
    throw new ValidationError(`${label} returned empty output.`);
  }

  try {
    return JSON.parse(output) as T;
  } catch {
    throw new ValidationError(`${label} returned invalid JSON.`);
  }
}

/**
 * Validates the existing OpenClaw config, pointing the user at
 * `openclaw onboard` / `openclaw doctor` on failure.
 */
async function validateExistingOpenClawConfig(
  context: ServiceExecutionContext<
    OpenClawConfigureOptions | OpenClawUnconfigureOptions
  >,
  configPath: string
): Promise<void> {
  await validateOpenClawConfigAtPath(
    context.command.runCommand,
    configPath,
    `OpenClaw configuration is not valid at ${configPath}. Run \`openclaw onboard\` or \`openclaw doctor\`.`
  );
}

/**
 * Runs `openclaw config validate --json` and asserts it passed.
 *
 * @throws ValidationError with `message` (plus command output on non-zero
 *         exit) when validation fails.
 */
async function validateOpenClawConfigAtPath(
  runCommand: ServiceExecutionContext<
    OpenClawConfigureOptions | OpenClawUnconfigureOptions
  >["command"]["runCommand"],
  configPath: string,
  message: string
): Promise<void> {
  const result = await runCommand(OPENCLAW_BINARY, [
    "config",
    "validate",
    "--json"
  ]);
  if (result.exitCode !== 0) {
    throw buildOpenClawConfigError(message, result);
  }
  assertOpenClawValidationPassed(result, message);
}

/**
 * Throws `message` unless the validator's JSON payload has `valid: true`.
 * Anything other than exactly `true` counts as failure.
 */
function assertOpenClawValidationPassed(
  result: CommandRunnerResult,
  message: string
): void {
  const payload = parseJsonOutput<{ valid?: boolean }>(
    result,
    "OpenClaw config validation"
  );
  if (payload.valid !== true) {
    throw new ValidationError(message);
  }
}

/**
 * Expands and normalizes a user-supplied path:
 * - "" stays "" (caller treats empty as unset);
 * - "~" and "~/…" expand against `homeDir`;
 * - "~./x" expands to `<homeDir>/.x` — presumably a dotfile shorthand,
 *   TODO confirm this is intentional;
 * - absolute paths are normalized; relative paths resolve against `cwd`.
 */
function normalizeOpenClawPath(
  input: string,
  homeDir: string,
  cwd: string
): string {
  const trimmed = input.trim();
  if (trimmed.length === 0) {
    return "";
  }
  if (trimmed === "~") {
    return homeDir;
  }
  if (trimmed.startsWith("~/") || trimmed.startsWith(`~${path.sep}`)) {
    return path.join(homeDir, trimmed.slice(2));
  }
  if (trimmed.startsWith("~./") || trimmed.startsWith(`~.${path.sep}`)) {
    return path.join(homeDir, `.${trimmed.slice(3)}`);
  }
  if (path.isAbsolute(trimmed)) {
    return path.normalize(trimmed);
  }
  return path.resolve(cwd, trimmed);
}

/**
 * True when the command output contains OpenClaw's "Config path not found"
 * marker for `path` (checked in both stdout and stderr).
 */
function configPathWasNotFound(
  result: CommandRunnerResult,
  path: string
): boolean {
  const combined = `${result.stdout}\n${result.stderr}`;
  return combined.includes(`Config path not found: ${path}`);
}

/** Wraps a failed command result into a ValidationError with context. */
function buildOpenClawConfigError(
  message: string,
  result: CommandRunnerResult
): ValidationError {
  return new ValidationError(
    `${message}\n${formatCommandRunnerResult(result)}`
  );
}

/**
 * Safely reads a non-empty string option from an untyped options bag.
 * Returns undefined for non-objects, arrays, missing keys, non-strings,
 * and empty strings.
 */
function readOptionString(options: unknown, key: string): string | undefined {
  if (!options || typeof options !== "object" || Array.isArray(options)) {
    return undefined;
  }
  // Restored the stripped `Record<string, unknown>` type arguments; a bare
  // `Record` is not a valid type.
  const value = (options as Record<string, unknown>)[key];
  return typeof value === "string" && value.length > 0 ? value : undefined;
}