diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index fdece63093..8b11ccdfff 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -15,7 +15,7 @@ jobs: uses: actions/checkout@v6 - name: Run markdownlint-cli2 - uses: DavidAnson/markdownlint-cli2-action@ce4853d43830c74c1753b39f3cf40f71c2031eb9 # v23 + uses: DavidAnson/markdownlint-cli2-action@6b51ade7a9e4a75a7ad929842dd298a3804ebe8b # v23 with: globs: | '**/*.md' diff --git a/AGENTS.md b/AGENTS.md index 7adfd1d12e..d711b4214d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -20,23 +20,17 @@ src/specify_cli/integrations/ ├── base.py # IntegrationBase, MarkdownIntegration, TomlIntegration, YamlIntegration, SkillsIntegration ├── manifest.py # IntegrationManifest (file tracking) ├── claude/ # Example: SkillsIntegration subclass -│ ├── __init__.py # ClaudeIntegration class -│ └── scripts/ # Thin wrapper scripts -│ ├── update-context.sh -│ └── update-context.ps1 +│ └── __init__.py # ClaudeIntegration class ├── gemini/ # Example: TomlIntegration subclass -│ ├── __init__.py -│ └── scripts/ +│ └── __init__.py ├── windsurf/ # Example: MarkdownIntegration subclass -│ ├── __init__.py -│ └── scripts/ +│ └── __init__.py ├── copilot/ # Example: IntegrationBase subclass (custom setup) -│ ├── __init__.py -│ └── scripts/ +│ └── __init__.py └── ... # One subpackage per supported agent ``` -The registry is the **single source of truth for Python integration metadata**. Supported agents, their directories, formats, and capabilities are derived from the integration classes for the Python integration layer. However, context-update behavior still requires explicit cases in the shared dispatcher scripts (`scripts/bash/update-agent-context.sh` and `scripts/powershell/update-agent-context.ps1`), which currently maintain their own supported-agent lists and agent-key→context-file mappings until they are migrated to registry-based dispatch. 
+The registry is the **single source of truth for Python integration metadata**. Supported agents, their directories, formats, capabilities, and context files are derived from the integration classes for the Python integration layer. --- @@ -179,63 +173,11 @@ def _register_builtins() -> None: # ... ``` -### 4. Add scripts +### 4. Context file behavior -Create two thin wrapper scripts in `src/specify_cli/integrations//scripts/` that delegate to the shared context-update scripts. Each is ~25 lines of boilerplate. +Set `context_file` on the integration class. The base integration setup creates or updates the managed Spec Kit section in that file, and uninstall removes the managed section when appropriate. -> **Note on `` vs ``:** `` is the Python-safe directory name for your integration — it matches `` exactly when the key contains no hyphens (e.g., key `"gemini"` → `gemini/`), but uses underscores when it does (e.g., key `"kiro-cli"` → `kiro_cli/`). The `IntegrationBase.key` class attribute always retains the original hyphenated value (e.g., `key = "kiro-cli"`), since that is what the CLI and registry use. - -**`update-context.sh`:** - -```bash -#!/usr/bin/env bash -# update-context.sh — integration: create/update -set -euo pipefail - -_script_dir="$(cd "$(dirname "$0")" && pwd)" -_root="$_script_dir" -while [ "$_root" != "/" ] && [ ! 
-d "$_root/.specify" ]; do _root="$(dirname "$_root")"; done -if [ -z "${REPO_ROOT:-}" ]; then - if [ -d "$_root/.specify" ]; then - REPO_ROOT="$_root" - else - git_root="$(git rev-parse --show-toplevel 2>/dev/null || true)" - if [ -n "$git_root" ] && [ -d "$git_root/.specify" ]; then - REPO_ROOT="$git_root" - else - REPO_ROOT="$_root" - fi - fi -fi - -exec "$REPO_ROOT/.specify/scripts/bash/update-agent-context.sh" -``` - -**`update-context.ps1`:** - -```powershell -# update-context.ps1 — integration: create/update -$ErrorActionPreference = 'Stop' - -$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Definition -$repoRoot = try { git rev-parse --show-toplevel 2>$null } catch { $null } -if (-not $repoRoot -or -not (Test-Path (Join-Path $repoRoot '.specify'))) { - $repoRoot = $scriptDir - $fsRoot = [System.IO.Path]::GetPathRoot($repoRoot) - while ($repoRoot -and $repoRoot -ne $fsRoot -and -not (Test-Path (Join-Path $repoRoot '.specify'))) { - $repoRoot = Split-Path -Parent $repoRoot - } -} - -& "$repoRoot/.specify/scripts/powershell/update-agent-context.ps1" -AgentType -``` - -Replace `` with your integration key and `` / `` with the appropriate values. - -You must also add the agent to the shared context-update scripts so the shared dispatcher recognises the new key: - -- **`scripts/bash/update-agent-context.sh`** — add a file-path variable and a case in `update_specific_agent()`. -- **`scripts/powershell/update-agent-context.ps1`** — add a file-path variable, add the new key to the `AgentType` parameter's `[ValidateSet(...)]`, add a switch case in `Update-SpecificAgent`, and add an entry in `Update-AllExistingAgents`. +Only add custom setup logic when the agent needs non-standard behavior. Most integrations do not need wrapper scripts or separate context-update dispatch code. ### 5. Test it @@ -422,7 +364,6 @@ Implementation: Extends `MarkdownIntegration` with custom `setup()` method that: 3. 
Applies Forge-specific transformations via `_apply_forge_transformations()` 4. Strips `handoffs` frontmatter key 5. Injects missing `name` fields -6. Ensures the shared `update-agent-context.*` scripts include a `forge` case that maps context updates to `AGENTS.md` and lists `forge` in their usage/help text ### Goose Integration @@ -436,7 +377,7 @@ Implementation: Extends `YamlIntegration` (parallel to `TomlIntegration`): 2. Extracts title and description from frontmatter 3. Renders output as Goose recipe YAML (version, title, description, author, extensions, activities, prompt) 4. Uses `yaml.safe_dump()` for header fields to ensure proper escaping -5. Context updates map to `AGENTS.md` (shared with opencode/codex/pi/forge) +5. Sets `context_file = "AGENTS.md"` so the base setup manages the Spec Kit context section there ## Common Pitfalls diff --git a/CHANGELOG.md b/CHANGELOG.md index 52d2c87cb7..48db19ddf4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,21 @@ +## [0.8.4] - 2026-05-01 + +### Changed + +- fix(specify): correct self-referencing step number in validation flow (#2152) +- chore(deps): bump DavidAnson/markdownlint-cli2-action (#2425) +- Add security-governance to community catalog (#2386) +- Add cross-platform-governance to community catalog (#2384) +- Add architecture-governance to community catalog (#2383) +- Add a11y-governance to community catalog (#2381) +- feat(extensions): add Spec2Cloud extension for Azure deployment workflow (#2412) +- fix: migrate extension commands on integration switch (#2404) +- feat: add Squad Bridge extension to community catalog (#2417) +- chore: release 0.8.3, begin 0.8.4.dev0 development (#2418) + ## [0.8.3] - 2026-04-29 ### Changed diff --git a/README.md b/README.md index b52e3822b4..102c55ecb0 100644 --- a/README.md +++ b/README.md @@ -263,6 +263,7 @@ The following community-contributed extensions are available in [`catalog.commun | Spec
Scope | Effort estimation and scope tracking — estimate work, detect creep, and budget time per phase | `process` | Read-only | [spec-kit-scope-](https://github.com/Quratulain-bilal/spec-kit-scope-) | | Spec Sync | Detect and resolve drift between specs and implementation. AI-assisted resolution with human approval | `docs` | Read+Write | [spec-kit-sync](https://github.com/bgervin/spec-kit-sync) | | Spec Validate | Comprehension validation, review gating, and approval state for spec-kit artifacts — staged quizzes, peer review SLA, and a hard gate before /speckit.implement | `process` | Read+Write | [spec-kit-spec-validate](https://github.com/aeltayeb/spec-kit-spec-validate) | +| Spec2Cloud | Spec-driven workflow tuned for shipping to Azure | `process` | Read+Write | [spec2cloud](https://github.com/Azure-Samples/Spec2Cloud) | | SpecTest | Auto-generate test scaffolds from spec criteria, map coverage, and find untested requirements | `code` | Read+Write | [spec-kit-spectest](https://github.com/Quratulain-bilal/spec-kit-spectest) | | Squad Bridge | Bootstrap and synchronize a Squad agent team from your Speckit spec and tasks | `process` | Read+Write | [spec-kit-squad](https://github.com/jwill824/spec-kit-squad) | | Staff Review Extension | Staff-engineer-level code review that validates implementation against spec, checks security, performance, and test coverage | `code` | Read-only | [spec-kit-staff-review](https://github.com/arunt14/spec-kit-staff-review) | @@ -270,6 +271,7 @@ The following community-contributed extensions are available in [`catalog.commun | Superpowers Bridge | Orchestrates obra/superpowers skills within the spec-kit SDD workflow across the full lifecycle (clarification, TDD, review, verification, critique, debugging, branch completion) | `process` | Read+Write | [superpowers-bridge](https://github.com/RbBtSn0w/spec-kit-extensions/tree/main/superpowers-bridge) | | Superpowers Bridge (WangX0111) | Bridges spec-kit with obra/superpowers 
(brainstorming, TDD, subagent, code-review) into a unified, resumable workflow with graceful degradation and session progress tracking | `process` | Read+Write | [superspec](https://github.com/WangX0111/superspec) | | TinySpec | Lightweight single-file workflow for small tasks — skip the heavy multi-step SDD process | `process` | Read+Write | [spec-kit-tinyspec](https://github.com/Quratulain-bilal/spec-kit-tinyspec) | +| Token Consumption Analyzer | Captures, analyzes, and compares token consumption across SDD workflows | `visibility` | Read-only | [spec-kit-token-analyzer](https://github.com/coderandhiker/spec-kit-token-analyzer) | | V-Model Extension Pack | Enforces V-Model paired generation of development specs and test specs with full traceability | `docs` | Read+Write | [spec-kit-v-model](https://github.com/leocamello/spec-kit-v-model) | | Verify Extension | Post-implementation quality gate that validates implemented code against specification artifacts | `code` | Read-only | [spec-kit-verify](https://github.com/ismaelJimenez/spec-kit-verify) | | Verify Tasks Extension | Detect phantom completions: tasks marked [X] in tasks.md with no real implementation | `code` | Read-only | [spec-kit-verify-tasks](https://github.com/datastone-inc/spec-kit-verify-tasks) | diff --git a/docs/community/presets.md b/docs/community/presets.md index 0bb7a7ab42..15f2b7c9ff 100644 --- a/docs/community/presets.md +++ b/docs/community/presets.md @@ -7,9 +7,12 @@ The following community-contributed presets customize how Spec Kit behaves — o | Preset | Purpose | Provides | Requires | URL | |--------|---------|----------|----------|-----| +| A11Y Governance | Adds WCAG 2.2 AA accessibility checks, bilingual DE/EN delivery, CEFR-B2 readability, CLI accessibility, and inclusive-content guidance | 9 templates, 3 commands | — | [spec-kit-preset-a11y-governance](https://github.com/hindermath/spec-kit-preset-a11y-governance) | | AIDE In-Place Migration | Adapts the AIDE extension workflow for 
in-place technology migrations (X → Y pattern) — adds migration objectives, verification gates, knowledge documents, and behavioral equivalence criteria | 2 templates, 8 commands | AIDE extension | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) | +| Architecture Governance | Adds secure architecture governance: trust boundaries, threat modeling, STRIDE/CAPEC, S-ADRs, Zero Trust applicability, and OWASP SAMM | 11 templates, 3 commands | — | [spec-kit-preset-architecture-governance](https://github.com/hindermath/spec-kit-preset-architecture-governance) | | Canon Core | Adapts original Spec Kit workflow to work together with Canon extension | 2 templates, 8 commands | — | [spec-kit-canon](https://github.com/maximiliamus/spec-kit-canon) | | Claude AskUserQuestion | Upgrades `/speckit.clarify` and `/speckit.checklist` on Claude Code from Markdown-table prompts to the native AskUserQuestion picker, with a recommended option and reasoning on every question | 2 commands | — | [spec-kit-preset-claude-ask-questions](https://github.com/0xrafasec/spec-kit-preset-claude-ask-questions) | +| Cross-Platform Governance | Adds Bash/PowerShell parity, dry-run/WhatIf parity, Unix man-page expectations, PowerShell comment-based help, and Verb-Noun Cmdlet discipline | 8 templates, 3 commands | — | [spec-kit-preset-cross-platform-governance](https://github.com/hindermath/spec-kit-preset-cross-platform-governance) | | Explicit Task Dependencies | Adds explicit `(depends on T###)` dependency declarations and an Execution Wave DAG to tasks.md for parallel scheduling | 1 template, 1 command | — | [spec-kit-preset-explicit-task-dependencies](https://github.com/Quratulain-bilal/spec-kit-preset-explicit-task-dependencies) | | Fiction Book Writing | It adapts the Spec-Driven Development workflow for storytelling to create books or audiobooks (with annotations) in 12 languages: features become story elements, specs become story briefs, plans become story structures, and tasks 
become scene-by-scene writing tasks. Supports single and multi-POV, all major plot structure frameworks, and two style modes: an author voice sample or humanized AI prose. Supports interactive elements like brainstorming, interview, roleplay and extras like statistics, cover builder and bio command. Export with templates for KDP, D2D etc. | 22 templates, 27 commands, 2 scripts | — | [speckit-preset-fiction-book-writing](https://github.com/adaumann/speckit-preset-fiction-book-writing) | | iSAQB Architecture Governance | Adds general iSAQB/CPSA-F and arc42 architecture governance: goals, context, building blocks, runtime and deployment views, quality scenarios, ADRs, risks, and technical debt | 13 templates, 3 commands | — | [spec-kit-preset-isaqb-architecture-governance](https://github.com/hindermath/spec-kit-preset-isaqb-architecture-governance) | @@ -17,6 +20,7 @@ The following community-contributed presets customize how Spec Kit behaves — o | Multi-Repo Branching | Coordinates feature branch creation across multiple git repositories (independent repos and submodules) during plan and tasks phases | 2 commands | — | [spec-kit-preset-multi-repo-branching](https://github.com/sakitA/spec-kit-preset-multi-repo-branching) | | Pirate Speak (Full) | Transforms all Spec Kit output into pirate speak — specs become "Voyage Manifests", plans become "Battle Plans", tasks become "Crew Assignments" | 6 templates, 9 commands | — | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) | | Screenwriting | Spec-Driven Development for screenwriting/scriptwriting/tutorials: feature films, television (pilot, episode, limited series), and stage plays. Adapts the Spec Kit workflow to screenplay craft — slug lines, action lines, act breaks, beat sheets, and industry-standard pitch documents. Supports three-act, Save the Cat, TV pilot, network episode, cable/streaming episode, and stage-play structural frameworks. 
Export to Fountain, FTX, PDF | 26 templates, 32 commands, 1 script | — | [speckit-preset-screenwriting](https://github.com/adaumann/speckit-preset-screenwriting) | +| Security Governance | Adds secure development governance: memory-safe-language preference, secure code generation, NIST SSDF, CWE Top 25, OWASP ASVS, SBOM/VEX/SLSA, OpenSSF Scorecard, and EU CRA applicability | 12 templates, 3 commands | — | [spec-kit-preset-security-governance](https://github.com/hindermath/spec-kit-preset-security-governance) | | Table of Contents Navigation | Adds a navigable Table of Contents to generated spec.md, plan.md, and tasks.md documents | 3 templates, 3 commands | — | [spec-kit-preset-toc-navigation](https://github.com/Quratulain-bilal/spec-kit-preset-toc-navigation) | | VS Code Ask Questions | Enhances the clarify command to use `vscode/askQuestions` for batched interactive questioning. | 1 command | — | [spec-kit-presets](https://github.com/fdcastel/spec-kit-presets) | diff --git a/docs/reference/core.md b/docs/reference/core.md index fdab05a02b..aeef06ab79 100644 --- a/docs/reference/core.md +++ b/docs/reference/core.md @@ -22,6 +22,10 @@ specify init [<project-name>] Creates a new Spec Kit project with the necessary directory structure, templates, scripts, and AI coding agent integration files. +> [!NOTE] +> The git extension is currently enabled by default during `specify init`. +> Starting in `v0.10.0`, it will require explicit opt-in. To add it after init, run `specify extension add git`. + Use `<project-name>` to create a new directory, or `--here` (or `.`) to initialize in the current directory. If the directory already has files, use `--force` to merge without confirmation. 
### Examples diff --git a/docs/reference/integrations.md b/docs/reference/integrations.md index e1b8e60a8b..d3f9fc6282 100644 --- a/docs/reference/integrations.md +++ b/docs/reference/integrations.md @@ -43,6 +43,8 @@ specify integration list ``` Shows all available integrations, which one is currently installed, and whether each requires a CLI tool or is IDE-based. +When multiple integrations are installed, the list marks the default integration separately from the other installed integrations. +The list also shows whether each built-in integration is declared multi-install safe. ## Install an Integration @@ -53,9 +55,12 @@ specify integration install | Option | Description | | ------------------------ | ------------------------------------------------------------------------ | | `--script sh\|ps` | Script type: `sh` (bash/zsh) or `ps` (PowerShell) | +| `--force` | Opt in to installing alongside integrations that are not declared multi-install safe | | `--integration-options` | Integration-specific options (e.g. `--integration-options="--commands-dir .myagent/cmds"`) | -Installs the specified integration into the current project. Fails if another integration is already installed — use `switch` instead. If the installation fails partway through, it automatically rolls back to a clean state. +Installs the specified integration into the current project. If another integration is already installed, the command only proceeds automatically when all involved integrations are declared multi-install safe. Otherwise, use `switch` to replace the default integration or pass `--force` to explicitly opt in to multi-install. If the installation fails partway through, it automatically rolls back to a clean state. + +Installing an additional integration does not change the default integration. Use `specify integration use ` to change the default. > **Note:** All integration management commands require a project already initialized with `specify init`. 
To start a new project with a specific agent, use `specify init --integration <key>` instead. @@ -84,10 +89,22 @@ specify integration switch | Option | Description | | ------------------------ | ------------------------------------------------------------------------ | | `--script sh\|ps` | Script type: `sh` (bash/zsh) or `ps` (PowerShell) | -| `--force` | Force removal of modified files during uninstall | -| `--integration-options` | Options for the target integration | +| `--force` | Force removal of modified files during uninstall; when the target is already installed, overwrite managed shared templates while changing the default | +| `--integration-options` | Options for the target integration when it is not already installed | + +If the target integration is not already installed, equivalent to running `uninstall` followed by `install` in a single step. In this mode, `--force` controls whether modified files from the removed integration are deleted. If the target integration is already installed, `switch` only changes the default integration, like `use`; in this mode, `--force` controls whether managed shared templates are overwritten while the default changes. `--integration-options` is rejected for already-installed targets because changing integration options requires reinstalling managed files; run `upgrade --integration-options ...` first, then `use <key>`. + +## Use an Installed Integration + +```bash +specify integration use <key> +``` -Equivalent to running `uninstall` followed by `install` in a single step. +| Option | Description | +| --------- | --------------------------------------------------- | +| `--force` | Overwrite managed shared templates while changing the default | + +Sets the default integration without uninstalling any other installed integrations. This also refreshes managed shared templates so command references match the new default integration's invocation style. Modified or untracked shared templates are preserved unless `--force` is used. 
## Upgrade an Integration @@ -101,7 +118,7 @@ specify integration upgrade [] | `--script sh\|ps` | Script type: `sh` (bash/zsh) or `ps` (PowerShell) | | `--integration-options` | Options for the integration | -Reinstalls the current integration with updated templates and commands (e.g., after upgrading Spec Kit). Defaults to the currently installed integration; if a key is provided, it must match the installed one — otherwise the command fails and suggests using `switch` instead. Detects locally modified files and blocks the upgrade unless `--force` is used. Stale files from the previous install that are no longer needed are removed automatically. +Reinstalls an installed integration with updated templates and commands (e.g., after upgrading Spec Kit). Defaults to the default integration; if a key is provided, it must be one of the installed integrations. Detects locally modified files and blocks the upgrade unless `--force` is used. Stale files from the previous install that are no longer needed are removed automatically. Shared templates stay aligned with the default integration even when upgrading a non-default integration. ## Integration-Specific Options @@ -120,9 +137,39 @@ specify integration install generic --integration-options="--commands-dir .myage ## FAQ -### Can I use multiple integrations at the same time? +### Can I install multiple integrations in the same project? + +Yes, but it is intended for team portability rather than the default workflow. Multiple integrations are allowed automatically only when the installed integration and the new integration are declared multi-install safe by Spec Kit. For other combinations, pass `--force` to acknowledge that multiple agents may see unrelated agent-specific instructions or commands. 
+ +Spec Kit tracks one default integration in `.specify/integration.json` with `default_integration`, all installed integrations with `installed_integrations`, per-integration runtime settings with `integration_settings`, and a dedicated `integration_state_schema` for future state migrations. The legacy `integration` field remains as an alias for the default integration. + +### Which integrations are multi-install safe? + +An integration is multi-install safe when it uses isolated agent directories, a dedicated context file that does not collide with another safe integration, stable command invocation settings, and a separate install manifest. Shared Spec Kit templates remain aligned to the single default integration. + +The currently declared multi-install safe integrations are: + +| Key | Isolation | +| --- | --------- | +| `auggie` | `.augment/commands`, `.augment/rules/specify-rules.md` | +| `claude` | `.claude/skills`, `CLAUDE.md` | +| `codebuddy` | `.codebuddy/commands`, `CODEBUDDY.md` | +| `codex` | `.agents/skills`, `AGENTS.md` | +| `cursor-agent` | `.cursor/skills`, `.cursor/rules/specify-rules.mdc` | +| `gemini` | `.gemini/commands`, `GEMINI.md` | +| `iflow` | `.iflow/commands`, `IFLOW.md` | +| `junie` | `.junie/commands`, `.junie/AGENTS.md` | +| `kilocode` | `.kilocode/workflows`, `.kilocode/rules/specify-rules.md` | +| `kimi` | `.kimi/skills`, `KIMI.md` | +| `qodercli` | `.qoder/commands`, `QODER.md` | +| `qwen` | `.qwen/commands`, `QWEN.md` | +| `roo` | `.roo/commands`, `.roo/rules/specify-rules.md` | +| `shai` | `.shai/commands`, `SHAI.md` | +| `tabnine` | `.tabnine/agent/commands`, `TABNINE.md` | +| `trae` | `.trae/skills`, `.trae/rules/project_rules.md` | +| `windsurf` | `.windsurf/workflows`, `.windsurf/rules/specify-rules.md` | -No. Only one AI coding agent integration can be installed per project. Use `specify integration switch ` to change to a different AI coding agent. 
+Integrations that share a context file or command directory with another integration, require dynamic install paths such as `--commands-dir`, or merge shared tool settings are not declared safe by default. They can still be installed alongside another integration with `--force`. ### What happens to my changes when I uninstall or switch? @@ -138,4 +185,4 @@ CLI-based integrations (like Claude Code, Gemini CLI) require the tool to be ins ### When should I use `upgrade` vs `switch`? -Use `upgrade` when you've upgraded Spec Kit and want to refresh the same integration's templates. Use `switch` when you want to change to a different AI coding agent. +Use `upgrade` when you've upgraded Spec Kit and want to refresh an installed integration's managed files. Use `switch` when you want to replace the current default with another integration; if the target is already installed, `switch` behaves like `use`. diff --git a/extensions/catalog.community.json b/extensions/catalog.community.json index e336b95107..a952e8b9cc 100644 --- a/extensions/catalog.community.json +++ b/extensions/catalog.community.json @@ -1,6 +1,6 @@ { "schema_version": "1.0", - "updated_at": "2026-04-29T00:00:00Z", + "updated_at": "2026-05-01T15:01:47Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json", "extensions": { "aide": { @@ -1280,8 +1280,8 @@ "id": "memory-md", "description": "Repository-native durable memory for Spec Kit projects", "author": "DyanGalih", - "version": "0.6.2", - "download_url": "https://github.com/DyanGalih/spec-kit-memory-hub/archive/refs/tags/v0.6.2.zip", + "version": "0.6.9", + "download_url": "https://github.com/DyanGalih/spec-kit-memory-hub/archive/refs/tags/v0.6.9.zip", "repository": "https://github.com/DyanGalih/spec-kit-memory-hub", "homepage": "https://github.com/DyanGalih/spec-kit-memory-hub", "documentation": "https://github.com/DyanGalih/spec-kit-memory-hub/blob/main/README.md", @@ -1305,7 +1305,7 @@ "downloads": 
0, "stars": 0, "created_at": "2026-04-23T00:00:00Z", - "updated_at": "2026-04-23T00:00:00Z" + "updated_at": "2026-05-01T14:48:00Z" }, "memorylint": { "name": "MemoryLint", @@ -1931,8 +1931,8 @@ "id": "security-review", "description": "Full-project secure-by-design security audits plus staged, branch/PR, plan, task, follow-up, and apply reviews", "author": "DyanGalih", - "version": "1.3.0", - "download_url": "https://github.com/DyanGalih/spec-kit-security-review/archive/refs/tags/v1.3.0.zip", + "version": "1.3.3", + "download_url": "https://github.com/DyanGalih/spec-kit-security-review/archive/refs/tags/v1.3.3.zip", "repository": "https://github.com/DyanGalih/spec-kit-security-review", "homepage": "https://github.com/DyanGalih/spec-kit-security-review", "documentation": "https://github.com/DyanGalih/spec-kit-security-review/blob/main/README.md", @@ -1956,7 +1956,7 @@ "downloads": 0, "stars": 0, "created_at": "2026-04-03T03:24:03Z", - "updated_at": "2026-04-29T00:00:00Z" + "updated_at": "2026-05-01T14:48:00Z" }, "sf": { "name": "SFSpeckit — Salesforce Spec-Driven Development", @@ -2095,6 +2095,38 @@ "created_at": "2026-04-20T00:00:00Z", "updated_at": "2026-04-21T00:00:00Z" }, + "spec2cloud": { + "name": "Spec2Cloud", + "id": "spec2cloud", + "description": "Spec-driven workflow tuned for shipping to Azure: spec → plan → tasks → implement → deploy.", + "author": "Azure Samples", + "version": "1.1.0", + "download_url": "https://github.com/Azure-Samples/Spec2Cloud/releases/download/spec-kit-spec2cloud-v1.1.0/extension.zip", + "repository": "https://github.com/Azure-Samples/Spec2Cloud", + "homepage": "https://aka.ms/spec2cloud", + "documentation": "https://github.com/Azure-Samples/Spec2Cloud/blob/main/spec-kit/README.md", + "changelog": "https://github.com/Azure-Samples/Spec2Cloud/blob/main/spec-kit/CHANGELOG.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.4.0" + }, + "provides": { + "commands": 2, + "hooks": 0 + }, + "tags": [ + "spec2cloud", + 
"azure", + "cloud", + "deploy", + "workflow" + ], + "verified": false, + "downloads": 0, + "stars": 0, + "created_at": "2026-04-30T00:00:00Z", + "updated_at": "2026-04-30T00:00:00Z" + }, "speckit-utils": { "name": "SDD Utilities", "id": "speckit-utils", @@ -2463,6 +2495,37 @@ "created_at": "2026-04-25T00:00:00Z", "updated_at": "2026-04-25T00:00:00Z" }, + "token-analyzer": { + "name": "Token Consumption Analyzer", + "id": "token-analyzer", + "description": "Captures, analyzes, and compares token consumption across SDD workflows", + "author": "Chris Roberts | coderandhiker", + "version": "0.1.0", + "download_url": "https://github.com/coderandhiker/spec-kit-token-analyzer/archive/refs/tags/v0.1.0.zip", + "repository": "https://github.com/coderandhiker/spec-kit-token-analyzer", + "homepage": "https://github.com/coderandhiker/spec-kit-token-analyzer", + "documentation": "https://github.com/coderandhiker/spec-kit-token-analyzer/blob/main/README.md", + "changelog": "https://github.com/coderandhiker/spec-kit-token-analyzer/blob/main/CHANGELOG.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.2.0" + }, + "provides": { + "commands": 3, + "hooks": 4 + }, + "tags": [ + "tokens", + "measurement", + "optimization", + "analysis" + ], + "verified": false, + "downloads": 0, + "stars": 0, + "created_at": "2026-05-01T00:00:00Z", + "updated_at": "2026-05-01T00:00:00Z" + }, "v-model": { "name": "V-Model Extension Pack", "id": "v-model", @@ -2758,7 +2821,7 @@ "downloads": 0, "stars": 0, "created_at": "2026-04-13T00:00:00Z", - "updated_at": "2026-04-13T00:00:00Z" + "updated_at": "2026-04-13T00:00:00Z" } } } diff --git a/newsletters/2026-April.md b/newsletters/2026-April.md new file mode 100644 index 0000000000..913dedaf23 --- /dev/null +++ b/newsletters/2026-April.md @@ -0,0 +1,147 @@ +# Spec Kit - April 2026 Newsletter + +This edition covers Spec Kit activity in April 2026. 
Seventeen releases shipped (v0.4.4 through v0.8.3), delivering a full integration plugin architecture, a workflow engine, preset composition strategies, an integration catalog, and comprehensive documentation. The community extension catalog tripled from 26 to 83 entries, community presets grew from 2 to 12, and Spec Kit appeared on the Thoughtworks Technology Radar. A summary is in the table below, followed by details. + +| **Spec Kit Core (Apr 2026)** | **Community & Content** | **SDD Ecosystem & Next** | +| --- | --- | --- | +| Seventeen releases shipped with major features: integration plugin architecture, workflow engine, preset composition, integration catalog, bundled lean preset, documentation site, and academic citation support. Three new agents added (Forgecode, Goose, Devin for Terminal). The repo grew from ~82k to **92,038 stars**. [\[github.com\]](https://github.com/github/spec-kit/releases) | Thoughtworks Technology Radar placed Spec Kit in the "Assess" ring. Community catalog grew from 26 to **83 extensions** and from 2 to **12 presets**. 12 substantive external articles published. XB Software documented a real legacy project. Fabián Silva shipped the Caramelo VS Code extension. | Matt Rickard argued for "smaller specs, harder checks." Will Torber's three-framework comparison recommended OpenSpec for most teams. The "Spec Layer" debate emerged: specs as constraint surfaces for AI agents. Spec Kit leads in breadth and portability; competitors differentiate on drift detection and orchestration depth. | + +*** + +> **Important:** April's release pace outran external coverage. Most analyses published during the month (Rickard on April 1, Thoughtworks Radar on April 15, XB Software on April 17, Torber on April 23) were evaluating versions that predated the workflow engine (v0.7.0), integration catalog (v0.7.2), preset composition (v0.8.0), and catalog discovery CLI (v0.8.3). 
The ceremony and flexibility concerns they raised are precisely what these features address — the lean preset, pluggable workflows, composable presets, and community extensions like Conduct, MAQA, and Fleet Orchestrator already deliver alternative workflows beyond the default SDD process. We look forward to seeing how upcoming reviews account for these capabilities. + +## Spec Kit Project Updates + +### Releases Overview + +**v0.4.4** (April 1) delivered the first stage of the **integration plugin architecture** — base classes, a manifest system, and a registry that replaced the hard-coded agent scaffolding. It also added the Product Forge, Superpowers Bridge, MAQA suite (7 extensions), Spec Kit Onboard, and Plan Review Gate to the community catalog, fixed Claude Code CLI detection for npm-local installs, and added `--allow-existing-branch` to `create-new-feature`. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.4.4) + +**v0.4.5** (April 2) completed the integration migration in five stages: standard markdown integrations for 19 agents, TOML integrations (Gemini, Tabnine), skills and generic integrations, and removal of the legacy scaffold path. It also installed Claude Code as native skills, added a `--dry-run` flag for `create-new-feature`, support for 4+ digit feature branch numbers, the Fix Findings extension, and five lifecycle extensions to the community catalog. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.4.5) + +**v0.5.0** (April 2) was a significant packaging change: **template zip bundles were removed from releases**, with the CLI itself now handling all scaffolding. This ensured CLI and templates stay in sync. It also introduced `DEVELOPMENT.md` for contributor onboarding. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.5.0) + +**v0.5.1** (April 8) was a large patch release. 
It added the **bundled Git extension** (stages 1 and 2) with hooks on all core commands and `GIT_BRANCH_NAME` override support, **Forgecode** agent support, and the `specify integration` subcommand for post-init integration management. Argument hints were added to Claude Code commands. Numerous community extensions joined the catalog (Confluence, Canon, Spec Diagram, Branch Convention, Spec Refine, FixIt, Optimize, Security Review) along with presets (explicit-task-dependencies, toc-navigation, VS Code Ask Questions). Bug fixes included pinning typer≥0.24.0/click≥8.2.1 to fix an import crash, BSD-portable sed escaping, Trae agent fix, TOML frontmatter stripping, and preventing ambiguous TOML closing quotes. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.5.1) + +**v0.6.0** (April 9) rewrote **AGENTS.md for the new integration architecture**, added the SpecKit Companion to Community Friends, and brought Bugfix Workflow, Worktree Isolation, and MemoryLint to the community catalog. A new multi-repo-branching preset arrived. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.6.0) + +**v0.6.1** (April 10) added the **bundled lean preset** with a minimal workflow command set — a lighter-weight alternative to the full SDD ceremony. It also migrated **Cursor** from `.cursor/commands` to `.cursor/skills` and added Brownfield Bootstrap, CI Guard, SpecTest, PR Bridge, TinySpec, and Status Report to the community catalog. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.6.1) + +**v0.6.2** (April 13) added **Goose AI agent** support (YAML-based recipe format), the GitHub Issues Integration extension, and the What-if Analysis extension. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.6.2) + +**v0.7.0** (April 14) delivered the **workflow engine with catalog system**, enabling pluggable, multi-step workflow definitions. 
It added SFSpeckit (Salesforce SDD), the Worktrees extension, optional single-segment branch prefix for gitflow compatibility, and the claude-ask-questions and fiction-book-writing presets. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.7.0) + +**v0.7.1** (April 15) deprecated the `--ai` flag in favor of `--integration` on `specify init`, added Windows to the CI test matrix, fixed Claude skill chaining for hook execution, merged TESTING.md into CONTRIBUTING.md, and added the Agent Assign and Architect Preview extensions. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.7.1) + +**v0.7.2** (April 16) delivered the **integration catalog** for discovery, versioning, and community distribution of agent integrations. It also produced a major **documentation overhaul**: reference pages for core commands, extensions, presets, workflows, and integrations were added to `docs/reference/`, and the README CLI section was simplified. The Issues extension and Catalog CI extension joined the community catalog. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.7.2) + +**v0.7.3** (April 17) replaced shell-based context updates with a **marker-based upsert** mechanism, eliminating accidental context file bloat. It added a **Community Friends page** to the docs site, the Spec Scope and Blueprint extensions, and a Claude Code/Copilot CLI plugin marketplace reference in the README. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.7.3) + +**v0.7.4** (April 21) added **CITATION.cff and .zenodo.json** for academic citation support. It introduced Ripple (side-effect detection), Spec Validate, Version Guard, Spec Reference Loader, and Memory Loader extensions. A fix stripped UTF-8 BOM from agent context files, and the Antigravity (agy) agent layout was migrated to `.agents/` with `--skills` deprecated. 
[\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.7.4) + +**v0.7.5** (April 22) added `specify self check` and `self upgrade` stubs, the **preset wrap strategy** (completing the composition trifecta alongside prepend and append), the Red Team adversarial review extension, the Wireframe extension, and a **directory traversal security fix** in command write paths. Skill placeholder resolution was expanded to all SKILL.md agents. Community content (walkthroughs and presets) was moved from the README to the docs site. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.7.5) + +**v0.8.0** (April 23) delivered **preset composition strategies** (prepend, append, wrap) for templates, commands, and scripts — enabling presets to layer content around existing artifacts. It also added Copilot `--integration-options="--skills"` for skills-based scaffolding, `pipx` as an alternative installation method, and the Memory MD extension. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.8.0) + +**v0.8.1** (April 24) fixed `/speckit.plan` on custom git branches via `.specify/feature.json`, migrated the **Mistral Vibe** integration to SkillsIntegration, added the **Screenwriting** and **Jira** presets, and resolved command reference formats per integration type (dot vs. hyphen notation). [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.8.1) + +**v0.8.2** (April 28) introduced **GITHUB_TOKEN/GH_TOKEN authentication** for private catalog and extension downloads, deprecated the `--no-git` flag (removal gated at v0.10.0), replaced all deprecated `--ai` references with `--integration` in documentation, and added MarkItDown Document Converter, Microsoft 365 Integration, Spec Orchestrator, and the Fiction Book Writing v1.7 preset with RAG (Chroma DB) offline semantic search. 
[\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.8.2) + +**v0.8.3** (April 29) closed the month with **catalog discovery CLI commands** (search, info, catalog list/add/remove), support for **Devin for Terminal** as a skills-based integration, a fix for the opencode command dispatch, and the OWASP LLM Threat Model, iSAQB Architecture Governance, and Work IQ extensions. A fix was also added to the upgrade hint to prevent users from accidentally installing a PyPI squat package. [\[github.com\]](https://github.com/github/spec-kit/releases/tag/v0.8.3) + +### Architecture & Infrastructure Highlights + +The most significant architectural change in April was the **integration plugin architecture** (v0.4.4–v0.4.5), which replaced hard-coded agent scaffolding with a registry of self-describing integration classes. Each agent is now a self-contained subpackage under `src/specify_cli/integrations/<agent>/` with base classes for Markdown, TOML, YAML, and Skills formats. This six-stage migration touched all 28 supported agents and laid the groundwork for the integration catalog (v0.7.2) and community-distributed integrations. + +The **workflow engine** (v0.7.0) introduced a catalog-based system for pluggable, multi-step workflow definitions — moving beyond the fixed seven-step SDD sequence. + +**Preset composition strategies** (v0.7.5/v0.8.0) completed the preset system with prepend, append, and wrap modes. Presets can now layer content around existing templates, commands, and scripts rather than only replacing them. + +The **marker-based context upsert** (v0.7.3) replaced fragile shell-based sed operations for updating agent context files, eliminating a class of bugs around context bloat and encoding issues. + +**Template zip bundles were removed** (v0.5.0), coupling the CLI and templates into a single distributable artifact. 
+ +### Bug Fixes and Security + +The most critical fix was **blocking directory traversal in command write paths** (#2229, v0.7.5), which prevented a potential path traversal vulnerability in the CommandRegistrar. Other security-adjacent fixes included hardening against a **PyPI squat package** in upgrade hints (v0.8.3) and adding **GITHUB_TOKEN authentication** for private catalog downloads (v0.8.2). + +Notable bug fixes: typer/click import crash (v0.5.1), BSD-portable sed escaping (v0.5.1), UTF-8 BOM stripping from context files (v0.7.4), CRLF warning suppression in PowerShell auto-commit (v0.7.3), Claude skill chaining for hooks (v0.7.1), TOML ambiguous closing quotes (v0.5.1), and custom branch support for `/speckit.plan` (v0.8.1). [\[github.com\]](https://github.com/github/spec-kit/releases) + +### The Extension & Preset Ecosystem + +The community extension catalog **tripled** during April, growing from 26 to **83 entries**. 59 new extensions were added and 2 were removed (Cognitive Squad and Understanding, whose repositories were no longer available). Community presets grew from 2 to **12 entries**, with 10 new presets added. 
+ +Notable new extensions by category: + +- **Project management**: GitHub Issues Integration (Fatima367, aaronrsun), Spec Orchestrator (Quratulain-bilal), Agent Assign (xuyang), Status Report (Open-Agent-Tools) +- **Quality & security**: Red Team adversarial review (Ash Brener), Security Review (DyanGalih), Ripple side-effect detection (chordpli), Spec Validate (Ahmed Eltayeb), CI Guard (Quratulain-bilal), OWASP LLM Threat Model (NaviaSamal) +- **Multi-agent & orchestration**: MAQA suite with 7 extensions covering multi-agent QA, Jira, Azure DevOps, GitHub Projects, Linear, and Trello integrations (GenieRobot), Product Forge (VaiYav) +- **Spec lifecycle**: Spec Refine (Quratulain-bilal), Bugfix Workflow (Quratulain-bilal), Fix Findings (Quratulain-bilal), Brownfield Bootstrap (Quratulain-bilal), TinySpec (Quratulain-bilal) +- **Developer experience**: Blueprint code review (chordpli), Confluence (aaronrsun), MarkItDown Document Converter (BenBtg), Microsoft 365 Integration (BenBtg), Memory MD (DyanGalih), Memory Loader (KevinBrown5280), MemoryLint (RbBtSn0w) +- **Domain-specific**: SFSpeckit for Salesforce (Sumanth Yanamala), iSAQB Architecture Governance preset (Thorsten Hindermann), Canon baseline-driven workflows (Maxim Stupakov) +- **Creative**: Fiction Book Writing preset v1.7 with RAG/Chroma DB support (Andreas Daumann), Screenwriting preset (Andreas Daumann) + +Notable contributor **Quratulain-bilal** contributed 15 extensions during the month, spanning spec lifecycle, workflow management, and CI/CD integration. **GenieRobot** contributed the 7-extension MAQA suite. **BenBtg** contributed both MarkItDown and Microsoft 365 integrations. [\[github.com\]](https://github.com/github/spec-kit/releases) + +### Documentation Overhaul + +April saw a comprehensive documentation effort. Reference pages for **core commands, extensions, presets, workflows, and integrations** were created under `docs/reference/`. 
Community content — **walkthroughs, presets, and a Community Friends page** — was moved from the README to `docs/community/`, reducing README length while improving discoverability. The deprecated `--ai` flag references were replaced with `--integration` across all documentation. TESTING.md was merged into CONTRIBUTING.md, and `DEVELOPMENT.md` was introduced for contributor onboarding. [\[github.com\]](https://github.com/github/spec-kit/releases) + +## Community & Content + +### Thoughtworks Technology Radar + +On **April 15**, the **Thoughtworks Technology Radar Volume 34** placed GitHub Spec Kit in the **"Assess" ring** under Languages & Frameworks. The blip noted that teams report value in brownfield projects, that the constitution captures project scope and architecture, but flagged potential **instruction bloat, context rot, and verbose markdown output** as concerns to watch. This is the first appearance of any SDD-specific tool on the Radar. [\[thoughtworks.com\]](https://www.thoughtworks.com/radar/languages-and-frameworks/github-spec-kit) + +### Developer Articles and Blog Posts + +April produced 12 substantive external articles (plus one excluded as AI-generated SEO spam). + +**Matt Rickard** published *"The Spec Layer: Why Spec-Driven Development (SDD) Works"* on April 1. His thesis: specs reduce execution freedom for AI agents, functioning as constraint surfaces. He compared Spec Kit, Kiro, OpenSpec, Tessl, Intent, and Symphony, and advocated for **"smaller specs, harder checks, less guessing."** [\[blog.matt-rickard.com\]](https://blog.matt-rickard.com/p/the-spec-layer) + +**Fabián Silva** published *"I Built a Visual Spec-Driven Development Extension for VS Code That Works With Any LLM"* on April 3 on DEV Community. His **Caramelo** VS Code extension adds a visual UI, approval gates, Jira integration, and multi-LLM support on top of Spec Kit's workflow, reading and writing the standard `specs/` directory. 
[\[dev.to\]](https://dev.to/fabian_silva_/i-built-a-visual-spec-driven-development-extension-for-vs-code-that-works-with-any-llm-36ok) + +**James M** published *"GitHub Spec Kit in 2026: SDD Goes Mainstream"* on April 4, calling the transition "from framework to platform" and highlighting Claude Code native skills, multi-agent support, and the massive ecosystem growth. [\[jamesm.blog\]](https://jamesm.blog/ai/github-spec-kit-2026-update/) + +**Peter Saktor** published a detailed tutorial on DEV Community on April 6: *"GitHub Spec-Kit: From Vibe Coding to Spec-Driven Development,"* walking through a full 7-step SDD workflow refactoring an Azure Container App with 33 tasks across 6 phases. [\[dev.to\]](https://dev.to/petersaktor/github-spec-kit-from-vibe-coding-to-spec-driven-development-1pgd) + +**Codexplorer** published *"Spec Kit: GitHub's Answer to 'The AI Built the Wrong Thing Again'"* on Medium (April 11), framing Spec Kit as flipping the spec-code relationship, with Go code examples covering the seven slash commands. [\[medium.com\]](https://codexplorer.medium.com/spec-kit-githubs-answer-to-the-ai-built-the-wrong-thing-again-22f122f142fb) + +**XB Software** published *"Spec Kit on a Real Project: Implementation Experience in Large Legacy Code"* on April 17 — a field report from applying SDD to legacy systems. A week-long task was completed in half the time. The AI surfaced hidden requirements gaps. They noted API integration weakness, that SDD is overkill for small tasks, and that an experienced reviewer is still essential. [\[xbsoftware.com\]](https://xbsoftware.com/blog/ai-in-legacy-systems-spec-driven-development/) + +**What IT Is** published *"Perspectives in Spec Driven Development"* on April 21, surveying the SDD landscape (Spec Kit, Kiro, Tessl) and calling Spec Kit "a good entry point." 
[\[theitsolutionist.com\]](https://theitsolutionist.com/2026/04/21/perspectives-in-spec-driven-development/) + +**Will Torber** published *"Spec Kit vs BMAD vs OpenSpec: Choosing an SDD Framework in 2026"* on DEV Community on April 23. He recommended Spec Kit for greenfield but flagged brownfield friction and the branch-per-spec limitation, ultimately **recommending OpenSpec for most teams**. [\[dev.to\]](https://dev.to/willtorber/spec-kit-vs-bmad-vs-openspec-choosing-an-sdd-framework-in-2026-d3j) + +**Truong Phung** published *"Spec Kit vs. Superpowers: A Comprehensive Comparison & Practical Guide to Combining Both"* on DEV Community on April 25 — an 11-section comparison proposing a hybrid workflow: "Spec Kit plans WHAT, Superpowers controls HOW," with a step-by-step playbook. [\[dev.to\]](https://dev.to/truongpx396/spec-kit-vs-superpowers-a-comprehensive-comparison-practical-guide-to-combining-both-52jj) + +**Markus Wondrak** published *"Re-evaluating GitHub's Spec Kit: Structured SDLC Automation"* on LinkedIn on April 26, examining Spec Kit as a structured SDLC automation approach requiring human review at phase boundaries. [\[linkedin.com\]](https://www.linkedin.com/pulse/re-evaluating-githubs-spec-kit-structured-sdlc-markus-wondrak-eewqf/) + +**FintechExtra** published a factual release-notes summary of v0.8.2 on April 28, highlighting authenticated catalog downloads, the UTF-8 manifest fix, and the Chroma DB semantic search in the fiction writing preset. [\[fintechextra.com\]](https://www.fintechextra.com/news/github-spec-kit-v082-expands-catalog-support-and-tightens-cli-behavior-331) + +### Community Friends and Tools + +The **SpecKit Companion** VS Code extension was added to the Community Friends section (v0.6.0). A community-maintained plugin for **Claude Code and GitHub Copilot CLI** that installs Spec Kit skills via the plugin marketplace was referenced in the README (v0.7.3). 
Fabián Silva's **Caramelo** VS Code extension demonstrated a visual UI approach to SDD. [\[github.com\]](https://github.com/github/spec-kit) + +## SDD Ecosystem & Industry Trends + +### The "Spec Layer" Debate + +Matt Rickard's "The Spec Layer" essay established a new framing for SDD: specifications as **constraint surfaces** that reduce execution freedom for AI agents. His comparison of six SDD tools argued for smaller, more focused specs with harder verification checks — a departure from comprehensive specification documents. This framing resonated across the community, with the Thoughtworks Radar entry and multiple comparison articles echoing the tension between spec depth and practical overhead. + +### Competitive Landscape + +**Will Torber's** three-framework comparison (Spec Kit, BMAD, OpenSpec) recommended **OpenSpec for most teams**, citing lower ceremony and better brownfield support. **Truong Phung** proposed combining Spec Kit with **Superpowers** (Jesse Vincent) for a "plan WHAT + control HOW" hybrid. These comparisons reflected a maturing market where practitioners combine tools rather than picking one. + +The **Thoughtworks Radar** placement validated SDD as a category worth tracking but flagged instruction bloat and context rot as open concerns — the same issues the Augment Code comparison raised in March. XB Software's field report confirmed these in practice: SDD adds value for complex legacy work but creates unnecessary overhead for small tasks. + +Spec Kit continued to lead in **GitHub popularity** (92k stars) and **agent breadth** (29 integrations). The market continued to differentiate along several axes: Spec Kit on portability and ecosystem breadth, Intent on living specs and drift detection, BMAD-METHOD on multi-agent orchestration, and OpenSpec on simplicity. 
[\[dev.to\]](https://dev.to/willtorber/spec-kit-vs-bmad-vs-openspec-choosing-an-sdd-framework-in-2026-d3j) [\[thoughtworks.com\]](https://www.thoughtworks.com/radar/languages-and-frameworks/github-spec-kit) + +## Roadmap + +Areas under discussion or in progress for future development: + +- **Spec lifecycle management** — context rot and spec drift remained the most cited concern across articles (Thoughtworks Radar, XB Software, Will Torber). The marker-based upsert (v0.7.3) addressed context file drift; spec-level drift detection remains an open area. The Reconcile and Archive extensions are community steps toward this. [\[thoughtworks.com\]](https://www.thoughtworks.com/radar/languages-and-frameworks/github-spec-kit) +- **Workflow customization** — the workflow engine (v0.7.0) and preset composition strategies (v0.8.0) provide the foundation. Community presets for fiction writing, screenwriting, Jira tracking, and architecture governance demonstrate the breadth of possible workflows beyond standard SDD. [\[github.com\]](https://github.com/github/spec-kit/releases) +- **Catalog discovery and distribution** — the integration catalog (v0.7.2) and catalog discovery CLI (v0.8.3) bring `specify` closer to a package-manager experience for extensions, presets, and integrations. Private catalog authentication (v0.8.2) supports enterprise distribution. [\[github.com\]](https://github.com/github/spec-kit/releases) +- **Experience simplification** — the bundled lean preset (v0.6.1), `specify self check` (v0.7.5), and the deprecation of `--ai` in favor of `--integration` (v0.7.1) reflect ongoing work to reduce ceremony and improve the onboarding experience. Multiple external articles (Torber, XB Software) noted SDD overhead as a barrier. 
[\[dev.to\]](https://dev.to/willtorber/spec-kit-vs-bmad-vs-openspec-choosing-an-sdd-framework-in-2026-d3j) +- **Cross-platform and enterprise** — Windows CI (v0.7.1), GITHUB_TOKEN authentication (v0.8.2), Salesforce-specific extensions, and the iSAQB architecture governance preset indicate growing enterprise adoption. [\[github.com\]](https://github.com/github/spec-kit) diff --git a/presets/catalog.community.json b/presets/catalog.community.json index 7031652bfd..8064bfc960 100644 --- a/presets/catalog.community.json +++ b/presets/catalog.community.json @@ -3,6 +3,34 @@ "updated_at": "2026-04-27T00:00:00Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json", "presets": { + "a11y-governance": { + "name": "A11Y Governance", + "id": "a11y-governance", + "version": "0.2.0", + "description": "Adds accessibility, bilingual DE/EN delivery, CEFR-B2 readability, and inclusive-content governance to Spec Kit.", + "author": "Thorsten Hindermann", + "repository": "https://github.com/hindermath/spec-kit-preset-a11y-governance", + "download_url": "https://github.com/hindermath/spec-kit-preset-a11y-governance/archive/refs/tags/v0.2.0.zip", + "homepage": "https://github.com/hindermath/spec-kit-preset-a11y-governance", + "documentation": "https://github.com/hindermath/spec-kit-preset-a11y-governance/blob/main/README.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.8.0" + }, + "provides": { + "templates": 9, + "commands": 3 + }, + "tags": [ + "a11y", + "accessibility", + "bilingual", + "wcag", + "inclusion" + ], + "created_at": "2026-04-27T00:00:00Z", + "updated_at": "2026-04-27T00:00:00Z" + }, "aide-in-place": { "name": "AIDE In-Place Migration", "id": "aide-in-place", @@ -16,7 +44,9 @@ "license": "MIT", "requires": { "speckit_version": ">=0.2.0", - "extensions": ["aide"] + "extensions": [ + "aide" + ] }, "provides": { "templates": 2, @@ -29,6 +59,34 @@ "aide" ] }, + "architecture-governance": { + "name": 
"Architecture Governance", + "id": "architecture-governance", + "version": "0.2.0", + "description": "Adds secure architecture governance, threat modeling, STRIDE/CAPEC, Zero Trust, S-ADRs, and OWASP SAMM to Spec Kit.", + "author": "Thorsten Hindermann", + "repository": "https://github.com/hindermath/spec-kit-preset-architecture-governance", + "download_url": "https://github.com/hindermath/spec-kit-preset-architecture-governance/archive/refs/tags/v0.2.0.zip", + "homepage": "https://github.com/hindermath/spec-kit-preset-architecture-governance", + "documentation": "https://github.com/hindermath/spec-kit-preset-architecture-governance/blob/main/README.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.8.0" + }, + "provides": { + "templates": 11, + "commands": 3 + }, + "tags": [ + "architecture", + "governance", + "threat-modeling", + "stride", + "zero-trust" + ], + "created_at": "2026-04-27T00:00:00Z", + "updated_at": "2026-04-27T00:00:00Z" + }, "canon-core": { "name": "Canon Core", "id": "canon-core", @@ -80,6 +138,34 @@ "created_at": "2026-04-13T00:00:00Z", "updated_at": "2026-04-13T00:00:00Z" }, + "cross-platform-governance": { + "name": "Cross-Platform Governance", + "id": "cross-platform-governance", + "version": "0.1.0", + "description": "Adds Bash and PowerShell parity, dry-run/WhatIf parity, man-page expectations, and Verb-Noun Cmdlet discipline.", + "author": "Thorsten Hindermann", + "repository": "https://github.com/hindermath/spec-kit-preset-cross-platform-governance", + "download_url": "https://github.com/hindermath/spec-kit-preset-cross-platform-governance/archive/refs/tags/v0.1.0.zip", + "homepage": "https://github.com/hindermath/spec-kit-preset-cross-platform-governance", + "documentation": "https://github.com/hindermath/spec-kit-preset-cross-platform-governance/blob/main/README.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.8.0" + }, + "provides": { + "templates": 8, + "commands": 3 + }, + "tags": [ + 
"cross-platform", + "bash", + "powershell", + "man-page", + "cmdlet" + ], + "created_at": "2026-04-27T00:00:00Z", + "updated_at": "2026-04-27T00:00:00Z" + }, "explicit-task-dependencies": { "name": "Explicit Task Dependencies", "id": "explicit-task-dependencies", @@ -287,6 +373,34 @@ "created_at": "2026-04-23T08:00:00Z", "updated_at": "2026-04-23T08:00:00Z" }, + "security-governance": { + "name": "Security Governance", + "id": "security-governance", + "version": "0.2.0", + "description": "Adds secure development governance, MSL preference, ASVS verification, supply-chain transparency, and EU CRA awareness.", + "author": "Thorsten Hindermann", + "repository": "https://github.com/hindermath/spec-kit-preset-security-governance", + "download_url": "https://github.com/hindermath/spec-kit-preset-security-governance/archive/refs/tags/v0.2.0.zip", + "homepage": "https://github.com/hindermath/spec-kit-preset-security-governance", + "documentation": "https://github.com/hindermath/spec-kit-preset-security-governance/blob/main/README.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.8.0" + }, + "provides": { + "templates": 12, + "commands": 3 + }, + "tags": [ + "security", + "governance", + "msl", + "asvs", + "supply-chain" + ], + "created_at": "2026-04-27T00:00:00Z", + "updated_at": "2026-04-27T00:00:00Z" + }, "toc-navigation": { "name": "Table of Contents Navigation", "id": "toc-navigation", diff --git a/pyproject.toml b/pyproject.toml index 2c5980d38f..98920d8549 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "specify-cli" -version = "0.8.4.dev0" +version = "0.8.5.dev0" description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)." 
requires-python = ">=3.11" dependencies = [ diff --git a/scripts/bash/setup-tasks.sh b/scripts/bash/setup-tasks.sh new file mode 100644 index 0000000000..3f6a40b12d --- /dev/null +++ b/scripts/bash/setup-tasks.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +set -e + +# Parse command line arguments +JSON_MODE=false + +for arg in "$@"; do + case "$arg" in + --json) JSON_MODE=true ;; + --help|-h) + echo "Usage: $0 [--json]" + echo " --json Output results in JSON format" + echo " --help Show this help message" + exit 0 + ;; + *) echo "ERROR: Unknown option '$arg'" >&2; exit 1 ;; + esac +done + +# Source common functions +SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/common.sh" + +# Get feature paths +_paths_output=$(get_feature_paths) || { echo "ERROR: Failed to resolve feature paths" >&2; exit 1; } +eval "$_paths_output" +unset _paths_output + +# Validate branch +# If feature.json pins an existing feature directory, branch naming is not required. +if ! feature_json_matches_feature_dir "$REPO_ROOT" "$FEATURE_DIR"; then + check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1 +fi + +if [[ ! -f "$IMPL_PLAN" ]]; then + echo "ERROR: plan.md not found in $FEATURE_DIR" >&2 + echo "Run /speckit.plan first to create the implementation plan." >&2 + exit 1 +fi + +if [[ ! -f "$FEATURE_SPEC" ]]; then + echo "ERROR: spec.md not found in $FEATURE_DIR" >&2 + echo "Run /speckit.specify first to create the feature structure." >&2 + exit 1 +fi + +# Build available docs list +docs=() +[[ -f "$RESEARCH" ]] && docs+=("research.md") +[[ -f "$DATA_MODEL" ]] && docs+=("data-model.md") +if [[ -d "$CONTRACTS_DIR" ]] && [[ -n "$(ls -A "$CONTRACTS_DIR" 2>/dev/null)" ]]; then + docs+=("contracts/") +fi +[[ -f "$QUICKSTART" ]] && docs+=("quickstart.md") + +# Resolve tasks template through override stack +TASKS_TEMPLATE=$(resolve_template "tasks-template" "$REPO_ROOT") || true +if [[ -z "$TASKS_TEMPLATE" ]] || [[ ! 
-f "$TASKS_TEMPLATE" ]]; then + echo "ERROR: Could not resolve required tasks-template from the template override stack for $REPO_ROOT" >&2 + echo "Template 'tasks-template' was not found in any supported location (overrides, presets, extensions, or shared core). Add an override at .specify/templates/overrides/tasks-template.md, or run 'specify init' / reinstall shared infra to restore the core .specify/templates/tasks-template.md template." >&2 + exit 1 +fi + +# Output results +if $JSON_MODE; then + if has_jq; then + if [[ ${#docs[@]} -eq 0 ]]; then + json_docs="[]" + else + json_docs=$(printf '%s\n' "${docs[@]}" | jq -R . | jq -s .) + fi + jq -cn \ + --arg feature_dir "$FEATURE_DIR" \ + --argjson docs "$json_docs" \ + --arg tasks_template "${TASKS_TEMPLATE:-}" \ + '{FEATURE_DIR:$feature_dir,AVAILABLE_DOCS:$docs,TASKS_TEMPLATE:$tasks_template}' + else + if [[ ${#docs[@]} -eq 0 ]]; then + json_docs="[]" + else + json_docs=$(for d in "${docs[@]}"; do printf '"%s",' "$(json_escape "$d")"; done) + json_docs="[${json_docs%,}]" + fi + printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s,"TASKS_TEMPLATE":"%s"}\n' \ + "$(json_escape "$FEATURE_DIR")" "$json_docs" "$(json_escape "${TASKS_TEMPLATE:-}")" + fi +else + echo "FEATURE_DIR: $FEATURE_DIR" + echo "TASKS_TEMPLATE: ${TASKS_TEMPLATE:-not found}" + echo "AVAILABLE_DOCS:" + check_file "$RESEARCH" "research.md" + check_file "$DATA_MODEL" "data-model.md" + check_dir "$CONTRACTS_DIR" "contracts/" + check_file "$QUICKSTART" "quickstart.md" +fi diff --git a/scripts/powershell/setup-tasks.ps1 b/scripts/powershell/setup-tasks.ps1 new file mode 100644 index 0000000000..e00ae7a02f --- /dev/null +++ b/scripts/powershell/setup-tasks.ps1 @@ -0,0 +1,74 @@ +#!/usr/bin/env pwsh + +[CmdletBinding()] +param( + [switch]$Json, + [switch]$Help +) + +$ErrorActionPreference = 'Stop' + +if ($Help) { + Write-Output "Usage: setup-tasks.ps1 [-Json] [-Help]" + exit 0 +} + +# Source common functions +. 
"$PSScriptRoot/common.ps1" + +# Get feature paths and validate branch +$paths = Get-FeaturePathsEnv + +# If feature.json pins an existing feature directory, branch naming is not required. +if (-not (Test-FeatureJsonMatchesFeatureDir -RepoRoot $paths.REPO_ROOT -ActiveFeatureDir $paths.FEATURE_DIR)) { + if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit $paths.HAS_GIT)) { + exit 1 + } +} + +if (-not (Test-Path $paths.IMPL_PLAN -PathType Leaf)) { + [Console]::Error.WriteLine("ERROR: plan.md not found in $($paths.FEATURE_DIR)") + [Console]::Error.WriteLine("Run /speckit.plan first to create the implementation plan.") + exit 1 +} + +if (-not (Test-Path $paths.FEATURE_SPEC -PathType Leaf)) { + [Console]::Error.WriteLine("ERROR: spec.md not found in $($paths.FEATURE_DIR)") + [Console]::Error.WriteLine("Run /speckit.specify first to create the feature structure.") + exit 1 +} + +# Build available docs list +$docs = @() +if (Test-Path $paths.RESEARCH) { $docs += 'research.md' } +if (Test-Path $paths.DATA_MODEL) { $docs += 'data-model.md' } +if ((Test-Path $paths.CONTRACTS_DIR) -and (Get-ChildItem -Path $paths.CONTRACTS_DIR -ErrorAction SilentlyContinue | Select-Object -First 1)) { + $docs += 'contracts/' +} +if (Test-Path $paths.QUICKSTART) { $docs += 'quickstart.md' } + +# Resolve tasks template through override stack +$tasksTemplate = Resolve-Template -TemplateName 'tasks-template' -RepoRoot $paths.REPO_ROOT +if (-not $tasksTemplate -or -not (Test-Path -LiteralPath $tasksTemplate -PathType Leaf)) { + $expectedCoreTemplate = Join-Path $paths.REPO_ROOT '.specify/templates/tasks-template.md' + [Console]::Error.WriteLine("ERROR: Tasks template not found for repository root: $($paths.REPO_ROOT)`nTemplate resolution order: overrides -> presets -> extensions -> core.`nExpected shared/core template location: $expectedCoreTemplate`nTo continue, verify whether 'tasks-template.md' is available in '.specify/templates/overrides/', preset templates, extension templates, 
or restore the shared/core templates (for example by re-running 'specify init') so that '.specify/templates/tasks-template.md' exists.") + exit 1 +} +$tasksTemplate = (Resolve-Path -LiteralPath $tasksTemplate).Path + +# Output results +if ($Json) { + [PSCustomObject]@{ + FEATURE_DIR = $paths.FEATURE_DIR + AVAILABLE_DOCS = $docs + TASKS_TEMPLATE = $tasksTemplate + } | ConvertTo-Json -Compress +} else { + Write-Output "FEATURE_DIR: $($paths.FEATURE_DIR)" + Write-Output "TASKS_TEMPLATE: $(if ($tasksTemplate) { $tasksTemplate } else { 'not found' })" + Write-Output "AVAILABLE_DOCS:" + Test-FileExists -Path $paths.RESEARCH -Description 'research.md' | Out-Null + Test-FileExists -Path $paths.DATA_MODEL -Description 'data-model.md' | Out-Null + Test-DirHasFiles -Path $paths.CONTRACTS_DIR -Description 'contracts/' | Out-Null + Test-FileExists -Path $paths.QUICKSTART -Description 'quickstart.md' | Out-Null +} diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 8039f79983..0e17a84fea 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -43,6 +43,7 @@ from packaging.version import InvalidVersion, Version from typing import Any, Optional +from specify_cli.paths import INIT_OPTIONS_FILE import typer from rich.console import Console @@ -54,6 +55,27 @@ from rich.tree import Tree from typer.core import TyperGroup +from .integration_runtime import ( + invoke_separator_for_integration as _invoke_separator_for_integration, + resolve_integration_options as _resolve_integration_options_impl, + with_integration_setting as _with_integration_setting, +) +from .integration_state import ( + INTEGRATION_JSON, + INTEGRATION_STATE_SCHEMA, + dedupe_integration_keys as _dedupe_integration_keys, + default_integration_key as _default_integration_key, + installed_integration_keys as _installed_integration_keys, + integration_setting as _integration_setting, + integration_settings as _integration_settings, + normalize_integration_state as 
_normalize_integration_state, + write_integration_json as _write_integration_json_file, +) +from .shared_infra import ( + install_shared_infra as _install_shared_infra_impl, + refresh_shared_templates as _refresh_shared_templates_impl, +) + # For cross-platform keyboard input import readchar @@ -643,6 +665,11 @@ def _locate_core_pack() -> Path | None: return None +def _repo_root() -> Path: + """Return the source checkout root used for editable installs.""" + return Path(__file__).parent.parent.parent + + def _locate_bundled_extension(extension_id: str) -> Path | None: """Return the path to a bundled extension, or None. @@ -660,8 +687,7 @@ def _locate_bundled_extension(extension_id: str) -> Path | None: return candidate # Source-checkout / editable install: look relative to repo root - repo_root = Path(__file__).parent.parent.parent - candidate = repo_root / "extensions" / extension_id + candidate = _repo_root() / "extensions" / extension_id if (candidate / "extension.yml").is_file(): return candidate @@ -685,8 +711,7 @@ def _locate_bundled_workflow(workflow_id: str) -> Path | None: return candidate # Source-checkout / editable install: look relative to repo root - repo_root = Path(__file__).parent.parent.parent - candidate = repo_root / "workflows" / workflow_id + candidate = _repo_root() / "workflows" / workflow_id if (candidate / "workflow.yml").is_file(): return candidate @@ -710,14 +735,31 @@ def _locate_bundled_preset(preset_id: str) -> Path | None: return candidate # Source-checkout / editable install: look relative to repo root - repo_root = Path(__file__).parent.parent.parent - candidate = repo_root / "presets" / preset_id + candidate = _repo_root() / "presets" / preset_id if (candidate / "preset.yml").is_file(): return candidate return None +def _refresh_shared_templates( + project_path: Path, + *, + invoke_separator: str, + force: bool = False, +) -> None: + """Refresh default-sensitive shared templates without touching scripts.""" + 
_refresh_shared_templates_impl( + project_path, + version=get_speckit_version(), + core_pack=_locate_core_pack(), + repo_root=_repo_root(), + console=console, + invoke_separator=invoke_separator, + force=force, + ) + + def _install_shared_infra( project_path: Path, script_type: str, @@ -741,79 +783,36 @@ def _install_shared_infra( Returns ``True`` on success. """ - from .integrations.base import IntegrationBase - from .integrations.manifest import IntegrationManifest - - core = _locate_core_pack() - manifest = IntegrationManifest("speckit", project_path, version=get_speckit_version()) + return _install_shared_infra_impl( + project_path, + script_type, + version=get_speckit_version(), + core_pack=_locate_core_pack(), + repo_root=_repo_root(), + console=console, + force=force, + invoke_separator=invoke_separator, + ) - # Scripts - if core and (core / "scripts").is_dir(): - scripts_src = core / "scripts" - else: - repo_root = Path(__file__).parent.parent.parent - scripts_src = repo_root / "scripts" - - skipped_files: list[str] = [] - - if scripts_src.is_dir(): - dest_scripts = project_path / ".specify" / "scripts" - dest_scripts.mkdir(parents=True, exist_ok=True) - variant_dir = "bash" if script_type == "sh" else "powershell" - variant_src = scripts_src / variant_dir - if variant_src.is_dir(): - dest_variant = dest_scripts / variant_dir - dest_variant.mkdir(parents=True, exist_ok=True) - for src_path in variant_src.rglob("*"): - if src_path.is_file(): - rel_path = src_path.relative_to(variant_src) - dst_path = dest_variant / rel_path - if dst_path.exists() and not force: - skipped_files.append(str(dst_path.relative_to(project_path))) - else: - dst_path.parent.mkdir(parents=True, exist_ok=True) - shutil.copy2(src_path, dst_path) - rel = dst_path.relative_to(project_path).as_posix() - manifest.record_existing(rel) - - # Page templates (not command templates, not vscode-settings.json) - if core and (core / "templates").is_dir(): - templates_src = core / "templates" - 
else: - repo_root = Path(__file__).parent.parent.parent - templates_src = repo_root / "templates" - - if templates_src.is_dir(): - dest_templates = project_path / ".specify" / "templates" - dest_templates.mkdir(parents=True, exist_ok=True) - for f in templates_src.iterdir(): - if f.is_file() and f.name != "vscode-settings.json" and not f.name.startswith("."): - dst = dest_templates / f.name - if dst.exists() and not force: - skipped_files.append(str(dst.relative_to(project_path))) - else: - content = f.read_text(encoding="utf-8") - content = IntegrationBase.resolve_command_refs( - content, invoke_separator - ) - dst.write_text(content, encoding="utf-8") - rel = dst.relative_to(project_path).as_posix() - manifest.record_existing(rel) - if skipped_files: - console.print( - f"[yellow]⚠[/yellow] {len(skipped_files)} shared infrastructure file(s) already exist and were not updated:" - ) - for f in skipped_files: - console.print(f" {f}") - console.print( - "To refresh shared infrastructure, run " - "[cyan]specify init --here --force[/cyan] or " - "[cyan]specify integration upgrade --force[/cyan]." 
+def _install_shared_infra_or_exit( + project_path: Path, + script_type: str, + tracker: StepTracker | None = None, + force: bool = False, + invoke_separator: str = ".", +) -> bool: + try: + return _install_shared_infra( + project_path, + script_type, + tracker=tracker, + force=force, + invoke_separator=invoke_separator, ) - - manifest.save() - return True + except (ValueError, OSError) as exc: + console.print(f"[red]Error:[/red] Failed to install shared infrastructure: {exc}") + raise typer.Exit(1) def ensure_executable_scripts(project_path: Path, tracker: StepTracker | None = None) -> None: @@ -855,7 +854,7 @@ def ensure_executable_scripts(project_path: Path, tracker: StepTracker | None = os.chmod(script, new_mode) updated += 1 except Exception as e: - failures.append(f"{script.relative_to(project_path)}: {e}") + failures.append(f"{_display_project_path(project_path, script)}: {e}") if tracker: detail = f"{updated} updated" + (f", {len(failures)} failed" if failures else "") tracker.add("chmod", "Set script permissions recursively") @@ -903,10 +902,6 @@ def ensure_constitution_from_template(project_path: Path, tracker: StepTracker | else: console.print(f"[yellow]Warning: Could not initialize constitution: {e}[/yellow]") - -INIT_OPTIONS_FILE = ".specify/init-options.json" - - def save_init_options(project_path: Path, options: dict[str, Any]) -> None: """Persist the CLI options used during ``specify init``. 
@@ -1266,6 +1261,8 @@ def init( ]: tracker.add(key, label) + git_default_notice = False + with Live(tracker.render(), console=console, refresh_per_second=8, transient=True) as live: tracker.attach_refresh(lambda: live.update(tracker.render())) try: @@ -1298,20 +1295,32 @@ def init( raw_options=integration_options, ) manifest.save() - - # Write .specify/integration.json - integration_json = project_path / ".specify" / "integration.json" - integration_json.parent.mkdir(parents=True, exist_ok=True) - integration_json.write_text(json.dumps({ - "integration": resolved_integration.key, - "version": get_speckit_version(), - }, indent=2) + "\n", encoding="utf-8") + integration_settings = _with_integration_setting( + {}, + resolved_integration.key, + resolved_integration, + script_type=selected_script, + raw_options=integration_options, + parsed_options=integration_parsed_options or None, + ) + _write_integration_json( + project_path, + resolved_integration.key, + [resolved_integration.key], + integration_settings, + ) tracker.complete("integration", resolved_integration.config.get("name", resolved_integration.key)) # Install shared infrastructure (scripts, templates) tracker.start("shared-infra") - _install_shared_infra(project_path, selected_script, tracker=tracker, force=force, invoke_separator=resolved_integration.effective_invoke_separator(integration_parsed_options)) + _install_shared_infra_or_exit( + project_path, + selected_script, + tracker=tracker, + force=force, + invoke_separator=resolved_integration.effective_invoke_separator(integration_parsed_options), + ) tracker.complete("shared-infra", f"scripts ({selected_script}) + templates") ensure_constitution_from_template(project_path, tracker=tracker) @@ -1349,6 +1358,7 @@ def init( manager.install_from_directory( bundled_path, get_speckit_version() ) + git_default_notice = True git_messages.append("extension installed") else: git_has_error = True @@ -1519,6 +1529,18 @@ def init( console.print() 
console.print(deprecation_notice) + if git_default_notice: + default_change_notice = Panel( + "The git extension is currently enabled by default during [bold]specify init[/bold].\n" + "Starting in [bold]v0.10.0[/bold], this will require explicit opt-in.\n" + "Use [bold]specify extension add git[/bold] after init when needed.", + title="[yellow]Notice: Git Default Changing[/yellow]", + border_style="yellow", + padding=(1, 2), + ) + console.print() + console.print(default_change_notice) + steps_lines = [] if not here: steps_lines.append(f"1. Go to the project folder: [cyan]cd {project_name}[/cyan]") @@ -1869,7 +1891,7 @@ def get_speckit_version() -> str: # Fallback: try reading from pyproject.toml try: import tomllib - pyproject_path = Path(__file__).parent.parent.parent / "pyproject.toml" + pyproject_path = _repo_root() / "pyproject.toml" if pyproject_path.exists(): with open(pyproject_path, "rb") as f: data = tomllib.load(f) @@ -1896,13 +1918,8 @@ def get_speckit_version() -> str: add_completion=False, ) integration_app.add_typer(integration_catalog_app, name="catalog") - - -INTEGRATION_JSON = ".specify/integration.json" - - def _read_integration_json(project_root: Path) -> dict[str, Any]: - """Load ``.specify/integration.json``. Returns ``{}`` when missing.""" + """Load ``.specify/integration.json``. 
Returns normalized state when present.""" path = project_root / INTEGRATION_JSON if not path.exists(): return {} @@ -1922,20 +1939,42 @@ def _read_integration_json(project_root: Path) -> dict[str, Any]: console.print(f"[red]Error:[/red] {path} must contain a JSON object, got {type(data).__name__}.") console.print(f"Please fix or delete {INTEGRATION_JSON} and retry.") raise typer.Exit(1) - return data + schema = data.get("integration_state_schema") + if isinstance(schema, int) and not isinstance(schema, bool) and schema > INTEGRATION_STATE_SCHEMA: + console.print( + f"[red]Error:[/red] {path} uses integration state schema {schema}, " + f"but this CLI only supports schema {INTEGRATION_STATE_SCHEMA}." + ) + console.print("Please upgrade Spec Kit before modifying integrations.") + raise typer.Exit(1) + return _normalize_integration_state(data) def _write_integration_json( project_root: Path, - integration_key: str, + integration_key: str | None, + installed_integrations: list[str] | None = None, + integration_settings: dict[str, dict[str, Any]] | None = None, ) -> None: - """Write ``.specify/integration.json`` for *integration_key*.""" - dest = project_root / INTEGRATION_JSON - dest.parent.mkdir(parents=True, exist_ok=True) - dest.write_text(json.dumps({ - "integration": integration_key, - "version": get_speckit_version(), - }, indent=2) + "\n", encoding="utf-8") + """Write ``.specify/integration.json`` with legacy-compatible state.""" + _write_integration_json_file( + project_root, + version=get_speckit_version(), + integration_key=integration_key, + installed_integrations=installed_integrations, + settings=integration_settings, + ) + + +def _clear_init_options_for_integration(project_root: Path, integration_key: str) -> None: + """Clear active integration keys from init-options.json when they match.""" + opts = load_init_options(project_root) + if opts.get("integration") == integration_key or opts.get("ai") == integration_key: + opts.pop("integration", None) + 
opts.pop("ai", None) + opts.pop("ai_skills", None) + opts.pop("context_file", None) + save_init_options(project_root, opts) def _remove_integration_json(project_root: Path) -> None: @@ -1945,6 +1984,13 @@ def _remove_integration_json(project_root: Path) -> None: path.unlink() +_MANIFEST_READ_ERRORS = (ValueError, FileNotFoundError, OSError, UnicodeDecodeError) + + +class _SharedTemplateRefreshError(RuntimeError): + """Raised when default integration metadata should not be persisted.""" + + def _normalize_script_type(script_type: str, source: str) -> str: """Normalize and validate a script type from CLI/config sources.""" normalized = script_type.strip().lower() @@ -1968,6 +2014,112 @@ def _resolve_script_type(project_root: Path, script_type: str | None) -> str: return "ps" if os.name == "nt" else "sh" +def _resolve_integration_script_type( + project_root: Path, + state: dict[str, Any], + key: str, + script_type: str | None = None, +) -> str: + """Resolve script type for an integration, preferring stored settings.""" + if script_type: + return _normalize_script_type(script_type, "--script") + + stored = _integration_setting(state, key).get("script") + if isinstance(stored, str) and stored.strip(): + return _normalize_script_type(stored, f"{INTEGRATION_JSON} integration_settings.{key}.script") + + return _resolve_script_type(project_root, None) + + +def _resolve_integration_options( + integration: Any, + state: dict[str, Any], + key: str, + raw_options: str | None, +) -> tuple[str | None, dict[str, Any] | None]: + """Resolve raw and parsed options for an integration operation.""" + return _resolve_integration_options_impl( + integration, + state, + key, + raw_options, + parse_options=_parse_integration_options, + ) + + +def _set_default_integration( + project_root: Path, + state: dict[str, Any], + key: str, + integration: Any, + installed_keys: list[str], + *, + script_type: str | None = None, + raw_options: str | None = None, + parsed_options: dict[str, Any] | None 
= None, + refresh_templates: bool = True, + refresh_templates_force: bool = False, +) -> None: + """Persist *key* as default and align active runtime metadata.""" + resolved_script = _resolve_integration_script_type(project_root, state, key, script_type) + settings = _with_integration_setting( + state, + key, + integration, + script_type=resolved_script, + raw_options=raw_options, + parsed_options=parsed_options, + ) + + if refresh_templates: + try: + _refresh_shared_templates( + project_root, + invoke_separator=_invoke_separator_for_integration( + integration, {"integration_settings": settings}, key, parsed_options + ), + force=refresh_templates_force, + ) + except (ValueError, OSError) as exc: + raise _SharedTemplateRefreshError( + f"Failed to refresh shared templates for '{key}': {exc}" + ) from exc + + _write_integration_json(project_root, key, installed_keys, settings) + _update_init_options_for_integration(project_root, integration, script_type=resolved_script) + + +def _set_default_integration_or_exit(*args: Any, **kwargs: Any) -> None: + try: + _set_default_integration(*args, **kwargs) + except _SharedTemplateRefreshError as exc: + console.print(f"[red]Error:[/red] {exc}") + raise typer.Exit(1) + + +def _display_project_path(project_root: Path, path: str | Path) -> str: + """Return a stable POSIX-style display path for paths under a project.""" + path_obj = Path(path) + try: + rel_path = path_obj.relative_to(project_root) if path_obj.is_absolute() else path_obj + except ValueError: + try: + rel_path = path_obj.resolve().relative_to(project_root.resolve()) + except (OSError, ValueError): + return path_obj.as_posix() + return rel_path.as_posix() + + +def _require_specify_project() -> Path: + """Return the current project root if it is a spec-kit project, else exit.""" + project_root = Path.cwd() + if (project_root / ".specify").is_dir(): + return project_root + console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") + 
console.print("Run this command from a spec-kit project root") + raise typer.Exit(1) + + @integration_app.command("list") def integration_list( catalog: bool = typer.Option(False, "--catalog", help="Browse full catalog (built-in + community)"), @@ -1975,16 +2127,10 @@ def integration_list( """List available integrations and installed status.""" from .integrations import INTEGRATION_REGISTRY - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() current = _read_integration_json(project_root) - installed_key = current.get("integration") + default_key = _default_integration_key(current) + installed_keys = set(_installed_integration_keys(current)) if catalog: from .integrations.catalog import IntegrationCatalog, IntegrationCatalogError @@ -2006,12 +2152,15 @@ def integration_list( table.add_column("Version") table.add_column("Source") table.add_column("Status") + table.add_column("Multi-install Safe") for entry in sorted(entries, key=lambda e: e["id"]): eid = entry["id"] cat_name = entry.get("_catalog_name", "") install_allowed = entry.get("_install_allowed", True) - if eid == installed_key: + if eid == default_key: + status = "[green]installed (default)[/green]" + elif eid in installed_keys: status = "[green]installed[/green]" elif eid in INTEGRATION_REGISTRY: status = "built-in" @@ -2019,12 +2168,16 @@ def integration_list( status = "discovery-only" else: status = "" + safe = "" + if eid in INTEGRATION_REGISTRY: + safe = "yes" if getattr(INTEGRATION_REGISTRY[eid], "multi_install_safe", False) else "no" table.add_row( eid, entry.get("name", eid), entry.get("version", ""), cat_name, status, + safe, ) console.print(table) @@ -2035,6 +2188,7 @@ def integration_list( table.add_column("Name") 
table.add_column("Status") table.add_column("CLI Required") + table.add_column("Multi-install Safe") for key in sorted(INTEGRATION_REGISTRY.keys()): integration = INTEGRATION_REGISTRY[key] @@ -2042,18 +2196,22 @@ def integration_list( name = cfg.get("name", key) requires_cli = cfg.get("requires_cli", False) - if key == installed_key: + if key == default_key: + status = "[green]installed (default)[/green]" + elif key in installed_keys: status = "[green]installed[/green]" else: status = "" cli_req = "yes" if requires_cli else "no (IDE)" - table.add_row(key, name, status, cli_req) + safe = "yes" if getattr(integration, "multi_install_safe", False) else "no" + table.add_row(key, name, status, cli_req, safe) console.print(table) - if installed_key: - console.print(f"\n[dim]Current integration:[/dim] [cyan]{installed_key}[/cyan]") + if installed_keys: + console.print(f"\n[dim]Default integration:[/dim] [cyan]{default_key or 'none'}[/cyan]") + console.print(f"[dim]Installed integrations:[/dim] [cyan]{', '.join(sorted(installed_keys))}[/cyan]") else: console.print("\n[yellow]No integration currently installed.[/yellow]") console.print("Install one with: [cyan]specify integration install [/cyan]") @@ -2063,20 +2221,14 @@ def integration_list( def integration_install( key: str = typer.Argument(help="Integration key to install (e.g. claude, copilot)"), script: str | None = typer.Option(None, "--script", help="Script type: sh or ps (default: from init-options.json or platform default)"), + force: bool = typer.Option(False, "--force", help="Allow multi-install when integrations are not declared safe"), integration_options: str | None = typer.Option(None, "--integration-options", help='Options for the integration (e.g. 
--integration-options="--commands-dir .myagent/cmds")'), ): """Install an integration into an existing project.""" from .integrations import INTEGRATION_REGISTRY, get_integration from .integrations.manifest import IntegrationManifest - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() integration = get_integration(key) if integration is None: console.print(f"[red]Error:[/red] Unknown integration '{key}'") @@ -2085,30 +2237,68 @@ def integration_install( raise typer.Exit(1) current = _read_integration_json(project_root) - installed_key = current.get("integration") + default_key = _default_integration_key(current) + installed_keys = _installed_integration_keys(current) - if installed_key and installed_key == key: + if key in installed_keys: console.print(f"[yellow]Integration '{key}' is already installed.[/yellow]") - console.print("Run [cyan]specify integration uninstall[/cyan] first, then reinstall.") + console.print( + f"Run [cyan]specify integration upgrade {key}[/cyan] to reinstall managed files, " + f"or [cyan]specify integration uninstall {key}[/cyan] first." 
+ ) raise typer.Exit(0) - if installed_key: - console.print(f"[red]Error:[/red] Integration '{installed_key}' is already installed.") - console.print(f"Run [cyan]specify integration uninstall[/cyan] first, or use [cyan]specify integration switch {key}[/cyan].") - raise typer.Exit(1) + if installed_keys and not force: + unsafe_keys = [] + for installed_key in installed_keys: + installed_integration = get_integration(installed_key) + if not installed_integration or not getattr(installed_integration, "multi_install_safe", False): + unsafe_keys.append(installed_key) + if unsafe_keys or not getattr(integration, "multi_install_safe", False): + console.print( + f"[red]Error:[/red] Installed integrations: {', '.join(installed_keys)}." + ) + if default_key: + console.print(f"Default integration: [cyan]{default_key}[/cyan].") + console.print( + "Installing multiple integrations is only automatic when all involved " + "integrations are declared multi-install safe." + ) + console.print( + f"Run [cyan]specify integration switch {key}[/cyan] to replace the default " + f"integration, or retry with [cyan]--force[/cyan] to opt in." + ) + raise typer.Exit(1) selected_script = _resolve_script_type(project_root, script) # Build parsed options from --integration-options so the integration # can determine its effective invoke separator before shared infra # is installed. - parsed_options: dict[str, Any] | None = None - if integration_options: - parsed_options = _parse_integration_options(integration, integration_options) + raw_options, parsed_options = _resolve_integration_options( + integration, current, key, integration_options + ) # Ensure shared infrastructure is present (safe to run unconditionally; # _install_shared_infra merges missing files without overwriting). 
- _install_shared_infra(project_root, selected_script, invoke_separator=integration.effective_invoke_separator(parsed_options)) + infra_integration = integration + infra_key = key + infra_parsed = parsed_options + if default_key: + default_integration = get_integration(default_key) + if default_integration is not None: + infra_integration = default_integration + infra_key = default_key + _, infra_parsed = _resolve_integration_options( + default_integration, current, default_key, None + ) + _install_shared_infra_or_exit( + project_root, + selected_script, + invoke_separator=_invoke_separator_for_integration( + infra_integration, current, infra_key, infra_parsed + ), + ) if os.name != "nt": ensure_executable_scripts(project_root) @@ -2121,11 +2311,22 @@ def integration_install( project_root, manifest, parsed_options=parsed_options, script_type=selected_script, - raw_options=integration_options, + raw_options=raw_options, ) manifest.save() - _write_integration_json(project_root, integration.key) - _update_init_options_for_integration(project_root, integration, script_type=selected_script) + new_installed = _dedupe_integration_keys([*installed_keys, integration.key]) + new_default = default_key or integration.key + settings = _with_integration_setting( + current, + integration.key, + integration, + script_type=selected_script, + raw_options=raw_options, + parsed_options=parsed_options, + ) + _write_integration_json(project_root, new_default, new_installed, settings) + if new_default == integration.key: + _update_init_options_for_integration(project_root, integration, script_type=selected_script) except Exception as e: # Attempt rollback of any files written by setup @@ -2134,12 +2335,19 @@ def integration_install( except Exception as rollback_err: # Suppress so the original setup error remains the primary failure console.print(f"[yellow]Warning:[/yellow] Failed to roll back integration changes: {rollback_err}") - _remove_integration_json(project_root) + if 
installed_keys: + _write_integration_json( + project_root, default_key, installed_keys, _integration_settings(current) + ) + else: + _remove_integration_json(project_root) console.print(f"[red]Error:[/red] Failed to install integration: {e}") raise typer.Exit(1) name = (integration.config or {}).get("name", key) console.print(f"\n[green]✓[/green] Integration '{name}' installed successfully") + if default_key: + console.print(f"[dim]Default integration remains:[/dim] [cyan]{default_key}[/cyan]") def _parse_integration_options(integration: Any, raw_options: str) -> dict[str, Any] | None: @@ -2211,6 +2419,44 @@ def _update_init_options_for_integration( save_init_options(project_root, opts) +@integration_app.command("use") +def integration_use( + key: str = typer.Argument(help="Installed integration key to make the default"), + force: bool = typer.Option(False, "--force", help="Overwrite managed shared templates while changing the default"), +): + """Set the default integration without uninstalling other integrations.""" + from .integrations import get_integration + + project_root = _require_specify_project() + current = _read_integration_json(project_root) + installed_keys = _installed_integration_keys(current) + if key not in installed_keys: + console.print(f"[red]Error:[/red] Integration '{key}' is not installed.") + if installed_keys: + console.print(f"[yellow]Installed integrations:[/yellow] {', '.join(installed_keys)}") + else: + console.print("Install one with: [cyan]specify integration install [/cyan]") + raise typer.Exit(1) + + integration = get_integration(key) + if integration is None: + console.print(f"[red]Error:[/red] Unknown integration '{key}'") + raise typer.Exit(1) + + raw_options, parsed_options = _resolve_integration_options(integration, current, key, None) + _set_default_integration_or_exit( + project_root, + current, + key, + integration, + installed_keys, + raw_options=raw_options, + parsed_options=parsed_options, + refresh_templates_force=force, 
+ ) + console.print(f"[green]✓[/green] Default integration set to [bold]{key}[/bold].") + + @integration_app.command("uninstall") def integration_uninstall( key: str = typer.Argument(None, help="Integration key to uninstall (default: current integration)"), @@ -2220,25 +2466,19 @@ def integration_uninstall( from .integrations import get_integration from .integrations.manifest import IntegrationManifest - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() current = _read_integration_json(project_root) - installed_key = current.get("integration") + default_key = _default_integration_key(current) + installed_keys = _installed_integration_keys(current) if key is None: - if not installed_key: + if not default_key: console.print("[yellow]No integration is currently installed.[/yellow]") raise typer.Exit(0) - key = installed_key + key = default_key - if installed_key and installed_key != key: - console.print(f"[red]Error:[/red] Integration '{key}' is not the currently installed integration ('{installed_key}').") + if key not in installed_keys: + console.print(f"[red]Error:[/red] Integration '{key}' is not installed.") raise typer.Exit(1) integration = get_integration(key) @@ -2246,20 +2486,35 @@ def integration_uninstall( manifest_path = project_root / ".specify" / "integrations" / f"{key}.manifest.json" if not manifest_path.exists(): console.print(f"[yellow]No manifest found for integration '{key}'. 
Nothing to uninstall.[/yellow]") - _remove_integration_json(project_root) - # Clear integration-related keys from init-options.json - opts = load_init_options(project_root) - if opts.get("integration") == key or opts.get("ai") == key: - opts.pop("integration", None) - opts.pop("ai", None) - opts.pop("ai_skills", None) - opts.pop("context_file", None) - save_init_options(project_root, opts) + remaining = [installed for installed in installed_keys if installed != key] + new_default = default_key if default_key != key else (remaining[0] if remaining else None) + if remaining: + if default_key == key and new_default and (new_integration := get_integration(new_default)): + raw_options, parsed_options = _resolve_integration_options( + new_integration, current, new_default, None + ) + _set_default_integration_or_exit( + project_root, + current, + new_default, + new_integration, + remaining, + raw_options=raw_options, + parsed_options=parsed_options, + ) + else: + _write_integration_json( + project_root, new_default, remaining, _integration_settings(current) + ) + else: + _remove_integration_json(project_root) + if default_key == key: + _clear_init_options_for_integration(project_root, key) raise typer.Exit(0) try: manifest = IntegrationManifest.load(key, project_root) - except (ValueError, FileNotFoundError) as exc: + except _MANIFEST_READ_ERRORS as exc: console.print(f"[red]Error:[/red] Integration manifest for '{key}' is unreadable.") console.print(f"Manifest: {manifest_path}") console.print( @@ -2276,16 +2531,31 @@ def integration_uninstall( if integration: integration.remove_context_section(project_root) - _remove_integration_json(project_root) + remaining = [installed for installed in installed_keys if installed != key] + new_default = default_key if default_key != key else (remaining[0] if remaining else None) + if remaining: + if default_key == key and new_default and (new_integration := get_integration(new_default)): + raw_options, parsed_options = 
_resolve_integration_options( + new_integration, current, new_default, None + ) + _set_default_integration_or_exit( + project_root, + current, + new_default, + new_integration, + remaining, + raw_options=raw_options, + parsed_options=parsed_options, + ) + else: + _write_integration_json( + project_root, new_default, remaining, _integration_settings(current) + ) + else: + _remove_integration_json(project_root) - # Update init-options.json to clear the integration - opts = load_init_options(project_root) - if opts.get("integration") == key or opts.get("ai") == key: - opts.pop("integration", None) - opts.pop("ai", None) - opts.pop("ai_skills", None) - opts.pop("context_file", None) - save_init_options(project_root, opts) + if default_key == key: + _clear_init_options_for_integration(project_root, key) name = (integration.config or {}).get("name", key) if integration else key console.print(f"\n[green]✓[/green] Integration '{name}' uninstalled") @@ -2294,7 +2564,7 @@ def integration_uninstall( if skipped: console.print(f"\n[yellow]⚠[/yellow] {len(skipped)} modified file(s) were preserved:") for path in skipped: - rel = path.relative_to(project_root) if path.is_absolute() else path + rel = _display_project_path(project_root, path) console.print(f" {rel}") @@ -2309,14 +2579,7 @@ def integration_switch( from .integrations import INTEGRATION_REGISTRY, get_integration from .integrations.manifest import IntegrationManifest - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() target_integration = get_integration(target) if target_integration is None: console.print(f"[red]Error:[/red] Unknown integration '{target}'") @@ -2325,10 +2588,67 @@ def integration_switch( raise typer.Exit(1) current = 
_read_integration_json(project_root) - installed_key = current.get("integration") + installed_keys = _installed_integration_keys(current) + installed_key = _default_integration_key(current) if installed_key == target: - console.print(f"[yellow]Integration '{target}' is already installed. Nothing to switch.[/yellow]") + if integration_options is not None: + console.print( + "[red]Error:[/red] --integration-options cannot be used when switching " + "to an already installed integration." + ) + console.print( + f"Run [cyan]specify integration upgrade {target} --integration-options ...[/cyan] " + "to update managed files/options." + ) + raise typer.Exit(1) + if force: + raw_options, parsed_options = _resolve_integration_options( + target_integration, current, target, None + ) + _set_default_integration_or_exit( + project_root, + current, + target, + target_integration, + installed_keys, + raw_options=raw_options, + parsed_options=parsed_options, + refresh_templates_force=True, + ) + console.print( + f"\n[green]✓[/green] Default integration remains [bold]{target}[/bold]; " + "managed shared templates refreshed." + ) + raise typer.Exit(0) + console.print(f"[yellow]Integration '{target}' is already the default integration. Nothing to switch.[/yellow]") + raise typer.Exit(0) + + if target in installed_keys: + if integration_options is not None: + console.print( + "[red]Error:[/red] --integration-options cannot be used when switching " + "to an already installed integration." + ) + console.print( + f"Run [cyan]specify integration upgrade {target} --integration-options ...[/cyan] " + f"to update managed files/options, then [cyan]specify integration use {target}[/cyan]." 
+ ) + raise typer.Exit(1) + raw_options, parsed_options = _resolve_integration_options( + target_integration, current, target, None + ) + _set_default_integration_or_exit( + project_root, + current, + target, + target_integration, + installed_keys, + raw_options=raw_options, + parsed_options=parsed_options, + refresh_templates_force=force, + ) + console.print(f"\n[green]✓[/green] Default integration set to [bold]{target}[/bold].") raise typer.Exit(0) selected_script = _resolve_script_type(project_root, script) @@ -2342,7 +2662,7 @@ def integration_switch( console.print(f"Uninstalling current integration: [cyan]{installed_key}[/cyan]") try: old_manifest = IntegrationManifest.load(installed_key, project_root) - except (ValueError, FileNotFoundError) as exc: + except _MANIFEST_READ_ERRORS as exc: console.print(f"[red]Error:[/red] Could not read integration manifest for '{installed_key}': {manifest_path}") console.print(f"[dim]{exc}[/dim]") console.print( @@ -2366,7 +2686,7 @@ def integration_switch( console.print(f" Removed {len(removed)} file(s)") if skipped: console.print(f" [yellow]⚠[/yellow] {len(skipped)} modified file(s) preserved") - except (ValueError, FileNotFoundError) as exc: + except _MANIFEST_READ_ERRORS as exc: console.print(f"[yellow]Warning:[/yellow] Could not read manifest for '{installed_key}': {exc}") else: console.print(f"[red]Error:[/red] Integration '{installed_key}' is installed but has no manifest.") @@ -2376,25 +2696,62 @@ def integration_switch( ) raise typer.Exit(1) + # Unregister extension commands for the old agent so they don't + # remain as orphans in the old agent's directory. 
+ try: + from .extensions import ExtensionManager + + ext_mgr = ExtensionManager(project_root) + ext_mgr.unregister_agent_artifacts(installed_key) + except Exception as ext_err: + console.print( + f"[yellow]Warning:[/yellow] Could not clean up extension artifacts " + f"(commands, skills, registry entries) for '{installed_key}': {ext_err}" + ) + # Clear metadata so a failed Phase 2 doesn't leave stale references - _remove_integration_json(project_root) - opts = load_init_options(project_root) - opts.pop("integration", None) - opts.pop("ai", None) - opts.pop("ai_skills", None) - opts.pop("context_file", None) - save_init_options(project_root, opts) + installed_keys = [installed for installed in installed_keys if installed != installed_key] + _clear_init_options_for_integration(project_root, installed_key) + if installed_keys: + fallback_key = installed_keys[0] + fallback_integration = get_integration(fallback_key) + if fallback_integration is not None: + raw_options, parsed_options = _resolve_integration_options( + fallback_integration, current, fallback_key, None + ) + _set_default_integration_or_exit( + project_root, + current, + fallback_key, + fallback_integration, + installed_keys, + raw_options=raw_options, + parsed_options=parsed_options, + ) + else: + _write_integration_json( + project_root, fallback_key, installed_keys, _integration_settings(current) + ) + else: + _remove_integration_json(project_root) + current = _read_integration_json(project_root) # Build parsed options from --integration-options so the integration # can determine its effective invoke separator before shared infra # is installed. 
- parsed_options: dict[str, Any] | None = None - if integration_options: - parsed_options = _parse_integration_options(target_integration, integration_options) + raw_options, parsed_options = _resolve_integration_options( + target_integration, current, target, integration_options + ) # Ensure shared infrastructure is present (safe to run unconditionally; # _install_shared_infra merges missing files without overwriting). - _install_shared_infra(project_root, selected_script, invoke_separator=target_integration.effective_invoke_separator(parsed_options)) + _install_shared_infra_or_exit( + project_root, + selected_script, + invoke_separator=_invoke_separator_for_integration( + target_integration, current, target, parsed_options + ), + ) if os.name != "nt": ensure_executable_scripts(project_root) @@ -2409,11 +2766,32 @@ def integration_switch( project_root, manifest, parsed_options=parsed_options, script_type=selected_script, - raw_options=integration_options, + raw_options=raw_options, ) manifest.save() - _write_integration_json(project_root, target_integration.key) - _update_init_options_for_integration(project_root, target_integration, script_type=selected_script) + _set_default_integration( + project_root, + current, + target_integration.key, + target_integration, + _dedupe_integration_keys([*installed_keys, target_integration.key]), + script_type=selected_script, + raw_options=raw_options, + parsed_options=parsed_options, + ) + + # Re-register extension commands for the new agent so that + # previously-installed extensions are available in the new integration. 
+ try: + from .extensions import ExtensionManager + + ext_mgr = ExtensionManager(project_root) + ext_mgr.register_enabled_extensions_for_agent(target) + except Exception as ext_err: + console.print( + f"[yellow]Warning:[/yellow] Could not register extension commands, skills, " + f"or related artifacts for '{target}': {ext_err}" + ) except Exception as e: # Attempt rollback of any files written by setup @@ -2422,7 +2800,34 @@ def integration_switch( except Exception as rollback_err: # Suppress so the original setup error remains the primary failure console.print(f"[yellow]Warning:[/yellow] Failed to roll back integration '{target}': {rollback_err}") - _remove_integration_json(project_root) + if installed_keys: + fallback_key = installed_keys[0] + fallback_integration = get_integration(fallback_key) + if fallback_integration is not None: + raw_options, parsed_options = _resolve_integration_options( + fallback_integration, current, fallback_key, None + ) + try: + _set_default_integration( + project_root, + current, + fallback_key, + fallback_integration, + installed_keys, + raw_options=raw_options, + parsed_options=parsed_options, + ) + except _SharedTemplateRefreshError as restore_err: + console.print( + f"[yellow]Warning:[/yellow] Failed to restore default " + f"integration '{fallback_key}': {restore_err}" + ) + else: + _write_integration_json( + project_root, fallback_key, installed_keys, _integration_settings(current) + ) + else: + _remove_integration_json(project_root) console.print(f"[red]Error:[/red] Failed to install integration '{target}': {e}") raise typer.Exit(1) @@ -2445,16 +2850,10 @@ def integration_upgrade( from .integrations import get_integration from .integrations.manifest import IntegrationManifest - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - 
raise typer.Exit(1) - + project_root = _require_specify_project() current = _read_integration_json(project_root) - installed_key = current.get("integration") + installed_key = _default_integration_key(current) + installed_keys = _installed_integration_keys(current) if key is None: if not installed_key: @@ -2462,11 +2861,8 @@ def integration_upgrade( raise typer.Exit(0) key = installed_key - if installed_key and installed_key != key: - console.print( - f"[red]Error:[/red] Integration '{key}' is not the currently installed integration ('{installed_key}')." - ) - console.print(f"Use [cyan]specify integration switch {key}[/cyan] instead.") + if key not in installed_keys: + console.print(f"[red]Error:[/red] Integration '{key}' is not installed.") raise typer.Exit(1) integration = get_integration(key) @@ -2482,7 +2878,7 @@ def integration_upgrade( try: old_manifest = IntegrationManifest.load(key, project_root) - except (ValueError, FileNotFoundError) as exc: + except _MANIFEST_READ_ERRORS as exc: console.print(f"[red]Error:[/red] Integration manifest for '{key}' is unreadable: {exc}") raise typer.Exit(1) @@ -2495,17 +2891,35 @@ def integration_upgrade( console.print("\nUse [cyan]--force[/cyan] to overwrite modified files, or resolve manually.") raise typer.Exit(1) - selected_script = _resolve_script_type(project_root, script) + selected_script = _resolve_integration_script_type(project_root, current, key, script) # Build parsed options from --integration-options so the integration # can determine its effective invoke separator before shared infra # is installed. - parsed_options: dict[str, Any] | None = None - if integration_options: - parsed_options = _parse_integration_options(integration, integration_options) + raw_options, parsed_options = _resolve_integration_options( + integration, current, key, integration_options + ) # Ensure shared infrastructure is up to date; --force overwrites existing files. 
- _install_shared_infra(project_root, selected_script, force=force, invoke_separator=integration.effective_invoke_separator(parsed_options)) + infra_integration = integration + infra_key = key + infra_parsed = parsed_options + if installed_key and installed_key != key: + default_integration = get_integration(installed_key) + if default_integration is not None: + infra_integration = default_integration + infra_key = installed_key + _, infra_parsed = _resolve_integration_options( + default_integration, current, installed_key, None + ) + _install_shared_infra_or_exit( + project_root, + selected_script, + force=force, + invoke_separator=_invoke_separator_for_integration( + infra_integration, current, infra_key, infra_parsed + ), + ) if os.name != "nt": ensure_executable_scripts(project_root) @@ -2519,11 +2933,33 @@ def integration_upgrade( new_manifest, parsed_options=parsed_options, script_type=selected_script, - raw_options=integration_options, + raw_options=raw_options, + ) + settings = _with_integration_setting( + current, + key, + integration, + script_type=selected_script, + raw_options=raw_options, + parsed_options=parsed_options, ) + if installed_key == key: + try: + _refresh_shared_templates( + project_root, + invoke_separator=_invoke_separator_for_integration( + integration, {"integration_settings": settings}, key, parsed_options + ), + force=force, + ) + except (ValueError, OSError) as exc: + raise _SharedTemplateRefreshError( + f"Failed to refresh shared templates for '{key}': {exc}" + ) from exc new_manifest.save() - _write_integration_json(project_root, key) - _update_init_options_for_integration(project_root, integration, script_type=selected_script) + _write_integration_json(project_root, installed_key, installed_keys, settings) + if installed_key == key: + _update_init_options_for_integration(project_root, integration, script_type=selected_script) except Exception as exc: # Don't teardown — setup overwrites in-place, so teardown would # delete files 
that were working before the upgrade. Just report. @@ -2557,16 +2993,6 @@ def integration_upgrade( # not additive like extensions and presets. -def _require_specify_project() -> Path: - """Return the current project root if it is a spec-kit project, else exit.""" - project_root = Path.cwd() - if not (project_root / ".specify").exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - return project_root - - @integration_app.command("search") def integration_search( query: Optional[str] = typer.Argument(None, help="Search query (optional)"), @@ -2862,14 +3288,7 @@ def preset_list(): """List installed presets.""" from .presets import PresetManager - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = PresetManager(project_root) installed = manager.list_installed() @@ -2908,14 +3327,7 @@ def preset_add( PresetCompatibilityError, ) - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() # Validate priority if priority < 1: console.print("[red]Error:[/red] Priority must be a positive integer (1 or higher)") @@ -3029,14 +3441,7 @@ def preset_remove( """Remove an installed preset.""" from .presets import PresetManager - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - 
console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = PresetManager(project_root) if not manager.registry.is_installed(preset_id): @@ -3059,14 +3464,7 @@ def preset_search( """Search for presets in the catalog.""" from .presets import PresetCatalog, PresetError - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() catalog = PresetCatalog(project_root) try: @@ -3096,14 +3494,7 @@ def preset_resolve( """Show which template will be resolved for a given name.""" from .presets import PresetResolver - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() resolver = PresetResolver(project_root) layers = resolver.collect_all_layers(template_name) @@ -3167,14 +3558,7 @@ def preset_info( from .extensions import normalize_priority from .presets import PresetCatalog, PresetManager, PresetError - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() # Check if installed locally first manager = PresetManager(project_root) local_pack = manager.get_pack(preset_id) @@ -3241,15 +3625,7 @@ def preset_set_priority( """Set the resolution priority of an installed preset.""" from .presets import PresetManager - 
project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() # Validate priority if priority < 1: console.print("[red]Error:[/red] Priority must be a positive integer (1 or higher)") @@ -3292,15 +3668,7 @@ def preset_enable( """Enable a disabled preset.""" from .presets import PresetManager - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = PresetManager(project_root) # Check if preset is installed @@ -3333,15 +3701,7 @@ def preset_disable( """Disable a preset without removing it.""" from .presets import PresetManager - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = PresetManager(project_root) # Check if preset is installed @@ -3376,14 +3736,7 @@ def preset_catalog_list(): """List all active preset catalogs.""" from .presets import PresetCatalog, PresetValidationError - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - 
+ project_root = _require_specify_project() catalog = PresetCatalog(project_root) try: @@ -3416,7 +3769,7 @@ def preset_catalog_list(): except PresetValidationError: proj_loaded = False if proj_loaded: - console.print(f"[dim]Config: {config_path.relative_to(project_root)}[/dim]") + console.print(f"[dim]Config: {_display_project_path(project_root, config_path)}[/dim]") else: try: user_loaded = user_config_path.exists() and catalog._load_catalog_config(user_config_path) is not None @@ -3445,13 +3798,8 @@ def preset_catalog_add( """Add a catalog to .specify/preset-catalogs.yml.""" from .presets import PresetCatalog, PresetValidationError - project_root = Path.cwd() - + project_root = _require_specify_project() specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) # Validate URL tmp_catalog = PresetCatalog(project_root) @@ -3468,7 +3816,8 @@ def preset_catalog_add( try: config = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {} except Exception as e: - console.print(f"[red]Error:[/red] Failed to read {config_path}: {e}") + config_label = _display_project_path(project_root, config_path) + console.print(f"[red]Error:[/red] Failed to read {config_label}: {e}") raise typer.Exit(1) else: config = {} @@ -3500,7 +3849,7 @@ def preset_catalog_add( console.print(f"\n[green]✓[/green] Added catalog '[bold]{name}[/bold]' ({install_label})") console.print(f" URL: {url}") console.print(f" Priority: {priority}") - console.print(f"\nConfig saved to {config_path.relative_to(project_root)}") + console.print(f"\nConfig saved to {_display_project_path(project_root, config_path)}") @preset_catalog_app.command("remove") @@ -3508,13 +3857,8 @@ def preset_catalog_remove( name: str = typer.Argument(help="Catalog name to remove"), ): """Remove a catalog from .specify/preset-catalogs.yml.""" - 
project_root = Path.cwd() - + project_root = _require_specify_project() specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) config_path = specify_dir / "preset-catalogs.yml" if not config_path.exists(): @@ -3677,15 +4021,7 @@ def extension_list( """List installed extensions.""" from .extensions import ExtensionManager - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = ExtensionManager(project_root) installed = manager.list_installed() @@ -3718,14 +4054,7 @@ def catalog_list(): """List all active extension catalogs.""" from .extensions import ExtensionCatalog, ValidationError - project_root = Path.cwd() - - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() catalog = ExtensionCatalog(project_root) try: @@ -3758,7 +4087,7 @@ def catalog_list(): except ValidationError: proj_loaded = False if proj_loaded: - console.print(f"[dim]Config: {config_path.relative_to(project_root)}[/dim]") + console.print(f"[dim]Config: {_display_project_path(project_root, config_path)}[/dim]") else: try: user_loaded = user_config_path.exists() and catalog._load_catalog_config(user_config_path) is not None @@ -3787,13 +4116,8 @@ def catalog_add( """Add a catalog to .specify/extension-catalogs.yml.""" from .extensions import ExtensionCatalog, ValidationError 
- project_root = Path.cwd() - + project_root = _require_specify_project() specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) # Validate URL tmp_catalog = ExtensionCatalog(project_root) @@ -3810,7 +4134,8 @@ def catalog_add( try: config = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {} except Exception as e: - console.print(f"[red]Error:[/red] Failed to read {config_path}: {e}") + config_label = _display_project_path(project_root, config_path) + console.print(f"[red]Error:[/red] Failed to read {config_label}: {e}") raise typer.Exit(1) else: config = {} @@ -3842,7 +4167,7 @@ def catalog_add( console.print(f"\n[green]✓[/green] Added catalog '[bold]{name}[/bold]' ({install_label})") console.print(f" URL: {url}") console.print(f" Priority: {priority}") - console.print(f"\nConfig saved to {config_path.relative_to(project_root)}") + console.print(f"\nConfig saved to {_display_project_path(project_root, config_path)}") @catalog_app.command("remove") @@ -3850,13 +4175,8 @@ def catalog_remove( name: str = typer.Argument(help="Catalog name to remove"), ): """Remove a catalog from .specify/extension-catalogs.yml.""" - project_root = Path.cwd() - + project_root = _require_specify_project() specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) config_path = specify_dir / "extension-catalogs.yml" if not config_path.exists(): @@ -3898,15 +4218,7 @@ def extension_add( """Install an extension.""" from .extensions import ExtensionManager, ExtensionCatalog, ExtensionError, ValidationError, CompatibilityError, REINSTALL_COMMAND - project_root = Path.cwd() - - # Check if we're in a spec-kit project 
- specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() # Validate priority if priority < 1: console.print("[red]Error:[/red] Priority must be a positive integer (1 or higher)") @@ -4080,15 +4392,7 @@ def extension_remove( """Uninstall an extension.""" from .extensions import ExtensionManager - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = ExtensionManager(project_root) # Resolve extension ID from argument (handles ambiguous names) @@ -4156,15 +4460,7 @@ def extension_search( """Search for available extensions in catalog.""" from .extensions import ExtensionCatalog, ExtensionError - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() catalog = ExtensionCatalog(project_root) try: @@ -4240,15 +4536,7 @@ def extension_info( """Show detailed information about an extension.""" from .extensions import ExtensionCatalog, ExtensionManager, normalize_priority - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a 
spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() catalog = ExtensionCatalog(project_root) manager = ExtensionManager(project_root) installed = manager.list_installed() @@ -4442,15 +4730,7 @@ def extension_update( from packaging import version as pkg_version import shutil - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = ExtensionManager(project_root) catalog = ExtensionCatalog(project_root) speckit_version = get_speckit_version() @@ -4838,15 +5118,7 @@ def extension_enable( """Enable a disabled extension.""" from .extensions import ExtensionManager, HookExecutor - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = ExtensionManager(project_root) hook_executor = HookExecutor(project_root) @@ -4885,15 +5157,7 @@ def extension_disable( """Disable an extension without removing it.""" from .extensions import ExtensionManager, HookExecutor - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() manager = ExtensionManager(project_root) hook_executor = HookExecutor(project_root) @@ -4935,15 +5199,7 @@ 
def extension_set_priority( """Set the resolution priority of an installed extension.""" from .extensions import ExtensionManager - project_root = Path.cwd() - - # Check if we're in a spec-kit project - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - console.print("Run this command from a spec-kit project root") - raise typer.Exit(1) - + project_root = _require_specify_project() # Validate priority if priority < 1: console.print("[red]Error:[/red] Priority must be a positive integer (1 or higher)") @@ -5005,10 +5261,7 @@ def workflow_run( """Run a workflow from an installed ID or local YAML path.""" from .workflows.engine import WorkflowEngine - project_root = Path.cwd() - if not (project_root / ".specify").exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) + project_root = _require_specify_project() engine = WorkflowEngine(project_root) engine.on_step_start = lambda sid, label: console.print(f" \u25b8 [{sid}] {label} \u2026") @@ -5072,10 +5325,7 @@ def workflow_resume( """Resume a paused or failed workflow run.""" from .workflows.engine import WorkflowEngine - project_root = Path.cwd() - if not (project_root / ".specify").exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) + project_root = _require_specify_project() engine = WorkflowEngine(project_root) engine.on_step_start = lambda sid, label: console.print(f" \u25b8 [{sid}] {label} \u2026") @@ -5108,10 +5358,7 @@ def workflow_status( """Show workflow run status.""" from .workflows.engine import WorkflowEngine - project_root = Path.cwd() - if not (project_root / ".specify").exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) + project_root = _require_specify_project() engine = WorkflowEngine(project_root) if 
run_id: @@ -5170,12 +5417,7 @@ def workflow_list(): """List installed workflows.""" from .workflows.catalog import WorkflowRegistry - project_root = Path.cwd() - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) - + project_root = _require_specify_project() registry = WorkflowRegistry(project_root) installed = registry.list() @@ -5202,12 +5444,7 @@ def workflow_add( from .workflows.catalog import WorkflowCatalog, WorkflowRegistry, WorkflowCatalogError from .workflows.engine import WorkflowDefinition - project_root = Path.cwd() - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) - + project_root = _require_specify_project() registry = WorkflowRegistry(project_root) workflows_dir = project_root / ".specify" / "workflows" @@ -5438,12 +5675,7 @@ def workflow_remove( """Uninstall a workflow.""" from .workflows.catalog import WorkflowRegistry - project_root = Path.cwd() - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) - + project_root = _require_specify_project() registry = WorkflowRegistry(project_root) if not registry.is_installed(workflow_id): @@ -5468,10 +5700,7 @@ def workflow_search( """Search workflow catalogs.""" from .workflows.catalog import WorkflowCatalog, WorkflowCatalogError - project_root = Path.cwd() - if not (project_root / ".specify").exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) + project_root = _require_specify_project() catalog = WorkflowCatalog(project_root) try: @@ -5504,10 +5733,7 @@ def workflow_info( from .workflows.catalog import WorkflowCatalog, WorkflowRegistry, WorkflowCatalogError from 
.workflows.engine import WorkflowEngine - project_root = Path.cwd() - if not (project_root / ".specify").exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) + project_root = _require_specify_project() # Check installed first registry = WorkflowRegistry(project_root) @@ -5601,12 +5827,7 @@ def workflow_catalog_add( """Add a workflow catalog source.""" from .workflows.catalog import WorkflowCatalog, WorkflowValidationError - project_root = Path.cwd() - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) - + project_root = _require_specify_project() catalog = WorkflowCatalog(project_root) try: catalog.add_catalog(url, name) @@ -5624,12 +5845,7 @@ def workflow_catalog_remove( """Remove a workflow catalog source by index.""" from .workflows.catalog import WorkflowCatalog, WorkflowValidationError - project_root = Path.cwd() - specify_dir = project_root / ".specify" - if not specify_dir.exists(): - console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)") - raise typer.Exit(1) - + project_root = _require_specify_project() catalog = WorkflowCatalog(project_root) try: removed_name = catalog.remove_catalog(index) diff --git a/src/specify_cli/extensions.py b/src/specify_cli/extensions.py index 761b7f31e7..81687b4186 100644 --- a/src/specify_cli/extensions.py +++ b/src/specify_cli/extensions.py @@ -962,29 +962,40 @@ def _register_extension_skills( return written - def _unregister_extension_skills(self, skill_names: List[str], extension_id: str) -> None: + def _unregister_extension_skills( + self, + skill_names: List[str], + extension_id: str, + skills_dir: Optional[Path] = None, + ) -> None: """Remove SKILL.md directories for extension skills. Called during extension removal to clean up skill files that were created by ``_register_extension_skills()``. 
- If ``_get_skills_dir()`` returns ``None`` (e.g. the user removed - init-options.json or toggled ai_skills after installation), we - fall back to scanning all known agent skills directories so that - orphaned skill directories are still cleaned up. In that case - each candidate directory is verified against the SKILL.md - ``metadata.source`` field before removal to avoid accidentally - deleting user-created skills with the same name. + If *skills_dir* is not provided and ``_get_skills_dir()`` returns + ``None`` (e.g. the user removed init-options.json or toggled + ai_skills after installation), we fall back to scanning all known + agent skills directories so that orphaned skill directories are + still cleaned up. In that case each candidate directory is + verified against the SKILL.md ``metadata.source`` field before + removal to avoid accidentally deleting user-created skills with + the same name. Args: skill_names: List of skill names to remove. extension_id: Extension ID used to verify ownership during fallback candidate scanning. + skills_dir: Optional explicit skills directory to use instead + of resolving via ``_get_skills_dir()``. Useful when the + caller needs to target a specific agent's skills directory + regardless of the currently-active agent in init-options. """ if not skill_names: return - skills_dir = self._get_skills_dir() + if skills_dir is None: + skills_dir = self._get_skills_dir() if skills_dir: # Fast path: we know the exact skills directory @@ -1332,6 +1343,156 @@ def remove(self, extension_id: str, keep_config: bool = False) -> bool: return True + @staticmethod + def _valid_name_list(value: Any) -> List[str]: + """Return string entries from a registry list, ignoring corrupt values.""" + if not isinstance(value, list): + return [] + return [item for item in value if isinstance(item, str)] + + def unregister_agent_artifacts(self, agent_name: str) -> None: + """Remove extension files registered for a specific agent. 
+ + Extension command files are tracked per agent in ``registered_commands``. + Extension skills are scoped to the provided *agent_name*; they are removed + from that agent's skills directory (resolved via its integration config) + and the registry field is cleared. + + Skips cleanup when *agent_name* is not a supported agent to avoid + losing registry entries while leaving orphaned files on disk. + """ + if not agent_name: + return + + registrar = CommandRegistrar() + if agent_name not in registrar.AGENT_CONFIGS: + return + + # Resolve the skills directory for the specific agent so cleanup is + # agent-scoped and does not depend on the currently-active agent in + # init-options. Use the same helper that extension install uses. + from . import _get_skills_dir as resolve_skills_dir + + agent_skills_dir = resolve_skills_dir(self.project_root, agent_name) + + for ext_id, metadata in self.registry.list().items(): + updates: Dict[str, Any] = {} + + registered_commands = metadata.get("registered_commands", {}) + if isinstance(registered_commands, dict) and agent_name in registered_commands: + command_names = self._valid_name_list(registered_commands.get(agent_name)) + if command_names: + registrar.unregister_commands({agent_name: command_names}, self.project_root) + + new_registered = copy.deepcopy(registered_commands) + new_registered.pop(agent_name, None) + updates["registered_commands"] = new_registered + + registered_skills = self._valid_name_list(metadata.get("registered_skills", [])) + if registered_skills: + # Only pass the resolved skills_dir when it actually exists. + # Otherwise let _unregister_extension_skills fall back to + # scanning all known agent skills directories, which is useful + # for cleaning up stale entries created by earlier installs. 
+ skills_dir = agent_skills_dir if agent_skills_dir.is_dir() else None + self._unregister_extension_skills( + registered_skills, ext_id, skills_dir=skills_dir + ) + + # Only reconcile registry state when cleanup was scoped to a + # specific existing directory. When skills_dir is None, + # _unregister_extension_skills falls back to scanning multiple + # candidate directories, so agent_skills_dir cannot be used to + # infer what was removed. When skills_dir is set, + # _unregister_extension_skills may intentionally skip deletion + # when ownership cannot be verified (e.g., corrupted/missing + # SKILL.md or mismatching metadata.source). Only drop registry + # entries for skill directories that were actually removed so + # future cleanup attempts can still find skipped ones. + if skills_dir is not None: + remaining_skills = [ + skill_name + for skill_name in registered_skills + if (skills_dir / skill_name).is_dir() + ] + if remaining_skills != registered_skills: + updates["registered_skills"] = remaining_skills + + if updates: + self.registry.update(ext_id, updates) + + def register_enabled_extensions_for_agent(self, agent_name: str) -> None: + """Register installed, enabled extensions for ``agent_name``. + + This is intended to be called after switching integrations. Command + registration is scoped to the explicit ``agent_name`` argument, but some + behavior still depends on the current init-options state (for example, + skills-mode handling uses the active ``ai`` / ``ai_skills`` settings). + + Callers should therefore pass the agent that has just been made active + in init-options; in normal use, ``agent_name`` is expected to match the + current ``ai`` value. This mirrors extension install behavior while + avoiding stale default-mode command directories when that active agent + is running in skills mode (notably Copilot ``--skills``). + """ + if not agent_name: + return + + from . 
import load_init_options + + registrar = CommandRegistrar() + agent_config = registrar.AGENT_CONFIGS.get(agent_name) + init_options = load_init_options(self.project_root) + if not isinstance(init_options, dict): + init_options = {} + + active_agent = init_options.get("ai") + skills_mode_active = ( + active_agent == agent_name + and bool(init_options.get("ai_skills")) + and bool(agent_config) + and agent_config.get("extension") != "/SKILL.md" + ) + + for ext_id, metadata in self.registry.list().items(): + if not metadata.get("enabled", True): + continue + + manifest = self.get_extension(ext_id) + if manifest is None: + continue + + ext_dir = self.extensions_dir / ext_id + updates: Dict[str, Any] = {} + + if agent_config and not skills_mode_active: + registered = registrar.register_commands_for_agent( + agent_name, manifest, ext_dir, self.project_root + ) + registered_commands = metadata.get("registered_commands", {}) + if not isinstance(registered_commands, dict): + registered_commands = {} + new_registered = copy.deepcopy(registered_commands) + if registered: + new_registered[agent_name] = registered + else: + # Registration returned empty list (e.g., corrupted + # manifest pointing at missing command files). Clear + # stale entry so later cleanup doesn't try to remove + # files that were never written. + new_registered.pop(agent_name, None) + if new_registered != registered_commands: + updates["registered_commands"] = new_registered + + registered_skills = self._register_extension_skills(manifest, ext_dir) + if registered_skills: + existing_skills = self._valid_name_list(metadata.get("registered_skills", [])) + merged_skills = list(dict.fromkeys(existing_skills + registered_skills)) + updates["registered_skills"] = merged_skills + + if updates: + self.registry.update(ext_id, updates) + def list_installed(self) -> List[Dict[str, Any]]: """List all installed extensions with metadata. 
diff --git a/src/specify_cli/integration_runtime.py b/src/specify_cli/integration_runtime.py new file mode 100644 index 0000000000..a36dcc672c --- /dev/null +++ b/src/specify_cli/integration_runtime.py @@ -0,0 +1,90 @@ +"""Runtime helpers for integration commands.""" + +from __future__ import annotations + +from collections.abc import Callable +from typing import Any + +from .integration_state import integration_setting, integration_settings + + +ParseOptions = Callable[[Any, str], dict[str, Any] | None] + + +def resolve_integration_options( + integration: Any, + state: dict[str, Any], + key: str, + raw_options: str | None, + *, + parse_options: ParseOptions, +) -> tuple[str | None, dict[str, Any] | None]: + """Resolve raw and parsed options for an integration operation.""" + if raw_options is not None: + return raw_options, parse_options(integration, raw_options) + + setting = integration_setting(state, key) + stored_raw = setting.get("raw_options") + if not isinstance(stored_raw, str): + stored_raw = None + + stored_parsed = setting.get("parsed_options") + if isinstance(stored_parsed, dict): + return stored_raw, stored_parsed or None + + if stored_raw: + return stored_raw, parse_options(integration, stored_raw) + + return None, None + + +def with_integration_setting( + state: dict[str, Any], + key: str, + integration: Any, + *, + script_type: str | None = None, + raw_options: str | None = None, + parsed_options: dict[str, Any] | None = None, +) -> dict[str, dict[str, Any]]: + """Return integration settings with *key* updated.""" + settings = integration_settings(state) + current = dict(settings.get(key, {})) + + if script_type: + current["script"] = script_type + if raw_options is not None: + current["raw_options"] = raw_options + elif "raw_options" in current and not current.get("raw_options"): + current.pop("raw_options", None) + + if parsed_options is not None: + current["parsed_options"] = parsed_options + elif raw_options is not None: + 
current.pop("parsed_options", None) + + current["invoke_separator"] = integration.effective_invoke_separator(parsed_options) + settings[key] = current + return settings + + +def invoke_separator_for_integration( + integration: Any, + state: dict[str, Any], + key: str, + parsed_options: dict[str, Any] | None = None, +) -> str: + """Resolve the invocation separator for stored/default integration state.""" + if parsed_options is not None: + return integration.effective_invoke_separator(parsed_options) + + setting = integration_setting(state, key) + stored_separator = setting.get("invoke_separator") + if isinstance(stored_separator, str) and stored_separator: + return stored_separator + + stored_parsed = setting.get("parsed_options") + if isinstance(stored_parsed, dict): + return integration.effective_invoke_separator(stored_parsed) + + return integration.effective_invoke_separator(None) diff --git a/src/specify_cli/integration_state.py b/src/specify_cli/integration_state.py new file mode 100644 index 0000000000..ac892dfbf6 --- /dev/null +++ b/src/specify_cli/integration_state.py @@ -0,0 +1,161 @@ +"""State helpers for installed AI agent integrations.""" + +from __future__ import annotations + +import json +from pathlib import Path +from typing import Any + + +INTEGRATION_JSON = ".specify/integration.json" +INTEGRATION_STATE_SCHEMA = 1 + + +def clean_integration_key(key: Any) -> str | None: + """Return a stripped integration key, or None for empty/non-string values.""" + if not isinstance(key, str) or not key.strip(): + return None + return key.strip() + + +def dedupe_integration_keys(keys: list[Any]) -> list[str]: + """Return a de-duplicated list of non-empty integration keys.""" + seen: set[str] = set() + deduped: list[str] = [] + for key in keys: + clean = clean_integration_key(key) + if clean is None: + continue + if clean in seen: + continue + seen.add(clean) + deduped.append(clean) + return deduped + + +def normalize_integration_settings(settings: Any) -> 
dict[str, dict[str, Any]]: + """Return JSON-safe per-integration runtime settings.""" + if not isinstance(settings, dict): + return {} + + normalized: dict[str, dict[str, Any]] = {} + for key, value in settings.items(): + if not isinstance(key, str) or not key.strip() or not isinstance(value, dict): + continue + + clean: dict[str, Any] = {} + script = value.get("script") + if isinstance(script, str) and script.strip(): + clean["script"] = script.strip() + + raw_options = value.get("raw_options") + if isinstance(raw_options, str): + clean["raw_options"] = raw_options + + parsed_options = value.get("parsed_options") + if isinstance(parsed_options, dict): + clean["parsed_options"] = parsed_options + + invoke_separator = value.get("invoke_separator") + if isinstance(invoke_separator, str) and invoke_separator.strip(): + clean["invoke_separator"] = invoke_separator.strip() + + if clean: + normalized[key.strip()] = clean + + return normalized + + +def _normalized_integration_state_schema(value: Any) -> int: + if isinstance(value, int) and not isinstance(value, bool) and value > INTEGRATION_STATE_SCHEMA: + return value + return INTEGRATION_STATE_SCHEMA + + +def normalize_integration_state(data: dict[str, Any]) -> dict[str, Any]: + """Normalize legacy and multi-install integration metadata.""" + legacy_key = clean_integration_key(data.get("integration")) + default_key = clean_integration_key(data.get("default_integration")) or legacy_key + + installed = data.get("installed_integrations") + installed_keys = dedupe_integration_keys(installed if isinstance(installed, list) else []) + if not default_key and installed_keys: + default_key = installed_keys[0] + if default_key and default_key not in installed_keys: + installed_keys.insert(0, default_key) + + settings = normalize_integration_settings(data.get("integration_settings")) + + normalized = dict(data) + normalized["integration_state_schema"] = _normalized_integration_state_schema( + data.get("integration_state_schema") + 
) + if default_key: + normalized["integration"] = default_key + normalized["default_integration"] = default_key + else: + normalized.pop("integration", None) + normalized.pop("default_integration", None) + normalized["installed_integrations"] = installed_keys + normalized["integration_settings"] = { + key: settings[key] for key in installed_keys if key in settings + } + return normalized + + +def default_integration_key(state: dict[str, Any]) -> str | None: + """Return the default integration key from normalized state.""" + key = state.get("default_integration") or state.get("integration") + return clean_integration_key(key) + + +def installed_integration_keys(state: dict[str, Any]) -> list[str]: + """Return installed integration keys from normalized state.""" + return dedupe_integration_keys(state.get("installed_integrations", [])) + + +def integration_settings(state: dict[str, Any]) -> dict[str, dict[str, Any]]: + """Return normalized per-integration settings from state.""" + return normalize_integration_settings(state.get("integration_settings")) + + +def integration_setting(state: dict[str, Any], key: str) -> dict[str, Any]: + """Return stored runtime settings for *key*.""" + return dict(integration_settings(state).get(key, {})) + + +def write_integration_json( + project_root: Path, + *, + version: str, + integration_key: str | None, + installed_integrations: list[str] | None = None, + settings: dict[str, dict[str, Any]] | None = None, +) -> None: + """Write ``.specify/integration.json`` with legacy-compatible state.""" + dest = project_root / INTEGRATION_JSON + dest.parent.mkdir(parents=True, exist_ok=True) + + integration_key = clean_integration_key(integration_key) + installed = dedupe_integration_keys(installed_integrations or []) + if integration_key and integration_key not in installed: + installed.insert(0, integration_key) + if not integration_key and installed: + integration_key = installed[0] + + normalized_settings = 
normalize_integration_settings(settings or {}) + normalized_settings = { + key: normalized_settings[key] for key in installed if key in normalized_settings + } + + data: dict[str, Any] = { + "version": version, + "integration_state_schema": INTEGRATION_STATE_SCHEMA, + "installed_integrations": installed, + "integration_settings": normalized_settings, + } + if integration_key: + data["integration"] = integration_key + data["default_integration"] = integration_key + + dest.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8") diff --git a/src/specify_cli/integrations/auggie/__init__.py b/src/specify_cli/integrations/auggie/__init__.py index 9715e936ef..08e20fbc25 100644 --- a/src/specify_cli/integrations/auggie/__init__.py +++ b/src/specify_cli/integrations/auggie/__init__.py @@ -19,3 +19,4 @@ class AuggieIntegration(MarkdownIntegration): "extension": ".md", } context_file = ".augment/rules/specify-rules.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/base.py b/src/specify_cli/integrations/base.py index f3b74b0c05..c46340ddff 100644 --- a/src/specify_cli/integrations/base.py +++ b/src/specify_cli/integrations/base.py @@ -87,6 +87,14 @@ class IntegrationBase(ABC): invoke_separator: str = "." """Separator used in slash-command invocations (``"."`` → ``/speckit.plan``).""" + multi_install_safe: bool = False + """Whether this integration is declared safe to install alongside others. + + Safe integrations must use a static, unique agent root, command directory, + and context file. Registry tests enforce those invariants for every + integration that sets this flag. 
+ """ + # -- Markers for managed context section ------------------------------ CONTEXT_MARKER_START = "" diff --git a/src/specify_cli/integrations/claude/__init__.py b/src/specify_cli/integrations/claude/__init__.py index 3e39db717e..88aef85285 100644 --- a/src/specify_cli/integrations/claude/__init__.py +++ b/src/specify_cli/integrations/claude/__init__.py @@ -53,6 +53,7 @@ class ClaudeIntegration(SkillsIntegration): "extension": "/SKILL.md", } context_file = "CLAUDE.md" + multi_install_safe = True @staticmethod def inject_argument_hint(content: str, hint: str) -> str: diff --git a/src/specify_cli/integrations/codebuddy/__init__.py b/src/specify_cli/integrations/codebuddy/__init__.py index 061ac7641f..980ac7fed7 100644 --- a/src/specify_cli/integrations/codebuddy/__init__.py +++ b/src/specify_cli/integrations/codebuddy/__init__.py @@ -19,3 +19,4 @@ class CodebuddyIntegration(MarkdownIntegration): "extension": ".md", } context_file = "CODEBUDDY.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/codex/__init__.py b/src/specify_cli/integrations/codex/__init__.py index b3b509b654..1c24a84bd2 100644 --- a/src/specify_cli/integrations/codex/__init__.py +++ b/src/specify_cli/integrations/codex/__init__.py @@ -27,6 +27,7 @@ class CodexIntegration(SkillsIntegration): "extension": "/SKILL.md", } context_file = "AGENTS.md" + multi_install_safe = True def build_exec_args( self, diff --git a/src/specify_cli/integrations/cursor_agent/__init__.py b/src/specify_cli/integrations/cursor_agent/__init__.py index a5472654fa..70af454ce9 100644 --- a/src/specify_cli/integrations/cursor_agent/__init__.py +++ b/src/specify_cli/integrations/cursor_agent/__init__.py @@ -26,6 +26,7 @@ class CursorAgentIntegration(SkillsIntegration): } context_file = ".cursor/rules/specify-rules.mdc" + multi_install_safe = True @classmethod def options(cls) -> list[IntegrationOption]: diff --git a/src/specify_cli/integrations/gemini/__init__.py 
b/src/specify_cli/integrations/gemini/__init__.py index d66f0b80bc..7c6fe159c7 100644 --- a/src/specify_cli/integrations/gemini/__init__.py +++ b/src/specify_cli/integrations/gemini/__init__.py @@ -19,3 +19,4 @@ class GeminiIntegration(TomlIntegration): "extension": ".toml", } context_file = "GEMINI.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/iflow/__init__.py b/src/specify_cli/integrations/iflow/__init__.py index 4acc2cf372..65d4d21c63 100644 --- a/src/specify_cli/integrations/iflow/__init__.py +++ b/src/specify_cli/integrations/iflow/__init__.py @@ -19,3 +19,4 @@ class IflowIntegration(MarkdownIntegration): "extension": ".md", } context_file = "IFLOW.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/junie/__init__.py b/src/specify_cli/integrations/junie/__init__.py index 0cc3b3f0ff..98d0494a8a 100644 --- a/src/specify_cli/integrations/junie/__init__.py +++ b/src/specify_cli/integrations/junie/__init__.py @@ -19,3 +19,4 @@ class JunieIntegration(MarkdownIntegration): "extension": ".md", } context_file = ".junie/AGENTS.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/kilocode/__init__.py b/src/specify_cli/integrations/kilocode/__init__.py index ffd38f741a..11674dd9f1 100644 --- a/src/specify_cli/integrations/kilocode/__init__.py +++ b/src/specify_cli/integrations/kilocode/__init__.py @@ -19,3 +19,4 @@ class KilocodeIntegration(MarkdownIntegration): "extension": ".md", } context_file = ".kilocode/rules/specify-rules.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/kimi/__init__.py b/src/specify_cli/integrations/kimi/__init__.py index 5421d48012..3b257768e2 100644 --- a/src/specify_cli/integrations/kimi/__init__.py +++ b/src/specify_cli/integrations/kimi/__init__.py @@ -36,6 +36,7 @@ class KimiIntegration(SkillsIntegration): "extension": "/SKILL.md", } context_file = "KIMI.md" + multi_install_safe = True @classmethod def options(cls) -> list[IntegrationOption]: 
diff --git a/src/specify_cli/integrations/manifest.py b/src/specify_cli/integrations/manifest.py index 50ac08ea3d..258c536e5b 100644 --- a/src/specify_cli/integrations/manifest.py +++ b/src/specify_cli/integrations/manifest.py @@ -11,6 +11,7 @@ import hashlib import json import os +import tempfile from datetime import datetime, timezone from pathlib import Path from typing import Any @@ -47,6 +48,59 @@ def _validate_rel_path(rel: Path, root: Path) -> Path: return resolved +def _manifest_path_label(root: Path, path: Path) -> str: + try: + return path.relative_to(root).as_posix() + except ValueError: + return path.as_posix() + + +def _ensure_safe_manifest_directory(root: Path, directory: Path) -> None: + """Create a manifest directory without following symlinked parents.""" + root_resolved = root.resolve() + try: + rel = directory.relative_to(root) + except ValueError: + label = _manifest_path_label(root, directory) + raise ValueError(f"Integration manifest directory escapes project root: {label}") from None + + current = root + for part in rel.parts: + current = current / part + label = _manifest_path_label(root, current) + if current.is_symlink(): + raise ValueError(f"Refusing to use symlinked integration manifest directory: {label}") + if current.exists(): + if not current.is_dir(): + raise ValueError(f"Integration manifest directory path is not a directory: {label}") + try: + current.resolve().relative_to(root_resolved) + except (OSError, ValueError): + raise ValueError(f"Integration manifest directory escapes project root: {label}") from None + continue + current.mkdir() + try: + current.resolve().relative_to(root_resolved) + except (OSError, ValueError): + raise ValueError(f"Integration manifest directory escapes project root: {label}") from None + + +def _ensure_safe_manifest_destination(root: Path, path: Path) -> None: + """Refuse manifest writes that would escape the project or follow symlinks.""" + root_resolved = root.resolve() + 
_ensure_safe_manifest_directory(root, path.parent) + label = _manifest_path_label(root, path) + if path.is_symlink(): + raise ValueError(f"Refusing to overwrite symlinked integration manifest path: {label}") + if path.exists(): + if not path.is_file(): + raise ValueError(f"Integration manifest path is not a file: {label}") + try: + path.resolve().relative_to(root_resolved) + except (OSError, ValueError): + raise ValueError(f"Integration manifest path escapes project root: {label}") from None + + class IntegrationManifest: """Tracks files installed by a single integration. @@ -217,8 +271,19 @@ def save(self) -> Path: "files": self._files, } path = self.manifest_path - path.parent.mkdir(parents=True, exist_ok=True) - path.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8") + content = json.dumps(data, indent=2) + "\n" + _ensure_safe_manifest_destination(self.project_root, path) + fd, temp_name = tempfile.mkstemp(prefix=f".{path.name}.", dir=path.parent) + temp_path = Path(temp_name) + try: + with os.fdopen(fd, "w", encoding="utf-8") as fh: + fh.write(content) + temp_path.chmod(0o644) + _ensure_safe_manifest_destination(self.project_root, path) + os.replace(temp_path, path) + finally: + if temp_path.exists(): + temp_path.unlink() return path @classmethod diff --git a/src/specify_cli/integrations/qodercli/__init__.py b/src/specify_cli/integrations/qodercli/__init__.py index 541001be17..ee2d4b6255 100644 --- a/src/specify_cli/integrations/qodercli/__init__.py +++ b/src/specify_cli/integrations/qodercli/__init__.py @@ -19,3 +19,4 @@ class QodercliIntegration(MarkdownIntegration): "extension": ".md", } context_file = "QODER.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/qwen/__init__.py b/src/specify_cli/integrations/qwen/__init__.py index d9d930152c..2506a57681 100644 --- a/src/specify_cli/integrations/qwen/__init__.py +++ b/src/specify_cli/integrations/qwen/__init__.py @@ -19,3 +19,4 @@ class QwenIntegration(MarkdownIntegration): 
"extension": ".md", } context_file = "QWEN.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/roo/__init__.py b/src/specify_cli/integrations/roo/__init__.py index 3c680e7e35..f610a3cc63 100644 --- a/src/specify_cli/integrations/roo/__init__.py +++ b/src/specify_cli/integrations/roo/__init__.py @@ -19,3 +19,4 @@ class RooIntegration(MarkdownIntegration): "extension": ".md", } context_file = ".roo/rules/specify-rules.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/shai/__init__.py b/src/specify_cli/integrations/shai/__init__.py index 7a9d1deb02..123953da72 100644 --- a/src/specify_cli/integrations/shai/__init__.py +++ b/src/specify_cli/integrations/shai/__init__.py @@ -19,3 +19,4 @@ class ShaiIntegration(MarkdownIntegration): "extension": ".md", } context_file = "SHAI.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/tabnine/__init__.py b/src/specify_cli/integrations/tabnine/__init__.py index 2928a214a7..0d0076bc56 100644 --- a/src/specify_cli/integrations/tabnine/__init__.py +++ b/src/specify_cli/integrations/tabnine/__init__.py @@ -19,3 +19,4 @@ class TabnineIntegration(TomlIntegration): "extension": ".toml", } context_file = "TABNINE.md" + multi_install_safe = True diff --git a/src/specify_cli/integrations/trae/__init__.py b/src/specify_cli/integrations/trae/__init__.py index 343a7527f8..4556487d07 100644 --- a/src/specify_cli/integrations/trae/__init__.py +++ b/src/specify_cli/integrations/trae/__init__.py @@ -27,6 +27,7 @@ class TraeIntegration(SkillsIntegration): "extension": "/SKILL.md", } context_file = ".trae/rules/project_rules.md" + multi_install_safe = True @classmethod def options(cls) -> list[IntegrationOption]: diff --git a/src/specify_cli/integrations/windsurf/__init__.py b/src/specify_cli/integrations/windsurf/__init__.py index f0f77d318e..ae5c3301f4 100644 --- a/src/specify_cli/integrations/windsurf/__init__.py +++ b/src/specify_cli/integrations/windsurf/__init__.py @@ -19,3 
+19,4 @@ class WindsurfIntegration(MarkdownIntegration): "extension": ".md", } context_file = ".windsurf/rules/specify-rules.md" + multi_install_safe = True diff --git a/src/specify_cli/paths.py b/src/specify_cli/paths.py new file mode 100644 index 0000000000..0f8d1bc1b8 --- /dev/null +++ b/src/specify_cli/paths.py @@ -0,0 +1,9 @@ +"""Shared path constants for specify_cli. + +This module is intentionally dependency-free (no typer, no rich, no workflows) +so it can be safely imported from anywhere in the package without side effects. +""" + +SPECIFY_DIR = ".specify" +INTEGRATION_JSON = f"{SPECIFY_DIR}/integration.json" +INIT_OPTIONS_FILE = f"{SPECIFY_DIR}/init-options.json" diff --git a/src/specify_cli/shared_infra.py b/src/specify_cli/shared_infra.py new file mode 100644 index 0000000000..1e8be7b282 --- /dev/null +++ b/src/specify_cli/shared_infra.py @@ -0,0 +1,317 @@ +"""Shared Spec Kit infrastructure installation helpers.""" + +from __future__ import annotations + +import os +import tempfile +from pathlib import Path +from typing import Any + +from .integrations.base import IntegrationBase +from .integrations.manifest import IntegrationManifest + + +def load_speckit_manifest( + project_path: Path, + *, + version: str, + console: Any | None = None, +) -> IntegrationManifest: + """Load the shared infrastructure manifest, preserving existing entries.""" + manifest_path = project_path / ".specify" / "integrations" / "speckit.manifest.json" + if manifest_path.exists(): + try: + manifest = IntegrationManifest.load("speckit", project_path) + manifest.version = version + return manifest + except (ValueError, FileNotFoundError, OSError, UnicodeDecodeError) as exc: + if console is not None: + console.print( + f"[yellow]Warning:[/yellow] Could not read shared infrastructure " + f"manifest at {manifest_path}: {exc}" + ) + console.print( + "A new shared manifest will be created; previously tracked " + "shared files may be treated as untracked." 
+ ) + return IntegrationManifest("speckit", project_path, version=version) + + +def shared_templates_source( + *, + core_pack: Path | None, + repo_root: Path, +) -> Path: + """Return the bundled/source shared templates directory.""" + if core_pack and (core_pack / "templates").is_dir(): + return core_pack / "templates" + return repo_root / "templates" + + +def shared_scripts_source( + *, + core_pack: Path | None, + repo_root: Path, +) -> Path: + """Return the bundled/source shared scripts directory.""" + if core_pack and (core_pack / "scripts").is_dir(): + return core_pack / "scripts" + return repo_root / "scripts" + + +def _shared_destination_label(project_path: Path, dest: Path) -> str: + try: + return dest.relative_to(project_path).as_posix() + except ValueError: + return str(dest) + + +def _shared_relative_path(project_path: Path, dest: Path) -> Path: + try: + rel = dest.relative_to(project_path) + except ValueError: + label = _shared_destination_label(project_path, dest) + raise ValueError(f"Shared infrastructure path escapes project root: {label}") from None + + if rel.is_absolute() or ".." 
in rel.parts: + label = _shared_destination_label(project_path, dest) + raise ValueError(f"Shared infrastructure path escapes project root: {label}") + return rel + + +def _ensure_safe_shared_directory(project_path: Path, directory: Path, *, create: bool = True) -> None: + """Create a shared infra directory without following symlinked parents.""" + root = project_path.resolve() + rel = _shared_relative_path(project_path, directory) + current = project_path + + for part in rel.parts: + current = current / part + label = _shared_destination_label(project_path, current) + if current.is_symlink(): + raise ValueError(f"Refusing to use symlinked shared infrastructure directory: {label}") + if current.exists(): + if not current.is_dir(): + raise ValueError(f"Shared infrastructure directory path is not a directory: {label}") + try: + current.resolve().relative_to(root) + except (OSError, ValueError): + raise ValueError(f"Shared infrastructure directory escapes project root: {label}") from None + continue + if not create: + raise ValueError(f"Shared infrastructure directory does not exist: {label}") + current.mkdir() + if current.is_symlink(): + raise ValueError(f"Refusing to use symlinked shared infrastructure directory: {label}") + try: + current.resolve().relative_to(root) + except (OSError, ValueError): + raise ValueError(f"Shared infrastructure directory escapes project root: {label}") from None + + +def _validate_safe_shared_directory(project_path: Path, directory: Path) -> None: + """Validate existing directory parents while allowing missing directories.""" + root = project_path.resolve() + rel = _shared_relative_path(project_path, directory) + current = project_path + + for part in rel.parts: + current = current / part + label = _shared_destination_label(project_path, current) + if current.is_symlink(): + raise ValueError(f"Refusing to use symlinked shared infrastructure directory: {label}") + if not current.exists(): + continue + if not current.is_dir(): + raise 
ValueError(f"Shared infrastructure directory path is not a directory: {label}") + try: + current.resolve().relative_to(root) + except (OSError, ValueError): + raise ValueError(f"Shared infrastructure directory escapes project root: {label}") from None + + +def _ensure_safe_shared_destination( + project_path: Path, + dest: Path, + *, + parent_must_exist: bool = True, +) -> None: + """Refuse shared infra writes that would escape or follow symlinks.""" + root = project_path.resolve() + _shared_relative_path(project_path, dest) + if parent_must_exist: + _ensure_safe_shared_directory(project_path, dest.parent, create=False) + else: + _validate_safe_shared_directory(project_path, dest.parent) + label = _shared_destination_label(project_path, dest) + if dest.is_symlink(): + raise ValueError(f"Refusing to overwrite symlinked shared infrastructure path: {label}") + + if dest.exists(): + try: + dest.resolve().relative_to(root) + except (OSError, ValueError): + raise ValueError(f"Shared infrastructure destination escapes project root: {label}") from None + + +def _write_shared_text(project_path: Path, dest: Path, content: str) -> None: + _write_shared_bytes(project_path, dest, content.encode("utf-8")) + + +def _write_shared_bytes( + project_path: Path, + dest: Path, + content: bytes, + *, + mode: int = 0o644, +) -> None: + _ensure_safe_shared_destination(project_path, dest) + fd, temp_name = tempfile.mkstemp(prefix=f".{dest.name}.", dir=dest.parent) + temp_path = Path(temp_name) + try: + with os.fdopen(fd, "wb") as fh: + fh.write(content) + temp_path.chmod(mode) + _ensure_safe_shared_destination(project_path, dest) + os.replace(temp_path, dest) + finally: + if temp_path.exists(): + temp_path.unlink() + + +def refresh_shared_templates( + project_path: Path, + *, + version: str, + core_pack: Path | None, + repo_root: Path, + console: Any, + invoke_separator: str, + force: bool = False, +) -> None: + """Refresh default-sensitive shared templates without touching scripts.""" + 
templates_src = shared_templates_source(core_pack=core_pack, repo_root=repo_root) + if not templates_src.is_dir(): + return + + manifest = load_speckit_manifest(project_path, version=version, console=console) + tracked_files = manifest.files + modified = set(manifest.check_modified()) + skipped_files: list[str] = [] + planned_updates: list[tuple[Path, str, str]] = [] + + dest_templates = project_path / ".specify" / "templates" + _ensure_safe_shared_directory(project_path, dest_templates) + for src in templates_src.iterdir(): + if not src.is_file() or src.name == "vscode-settings.json" or src.name.startswith("."): + continue + + dst = dest_templates / src.name + _ensure_safe_shared_destination(project_path, dst) + rel = dst.relative_to(project_path).as_posix() + if dst.exists() and not force: + if rel not in tracked_files or rel in modified: + skipped_files.append(rel) + continue + + content = src.read_text(encoding="utf-8") + content = IntegrationBase.resolve_command_refs(content, invoke_separator) + planned_updates.append((dst, rel, content)) + + for dst, rel, content in planned_updates: + _write_shared_text(project_path, dst, content) + manifest.record_existing(rel) + + manifest.save() + + if skipped_files: + console.print( + f"[yellow]⚠[/yellow] {len(skipped_files)} modified or untracked shared template file(s) were not updated:" + ) + for rel in skipped_files: + console.print(f" {rel}") + + +def install_shared_infra( + project_path: Path, + script_type: str, + *, + version: str, + core_pack: Path | None, + repo_root: Path, + console: Any, + force: bool = False, + invoke_separator: str = ".", +) -> bool: + """Install shared scripts and templates into *project_path*.""" + manifest = load_speckit_manifest(project_path, version=version, console=console) + skipped_files: list[str] = [] + planned_copies: list[tuple[Path, str, bytes, int]] = [] + planned_templates: list[tuple[Path, str, str]] = [] + + scripts_src = shared_scripts_source(core_pack=core_pack, 
repo_root=repo_root) + if scripts_src.is_dir(): + dest_scripts = project_path / ".specify" / "scripts" + _ensure_safe_shared_directory(project_path, dest_scripts) + variant_dir = "bash" if script_type == "sh" else "powershell" + variant_src = scripts_src / variant_dir + if variant_src.is_dir(): + dest_variant = dest_scripts / variant_dir + _ensure_safe_shared_directory(project_path, dest_variant) + for src_path in variant_src.rglob("*"): + if not src_path.is_file(): + continue + + rel_path = src_path.relative_to(variant_src) + dst_path = dest_variant / rel_path + _ensure_safe_shared_destination(project_path, dst_path, parent_must_exist=False) + if dst_path.exists() and not force: + skipped_files.append(dst_path.relative_to(project_path).as_posix()) + continue + + _ensure_safe_shared_directory(project_path, dst_path.parent) + rel = dst_path.relative_to(project_path).as_posix() + planned_copies.append((dst_path, rel, src_path.read_bytes(), src_path.stat().st_mode & 0o777)) + + templates_src = shared_templates_source(core_pack=core_pack, repo_root=repo_root) + if templates_src.is_dir(): + dest_templates = project_path / ".specify" / "templates" + _ensure_safe_shared_directory(project_path, dest_templates) + for src in templates_src.iterdir(): + if not src.is_file() or src.name == "vscode-settings.json" or src.name.startswith("."): + continue + + dst = dest_templates / src.name + _ensure_safe_shared_destination(project_path, dst) + if dst.exists() and not force: + skipped_files.append(dst.relative_to(project_path).as_posix()) + continue + + content = src.read_text(encoding="utf-8") + content = IntegrationBase.resolve_command_refs(content, invoke_separator) + rel = dst.relative_to(project_path).as_posix() + planned_templates.append((dst, rel, content)) + + for dst_path, rel, content, mode in planned_copies: + _ensure_safe_shared_directory(project_path, dst_path.parent) + _write_shared_bytes(project_path, dst_path, content, mode=mode) + manifest.record_existing(rel) + + 
for dst, rel, content in planned_templates: + _write_shared_text(project_path, dst, content) + manifest.record_existing(rel) + + if skipped_files: + console.print( + f"[yellow]⚠[/yellow] {len(skipped_files)} shared infrastructure file(s) already exist and were not updated:" + ) + for path in skipped_files: + console.print(f" {path}") + console.print( + "To refresh shared infrastructure, run " + "[cyan]specify init --here --force[/cyan] or " + "[cyan]specify integration upgrade --force[/cyan]." + ) + + manifest.save() + return True diff --git a/src/specify_cli/workflows/engine.py b/src/specify_cli/workflows/engine.py index b2e11af674..cbe559fcab 100644 --- a/src/specify_cli/workflows/engine.py +++ b/src/specify_cli/workflows/engine.py @@ -20,6 +20,8 @@ import yaml from .base import RunStatus, StepContext, StepResult, StepStatus +from specify_cli.paths import INTEGRATION_JSON as _INTEGRATION_JSON +from specify_cli.paths import INIT_OPTIONS_FILE as _INIT_OPTIONS_FILE # -- Workflow Definition -------------------------------------------------- @@ -82,8 +84,6 @@ def from_string(cls, content: str) -> WorkflowDefinition: # ID format: lowercase alphanumeric with hyphens _ID_PATTERN = re.compile(r"^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$") -_INTEGRATION_JSON = ".specify/integration.json" - # Valid step types (matching STEP_REGISTRY keys) def _get_valid_step_types() -> set[str]: """Return valid step types from the registry, with a built-in fallback.""" @@ -721,43 +721,63 @@ def _resolve_inputs( elif input_def.get("required", False): msg = f"Required input {name!r} not provided." 
raise ValueError(msg) - + # Also resolve "auto" sentinel when explicitly supplied by the caller if resolved.get("integration") == "auto": - resolved["integration"] = self._load_project_integration() + resolved["integration"] = self._resolve_default("integration", "auto") return resolved def _resolve_default(self, name: str, default: Any) -> Any: """Resolve special default sentinels against project state. For the ``integration`` input, ``"auto"`` resolves to the integration - recorded in ``.specify/integration.json`` so workflows dispatch to the - AI the project was actually initialized with. + recorded in project metadata so workflows dispatch to the AI the + project was actually initialized with. """ if name == "integration" and default == "auto": return self._load_project_integration() return default def _load_project_integration(self) -> str: - """Read the active integration key from ``.specify/integration.json``. + """Read the active integration key from project metadata. - Returns the stored integration string, or ``"copilot"`` when the file is - missing, unreadable, or does not contain a valid non-empty key. - The ``"copilot"`` fallback preserves backwards compatibility for projects - that predate the introduction of ``.specify/integration.json``. + The primary source is ``.specify/integration.json``. If that file is + missing or invalid, fall back to ``.specify/init-options.json`` for + older projects or partially migrated state, checking ``integration`` + first and then ``ai``. Returns ``"copilot"`` only when neither source + contains a valid non-empty integration key. 
""" - path = self.project_root / _INTEGRATION_JSON - if not path.is_file(): - return "copilot" - try: - data = json.loads(path.read_text(encoding="utf-8")) - except (OSError, UnicodeDecodeError, json.JSONDecodeError): - return "copilot" - if isinstance(data, dict): - value = data.get("integration") - if isinstance(value, str): - value = value.strip() - if value and value != "auto": - return value + + def _read_integration(path: Path, *keys: str) -> str | None: + if not path.is_file(): + return None + try: + data = json.loads(path.read_text(encoding="utf-8")) + except (OSError, UnicodeDecodeError, json.JSONDecodeError): + return None + if not isinstance(data, dict): + return None + for key in keys: + value = data.get(key) + if isinstance(value, str): + value = value.strip() + if value and value != "auto": # skip "auto" to avoid circular resolution + return value + return None + + integration = _read_integration( + self.project_root / _INTEGRATION_JSON, "integration" + ) + if integration is not None: + return integration + + integration = _read_integration( + self.project_root / _INIT_OPTIONS_FILE, + "integration", + "ai", + ) + if integration is not None: + return integration + return "copilot" @staticmethod diff --git a/templates/commands/specify.md b/templates/commands/specify.md index 1f3f5c4465..cafa32f4e2 100644 --- a/templates/commands/specify.md +++ b/templates/commands/specify.md @@ -183,7 +183,7 @@ Given that feature description, do this: c. **Handle Validation Results**: - - **If all items pass**: Mark checklist complete and proceed to step 7 + - **If all items pass**: Mark checklist complete and proceed to step 8 - **If items fail (excluding [NEEDS CLARIFICATION])**: 1. 
List the failing items and specific issues diff --git a/templates/commands/tasks.md b/templates/commands/tasks.md index 4e204abc1b..e5af6793b6 100644 --- a/templates/commands/tasks.md +++ b/templates/commands/tasks.md @@ -10,8 +10,8 @@ handoffs: prompt: Start the implementation in phases send: true scripts: - sh: scripts/bash/check-prerequisites.sh --json - ps: scripts/powershell/check-prerequisites.ps1 -Json + sh: scripts/bash/setup-tasks.sh --json + ps: scripts/powershell/setup-tasks.ps1 -Json --- ## User Input @@ -58,7 +58,7 @@ You **MUST** consider the user input before proceeding (if not empty). ## Outline -1. **Setup**: Run `{SCRIPT}` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot"). +1. **Setup**: Run `{SCRIPT}` from repo root and parse FEATURE_DIR, TASKS_TEMPLATE, and AVAILABLE_DOCS list. `FEATURE_DIR` and `TASKS_TEMPLATE` must be absolute paths when provided. `AVAILABLE_DOCS` is a list of document names/relative paths available under `FEATURE_DIR` (for example `research.md` or `contracts/`). For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot"). 2. **Load design documents**: Read from FEATURE_DIR: - **Required**: plan.md (tech stack, libraries, structure), spec.md (user stories with priorities) @@ -76,7 +76,7 @@ You **MUST** consider the user input before proceeding (if not empty). - Create parallel execution examples per user story - Validate task completeness (each user story has all needed tasks, independently testable) -4. **Generate tasks.md**: Use `templates/tasks-template.md` as structure, fill with: +4. **Generate tasks.md**: Read the tasks template from TASKS_TEMPLATE (from the JSON output above) and use it as structure. If TASKS_TEMPLATE is empty, fall back to `.specify/templates/tasks-template.md`. 
Fill with: - Correct feature name from plan.md - Phase 1: Setup tasks (project initialization) - Phase 2: Foundational tasks (blocking prerequisites for all user stories) diff --git a/tests/integrations/test_cli.py b/tests/integrations/test_cli.py index 60e51a5fb9..7732d57300 100644 --- a/tests/integrations/test_cli.py +++ b/tests/integrations/test_cli.py @@ -1,13 +1,21 @@ """Tests for --integration flag on specify init (CLI-level).""" +import io import json import os +import pytest import yaml +from rich.console import Console from tests.conftest import strip_ansi +class _NoopConsole: + def print(self, *args, **kwargs): + pass + + def _normalize_cli_output(output: str) -> str: output = strip_ansi(output) output = " ".join(output.split()) @@ -254,6 +262,310 @@ def test_shared_infra_skip_warning_displayed(self, tmp_path, capsys): normalized = " ".join(captured.out.split()) assert "specify integration upgrade --force" in normalized + def test_shared_infra_warns_when_manifest_cannot_be_loaded(self, tmp_path, capsys): + """Invalid shared manifests warn before falling back to a new manifest.""" + from specify_cli import _install_shared_infra + + project = tmp_path / "bad-shared-manifest-test" + project.mkdir() + integrations_dir = project / ".specify" / "integrations" + integrations_dir.mkdir(parents=True) + manifest_path = integrations_dir / "speckit.manifest.json" + manifest_path.write_text("{not json", encoding="utf-8") + + _install_shared_infra(project, "sh") + + captured = capsys.readouterr() + assert "Could not read shared infrastructure manifest" in captured.out + assert "A new shared manifest will be created" in captured.out + + def test_shared_infra_warns_when_manifest_cannot_be_decoded(self, tmp_path, capsys): + """Non-UTF-8 shared manifests warn before falling back to a new manifest.""" + from specify_cli import _install_shared_infra + + project = tmp_path / "bad-shared-manifest-encoding-test" + project.mkdir() + integrations_dir = project / ".specify" / 
"integrations" + integrations_dir.mkdir(parents=True) + manifest_path = integrations_dir / "speckit.manifest.json" + manifest_path.write_bytes(b"\xff\xfe\x00") + + _install_shared_infra(project, "sh") + + captured = capsys.readouterr() + assert "Could not read shared infrastructure manifest" in captured.out + assert "A new shared manifest will be created" in captured.out + + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="symlinks are unavailable") + def test_shared_infra_refuses_symlinked_script_destination(self, tmp_path): + """Shared script refreshes must not follow destination symlinks.""" + from specify_cli import _install_shared_infra + + project = tmp_path / "symlink-script-test" + project.mkdir() + (project / ".specify").mkdir() + + outside = tmp_path / "outside-script.sh" + outside.write_text("# outside\n", encoding="utf-8") + scripts_dir = project / ".specify" / "scripts" / "bash" + scripts_dir.mkdir(parents=True) + os.symlink(outside, scripts_dir / "common.sh") + + with pytest.raises(ValueError, match="Refusing to overwrite symlinked"): + _install_shared_infra(project, "sh", force=True) + + assert outside.read_text(encoding="utf-8") == "# outside\n" + + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="symlinks are unavailable") + def test_shared_infra_refuses_symlinked_template_destination(self, tmp_path): + """Shared template installs must not follow destination symlinks.""" + from specify_cli import _install_shared_infra + + project = tmp_path / "symlink-template-test" + project.mkdir() + (project / ".specify").mkdir() + + outside = tmp_path / "outside-template.md" + outside.write_text("# outside\n", encoding="utf-8") + templates_dir = project / ".specify" / "templates" + templates_dir.mkdir(parents=True) + os.symlink(outside, templates_dir / "plan-template.md") + + with pytest.raises(ValueError, match="Refusing to overwrite symlinked"): + _install_shared_infra(project, "sh", force=True) + + assert outside.read_text(encoding="utf-8") == 
"# outside\n" + + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="symlinks are unavailable") + def test_shared_template_refresh_refuses_symlinked_destination(self, tmp_path): + """Template-only refreshes must not follow destination symlinks.""" + from specify_cli import _refresh_shared_templates + + project = tmp_path / "symlink-refresh-test" + project.mkdir() + (project / ".specify").mkdir() + + outside = tmp_path / "outside-refresh.md" + outside.write_text("# outside\n", encoding="utf-8") + templates_dir = project / ".specify" / "templates" + templates_dir.mkdir(parents=True) + os.symlink(outside, templates_dir / "plan-template.md") + + with pytest.raises(ValueError, match="Refusing to overwrite symlinked"): + _refresh_shared_templates(project, invoke_separator=".", force=True) + + assert outside.read_text(encoding="utf-8") == "# outside\n" + + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="symlinks are unavailable") + def test_shared_infra_refuses_symlinked_specify_directory_before_mkdir(self, tmp_path): + """Shared infra directory creation must not follow a symlinked .specify.""" + from specify_cli import _install_shared_infra + + project = tmp_path / "symlink-dir-test" + project.mkdir() + outside = tmp_path / "outside-specify" + outside.mkdir() + os.symlink(outside, project / ".specify") + + with pytest.raises(ValueError, match="symlinked shared infrastructure directory"): + _install_shared_infra(project, "sh", force=True) + + assert not (outside / "scripts").exists() + assert not (outside / "templates").exists() + + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="symlinks are unavailable") + def test_shared_infra_refuses_symlinked_shared_manifest(self, tmp_path): + """Shared infra manifest saves must not follow destination symlinks.""" + from specify_cli.shared_infra import install_shared_infra + + project = tmp_path / "symlink-shared-manifest-test" + project.mkdir() + integrations_dir = project / ".specify" / "integrations" + 
integrations_dir.mkdir(parents=True) + + outside = tmp_path / "outside-manifest.json" + outside.write_text("# outside\n", encoding="utf-8") + os.symlink(outside, integrations_dir / "speckit.manifest.json") + + core_pack = tmp_path / "core-pack" + templates_src = core_pack / "templates" + templates_src.mkdir(parents=True) + (templates_src / "plan-template.md").write_text("# plan\n", encoding="utf-8") + + with pytest.raises(ValueError, match="symlinked integration manifest"): + install_shared_infra( + project, + "sh", + version="test", + core_pack=core_pack, + repo_root=tmp_path / "unused", + console=_NoopConsole(), + force=True, + ) + + assert outside.read_text(encoding="utf-8") == "# outside\n" + + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="symlinks are unavailable") + def test_shared_template_refresh_preflights_before_writing(self, tmp_path): + """Template refresh validates all destinations before writing any file.""" + from specify_cli.shared_infra import refresh_shared_templates + + project = tmp_path / "preflight-refresh-test" + project.mkdir() + templates_dir = project / ".specify" / "templates" + templates_dir.mkdir(parents=True) + + core_pack = tmp_path / "core-pack" + templates_src = core_pack / "templates" + templates_src.mkdir(parents=True) + (templates_src / "a-template.md").write_text("# new a\n", encoding="utf-8") + (templates_src / "z-template.md").write_text("# new z\n", encoding="utf-8") + + existing = templates_dir / "a-template.md" + existing.write_text("# old a\n", encoding="utf-8") + outside = tmp_path / "outside-z.md" + outside.write_text("# outside\n", encoding="utf-8") + os.symlink(outside, templates_dir / "z-template.md") + + with pytest.raises(ValueError, match="Refusing to overwrite symlinked"): + refresh_shared_templates( + project, + version="test", + core_pack=core_pack, + repo_root=tmp_path / "unused", + console=_NoopConsole(), + invoke_separator=".", + force=True, + ) + + assert existing.read_text(encoding="utf-8") == "# 
old a\n" + assert outside.read_text(encoding="utf-8") == "# outside\n" + + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="symlinks are unavailable") + def test_shared_infra_install_preflights_before_writing(self, tmp_path): + """Full shared infra installs validate destinations before writing any file.""" + from specify_cli.shared_infra import install_shared_infra + + project = tmp_path / "preflight-install-test" + project.mkdir() + scripts_dir = project / ".specify" / "scripts" / "bash" + scripts_dir.mkdir(parents=True) + + core_pack = tmp_path / "core-pack" + scripts_src = core_pack / "scripts" / "bash" + scripts_src.mkdir(parents=True) + (scripts_src / "a.sh").write_text("# new a\n", encoding="utf-8") + (scripts_src / "z.sh").write_text("# new z\n", encoding="utf-8") + + existing = scripts_dir / "a.sh" + existing.write_text("# old a\n", encoding="utf-8") + outside = tmp_path / "outside-z.sh" + outside.write_text("# outside\n", encoding="utf-8") + os.symlink(outside, scripts_dir / "z.sh") + + with pytest.raises(ValueError, match="Refusing to overwrite symlinked"): + install_shared_infra( + project, + "sh", + version="test", + core_pack=core_pack, + repo_root=tmp_path / "unused", + console=_NoopConsole(), + force=True, + ) + + assert existing.read_text(encoding="utf-8") == "# old a\n" + assert outside.read_text(encoding="utf-8") == "# outside\n" + + def test_shared_infra_install_supports_nested_script_sources(self, tmp_path): + """Nested script source files create safe destination parents at write time.""" + from specify_cli.shared_infra import install_shared_infra + + project = tmp_path / "nested-script-install-test" + project.mkdir() + + core_pack = tmp_path / "core-pack" + nested_src = core_pack / "scripts" / "bash" / "nested" + nested_src.mkdir(parents=True) + (nested_src / "deep.sh").write_text("# nested\n", encoding="utf-8") + + install_shared_infra( + project, + "sh", + version="test", + core_pack=core_pack, + repo_root=tmp_path / "unused", + 
console=_NoopConsole(), + force=True, + ) + + nested_dest = project / ".specify" / "scripts" / "bash" / "nested" / "deep.sh" + assert nested_dest.read_text(encoding="utf-8") == "# nested\n" + + def test_shared_infra_skip_warning_uses_posix_paths(self, tmp_path): + """Skipped shared infra paths are reported consistently across platforms.""" + from specify_cli.shared_infra import install_shared_infra + + project = tmp_path / "posix-skip-warning-test" + project.mkdir() + nested_dest = project / ".specify" / "scripts" / "bash" / "nested" + nested_dest.mkdir(parents=True) + (nested_dest / "deep.sh").write_text("# existing script\n", encoding="utf-8") + + templates_dest = project / ".specify" / "templates" + templates_dest.mkdir(parents=True) + (templates_dest / "plan-template.md").write_text("# existing template\n", encoding="utf-8") + + core_pack = tmp_path / "core-pack" + nested_src = core_pack / "scripts" / "bash" / "nested" + nested_src.mkdir(parents=True) + (nested_src / "deep.sh").write_text("# bundled script\n", encoding="utf-8") + + templates_src = core_pack / "templates" + templates_src.mkdir(parents=True) + (templates_src / "plan-template.md").write_text("# bundled template\n", encoding="utf-8") + + buffer = io.StringIO() + install_shared_infra( + project, + "sh", + version="test", + core_pack=core_pack, + repo_root=tmp_path / "unused", + console=Console(file=buffer, force_terminal=False, width=120), + force=False, + ) + + output = buffer.getvalue() + assert ".specify/scripts/bash/nested/deep.sh" in output + assert ".specify/templates/plan-template.md" in output + + @pytest.mark.skipif(os.name == "nt", reason="POSIX mode bits are not stable on Windows") + def test_shared_template_writes_are_not_world_writable(self, tmp_path): + """Shared template writes use a safe default mode instead of chmod 666.""" + from specify_cli.shared_infra import install_shared_infra + + project = tmp_path / "template-mode-test" + project.mkdir() + + core_pack = tmp_path / 
"core-pack" + templates_src = core_pack / "templates" + templates_src.mkdir(parents=True) + (templates_src / "plan-template.md").write_text("# plan\n", encoding="utf-8") + + install_shared_infra( + project, + "sh", + version="test", + core_pack=core_pack, + repo_root=tmp_path / "unused", + console=_NoopConsole(), + force=True, + ) + + written = project / ".specify" / "templates" / "plan-template.md" + assert written.stat().st_mode & 0o777 == 0o644 + def test_shared_infra_no_warning_when_forced(self, tmp_path, capsys): """No skip warning when force=True (all files overwritten).""" from specify_cli import _install_shared_infra @@ -473,6 +785,32 @@ def test_no_git_emits_deprecation_warning(self, tmp_path): assert "will be removed" in normalized_output assert "git extension will no longer be enabled by default" in normalized_output + def test_default_git_auto_enable_emits_notice(self, tmp_path): + """Default git auto-enable emits notice about the v0.10.0 opt-in change.""" + from typer.testing import CliRunner + from specify_cli import app + + project = tmp_path / "git-default-notice" + project.mkdir() + old_cwd = os.getcwd() + try: + os.chdir(project) + runner = CliRunner() + result = runner.invoke(app, [ + "init", "--here", "--ai", "claude", "--script", "sh", + "--ignore-agent-tools", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + + normalized_output = _normalize_cli_output(result.output) + assert result.exit_code == 0, result.output + # Check for key message components (notice may have box-drawing chars) + assert "git extension is currently enabled by default" in normalized_output + assert "v0.10.0" in normalized_output + assert "explicit opt-in" in normalized_output + assert "specify extension add git" in normalized_output + def test_git_extension_commands_registered(self, tmp_path): """Git extension commands are registered with the agent during init.""" from typer.testing import CliRunner @@ -706,6 +1044,118 @@ def 
test_catalog_list_requires_specify_project(self, tmp_path): assert result.exit_code == 1 assert "Not a spec-kit project" in result.output + def test_primary_integration_commands_require_specify_project(self, tmp_path): + project = tmp_path / "bare" + project.mkdir() + commands = [ + ["integration", "list"], + ["integration", "install", "codex"], + ["integration", "use", "codex"], + ["integration", "uninstall"], + ["integration", "switch", "codex"], + ["integration", "upgrade"], + ] + + for command in commands: + result = self._invoke(command, project) + failure_context = ( + f"command={command!r}, exit_code={result.exit_code}, output={result.output!r}" + ) + assert result.exit_code == 1, failure_context + assert "Not a spec-kit project" in result.output, failure_context + + def test_integration_commands_require_specify_directory(self, tmp_path): + project = tmp_path / "bad" + project.mkdir() + (project / ".specify").write_text("not a directory") + + commands = [ + ["integration", "list"], + ["integration", "use", "codex"], + ] + + for command in commands: + result = self._invoke(command, project) + assert result.exit_code == 1, result.output + assert "Not a spec-kit project" in result.output + + def test_project_scoped_commands_require_specify_directory(self, tmp_path): + project = tmp_path / "bad-feature-commands" + project.mkdir() + (project / ".specify").write_text("not a directory") + + commands = [ + ["preset", "list"], + ["preset", "add", "demo"], + ["preset", "remove", "demo"], + ["preset", "search"], + ["preset", "resolve", "spec-template"], + ["preset", "info", "demo"], + ["preset", "set-priority", "demo", "5"], + ["preset", "enable", "demo"], + ["preset", "disable", "demo"], + ["preset", "catalog", "list"], + ["preset", "catalog", "add", "https://example.com/catalog.yml", "--name", "demo"], + ["preset", "catalog", "remove", "demo"], + ["extension", "list"], + ["extension", "add", "demo"], + ["extension", "remove", "demo"], + ["extension", "search"], + 
["extension", "info", "demo"], + ["extension", "update", "demo"], + ["extension", "enable", "demo"], + ["extension", "disable", "demo"], + ["extension", "set-priority", "demo", "5"], + ["extension", "catalog", "list"], + ["extension", "catalog", "add", "https://example.com/catalog.yml", "--name", "demo"], + ["extension", "catalog", "remove", "demo"], + ["workflow", "run", "demo"], + ["workflow", "resume", "demo"], + ["workflow", "status"], + ["workflow", "list"], + ["workflow", "add", "demo"], + ["workflow", "remove", "demo"], + ["workflow", "search"], + ["workflow", "info", "demo"], + ["workflow", "catalog", "add", "https://example.com/catalog.yml"], + ["workflow", "catalog", "remove", "0"], + ] + + for command in commands: + result = self._invoke(command, project) + failure_context = ( + f"command={command!r}, exit_code={result.exit_code}, output={result.output!r}" + ) + assert result.exit_code == 1, failure_context + assert "Not a spec-kit project" in result.output, failure_context + + def test_catalog_config_output_uses_posix_paths(self, tmp_path): + project = self._make_project(tmp_path) + + preset_add = self._invoke([ + "preset", "catalog", "add", + "https://example.com/preset-catalog.yml", + "--name", "demo-presets", + ], project) + assert preset_add.exit_code == 0, preset_add.output + assert "Config saved to .specify/preset-catalogs.yml" in preset_add.output + + preset_list = self._invoke(["preset", "catalog", "list"], project) + assert preset_list.exit_code == 0, preset_list.output + assert "Config: .specify/preset-catalogs.yml" in preset_list.output + + extension_add = self._invoke([ + "extension", "catalog", "add", + "https://example.com/extension-catalog.yml", + "--name", "demo-extensions", + ], project) + assert extension_add.exit_code == 0, extension_add.output + assert "Config saved to .specify/extension-catalogs.yml" in extension_add.output + + extension_list = self._invoke(["extension", "catalog", "list"], project) + assert extension_list.exit_code 
== 0, extension_list.output + assert "Config: .specify/extension-catalogs.yml" in extension_list.output + # -- search ------------------------------------------------------------ def test_search_lists_all(self, tmp_path, monkeypatch): diff --git a/tests/integrations/test_integration_base_markdown.py b/tests/integrations/test_integration_base_markdown.py index 82d7b8cfb3..0b74a6f1a9 100644 --- a/tests/integrations/test_integration_base_markdown.py +++ b/tests/integrations/test_integration_base_markdown.py @@ -274,11 +274,11 @@ def _expected_files(self, script_variant: str) -> list[str]: if script_variant == "sh": for name in ["check-prerequisites.sh", "common.sh", "create-new-feature.sh", - "setup-plan.sh"]: + "setup-plan.sh", "setup-tasks.sh"]: files.append(f".specify/scripts/bash/{name}") else: for name in ["check-prerequisites.ps1", "common.ps1", "create-new-feature.ps1", - "setup-plan.ps1"]: + "setup-plan.ps1", "setup-tasks.ps1"]: files.append(f".specify/scripts/powershell/{name}") for name in ["checklist-template.md", diff --git a/tests/integrations/test_integration_base_skills.py b/tests/integrations/test_integration_base_skills.py index 98a65fcff4..89140de1c3 100644 --- a/tests/integrations/test_integration_base_skills.py +++ b/tests/integrations/test_integration_base_skills.py @@ -387,6 +387,7 @@ def _expected_files(self, script_variant: str) -> list[str]: ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", ".specify/scripts/bash/setup-plan.sh", + ".specify/scripts/bash/setup-tasks.sh", ] else: files += [ @@ -394,6 +395,7 @@ def _expected_files(self, script_variant: str) -> list[str]: ".specify/scripts/powershell/common.ps1", ".specify/scripts/powershell/create-new-feature.ps1", ".specify/scripts/powershell/setup-plan.ps1", + ".specify/scripts/powershell/setup-tasks.ps1", ] # Templates files += [ diff --git a/tests/integrations/test_integration_base_toml.py b/tests/integrations/test_integration_base_toml.py index 
78273b560e..56862e534c 100644 --- a/tests/integrations/test_integration_base_toml.py +++ b/tests/integrations/test_integration_base_toml.py @@ -516,6 +516,7 @@ def _expected_files(self, script_variant: str) -> list[str]: "common.sh", "create-new-feature.sh", "setup-plan.sh", + "setup-tasks.sh", ]: files.append(f".specify/scripts/bash/{name}") else: @@ -524,6 +525,7 @@ def _expected_files(self, script_variant: str) -> list[str]: "common.ps1", "create-new-feature.ps1", "setup-plan.ps1", + "setup-tasks.ps1", ]: files.append(f".specify/scripts/powershell/{name}") diff --git a/tests/integrations/test_integration_base_yaml.py b/tests/integrations/test_integration_base_yaml.py index e1dee3bad7..956c7a796f 100644 --- a/tests/integrations/test_integration_base_yaml.py +++ b/tests/integrations/test_integration_base_yaml.py @@ -395,6 +395,7 @@ def _expected_files(self, script_variant: str) -> list[str]: "common.sh", "create-new-feature.sh", "setup-plan.sh", + "setup-tasks.sh", ]: files.append(f".specify/scripts/bash/{name}") else: @@ -403,6 +404,7 @@ def _expected_files(self, script_variant: str) -> list[str]: "common.ps1", "create-new-feature.ps1", "setup-plan.ps1", + "setup-tasks.ps1", ]: files.append(f".specify/scripts/powershell/{name}") diff --git a/tests/integrations/test_integration_catalog.py b/tests/integrations/test_integration_catalog.py index 6c55ae4ebc..8b21ddfb8b 100644 --- a/tests/integrations/test_integration_catalog.py +++ b/tests/integrations/test_integration_catalog.py @@ -670,7 +670,7 @@ def test_upgrade_wrong_integration_key(self, tmp_path): finally: os.chdir(old) assert result.exit_code != 0 - assert "not the currently installed integration" in result.output + assert "not installed" in result.output def test_upgrade_no_manifest(self, tmp_path): """Upgrade with missing manifest suggests fresh install.""" diff --git a/tests/integrations/test_integration_copilot.py b/tests/integrations/test_integration_copilot.py index 2df4d2d7df..c6e9259b09 100644 --- 
a/tests/integrations/test_integration_copilot.py +++ b/tests/integrations/test_integration_copilot.py @@ -206,6 +206,7 @@ def test_complete_file_inventory_sh(self, tmp_path): ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", ".specify/scripts/bash/setup-plan.sh", + ".specify/scripts/bash/setup-tasks.sh", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -265,6 +266,7 @@ def test_complete_file_inventory_ps(self, tmp_path): ".specify/scripts/powershell/common.ps1", ".specify/scripts/powershell/create-new-feature.ps1", ".specify/scripts/powershell/setup-plan.ps1", + ".specify/scripts/powershell/setup-tasks.ps1", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -614,6 +616,7 @@ def test_complete_file_inventory_skills_sh(self, tmp_path): ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", ".specify/scripts/bash/setup-plan.sh", + ".specify/scripts/bash/setup-tasks.sh", # Templates ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", diff --git a/tests/integrations/test_integration_generic.py b/tests/integrations/test_integration_generic.py index f0272afa8d..290a36419e 100644 --- a/tests/integrations/test_integration_generic.py +++ b/tests/integrations/test_integration_generic.py @@ -264,6 +264,7 @@ def test_complete_file_inventory_sh(self, tmp_path): ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", ".specify/scripts/bash/setup-plan.sh", + ".specify/scripts/bash/setup-tasks.sh", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -319,6 +320,7 @@ def test_complete_file_inventory_ps(self, tmp_path): ".specify/scripts/powershell/common.ps1", ".specify/scripts/powershell/create-new-feature.ps1", 
".specify/scripts/powershell/setup-plan.ps1", + ".specify/scripts/powershell/setup-tasks.ps1", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", diff --git a/tests/integrations/test_integration_state.py b/tests/integrations/test_integration_state.py new file mode 100644 index 0000000000..1d6bdb0268 --- /dev/null +++ b/tests/integrations/test_integration_state.py @@ -0,0 +1,86 @@ +"""Tests for integration state normalization helpers.""" + +import json + +from specify_cli.integration_state import ( + INTEGRATION_JSON, + default_integration_key, + integration_setting, + normalize_integration_state, + write_integration_json, +) + + +def test_normalize_integration_state_strips_default_key_without_duplicates(): + state = normalize_integration_state( + { + "default_integration": " claude ", + "integration": " claude ", + "installed_integrations": ["claude"], + } + ) + + assert state["integration"] == "claude" + assert state["default_integration"] == "claude" + assert state["installed_integrations"] == ["claude"] + + +def test_normalize_integration_state_strips_legacy_key_fallback(): + state = normalize_integration_state( + { + "integration": " codex ", + "installed_integrations": [], + } + ) + + assert state["integration"] == "codex" + assert state["default_integration"] == "codex" + assert state["installed_integrations"] == ["codex"] + + +def test_normalize_integration_state_preserves_newer_schema(): + state = normalize_integration_state( + { + "integration_state_schema": 99, + "integration": "claude", + "installed_integrations": ["claude"], + "future_field": {"keep": True}, + } + ) + + assert state["integration_state_schema"] == 99 + assert state["future_field"] == {"keep": True} + + +def test_default_integration_key_strips_raw_state_values(): + assert default_integration_key({"default_integration": " claude "}) == "claude" + assert default_integration_key({"integration": " codex "}) == 
"codex" + + +def test_integration_settings_strip_invoke_separator(): + setting = integration_setting( + { + "integration_settings": { + "claude": { + "invoke_separator": " - ", + } + } + }, + "claude", + ) + + assert setting["invoke_separator"] == "-" + + +def test_write_integration_json_strips_integration_key(tmp_path): + write_integration_json( + tmp_path, + version="1.2.3", + integration_key=" claude ", + installed_integrations=["claude"], + ) + + state = json.loads((tmp_path / INTEGRATION_JSON).read_text(encoding="utf-8")) + assert state["integration"] == "claude" + assert state["default_integration"] == "claude" + assert state["installed_integrations"] == ["claude"] diff --git a/tests/integrations/test_integration_subcommand.py b/tests/integrations/test_integration_subcommand.py index f5322bdf5e..750bbb6efa 100644 --- a/tests/integrations/test_integration_subcommand.py +++ b/tests/integrations/test_integration_subcommand.py @@ -3,6 +3,7 @@ import json import os +import pytest from typer.testing import CliRunner from specify_cli import app @@ -31,6 +32,27 @@ def _init_project(tmp_path, integration="copilot"): return project +def _run_in_project(project, args): + """Run a CLI command from inside a generated project.""" + old_cwd = os.getcwd() + try: + os.chdir(project) + return runner.invoke(app, args, catch_exceptions=False) + finally: + os.chdir(old_cwd) + + +def _write_invalid_manifest(project, key): + manifest = project / ".specify" / "integrations" / f"{key}.manifest.json" + manifest.write_bytes(b"\xff\xfe\x00") + return manifest + + +def _integration_list_row_cells(output: str, key: str) -> list[str]: + row = next(line for line in output.splitlines() if line.startswith(f"│ {key}")) + return [cell.strip() for cell in row.split("│")[1:-1]] + + # ── list ───────────────────────────────────────────────────────────── @@ -70,6 +92,39 @@ def test_list_shows_available_integrations(self, tmp_path): assert "claude" in result.output assert "gemini" in result.output + 
def test_list_shows_multi_install_safe_status(self, tmp_path): + project = _init_project(tmp_path, "claude") + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, ["integration", "list"]) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0 + assert "Multi-install" in result.output + assert "Safe" in result.output + assert _integration_list_row_cells(result.output, "claude")[-1] == "yes" + assert _integration_list_row_cells(result.output, "copilot")[-1] == "no" + + def test_list_rejects_newer_integration_state_schema(self, tmp_path): + project = _init_project(tmp_path, "claude") + int_json = project / ".specify" / "integration.json" + data = json.loads(int_json.read_text(encoding="utf-8")) + data["integration_state_schema"] = 99 + int_json.write_text(json.dumps(data), encoding="utf-8") + + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, ["integration", "list"]) + finally: + os.chdir(old_cwd) + + assert result.exit_code != 0 + normalized = " ".join(result.output.split()) + assert "schema 99" in normalized + assert "only supports schema 1" in normalized + # ── install ────────────────────────────────────────────────────────── @@ -106,7 +161,9 @@ def test_install_already_installed(self, tmp_path): os.chdir(old_cwd) assert result.exit_code == 0 assert "already installed" in result.output - assert "uninstall" in result.output + normalized = " ".join(result.output.split()) + assert "specify integration upgrade copilot" in normalized + assert "specify integration uninstall copilot" in normalized def test_install_different_when_one_exists(self, tmp_path): project = _init_project(tmp_path, "copilot") @@ -117,8 +174,112 @@ def test_install_different_when_one_exists(self, tmp_path): finally: os.chdir(old_cwd) assert result.exit_code != 0 - assert "already installed" in result.output - assert "uninstall" in result.output + assert "Installed integrations: copilot" in result.output + assert "Default integration: 
copilot" in result.output + assert "--force" in result.output + + def test_install_multi_safe_integration(self, tmp_path): + project = _init_project(tmp_path, "claude") + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, [ + "integration", "install", "codex", + "--script", "sh", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + assert "installed successfully" in result.output + + data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) + assert data["integration"] == "claude" + assert data["default_integration"] == "claude" + assert data["integration_state_schema"] == 1 + assert data["installed_integrations"] == ["claude", "codex"] + assert data["integration_settings"]["claude"]["invoke_separator"] == "-" + assert data["integration_settings"]["codex"]["invoke_separator"] == "-" + + assert (project / ".claude" / "skills" / "speckit-plan" / "SKILL.md").exists() + assert (project / ".agents" / "skills" / "speckit-plan" / "SKILL.md").exists() + + def test_install_additional_preserves_shared_manifest(self, tmp_path): + project = _init_project(tmp_path, "claude") + shared_manifest = project / ".specify" / "integrations" / "speckit.manifest.json" + before = set(json.loads(shared_manifest.read_text(encoding="utf-8"))["files"]) + assert before + + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, [ + "integration", "install", "codex", + "--script", "sh", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + + after = set(json.loads(shared_manifest.read_text(encoding="utf-8"))["files"]) + assert before <= after + + def test_install_multi_safe_migrates_legacy_state(self, tmp_path): + project = _init_project(tmp_path, "claude") + int_json = project / ".specify" / "integration.json" + int_json.write_text(json.dumps({ + "integration": "claude", + "version": "0.0.0", + }), 
encoding="utf-8") + + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, [ + "integration", "install", "codex", + "--script", "sh", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + + data = json.loads(int_json.read_text(encoding="utf-8")) + assert data["integration"] == "claude" + assert data["default_integration"] == "claude" + assert data["installed_integrations"] == ["claude", "codex"] + + def test_install_multi_unsafe_requires_force(self, tmp_path): + project = _init_project(tmp_path, "copilot") + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, [ + "integration", "install", "claude", + "--script", "sh", + ]) + finally: + os.chdir(old_cwd) + assert result.exit_code != 0 + assert "Installed integrations: copilot" in result.output + assert "multi-install safe" in result.output + assert "--force" in result.output + + def test_install_multi_unsafe_allowed_with_force(self, tmp_path): + project = _init_project(tmp_path, "copilot") + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, [ + "integration", "install", "claude", + "--script", "sh", + "--force", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + + data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) + assert data["integration"] == "copilot" + assert data["installed_integrations"] == ["copilot", "claude"] def test_install_into_bare_project(self, tmp_path): """Install into a project with .specify/ but no integration.""" @@ -236,6 +397,7 @@ def test_uninstall_preserves_modified_files(self, tmp_path): os.chdir(old_cwd) assert result.exit_code == 0 assert "preserved" in result.output + assert ".claude/skills/speckit-plan/SKILL.md" in result.output # Modified file kept assert plan_file.exists() @@ -250,7 +412,68 @@ def test_uninstall_wrong_key(self, tmp_path): finally: 
os.chdir(old_cwd) assert result.exit_code != 0 - assert "not the currently installed" in result.output + assert "not installed" in result.output + + def test_uninstall_invalid_manifest_reports_cli_error(self, tmp_path): + project = _init_project(tmp_path, "claude") + _write_invalid_manifest(project, "claude") + + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, ["integration", "uninstall", "claude"]) + finally: + os.chdir(old_cwd) + assert result.exit_code != 0 + assert "manifest" in result.output + assert "unreadable" in result.output + + def test_uninstall_non_default_preserves_default(self, tmp_path): + project = _init_project(tmp_path, "claude") + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "codex", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + result = runner.invoke(app, [ + "integration", "uninstall", "codex", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + assert not (project / ".agents" / "skills" / "speckit-plan" / "SKILL.md").exists() + assert (project / ".claude" / "skills" / "speckit-plan" / "SKILL.md").exists() + + data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) + assert data["integration"] == "claude" + assert data["installed_integrations"] == ["claude"] + + def test_uninstall_default_refreshes_templates_for_fallback(self, tmp_path): + project = _init_project(tmp_path, "gemini") + template = project / ".specify" / "templates" / "plan-template.md" + assert "/speckit.plan" in template.read_text(encoding="utf-8") + + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "claude", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + result = runner.invoke(app, ["integration", "uninstall", "gemini"], 
catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + + data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) + assert data["integration"] == "claude" + assert "/speckit-plan" in template.read_text(encoding="utf-8") def test_uninstall_preserves_shared_infra(self, tmp_path): """Shared scripts and templates are not removed by integration uninstall.""" @@ -271,6 +494,135 @@ def test_uninstall_preserves_shared_infra(self, tmp_path): assert (project / ".specify" / "templates").is_dir() +class TestIntegrationUse: + def test_use_installed_integration_sets_default(self, tmp_path): + project = _init_project(tmp_path, "claude") + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "codex", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + result = runner.invoke(app, ["integration", "use", "codex"], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + + data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) + assert data["integration"] == "codex" + assert data["default_integration"] == "codex" + assert data["installed_integrations"] == ["claude", "codex"] + + opts = json.loads((project / ".specify" / "init-options.json").read_text(encoding="utf-8")) + assert opts["integration"] == "codex" + assert opts["ai"] == "codex" + + def test_use_requires_installed_integration(self, tmp_path): + project = _init_project(tmp_path, "claude") + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, ["integration", "use", "codex"]) + finally: + os.chdir(old_cwd) + assert result.exit_code != 0 + assert "not installed" in result.output + + def test_use_refreshes_shared_templates_between_command_styles(self, tmp_path): + project = _init_project(tmp_path, "claude") + template = project / 
".specify" / "templates" / "plan-template.md" + assert "/speckit-plan" in template.read_text(encoding="utf-8") + + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "gemini", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + use_gemini = runner.invoke(app, ["integration", "use", "gemini"], catch_exceptions=False) + assert use_gemini.exit_code == 0, use_gemini.output + assert "/speckit.plan" in template.read_text(encoding="utf-8") + + use_claude = runner.invoke(app, ["integration", "use", "claude"], catch_exceptions=False) + assert use_claude.exit_code == 0, use_claude.output + assert "/speckit-plan" in template.read_text(encoding="utf-8") + finally: + os.chdir(old_cwd) + + def test_use_preserves_modified_templates_unless_forced(self, tmp_path): + project = _init_project(tmp_path, "claude") + template = project / ".specify" / "templates" / "plan-template.md" + template.write_text("custom template with /speckit-plan\n", encoding="utf-8") + + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "gemini", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + use_gemini = runner.invoke(app, ["integration", "use", "gemini"], catch_exceptions=False) + assert use_gemini.exit_code == 0, use_gemini.output + assert template.read_text(encoding="utf-8") == "custom template with /speckit-plan\n" + + force_use = runner.invoke(app, [ + "integration", "use", "gemini", + "--force", + ], catch_exceptions=False) + assert force_use.exit_code == 0, force_use.output + finally: + os.chdir(old_cwd) + + updated = template.read_text(encoding="utf-8") + assert "/speckit.plan" in updated + assert "custom template" not in updated + + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="symlinks are unavailable") + def 
test_use_does_not_persist_default_when_template_refresh_fails(self, tmp_path): + project = _init_project(tmp_path, "claude") + int_json = project / ".specify" / "integration.json" + init_options = project / ".specify" / "init-options.json" + + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "codex", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + before_state = json.loads(int_json.read_text(encoding="utf-8")) + before_options = json.loads(init_options.read_text(encoding="utf-8")) + + outside = tmp_path / "outside-template.md" + outside.write_text("# outside\n", encoding="utf-8") + template = project / ".specify" / "templates" / "plan-template.md" + template.unlink() + os.symlink(outside, template) + + result = runner.invoke(app, [ + "integration", "use", "codex", + "--force", + ]) + finally: + os.chdir(old_cwd) + + assert result.exit_code != 0 + assert "Failed to refresh shared templates" in result.output + assert json.loads(int_json.read_text(encoding="utf-8")) == before_state + assert json.loads(init_options.read_text(encoding="utf-8")) == before_options + assert outside.read_text(encoding="utf-8") == "# outside\n" + + # ── switch ─────────────────────────────────────────────────────────── @@ -296,6 +648,22 @@ def test_switch_unknown_target(self, tmp_path): assert result.exit_code != 0 assert "Unknown integration" in result.output + def test_switch_invalid_current_manifest_reports_cli_error(self, tmp_path): + project = _init_project(tmp_path, "claude") + _write_invalid_manifest(project, "claude") + + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, [ + "integration", "switch", "codex", + "--script", "sh", + ]) + finally: + os.chdir(old_cwd) + assert result.exit_code != 0 + assert "Could not read integration manifest" in result.output + def test_switch_same_noop(self, tmp_path): project = _init_project(tmp_path, 
"copilot") old_cwd = os.getcwd() @@ -305,7 +673,48 @@ def test_switch_same_noop(self, tmp_path): finally: os.chdir(old_cwd) assert result.exit_code == 0 - assert "already installed" in result.output + assert "already the default integration" in result.output + + def test_switch_same_force_refreshes_shared_templates(self, tmp_path): + project = _init_project(tmp_path, "claude") + template = project / ".specify" / "templates" / "plan-template.md" + template.write_text("# custom shared template\n", encoding="utf-8") + + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, [ + "integration", "switch", "claude", + "--force", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + assert "managed shared templates refreshed" in result.output + assert "/speckit-plan" in template.read_text(encoding="utf-8") + + def test_switch_installed_target_rejects_integration_options(self, tmp_path): + project = _init_project(tmp_path, "claude") + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "codex", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + result = runner.invoke(app, [ + "integration", "switch", "codex", + "--integration-options", "--bogus", + ]) + finally: + os.chdir(old_cwd) + assert result.exit_code != 0 + assert "--integration-options cannot be used" in result.output + + data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) + assert data["default_integration"] == "claude" def test_switch_between_integrations(self, tmp_path): project = _init_project(tmp_path, "claude") @@ -334,6 +743,142 @@ def test_switch_between_integrations(self, tmp_path): data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) assert data["integration"] == "copilot" + def test_switch_migrates_extension_commands(self, tmp_path): + 
"""Switching should migrate extension commands to the new agent directory.""" + project = _init_project(tmp_path, "kimi") + + # Install the bundled git extension + result = _run_in_project(project, ["extension", "add", "git"]) + assert result.exit_code == 0, f"extension add failed: {result.output}" + + # Verify git extension skills exist for kimi + kimi_git_feature = project / ".kimi" / "skills" / "speckit-git-feature" / "SKILL.md" + assert kimi_git_feature.exists(), "Git extension skill should exist for kimi" + + result = _run_in_project(project, [ + "integration", "switch", "opencode", + "--script", "sh", + ]) + assert result.exit_code == 0, result.output + + # Git extension commands should exist for opencode + opencode_git_feature = project / ".opencode" / "command" / "speckit.git.feature.md" + assert opencode_git_feature.exists(), "Git extension command should exist for opencode" + + # Old kimi extension skills should be removed + assert not kimi_git_feature.exists(), "Old kimi extension skill should be removed" + + # Extension registry should be updated + registry = json.loads( + (project / ".specify" / "extensions" / ".registry").read_text(encoding="utf-8") + ) + registered_commands = registry["extensions"]["git"]["registered_commands"] + assert "opencode" in registered_commands + assert "kimi" not in registered_commands + + # Switch to claude + result = _run_in_project(project, [ + "integration", "switch", "claude", + "--script", "sh", + ]) + assert result.exit_code == 0, result.output + + # Git extension skills should exist for claude + claude_git_feature = project / ".claude" / "skills" / "speckit-git-feature" / "SKILL.md" + assert claude_git_feature.exists(), "Git extension skill should exist for claude" + + # Old opencode extension commands should be removed + assert not opencode_git_feature.exists(), "Old opencode extension command should be removed" + + # Extension registry should be updated + registry = json.loads( + (project / ".specify" / 
"extensions" / ".registry").read_text(encoding="utf-8") + ) + registered_commands = registry["extensions"]["git"]["registered_commands"] + assert "claude" in registered_commands + assert "opencode" not in registered_commands + + def test_switch_migrates_copilot_skills_extension_commands(self, tmp_path): + """Copilot --skills should receive extension skills, not .agent.md files.""" + project = _init_project(tmp_path, "opencode") + + result = _run_in_project(project, ["extension", "add", "git"]) + assert result.exit_code == 0, f"extension add failed: {result.output}" + + result = _run_in_project(project, [ + "integration", "switch", "copilot", + "--script", "sh", + "--integration-options", "--skills", + ]) + assert result.exit_code == 0, result.output + + copilot_git_feature = project / ".github" / "skills" / "speckit-git-feature" / "SKILL.md" + copilot_agent_file = project / ".github" / "agents" / "speckit.git.feature.agent.md" + assert copilot_git_feature.exists(), "Git extension skill should exist for Copilot skills mode" + assert not copilot_agent_file.exists(), "Copilot skills mode should not create extension .agent.md files" + + # Verify Copilot-specific frontmatter: mode field should map from + # skill name (speckit-git-feature) back to dot notation (speckit.git-feature) + skill_content = copilot_git_feature.read_text(encoding="utf-8") + assert "mode: speckit.git-feature" in skill_content, ( + "Copilot skill frontmatter should contain mode mapped from skill name" + ) + + registry = json.loads( + (project / ".specify" / "extensions" / ".registry").read_text(encoding="utf-8") + ) + git_meta = registry["extensions"]["git"] + assert "speckit-git-feature" in git_meta["registered_skills"] + assert "copilot" not in git_meta["registered_commands"] + + result = _run_in_project(project, [ + "integration", "switch", "opencode", + "--script", "sh", + ]) + assert result.exit_code == 0, result.output + + opencode_git_feature = project / ".opencode" / "command" / 
"speckit.git.feature.md" + assert opencode_git_feature.exists(), "Git extension command should exist for opencode" + assert not copilot_git_feature.exists(), "Old Copilot extension skill should be removed" + + registry = json.loads( + (project / ".specify" / "extensions" / ".registry").read_text(encoding="utf-8") + ) + git_meta = registry["extensions"]["git"] + assert git_meta["registered_skills"] == [] + assert "opencode" in git_meta["registered_commands"] + assert "copilot" not in git_meta["registered_commands"] + + def test_switch_does_not_register_disabled_extensions(self, tmp_path): + """Disabled extensions should stay disabled and should not migrate commands.""" + project = _init_project(tmp_path, "opencode") + + result = _run_in_project(project, ["extension", "add", "git"]) + assert result.exit_code == 0, f"extension add failed: {result.output}" + result = _run_in_project(project, ["extension", "disable", "git"]) + assert result.exit_code == 0, result.output + + opencode_git_feature = project / ".opencode" / "command" / "speckit.git.feature.md" + assert opencode_git_feature.exists(), "Disabled extension command remains until integration switch" + + result = _run_in_project(project, [ + "integration", "switch", "claude", + "--script", "sh", + ]) + assert result.exit_code == 0, result.output + + claude_git_feature = project / ".claude" / "skills" / "speckit-git-feature" / "SKILL.md" + assert not claude_git_feature.exists(), "Disabled extension should not be registered for new agent" + assert not opencode_git_feature.exists(), "Old disabled extension command should be removed on switch" + + registry = json.loads( + (project / ".specify" / "extensions" / ".registry").read_text(encoding="utf-8") + ) + git_meta = registry["extensions"]["git"] + assert git_meta["enabled"] is False + assert "claude" not in git_meta["registered_commands"] + assert "opencode" not in git_meta["registered_commands"] + def test_switch_preserves_shared_infra(self, tmp_path): """Switching 
preserves shared scripts, templates, and memory.""" project = _init_project(tmp_path, "claude") @@ -376,6 +921,107 @@ def test_switch_from_nothing(self, tmp_path): data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) assert data["integration"] == "claude" + def test_failed_switch_keeps_fallback_metadata_consistent(self, tmp_path): + project = _init_project(tmp_path, "claude") + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "codex", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + result = runner.invoke(app, [ + "integration", "switch", "generic", + "--script", "sh", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code != 0 + + data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) + assert data["integration"] == "codex" + assert data["installed_integrations"] == ["codex"] + + opts = json.loads((project / ".specify" / "init-options.json").read_text(encoding="utf-8")) + assert opts["integration"] == "codex" + assert opts["ai"] == "codex" + + template = project / ".specify" / "templates" / "plan-template.md" + assert "/speckit-plan" in template.read_text(encoding="utf-8") + + +class TestIntegrationUpgrade: + def test_upgrade_invalid_manifest_reports_cli_error(self, tmp_path): + project = _init_project(tmp_path, "claude") + _write_invalid_manifest(project, "claude") + + old_cwd = os.getcwd() + try: + os.chdir(project) + result = runner.invoke(app, ["integration", "upgrade", "claude"]) + finally: + os.chdir(old_cwd) + assert result.exit_code != 0 + assert "manifest" in result.output + assert "unreadable" in result.output + + def test_upgrade_does_not_persist_state_when_template_refresh_fails(self, tmp_path, monkeypatch): + project = _init_project(tmp_path, "claude") + int_json = project / ".specify" / "integration.json" + init_options = project / 
".specify" / "init-options.json" + manifest_path = project / ".specify" / "integrations" / "claude.manifest.json" + + before_state = json.loads(int_json.read_text(encoding="utf-8")) + before_options = json.loads(init_options.read_text(encoding="utf-8")) + before_manifest = manifest_path.read_text(encoding="utf-8") + + import specify_cli + + def fail_refresh(*args, **kwargs): + raise ValueError("refuse refresh") + + monkeypatch.setattr(specify_cli, "_refresh_shared_templates", fail_refresh) + + result = _run_in_project(project, [ + "integration", "upgrade", "claude", + "--force", + ]) + + assert result.exit_code != 0 + assert "Failed to refresh shared templates" in result.output + assert json.loads(int_json.read_text(encoding="utf-8")) == before_state + assert json.loads(init_options.read_text(encoding="utf-8")) == before_options + assert manifest_path.read_text(encoding="utf-8") == before_manifest + + def test_upgrade_non_default_keeps_default_template_invocations(self, tmp_path): + project = _init_project(tmp_path, "gemini") + template = project / ".specify" / "templates" / "plan-template.md" + assert "/speckit.plan" in template.read_text(encoding="utf-8") + + old_cwd = os.getcwd() + try: + os.chdir(project) + install = runner.invoke(app, [ + "integration", "install", "claude", + "--script", "sh", + ], catch_exceptions=False) + assert install.exit_code == 0, install.output + + result = runner.invoke(app, [ + "integration", "upgrade", "claude", + "--script", "sh", + "--force", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + assert result.exit_code == 0, result.output + + data = json.loads((project / ".specify" / "integration.json").read_text(encoding="utf-8")) + assert data["integration"] == "gemini" + assert "/speckit.plan" in template.read_text(encoding="utf-8") + # ── Full lifecycle ─────────────────────────────────────────────────── diff --git a/tests/integrations/test_registry.py b/tests/integrations/test_registry.py index 8ab1425148..1b36501056 
100644 --- a/tests/integrations/test_registry.py +++ b/tests/integrations/test_registry.py @@ -1,7 +1,13 @@ """Tests for INTEGRATION_REGISTRY — mechanics, completeness, and registrar alignment.""" +import json +import os +from pathlib import PurePosixPath + import pytest +from typer.testing import CliRunner +from specify_cli import app from specify_cli.integrations import ( INTEGRATION_REGISTRY, _register, @@ -25,6 +31,72 @@ ] +def _multi_install_safe_keys() -> list[str]: + return sorted( + key + for key, integration in INTEGRATION_REGISTRY.items() + if integration.multi_install_safe + ) + + +def _multi_install_safe_pairs() -> list[tuple[str, str]]: + safe_keys = _multi_install_safe_keys() + return [ + (safe_keys[left], safe_keys[right]) + for left in range(len(safe_keys)) + for right in range(left + 1, len(safe_keys)) + ] + + +def _posix_path(value: str | None) -> str | None: + if not value: + return None + return PurePosixPath(value).as_posix() + + +def _integration_root_dir(key: str) -> str | None: + integration = INTEGRATION_REGISTRY[key] + cfg = integration.config if isinstance(integration.config, dict) else {} + return _posix_path(cfg.get("folder")) + + +def _integration_commands_dir(key: str) -> str | None: + integration = INTEGRATION_REGISTRY[key] + cfg = integration.config if isinstance(integration.config, dict) else {} + folder = cfg.get("folder") + if not folder: + return None + subdir = cfg.get("commands_subdir", "commands") + return (PurePosixPath(folder) / subdir).as_posix() + + +def _paths_overlap(first: str | None, second: str | None) -> bool: + if not first or not second: + return False + left = PurePosixPath(first) + right = PurePosixPath(second) + try: + left.relative_to(right) + return True + except ValueError: + pass + try: + right.relative_to(left) + return True + except ValueError: + return False + + +def _path_is_inside(path: str | None, directory: str | None) -> bool: + if not path or not directory: + return False + try: + 
PurePosixPath(path).relative_to(PurePosixPath(directory)) + return True + except ValueError: + return False + + class TestRegistry: def test_registry_is_dict(self): assert isinstance(INTEGRATION_REGISTRY, dict) @@ -85,3 +157,134 @@ def test_no_stale_cursor_shorthand(self): """The old 'cursor' shorthand must not appear in AGENT_CONFIGS.""" from specify_cli.agents import CommandRegistrar assert "cursor" not in CommandRegistrar.AGENT_CONFIGS + + +class TestMultiInstallSafeContracts: + """Declared safe integrations must stay isolated from each other.""" + + @pytest.mark.parametrize("key", _multi_install_safe_keys()) + def test_safe_integrations_have_static_isolated_paths(self, key): + integration = INTEGRATION_REGISTRY[key] + + assert _integration_root_dir(key), ( + f"{key} is declared multi-install safe but has no static root directory" + ) + assert _integration_commands_dir(key), ( + f"{key} is declared multi-install safe but has no static commands directory" + ) + assert integration.context_file, ( + f"{key} is declared multi-install safe but has no context file" + ) + + @pytest.mark.parametrize(("first", "second"), _multi_install_safe_pairs()) + def test_safe_integrations_have_distinct_agent_roots(self, first, second): + assert not _paths_overlap(_integration_root_dir(first), _integration_root_dir(second)), ( + f"{first} and {second} are declared multi-install safe but have " + f"overlapping agent roots {_integration_root_dir(first)!r} and " + f"{_integration_root_dir(second)!r}" + ) + + @pytest.mark.parametrize(("first", "second"), _multi_install_safe_pairs()) + def test_safe_integrations_have_distinct_command_dirs(self, first, second): + assert not _paths_overlap(_integration_commands_dir(first), _integration_commands_dir(second)), ( + f"{first} and {second} are declared multi-install safe but have " + f"overlapping command directories {_integration_commands_dir(first)!r} and " + f"{_integration_commands_dir(second)!r}" + ) + + @pytest.mark.parametrize(("first", 
"second"), _multi_install_safe_pairs()) + def test_safe_integrations_have_distinct_context_files(self, first, second): + first_context = _posix_path(INTEGRATION_REGISTRY[first].context_file) + second_context = _posix_path(INTEGRATION_REGISTRY[second].context_file) + + assert first_context != second_context, ( + f"{first} and {second} are declared multi-install safe but share " + f"context file {first_context!r}" + ) + + @pytest.mark.parametrize(("first", "second"), _multi_install_safe_pairs()) + def test_safe_context_files_do_not_overlap_other_agent_roots(self, first, second): + first_context = _posix_path(INTEGRATION_REGISTRY[first].context_file) + second_context = _posix_path(INTEGRATION_REGISTRY[second].context_file) + + assert not _path_is_inside(first_context, _integration_root_dir(second)), ( + f"{first} context file {first_context!r} lives under {second} " + f"agent root {_integration_root_dir(second)!r}" + ) + assert not _path_is_inside(second_context, _integration_root_dir(first)), ( + f"{second} context file {second_context!r} lives under {first} " + f"agent root {_integration_root_dir(first)!r}" + ) + + @pytest.mark.parametrize(("first", "second"), _multi_install_safe_pairs()) + def test_safe_context_files_do_not_overlap_other_command_dirs(self, first, second): + first_context = _posix_path(INTEGRATION_REGISTRY[first].context_file) + second_context = _posix_path(INTEGRATION_REGISTRY[second].context_file) + + assert not _path_is_inside(first_context, _integration_commands_dir(second)), ( + f"{first} context file {first_context!r} lives under {second} " + f"commands directory {_integration_commands_dir(second)!r}" + ) + assert not _path_is_inside(second_context, _integration_commands_dir(first)), ( + f"{second} context file {second_context!r} lives under {first} " + f"commands directory {_integration_commands_dir(first)!r}" + ) + + @pytest.mark.parametrize(("first", "second"), _multi_install_safe_pairs()) + def 
test_safe_integrations_have_disjoint_manifests( + self, + tmp_path, + first, + second, + ): + for initial, additional in ((first, second), (second, first)): + project_root = tmp_path / f"project-{initial}-{additional}" + project_root.mkdir() + runner = CliRunner() + + original_cwd = os.getcwd() + try: + os.chdir(project_root) + init_result = runner.invoke( + app, + [ + "init", + "--here", + "--integration", + initial, + "--script", + "sh", + "--no-git", + "--ignore-agent-tools", + ], + catch_exceptions=False, + ) + assert init_result.exit_code == 0, init_result.output + + install_result = runner.invoke( + app, + ["integration", "install", additional, "--script", "sh"], + catch_exceptions=False, + ) + assert install_result.exit_code == 0, install_result.output + finally: + os.chdir(original_cwd) + + initial_manifest = json.loads( + ( + project_root / ".specify" / "integrations" / f"{initial}.manifest.json" + ).read_text(encoding="utf-8") + ) + additional_manifest = json.loads( + ( + project_root / ".specify" / "integrations" / f"{additional}.manifest.json" + ).read_text(encoding="utf-8") + ) + + initial_files = set(initial_manifest.get("files", {})) + additional_files = set(additional_manifest.get("files", {})) + + assert initial_files.isdisjoint(additional_files), ( + f"{initial} and {additional} are declared multi-install safe but both manage " + f"these files: {sorted(initial_files & additional_files)}" + ) diff --git a/tests/test_presets.py b/tests/test_presets.py index 4b167ed9be..848c072dd0 100644 --- a/tests/test_presets.py +++ b/tests/test_presets.py @@ -14,6 +14,7 @@ import json import tempfile import shutil +import warnings import zipfile from pathlib import Path from datetime import datetime, timezone @@ -1921,6 +1922,10 @@ def test_url_cache_expired(self, project_dir): SELF_TEST_PRESET_DIR = Path(__file__).parent.parent / "presets" / "self-test" +SELF_TEST_WRAP_WARNING = ( + r"Cannot compose command 'speckit\.wrap-test': no base layer\. 
" + r"Stale command files may remain\." +) CORE_TEMPLATE_NAMES = [ "spec-template", @@ -1931,6 +1936,18 @@ def test_url_cache_expired(self, project_dir): ] +def install_self_test_preset(manager: PresetManager, speckit_version: str = "0.1.5") -> PresetManifest: + """Install self-test while filtering its intentionally missing wrap base.""" + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=SELF_TEST_WRAP_WARNING, + category=UserWarning, + module=r"specify_cli\.presets", + ) + return manager.install_from_directory(SELF_TEST_PRESET_DIR, speckit_version) + + class TestSelfTestPreset: """Tests using the self-test preset that ships with the repo.""" @@ -1971,7 +1988,7 @@ def test_self_test_templates_have_marker(self): def test_install_self_test_preset(self, project_dir): """Test installing the self-test preset from its directory.""" manager = PresetManager(project_dir) - manifest = manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + manifest = install_self_test_preset(manager) assert manifest.id == "self-test" assert manager.registry.is_installed("self-test") @@ -1984,7 +2001,7 @@ def test_self_test_overrides_all_core_templates(self, project_dir): # Install self-test preset manager = PresetManager(project_dir) - manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) # Every core template should now resolve from the preset resolver = PresetResolver(project_dir) @@ -2003,7 +2020,7 @@ def test_self_test_resolve_with_source(self, project_dir): (templates_dir / f"{name}.md").write_text(f"# Core {name}\n") manager = PresetManager(project_dir) - manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) resolver = PresetResolver(project_dir) for name in CORE_TEMPLATE_NAMES: @@ -2020,7 +2037,7 @@ def test_self_test_removal_restores_core(self, project_dir): (templates_dir / f"{name}.md").write_text(f"# Core {name}\n") manager = PresetManager(project_dir) - 
manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) manager.remove("self-test") resolver = PresetResolver(project_dir) @@ -2056,7 +2073,7 @@ def test_self_test_registers_commands_for_claude(self, project_dir): claude_dir.mkdir(parents=True) manager = PresetManager(project_dir) - manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) # Check the skill was registered cmd_file = claude_dir / "speckit-specify" / "SKILL.md" @@ -2072,7 +2089,7 @@ def test_self_test_registers_commands_for_gemini(self, project_dir): gemini_dir.mkdir(parents=True) manager = PresetManager(project_dir) - manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) # Check the command was registered in TOML format cmd_file = gemini_dir / "speckit.specify.toml" @@ -2087,7 +2104,7 @@ def test_self_test_unregisters_commands_on_remove(self, project_dir): claude_dir.mkdir(parents=True) manager = PresetManager(project_dir) - manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) cmd_file = claude_dir / "speckit-specify" / "SKILL.md" assert cmd_file.exists() @@ -2098,7 +2115,7 @@ def test_self_test_unregisters_commands_on_remove(self, project_dir): def test_self_test_no_commands_without_agent_dirs(self, project_dir): """Test that no commands are registered when no agent dirs exist.""" manager = PresetManager(project_dir) - manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) metadata = manager.registry.get("self-test") assert metadata["registered_commands"] == {} @@ -2247,8 +2264,7 @@ def test_skill_overridden_on_preset_install(self, project_dir, temp_dir): # Install self-test preset (has a command override for speckit.specify) manager = PresetManager(project_dir) - SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(SELF_TEST_DIR, 
"0.1.5") + install_self_test_preset(manager) skill_file = skills_dir / "speckit-specify" / "SKILL.md" assert skill_file.exists() @@ -2267,8 +2283,7 @@ def test_skill_not_updated_when_ai_skills_disabled(self, project_dir, temp_dir): self._create_skill(skills_dir, "speckit-specify", body="untouched") manager = PresetManager(project_dir) - SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(SELF_TEST_DIR, "0.1.5") + install_self_test_preset(manager) skill_file = skills_dir / "speckit-specify" / "SKILL.md" content = skill_file.read_text() @@ -2300,8 +2315,7 @@ def test_skill_not_updated_without_init_options(self, project_dir, temp_dir): self._create_skill(skills_dir, "speckit-specify", body="untouched") manager = PresetManager(project_dir) - SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(SELF_TEST_DIR, "0.1.5") + install_self_test_preset(manager) skill_file = skills_dir / "speckit-specify" / "SKILL.md" file_content = skill_file.read_text() @@ -2321,8 +2335,7 @@ def test_skill_restored_on_preset_remove(self, project_dir, temp_dir): (core_cmds / "specify.md").write_text("---\ndescription: Core specify command\n---\n\nCore specify body\n") manager = PresetManager(project_dir) - SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(SELF_TEST_DIR, "0.1.5") + install_self_test_preset(manager) # Verify preset content is in the skill skill_file = skills_dir / "speckit-specify" / "SKILL.md" @@ -2358,8 +2371,7 @@ def test_skill_restored_on_remove_resolves_script_placeholders(self, project_dir ) manager = PresetManager(project_dir) - SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(SELF_TEST_DIR, "0.1.5") + install_self_test_preset(manager) manager.remove("self-test") content = (skills_dir / "speckit-specify" / "SKILL.md").read_text() @@ -2375,8 +2387,7 @@ def 
test_skill_not_overridden_when_skill_path_is_file(self, project_dir): (skills_dir / "speckit-specify").write_text("not-a-directory") manager = PresetManager(project_dir) - SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(SELF_TEST_DIR, "0.1.5") + install_self_test_preset(manager) assert (skills_dir / "speckit-specify").is_file() metadata = manager.registry.get("self-test") @@ -2388,8 +2399,7 @@ def test_no_skills_registered_when_no_skill_dir_exists(self, project_dir, temp_d # Don't create skills dir — simulate --ai-skills never created them manager = PresetManager(project_dir) - SELF_TEST_DIR = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(SELF_TEST_DIR, "0.1.5") + install_self_test_preset(manager) metadata = manager.registry.get("self-test") assert metadata.get("registered_skills", []) == [] @@ -2590,8 +2600,7 @@ def test_kimi_legacy_dotted_skill_override_still_applies(self, project_dir, temp (project_dir / ".kimi" / "commands").mkdir(parents=True, exist_ok=True) manager = PresetManager(project_dir) - self_test_dir = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(self_test_dir, "0.1.5") + install_self_test_preset(manager) skill_file = skills_dir / "speckit.specify" / "SKILL.md" assert skill_file.exists() @@ -2611,8 +2620,7 @@ def test_kimi_skill_updated_even_when_ai_skills_disabled(self, project_dir, temp (project_dir / ".kimi" / "commands").mkdir(parents=True, exist_ok=True) manager = PresetManager(project_dir) - self_test_dir = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(self_test_dir, "0.1.5") + install_self_test_preset(manager) skill_file = skills_dir / "speckit-specify" / "SKILL.md" assert skill_file.exists() @@ -2791,8 +2799,7 @@ def test_preset_skill_registration_handles_non_dict_init_options(self, project_d self._create_skill(skills_dir, "speckit-specify", body="untouched") 
manager = PresetManager(project_dir) - self_test_dir = Path(__file__).parent.parent / "presets" / "self-test" - manager.install_from_directory(self_test_dir, "0.1.5") + install_self_test_preset(manager) skill_content = (skills_dir / "speckit-specify" / "SKILL.md").read_text() assert "untouched" in skill_content @@ -3451,7 +3458,7 @@ def test_end_to_end_wrap_via_self_test_preset(self, project_dir): ) manager = PresetManager(project_dir) - manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) written = (skill_subdir / "SKILL.md").read_text() assert "{CORE_TEMPLATE}" not in written @@ -3503,7 +3510,7 @@ def test_register_skills_inherits_scripts_from_core_when_preset_omits_them(self, ) manager = PresetManager(project_dir) - manager.install_from_directory(SELF_TEST_PRESET_DIR, "0.1.5") + install_self_test_preset(manager) written = (skill_subdir / "SKILL.md").read_text() # {SCRIPT} should have been resolved (not left as a literal placeholder) diff --git a/tests/test_setup_tasks.py b/tests/test_setup_tasks.py new file mode 100644 index 0000000000..f2e10d8b0f --- /dev/null +++ b/tests/test_setup_tasks.py @@ -0,0 +1,584 @@ +"""Tests for setup-tasks.{sh,ps1} template resolution and branch validation.""" + +import json +import os +import shutil +import subprocess +from pathlib import Path + +import pytest + +from tests.conftest import requires_bash + +PROJECT_ROOT = Path(__file__).resolve().parent.parent +COMMON_SH = PROJECT_ROOT / "scripts" / "bash" / "common.sh" +SETUP_TASKS_SH = PROJECT_ROOT / "scripts" / "bash" / "setup-tasks.sh" +COMMON_PS = PROJECT_ROOT / "scripts" / "powershell" / "common.ps1" +SETUP_TASKS_PS = PROJECT_ROOT / "scripts" / "powershell" / "setup-tasks.ps1" +TASKS_TEMPLATE = PROJECT_ROOT / "templates" / "tasks-template.md" + +HAS_PWSH = shutil.which("pwsh") is not None +_POWERSHELL = shutil.which("powershell.exe") or shutil.which("powershell") + + +# 
--------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _install_bash_scripts(repo: Path) -> None: + d = repo / ".specify" / "scripts" / "bash" + d.mkdir(parents=True, exist_ok=True) + shutil.copy(COMMON_SH, d / "common.sh") + shutil.copy(SETUP_TASKS_SH, d / "setup-tasks.sh") + + +def _install_ps_scripts(repo: Path) -> None: + d = repo / ".specify" / "scripts" / "powershell" + d.mkdir(parents=True, exist_ok=True) + shutil.copy(COMMON_PS, d / "common.ps1") + shutil.copy(SETUP_TASKS_PS, d / "setup-tasks.ps1") + + +def _install_core_tasks_template(repo: Path) -> None: + """Copy the real tasks-template.md into the core template location.""" + tdir = repo / ".specify" / "templates" + tdir.mkdir(parents=True, exist_ok=True) + shutil.copy(TASKS_TEMPLATE, tdir / "tasks-template.md") + + +def _minimal_feature(repo: Path) -> Path: + """ + Create a numbered branch-style feature directory with spec.md and plan.md + so all prerequisite checks in setup-tasks pass. + Returns the feature directory path. + """ + feat = repo / "specs" / "001-my-feature" + feat.mkdir(parents=True, exist_ok=True) + (feat / "spec.md").write_text("# spec\n", encoding="utf-8") + (feat / "plan.md").write_text("# plan\n", encoding="utf-8") + return feat + + +def _clean_env() -> dict[str, str]: + """ + Return os.environ with all SPECIFY_* variables stripped so the scripts + rely purely on git branch + feature.json state set up by each fixture. 
+ """ + env = os.environ.copy() + for key in list(env): + if key.startswith("SPECIFY_"): + env.pop(key) + return env + + +def _git_init(repo: Path) -> None: + subprocess.run(["git", "init", "-q"], cwd=repo, check=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=repo, check=True + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=repo, check=True) + subprocess.run( + ["git", "commit", "--allow-empty", "-m", "init", "-q"], cwd=repo, check=True + ) + + +# --------------------------------------------------------------------------- +# Shared fixture +# --------------------------------------------------------------------------- + +@pytest.fixture +def tasks_repo(tmp_path: Path) -> Path: + """ + A minimal repo with: + - git initialised on a numbered branch (001-my-feature) + - core tasks-template.md in place + - both bash and PowerShell scripts installed + """ + repo = tmp_path / "proj" + repo.mkdir() + _git_init(repo) + + # Switch to a numbered branch so branch validation passes without feature.json + subprocess.run( + ["git", "checkout", "-q", "-b", "001-my-feature"], + cwd=repo, + check=True, + ) + + (repo / ".specify").mkdir() + _install_core_tasks_template(repo) + _install_bash_scripts(repo) + _install_ps_scripts(repo) + return repo + + +# =========================================================================== +# BASH TESTS +# =========================================================================== + +@requires_bash +def test_setup_tasks_bash_core_template_resolved(tasks_repo: Path) -> None: + """ + When the core tasks-template.md is present and all prerequisites are met, + setup-tasks.sh --json should exit 0 and return an absolute, existing + TASKS_TEMPLATE path pointing to the core template. 
+ """ + feat = _minimal_feature(tasks_repo) + script = tasks_repo / ".specify" / "scripts" / "bash" / "setup-tasks.sh" + + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + data = json.loads(result.stdout) + tasks_tmpl = Path(data["TASKS_TEMPLATE"]) + assert tasks_tmpl.is_absolute(), "TASKS_TEMPLATE must be an absolute path" + assert tasks_tmpl.is_file(), "TASKS_TEMPLATE must point to an existing file" + assert tasks_tmpl.name == "tasks-template.md" + + +@requires_bash +def test_setup_tasks_bash_override_wins(tasks_repo: Path) -> None: + """ + When an override exists at .specify/templates/overrides/tasks-template.md, + setup-tasks.sh --json must return the override path, not the core path. + """ + feat = _minimal_feature(tasks_repo) + + # Create the override + overrides_dir = tasks_repo / ".specify" / "templates" / "overrides" + overrides_dir.mkdir(parents=True, exist_ok=True) + override_file = overrides_dir / "tasks-template.md" + override_file.write_text("# override tasks template\n", encoding="utf-8") + + script = tasks_repo / ".specify" / "scripts" / "bash" / "setup-tasks.sh" + + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + data = json.loads(result.stdout) + tasks_tmpl = Path(data["TASKS_TEMPLATE"]) + assert tasks_tmpl.is_absolute(), "TASKS_TEMPLATE must be an absolute path" + assert tasks_tmpl.is_file(), "TASKS_TEMPLATE must point to an existing file" + # The resolved path must be inside the overrides directory + assert "overrides" in tasks_tmpl.parts, ( + f"Expected override path but got: {tasks_tmpl}" + ) + + +@requires_bash +def test_setup_tasks_bash_extension_wins_over_core(tasks_repo: Path) -> None: + """ + When 
an extension template exists, setup-tasks.sh --json must resolve + tasks-template.md from the extension before falling back to the core path. + """ + feat = _minimal_feature(tasks_repo) + + # FIX: real extension layout is .specify/extensions//templates/.md + extension_dir = ( + tasks_repo / ".specify" / "extensions" / "test-extension" / "templates" + ) + extension_dir.mkdir(parents=True, exist_ok=True) + extension_file = extension_dir / "tasks-template.md" + extension_file.write_text("# extension tasks template\n", encoding="utf-8") + + script = tasks_repo / ".specify" / "scripts" / "bash" / "setup-tasks.sh" + + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + data = json.loads(result.stdout) + tasks_tmpl = Path(data["TASKS_TEMPLATE"]) + assert tasks_tmpl.is_absolute(), "TASKS_TEMPLATE must be an absolute path" + assert tasks_tmpl.is_file(), "TASKS_TEMPLATE must point to an existing file" + assert tasks_tmpl == extension_file.resolve(), ( + f"Expected extension path but got: {tasks_tmpl}" + ) + + +@requires_bash +def test_setup_tasks_bash_preset_wins_over_extension(tasks_repo: Path) -> None: + """ + When both preset and extension templates exist, setup-tasks.sh --json must + resolve the preset path because presets outrank extensions. 
+ """ + feat = _minimal_feature(tasks_repo) + + # FIX: real extension layout is .specify/extensions//templates/.md + extension_dir = ( + tasks_repo / ".specify" / "extensions" / "test-extension" / "templates" + ) + extension_dir.mkdir(parents=True, exist_ok=True) + extension_file = extension_dir / "tasks-template.md" + extension_file.write_text("# extension tasks template\n", encoding="utf-8") + + # FIX: real preset layout is .specify/presets//templates/.md + preset_dir = tasks_repo / ".specify" / "presets" / "test-preset" / "templates" + preset_dir.mkdir(parents=True, exist_ok=True) + preset_file = preset_dir / "tasks-template.md" + preset_file.write_text("# preset tasks template\n", encoding="utf-8") + + script = tasks_repo / ".specify" / "scripts" / "bash" / "setup-tasks.sh" + + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + data = json.loads(result.stdout) + tasks_tmpl = Path(data["TASKS_TEMPLATE"]) + assert tasks_tmpl.is_absolute(), "TASKS_TEMPLATE must be an absolute path" + assert tasks_tmpl.is_file(), "TASKS_TEMPLATE must point to an existing file" + assert tasks_tmpl == preset_file.resolve(), ( + f"Expected preset path but got: {tasks_tmpl}" + ) + + +@requires_bash +def test_setup_tasks_bash_preset_priority_order(tasks_repo: Path) -> None: + """ + When two presets both provide tasks-template.md, the one listed first in + .specify/presets/.registry wins. + """ + feat = _minimal_feature(tasks_repo) + + # resolve_template reads .specify/presets/.registry as a JSON object with a + # "presets" map where each entry has a numeric "priority" (lower = higher + # precedence). Create two presets; priority-1-preset wins over priority-2-preset. 
+ high_priority_dir = ( + tasks_repo / ".specify" / "presets" / "priority-1-preset" / "templates" + ) + high_priority_dir.mkdir(parents=True, exist_ok=True) + high_priority_file = high_priority_dir / "tasks-template.md" + high_priority_file.write_text("# high priority preset tasks template\n", encoding="utf-8") + low_priority_dir = ( + tasks_repo / ".specify" / "presets" / "priority-2-preset" / "templates" + ) + + low_priority_dir.mkdir(parents=True, exist_ok=True) + low_priority_file = low_priority_dir / "tasks-template.md" + low_priority_file.write_text("# low priority preset tasks template\n", encoding="utf-8") + + # Write .registry JSON using the correct schema: object with "presets" map, + # each preset has a numeric "priority" (lower number = higher precedence). + registry_json = tasks_repo / ".specify" / "presets" / ".registry" + registry_json.write_text( + json.dumps({ + "presets": { + "priority-1-preset": {"priority": 1, "enabled": True}, + "priority-2-preset": {"priority": 2, "enabled": True}, + } + }), + encoding="utf-8", + ) + + script = tasks_repo / ".specify" / "scripts" / "bash" / "setup-tasks.sh" + + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + data = json.loads(result.stdout) + tasks_tmpl = Path(data["TASKS_TEMPLATE"]) + assert tasks_tmpl.is_absolute(), "TASKS_TEMPLATE must be an absolute path" + assert tasks_tmpl.is_file(), "TASKS_TEMPLATE must point to an existing file" + assert tasks_tmpl == high_priority_file.resolve(), ( + f"Expected high-priority preset path but got: {tasks_tmpl}" + ) + + +@requires_bash +def test_setup_tasks_bash_missing_template_errors(tasks_repo: Path) -> None: + """ + When tasks-template.md is absent from all locations, setup-tasks.sh must + exit non-zero and print a helpful ERROR message to stderr. 
+ """ + feat = _minimal_feature(tasks_repo) + + # Remove the core template so no template exists anywhere + core = tasks_repo / ".specify" / "templates" / "tasks-template.md" + core.unlink() + + script = tasks_repo / ".specify" / "scripts" / "bash" / "setup-tasks.sh" + + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode != 0 + assert "ERROR" in result.stderr + assert "tasks-template" in result.stderr + + +@requires_bash +def test_setup_tasks_bash_passes_custom_branch_when_feature_json_valid( + tasks_repo: Path, +) -> None: + """ + On a non-standard branch, setup-tasks.sh must succeed when feature.json + pins a valid FEATURE_DIR (branch validation should be skipped). + """ + subprocess.run( + ["git", "checkout", "-q", "-b", "feature/custom-branch"], + cwd=tasks_repo, + check=True, + ) + + feat = tasks_repo / "specs" / "001-my-feature" + feat.mkdir(parents=True, exist_ok=True) + (feat / "spec.md").write_text("# spec\n", encoding="utf-8") + (feat / "plan.md").write_text("# plan\n", encoding="utf-8") + + (tasks_repo / ".specify" / "feature.json").write_text( + json.dumps({"feature_directory": "specs/001-my-feature"}), + encoding="utf-8", + ) + + script = tasks_repo / ".specify" / "scripts" / "bash" / "setup-tasks.sh" + + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + +@requires_bash +def test_setup_tasks_bash_fails_custom_branch_without_feature_json( + tasks_repo: Path, +) -> None: + """ + On a non-standard branch with no feature.json, setup-tasks.sh must fail + and report that we are not on a feature branch. 
+ """ + subprocess.run( + ["git", "checkout", "-q", "-b", "feature/custom-branch"], + cwd=tasks_repo, + check=True, + ) + + script = tasks_repo / ".specify" / "scripts" / "bash" / "setup-tasks.sh" + + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode != 0 + assert "Not on a feature branch" in result.stderr + + +# =========================================================================== +# POWERSHELL TESTS +# =========================================================================== + +@pytest.mark.skipif(not (HAS_PWSH or _POWERSHELL), reason="no PowerShell available") +def test_setup_tasks_ps_core_template_resolved(tasks_repo: Path) -> None: + """ + When the core tasks-template.md is present and all prerequisites are met, + setup-tasks.ps1 -Json should exit 0 and return an absolute, existing + TASKS_TEMPLATE path. + """ + feat = _minimal_feature(tasks_repo) + script = tasks_repo / ".specify" / "scripts" / "powershell" / "setup-tasks.ps1" + exe = "pwsh" if HAS_PWSH else _POWERSHELL + + result = subprocess.run( + [exe, "-NoProfile", "-File", str(script), "-Json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + data = json.loads(result.stdout) + tasks_tmpl = Path(data["TASKS_TEMPLATE"]) + assert tasks_tmpl.is_absolute(), "TASKS_TEMPLATE must be an absolute path" + assert tasks_tmpl.is_file(), "TASKS_TEMPLATE must point to an existing file" + assert tasks_tmpl.name == "tasks-template.md" + + +@pytest.mark.skipif(not (HAS_PWSH or _POWERSHELL), reason="no PowerShell available") +def test_setup_tasks_ps_override_wins(tasks_repo: Path) -> None: + """ + When an override exists at .specify/templates/overrides/tasks-template.md, + setup-tasks.ps1 -Json must return the override path, not the core path. 
+ """ + feat = _minimal_feature(tasks_repo) + + overrides_dir = tasks_repo / ".specify" / "templates" / "overrides" + overrides_dir.mkdir(parents=True, exist_ok=True) + override_file = overrides_dir / "tasks-template.md" + override_file.write_text("# override tasks template\n", encoding="utf-8") + + script = tasks_repo / ".specify" / "scripts" / "powershell" / "setup-tasks.ps1" + exe = "pwsh" if HAS_PWSH else _POWERSHELL + + result = subprocess.run( + [exe, "-NoProfile", "-File", str(script), "-Json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + data = json.loads(result.stdout) + tasks_tmpl = Path(data["TASKS_TEMPLATE"]) + assert tasks_tmpl.is_absolute(), "TASKS_TEMPLATE must be an absolute path" + assert tasks_tmpl.is_file(), "TASKS_TEMPLATE must point to an existing file" + assert "overrides" in tasks_tmpl.parts, ( + f"Expected override path but got: {tasks_tmpl}" + ) + + +@pytest.mark.skipif(not (HAS_PWSH or _POWERSHELL), reason="no PowerShell available") +def test_setup_tasks_ps_missing_template_errors(tasks_repo: Path) -> None: + """ + When tasks-template.md is absent from all locations, setup-tasks.ps1 must + exit non-zero and write a helpful error to stderr. 
+ """ + feat = _minimal_feature(tasks_repo) + + core = tasks_repo / ".specify" / "templates" / "tasks-template.md" + core.unlink() + + script = tasks_repo / ".specify" / "scripts" / "powershell" / "setup-tasks.ps1" + exe = "pwsh" if HAS_PWSH else _POWERSHELL + + result = subprocess.run( + [exe, "-NoProfile", "-File", str(script), "-Json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode != 0 + assert "tasks-template" in result.stderr.lower() or "tasks-template" in result.stdout.lower() + + +@pytest.mark.skipif(not (HAS_PWSH or _POWERSHELL), reason="no PowerShell available") +def test_setup_tasks_ps_passes_custom_branch_when_feature_json_valid( + tasks_repo: Path, +) -> None: + """ + On a non-standard branch, setup-tasks.ps1 must succeed when feature.json + pins a valid FEATURE_DIR (branch validation should be skipped). + """ + subprocess.run( + ["git", "checkout", "-q", "-b", "feature/custom-branch"], + cwd=tasks_repo, + check=True, + ) + + feat = tasks_repo / "specs" / "001-my-feature" + feat.mkdir(parents=True, exist_ok=True) + (feat / "spec.md").write_text("# spec\n", encoding="utf-8") + (feat / "plan.md").write_text("# plan\n", encoding="utf-8") + + (tasks_repo / ".specify" / "feature.json").write_text( + json.dumps({"feature_directory": "specs/001-my-feature"}), + encoding="utf-8", + ) + + script = tasks_repo / ".specify" / "scripts" / "powershell" / "setup-tasks.ps1" + exe = "pwsh" if HAS_PWSH else _POWERSHELL + + result = subprocess.run( + [exe, "-NoProfile", "-File", str(script), "-Json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + + +@pytest.mark.skipif(not (HAS_PWSH or _POWERSHELL), reason="no PowerShell available") +def test_setup_tasks_ps_fails_custom_branch_without_feature_json( + tasks_repo: Path, +) -> None: + """ + On a non-standard branch with no feature.json, 
setup-tasks.ps1 must fail + and report that we are not on a feature branch. + """ + subprocess.run( + ["git", "checkout", "-q", "-b", "feature/custom-branch"], + cwd=tasks_repo, + check=True, + ) + + script = tasks_repo / ".specify" / "scripts" / "powershell" / "setup-tasks.ps1" + exe = "pwsh" if HAS_PWSH else _POWERSHELL + + result = subprocess.run( + [exe, "-NoProfile", "-File", str(script), "-Json"], + cwd=tasks_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode != 0 + assert "Not on a feature branch" in result.stderr + \ No newline at end of file diff --git a/tests/test_workflows.py b/tests/test_workflows.py index d0939d5eb3..dcb6f72f78 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -1845,172 +1845,180 @@ def test_switch_workflow(self, project_dir): assert "do-specify" not in state.step_results -# --------------------------------------------------------------------------- -# Integration auto-detection tests -# --------------------------------------------------------------------------- +# ===== Integration Auto-Detect Tests ===== -class TestIntegrationAutoDetect: - """Test auto-detection of project integration from .specify/integration.json. - When workflow inputs specify ``default: "auto"`` for the integration - input, the engine should resolve it by reading the project's - ``.specify/integration.json`` instead of hardcoding ``"copilot"``. +class TestIntegrationAutoDetect: + """Tests for _resolve_default / _load_project_integration auto-detection.""" - Regression tests for https://github.com/github/spec-kit/issues/2406. 
- """ + def test_integration_auto_default_uses_project_integration(self, project_dir): + """'auto' default resolves to the value in .specify/integration.json.""" + from specify_cli.workflows.engine import WorkflowEngine - @staticmethod - def _make_workflow_yaml(default_integration: str = "auto") -> str: - return f""" + (project_dir / ".specify" / "integration.json").write_text( + '{"integration": "opencode"}', encoding="utf-8" + ) + engine = WorkflowEngine(project_dir) + yaml_str = """ schema_version: "1.0" workflow: id: "auto-test" name: "Auto Test" version: "1.0.0" inputs: - spec: - type: string - default: "build login" integration: type: string - default: "{default_integration}" + default: "auto" steps: - - id: specify - command: speckit.specify - integration: "{{{{ inputs.integration }}}}" - input: - args: "{{{{ inputs.spec }}}}" + - id: echo + type: shell + run: "echo {{ inputs.integration }}" """ - - def test_integration_auto_default_uses_project_integration(self, project_dir): - """'auto' default resolves to the integration in .specify/integration.json.""" - from unittest.mock import patch - from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition - - int_json = project_dir / ".specify" / "integration.json" - int_json.write_text(json.dumps({"integration": "opencode"}), encoding="utf-8") - - definition = WorkflowDefinition.from_string(self._make_workflow_yaml()) - engine = WorkflowEngine(project_dir) - - with patch( - "specify_cli.workflows.steps.command.shutil.which", return_value=None - ): - state = engine.execute(definition) - - step_output = state.step_results["specify"]["output"] - assert step_output["integration"] == "opencode" + from specify_cli.workflows.engine import WorkflowDefinition + definition = WorkflowDefinition.from_string(yaml_str) + resolved = engine._resolve_inputs(definition, {}) + assert resolved["integration"] == "opencode" def test_integration_auto_default_falls_back_to_copilot_when_no_json(self, project_dir): - 
"""When no integration.json exists, 'auto' falls back to 'copilot'.""" - from unittest.mock import patch + """'auto' falls back to 'copilot' when integration.json is absent.""" from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition - definition = WorkflowDefinition.from_string(self._make_workflow_yaml()) engine = WorkflowEngine(project_dir) - - with patch( - "specify_cli.workflows.steps.command.shutil.which", return_value=None - ): - state = engine.execute(definition) - - step_output = state.step_results["specify"]["output"] - assert step_output["integration"] == "copilot" + yaml_str = """ +schema_version: "1.0" +workflow: + id: "fallback-test" + name: "Fallback Test" + version: "1.0.0" +inputs: + integration: + type: string + default: "auto" +steps: + - id: echo + type: shell + run: "echo {{ inputs.integration }}" +""" + definition = WorkflowDefinition.from_string(yaml_str) + resolved = engine._resolve_inputs(definition, {}) + assert resolved["integration"] == "copilot" def test_integration_explicit_input_overrides_auto(self, project_dir): - """Explicit --input integration=gemini takes precedence over auto.""" - from unittest.mock import patch + """Explicitly provided --input integration=X overrides 'auto' detection.""" from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition - int_json = project_dir / ".specify" / "integration.json" - int_json.write_text(json.dumps({"integration": "opencode"}), encoding="utf-8") - - definition = WorkflowDefinition.from_string(self._make_workflow_yaml()) + (project_dir / ".specify" / "integration.json").write_text( + '{"integration": "opencode"}', encoding="utf-8" + ) engine = WorkflowEngine(project_dir) + yaml_str = """ +schema_version: "1.0" +workflow: + id: "explicit-test" + name: "Explicit Test" + version: "1.0.0" +inputs: + integration: + type: string + default: "auto" +steps: + - id: echo + type: shell + run: "echo {{ inputs.integration }}" +""" + definition = 
WorkflowDefinition.from_string(yaml_str) + resolved = engine._resolve_inputs(definition, {"integration": "claude"}) + assert resolved["integration"] == "claude" - with patch( - "specify_cli.workflows.steps.command.shutil.which", return_value=None - ): - state = engine.execute(definition, {"integration": "gemini"}) - - step_output = state.step_results["specify"]["output"] - assert step_output["integration"] == "gemini" - - def test_integration_explicit_auto_uses_project_integration(self, project_dir): - """Explicit --input integration=auto resolves from integration.json.""" - from unittest.mock import patch + def test_integration_explicit_auto_input_also_resolves(self, project_dir): + """Explicitly passing --input integration=auto also triggers auto-detection.""" from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition - int_json = project_dir / ".specify" / "integration.json" - int_json.write_text(json.dumps({"integration": "opencode"}), encoding="utf-8") - - definition = WorkflowDefinition.from_string(self._make_workflow_yaml()) + (project_dir / ".specify" / "integration.json").write_text( + '{"integration": "gemini"}', encoding="utf-8" + ) engine = WorkflowEngine(project_dir) - - with patch( - "specify_cli.workflows.steps.command.shutil.which", return_value=None - ): - state = engine.execute(definition, {"integration": "auto"}) - - step_output = state.step_results["specify"]["output"] - assert step_output["integration"] == "opencode" + yaml_str = """ +schema_version: "1.0" +workflow: + id: "explicit-auto-test" + name: "Explicit Auto Test" + version: "1.0.0" +inputs: + integration: + type: string + default: "auto" +steps: + - id: echo + type: shell + run: "echo {{ inputs.integration }}" +""" + definition = WorkflowDefinition.from_string(yaml_str) + resolved = engine._resolve_inputs(definition, {"integration": "auto"}) + assert resolved["integration"] == "gemini" def test_integration_auto_ignores_malformed_integration_json(self, project_dir): - 
"""When integration.json contains invalid JSON, 'auto' falls back to 'copilot'.""" - from unittest.mock import patch + """Malformed integration.json falls back to 'copilot'.""" from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition - int_json = project_dir / ".specify" / "integration.json" - int_json.write_text("{invalid json content", encoding="utf-8") - - definition = WorkflowDefinition.from_string(self._make_workflow_yaml()) + (project_dir / ".specify" / "integration.json").write_text( + "not valid json", encoding="utf-8" + ) engine = WorkflowEngine(project_dir) - - with patch( - "specify_cli.workflows.steps.command.shutil.which", return_value=None - ): - state = engine.execute(definition) - - step_output = state.step_results["specify"]["output"] - assert step_output["integration"] == "copilot" + assert engine._load_project_integration() == "copilot" def test_integration_auto_falls_back_on_oserror(self, project_dir): - """When integration.json is unreadable (OSError), 'auto' falls back to 'copilot'.""" + """OSError reading integration.json falls back to 'copilot'.""" from unittest.mock import patch - from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition - - int_json = project_dir / ".specify" / "integration.json" - int_json.write_text(json.dumps({"integration": "opencode"}), encoding="utf-8") + from specify_cli.workflows.engine import WorkflowEngine - definition = WorkflowDefinition.from_string(self._make_workflow_yaml()) engine = WorkflowEngine(project_dir) + with patch("pathlib.Path.read_text", side_effect=OSError("permission denied")): + # Create a file so is_file() returns True + (project_dir / ".specify" / "integration.json").write_text( + '{"integration": "claude"}', encoding="utf-8" + ) + assert engine._load_project_integration() == "copilot" - with ( - patch( - "specify_cli.workflows.steps.command.shutil.which", return_value=None - ), - patch.object(Path, "read_text", side_effect=OSError("Permission denied")), - 
): - state = engine.execute(definition) + def test_integration_auto_ignores_whitespace_only_value(self, project_dir): + """Whitespace-only integration value falls back to 'copilot'.""" + from specify_cli.workflows.engine import WorkflowEngine - step_output = state.step_results["specify"]["output"] - assert step_output["integration"] == "copilot" + (project_dir / ".specify" / "integration.json").write_text( + '{"integration": " "}', encoding="utf-8" + ) + engine = WorkflowEngine(project_dir) + assert engine._load_project_integration() == "copilot" - def test_integration_auto_ignores_whitespace_only_value(self, project_dir): - """When integration.json has a whitespace-only value, 'auto' falls back to 'copilot'.""" - from unittest.mock import patch - from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition + def test_integration_auto_falls_back_to_init_options_json(self, project_dir): + """Falls back to init-options.json when integration.json is absent.""" + from specify_cli.workflows.engine import WorkflowEngine + + (project_dir / ".specify" / "init-options.json").write_text( + '{"integration": "claude"}', encoding="utf-8" + ) + engine = WorkflowEngine(project_dir) + assert engine._load_project_integration() == "claude" - int_json = project_dir / ".specify" / "integration.json" - int_json.write_text(json.dumps({"integration": " "}), encoding="utf-8") + def test_integration_auto_init_options_ai_key_fallback(self, project_dir): + """Uses 'ai' key from init-options.json when 'integration' key absent.""" + from specify_cli.workflows.engine import WorkflowEngine - definition = WorkflowDefinition.from_string(self._make_workflow_yaml()) + (project_dir / ".specify" / "init-options.json").write_text( + '{"ai": "opencode"}', encoding="utf-8" + ) engine = WorkflowEngine(project_dir) + assert engine._load_project_integration() == "opencode" - with patch( - "specify_cli.workflows.steps.command.shutil.which", return_value=None - ): - state = 
engine.execute(definition) + def test_integration_auto_integration_json_takes_priority(self, project_dir): + """integration.json takes priority over init-options.json.""" + from specify_cli.workflows.engine import WorkflowEngine - step_output = state.step_results["specify"]["output"] - assert step_output["integration"] == "copilot" + (project_dir / ".specify" / "integration.json").write_text( + '{"integration": "gemini"}', encoding="utf-8" + ) + (project_dir / ".specify" / "init-options.json").write_text( + '{"integration": "claude"}', encoding="utf-8" + ) + engine = WorkflowEngine(project_dir) + assert engine._load_project_integration() == "gemini"