From adcb67adad1eeb3afb9f36ca18da0bbab3936b8c Mon Sep 17 00:00:00 2001 From: bigben <245982990@qq.com> Date: Tue, 12 May 2026 10:34:17 +0800 Subject: [PATCH 1/5] Add arch and agent command support --- pyproject.toml | 8 +- scripts/bash/setup-arch.sh | 94 ++++++ scripts/powershell/setup-arch.ps1 | 86 +++++ src/specify_cli/__init__.py | 61 +++- src/specify_cli/agent_projection.py | 295 ++++++++++++++++++ src/specify_cli/extensions.py | 1 + .../integrations/claude/__init__.py | 1 + templates/agent-governance-template.md | 67 ++++ .../architecture-development-template.md | 39 +++ templates/architecture-logical-template.md | 39 +++ templates/architecture-physical-template.md | 39 +++ templates/architecture-process-template.md | 39 +++ templates/architecture-scenario-template.md | 37 +++ templates/architecture-template.md | 48 +++ templates/commands/agent.md | 63 ++++ templates/commands/arch.md | 156 +++++++++ .../test_integration_base_markdown.py | 14 +- .../test_integration_base_skills.py | 12 +- .../test_integration_base_toml.py | 9 + .../test_integration_base_yaml.py | 9 + .../integrations/test_integration_copilot.py | 33 +- .../integrations/test_integration_generic.py | 16 + tests/test_agent_projection.py | 81 +++++ tests/test_arch_templates.py | 76 +++++ tests/test_setup_arch.py | 166 ++++++++++ 25 files changed, 1473 insertions(+), 16 deletions(-) create mode 100755 scripts/bash/setup-arch.sh create mode 100755 scripts/powershell/setup-arch.ps1 create mode 100644 src/specify_cli/agent_projection.py create mode 100644 templates/agent-governance-template.md create mode 100644 templates/architecture-development-template.md create mode 100644 templates/architecture-logical-template.md create mode 100644 templates/architecture-physical-template.md create mode 100644 templates/architecture-process-template.md create mode 100644 templates/architecture-scenario-template.md create mode 100644 templates/architecture-template.md create mode 100644 templates/commands/agent.md 
create mode 100644 templates/commands/arch.md create mode 100644 tests/test_agent_projection.py create mode 100644 tests/test_arch_templates.py create mode 100644 tests/test_setup_arch.py diff --git a/pyproject.toml b/pyproject.toml index d7a949d8b1..c213843453 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,6 +29,13 @@ packages = ["src/specify_cli"] # Bundle core assets so `specify init` works without network access (air-gapped / enterprise) # Page templates (exclude commands/ — bundled separately below to avoid duplication) "templates/checklist-template.md" = "specify_cli/core_pack/templates/checklist-template.md" +"templates/architecture-development-template.md" = "specify_cli/core_pack/templates/architecture-development-template.md" +"templates/architecture-logical-template.md" = "specify_cli/core_pack/templates/architecture-logical-template.md" +"templates/architecture-physical-template.md" = "specify_cli/core_pack/templates/architecture-physical-template.md" +"templates/architecture-process-template.md" = "specify_cli/core_pack/templates/architecture-process-template.md" +"templates/architecture-scenario-template.md" = "specify_cli/core_pack/templates/architecture-scenario-template.md" +"templates/architecture-template.md" = "specify_cli/core_pack/templates/architecture-template.md" +"templates/agent-governance-template.md" = "specify_cli/core_pack/templates/agent-governance-template.md" "templates/constitution-template.md" = "specify_cli/core_pack/templates/constitution-template.md" "templates/plan-template.md" = "specify_cli/core_pack/templates/plan-template.md" "templates/spec-template.md" = "specify_cli/core_pack/templates/spec-template.md" @@ -70,4 +77,3 @@ omit = ["*/tests/*", "*/__pycache__/*"] precision = 2 show_missing = true skip_covered = false - diff --git a/scripts/bash/setup-arch.sh b/scripts/bash/setup-arch.sh new file mode 100755 index 0000000000..f6d566d275 --- /dev/null +++ b/scripts/bash/setup-arch.sh @@ -0,0 +1,94 @@ 
+#!/usr/bin/env bash + +set -e + +# Parse command line arguments +JSON_MODE=false + +for arg in "$@"; do + case "$arg" in + --json) + JSON_MODE=true + ;; + --help|-h) + echo "Usage: $0 [--json]" + echo " --json Output results in JSON format" + echo " --help Show this help message" + exit 0 + ;; + *) + ;; + esac +done + +# Get script directory and load common functions +SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/common.sh" + +REPO_ROOT=$(get_repo_root) +ARCH_DIR="$REPO_ROOT/.specify/memory" +ARCH_FILE="$ARCH_DIR/architecture.md" +SCENARIO_VIEW="$ARCH_DIR/architecture-scenario-view.md" +LOGICAL_VIEW="$ARCH_DIR/architecture-logical-view.md" +PROCESS_VIEW="$ARCH_DIR/architecture-process-view.md" +DEVELOPMENT_VIEW="$ARCH_DIR/architecture-development-view.md" +PHYSICAL_VIEW="$ARCH_DIR/architecture-physical-view.md" + +mkdir -p "$ARCH_DIR" + +copy_template_if_missing() { + local template_name="$1" + local destination="$2" + + if [[ -f "$destination" ]]; then + return 0 + fi + + local template + template=$(resolve_template "$template_name" "$REPO_ROOT") || true + if [[ -n "$template" ]] && [[ -f "$template" ]]; then + cp "$template" "$destination" + echo "Copied $template_name template to $destination" + else + echo "Warning: $template_name template not found" + touch "$destination" + fi +} + +copy_template_if_missing "architecture-template" "$ARCH_FILE" +copy_template_if_missing "architecture-scenario-template" "$SCENARIO_VIEW" +copy_template_if_missing "architecture-logical-template" "$LOGICAL_VIEW" +copy_template_if_missing "architecture-process-template" "$PROCESS_VIEW" +copy_template_if_missing "architecture-development-template" "$DEVELOPMENT_VIEW" +copy_template_if_missing "architecture-physical-template" "$PHYSICAL_VIEW" + +if $JSON_MODE; then + if has_jq; then + jq -cn \ + --arg arch_file "$ARCH_FILE" \ + --arg arch_dir "$ARCH_DIR" \ + --arg scenario_view "$SCENARIO_VIEW" \ + --arg logical_view "$LOGICAL_VIEW" \ + 
--arg process_view "$PROCESS_VIEW" \ + --arg development_view "$DEVELOPMENT_VIEW" \ + --arg physical_view "$PHYSICAL_VIEW" \ + '{ARCH_FILE:$arch_file,ARCH_DIR:$arch_dir,SCENARIO_VIEW:$scenario_view,LOGICAL_VIEW:$logical_view,PROCESS_VIEW:$process_view,DEVELOPMENT_VIEW:$development_view,PHYSICAL_VIEW:$physical_view}' + else + printf '{"ARCH_FILE":"%s","ARCH_DIR":"%s","SCENARIO_VIEW":"%s","LOGICAL_VIEW":"%s","PROCESS_VIEW":"%s","DEVELOPMENT_VIEW":"%s","PHYSICAL_VIEW":"%s"}\n' \ + "$(json_escape "$ARCH_FILE")" \ + "$(json_escape "$ARCH_DIR")" \ + "$(json_escape "$SCENARIO_VIEW")" \ + "$(json_escape "$LOGICAL_VIEW")" \ + "$(json_escape "$PROCESS_VIEW")" \ + "$(json_escape "$DEVELOPMENT_VIEW")" \ + "$(json_escape "$PHYSICAL_VIEW")" + fi +else + echo "ARCH_FILE: $ARCH_FILE" + echo "ARCH_DIR: $ARCH_DIR" + echo "SCENARIO_VIEW: $SCENARIO_VIEW" + echo "LOGICAL_VIEW: $LOGICAL_VIEW" + echo "PROCESS_VIEW: $PROCESS_VIEW" + echo "DEVELOPMENT_VIEW: $DEVELOPMENT_VIEW" + echo "PHYSICAL_VIEW: $PHYSICAL_VIEW" +fi diff --git a/scripts/powershell/setup-arch.ps1 b/scripts/powershell/setup-arch.ps1 new file mode 100755 index 0000000000..b2f7427360 --- /dev/null +++ b/scripts/powershell/setup-arch.ps1 @@ -0,0 +1,86 @@ +#!/usr/bin/env pwsh +# Setup project-level 4+1 architecture artifacts + +[CmdletBinding()] +param( + [switch]$Json, + [switch]$Help +) + +$ErrorActionPreference = 'Stop' + +if ($Help) { + Write-Output "Usage: ./setup-arch.ps1 [-Json] [-Help]" + Write-Output " -Json Output results in JSON format" + Write-Output " -Help Show this help message" + exit 0 +} + +. 
"$PSScriptRoot/common.ps1" + +function Convert-ToPlainPath { + param([Parameter(Mandatory = $true)][string]$Path) + + if ($Path -like 'Microsoft.PowerShell.Core\FileSystem::*') { + return $Path.Substring('Microsoft.PowerShell.Core\FileSystem::'.Length) + } + return $Path +} + +$repoRoot = Convert-ToPlainPath (Get-RepoRoot) +$archDir = Join-Path $repoRoot ".specify/memory" +$archFile = Join-Path $archDir "architecture.md" +$scenarioView = Join-Path $archDir "architecture-scenario-view.md" +$logicalView = Join-Path $archDir "architecture-logical-view.md" +$processView = Join-Path $archDir "architecture-process-view.md" +$developmentView = Join-Path $archDir "architecture-development-view.md" +$physicalView = Join-Path $archDir "architecture-physical-view.md" + +New-Item -ItemType Directory -Path $archDir -Force | Out-Null + +function Copy-TemplateIfMissing { + param( + [Parameter(Mandatory = $true)][string]$TemplateName, + [Parameter(Mandatory = $true)][string]$Destination + ) + + if (Test-Path -LiteralPath $Destination -PathType Leaf) { + return + } + + $template = Resolve-Template -TemplateName $TemplateName -RepoRoot $repoRoot + if ($template -and (Test-Path -LiteralPath $template -PathType Leaf)) { + Copy-Item -LiteralPath $template -Destination $Destination -Force + Write-Output "Copied $TemplateName template to $Destination" + } else { + Write-Warning "$TemplateName template not found" + New-Item -ItemType File -Path $Destination -Force | Out-Null + } +} + +Copy-TemplateIfMissing -TemplateName "architecture-template" -Destination $archFile +Copy-TemplateIfMissing -TemplateName "architecture-scenario-template" -Destination $scenarioView +Copy-TemplateIfMissing -TemplateName "architecture-logical-template" -Destination $logicalView +Copy-TemplateIfMissing -TemplateName "architecture-process-template" -Destination $processView +Copy-TemplateIfMissing -TemplateName "architecture-development-template" -Destination $developmentView +Copy-TemplateIfMissing 
-TemplateName "architecture-physical-template" -Destination $physicalView + +if ($Json) { + [PSCustomObject]@{ + ARCH_FILE = $archFile + ARCH_DIR = $archDir + SCENARIO_VIEW = $scenarioView + LOGICAL_VIEW = $logicalView + PROCESS_VIEW = $processView + DEVELOPMENT_VIEW = $developmentView + PHYSICAL_VIEW = $physicalView + } | ConvertTo-Json -Compress +} else { + Write-Output "ARCH_FILE: $archFile" + Write-Output "ARCH_DIR: $archDir" + Write-Output "SCENARIO_VIEW: $scenarioView" + Write-Output "LOGICAL_VIEW: $logicalView" + Write-Output "PROCESS_VIEW: $processView" + Write-Output "DEVELOPMENT_VIEW: $developmentView" + Write-Output "PHYSICAL_VIEW: $physicalView" +} diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 325692900e..031119ebdd 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -74,6 +74,10 @@ install_shared_infra as _install_shared_infra_impl, refresh_shared_templates as _refresh_shared_templates_impl, ) +from .agent_projection import ( + ensure_agent_governance_from_template as _ensure_agent_governance_from_template, + refresh_agent_projection as _refresh_agent_projection, +) # For cross-platform keyboard input import readchar @@ -906,6 +910,46 @@ def ensure_constitution_from_template(project_path: Path, tracker: StepTracker | console.print(f"[yellow]Warning: Could not initialize constitution: {e}[/yellow]") +def ensure_agent_governance_from_template(project_path: Path, tracker: StepTracker | None = None) -> None: + """Copy agent-governance template to memory if it doesn't exist.""" + try: + result = _ensure_agent_governance_from_template(project_path) + except Exception as e: + if tracker: + tracker.add("agent-governance", "Agent governance setup") + tracker.error("agent-governance", str(e)) + else: + console.print(f"[yellow]Warning: Could not initialize agent governance: {e}[/yellow]") + return + + if tracker: + tracker.add("agent-governance", "Agent governance setup") + if result is None: + 
tracker.error("agent-governance", "template not found") + else: + tracker.complete("agent-governance", "available") + + +def refresh_agent_projection(project_path: Path, tracker: StepTracker | None = None) -> None: + """Refresh generated agent governance projections.""" + try: + result = _refresh_agent_projection(project_path) + except Exception as e: + if tracker: + tracker.add("agent-projection", "Agent governance projection") + tracker.error("agent-projection", str(e)) + else: + console.print(f"[yellow]Warning: Could not refresh agent projection: {e}[/yellow]") + return + + if tracker: + tracker.add("agent-projection", "Agent governance projection") + if result.memory_path is None: + tracker.skip("agent-projection", "agent-governance template missing") + else: + tracker.complete("agent-projection", f"{len(result.projection_paths)} file(s) refreshed") + + INIT_OPTIONS_FILE = ".specify/init-options.json" @@ -951,6 +995,8 @@ def _get_skills_dir(project_path: Path, selected_ai: str) -> Path: # Constants kept for backward compatibility with presets and extensions. DEFAULT_SKILLS_DIR = ".agents/skills" SKILL_DESCRIPTIONS = { + "arch": "Generate project-level 4+1 architecture view artifacts and synthesis.", + "agent": "Create or update agent governance and refresh agent instruction projections.", "specify": "Create or update feature specifications from natural language descriptions.", "plan": "Generate technical implementation plans from feature specifications.", "tasks": "Break down implementation plans into actionable task lists.", @@ -1339,6 +1385,8 @@ def init( tracker.complete("shared-infra", f"scripts ({selected_script}) + templates") ensure_constitution_from_template(project_path, tracker=tracker) + ensure_agent_governance_from_template(project_path, tracker=tracker) + refresh_agent_projection(project_path, tracker=tracker) if not no_git: tracker.start("git") @@ -1607,11 +1655,12 @@ def _display_cmd(name: str) -> str: steps_lines.append(f"{step_num}. 
Start using {usage_label} with your coding agent:") - steps_lines.append(f" {step_num}.1 [cyan]{_display_cmd('constitution')}[/] - Establish project principles") - steps_lines.append(f" {step_num}.2 [cyan]{_display_cmd('specify')}[/] - Create baseline specification") - steps_lines.append(f" {step_num}.3 [cyan]{_display_cmd('plan')}[/] - Create implementation plan") - steps_lines.append(f" {step_num}.4 [cyan]{_display_cmd('tasks')}[/] - Generate actionable tasks") - steps_lines.append(f" {step_num}.5 [cyan]{_display_cmd('implement')}[/] - Execute implementation") + steps_lines.append(f" {step_num}.1 [cyan]{_display_cmd('arch')}[/] - Shape 4+1 architecture views") + steps_lines.append(f" {step_num}.2 [cyan]{_display_cmd('constitution')}[/] - Establish project principles") + steps_lines.append(f" {step_num}.3 [cyan]{_display_cmd('specify')}[/] - Create baseline specification") + steps_lines.append(f" {step_num}.4 [cyan]{_display_cmd('plan')}[/] - Create implementation plan") + steps_lines.append(f" {step_num}.5 [cyan]{_display_cmd('tasks')}[/] - Generate actionable tasks") + steps_lines.append(f" {step_num}.6 [cyan]{_display_cmd('implement')}[/] - Execute implementation") steps_panel = Panel("\n".join(steps_lines), title="Next Steps", border_style="cyan", padding=(1,2)) console.print() @@ -1975,6 +2024,7 @@ def _write_integration_json( installed_integrations=installed_integrations, settings=integration_settings, ) + refresh_agent_projection(project_root) def _clear_init_options_for_integration(project_root: Path, integration_key: str) -> None: @@ -1993,6 +2043,7 @@ def _remove_integration_json(project_root: Path) -> None: path = project_root / INTEGRATION_JSON if path.exists(): path.unlink() + refresh_agent_projection(project_root) _MANIFEST_READ_ERRORS = (ValueError, FileNotFoundError, OSError, UnicodeDecodeError) diff --git a/src/specify_cli/agent_projection.py b/src/specify_cli/agent_projection.py new file mode 100644 index 0000000000..8f2fb1cb54 --- /dev/null +++ 
b/src/specify_cli/agent_projection.py @@ -0,0 +1,295 @@ +"""Agent governance memory and projection helpers.""" + +from __future__ import annotations + +import json +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +import yaml + +from .integration_state import ( + INTEGRATION_JSON, + default_integration_key, + installed_integration_keys, + normalize_integration_state, +) + + +AGENT_GOVERNANCE_MEMORY = ".specify/memory/agent-governance.md" +AGENT_GOVERNANCE_TEMPLATE = ".specify/templates/agent-governance-template.md" + +PROJECTION_MARKER_START = "" +PROJECTION_MARKER_END = "" + + +@dataclass(frozen=True) +class AgentProjectionResult: + """Files updated by an agent projection refresh.""" + + memory_path: Path | None + projection_paths: list[Path] + + +def ensure_agent_governance_from_template(project_root: Path) -> Path | None: + """Copy agent-governance template to memory if missing.""" + memory_path = project_root / AGENT_GOVERNANCE_MEMORY + if memory_path.exists(): + return memory_path + + template_path = project_root / AGENT_GOVERNANCE_TEMPLATE + if not template_path.exists(): + return None + + memory_path.parent.mkdir(parents=True, exist_ok=True) + memory_path.write_bytes(template_path.read_bytes()) + return memory_path + + +def refresh_agent_projection(project_root: Path) -> AgentProjectionResult: + """Refresh repo-level and agent-specific governance projections. + + The source of truth is ``.specify/memory/agent-governance.md`` plus the + repository's current integration, skill, MCP, and extension state. Existing + text outside the generated projection markers is preserved. 
+ """ + memory_path = ensure_agent_governance_from_template(project_root) + if memory_path is None: + return AgentProjectionResult(None, []) + + state = _read_integration_state(project_root) + installed = installed_integration_keys(state) + default_key = default_integration_key(state) + projection_paths = _projection_targets(project_root, state) + projection = _render_projection(project_root, memory_path, state) + updated: list[Path] = [] + + for path in projection_paths: + content = _adapter_prelude(path, default_key, installed) + if path.exists(): + existing = path.read_text(encoding="utf-8-sig") + new_content = _upsert_marked_section(existing, projection) + if new_content == existing: + continue + else: + path.parent.mkdir(parents=True, exist_ok=True) + new_content = content + "\n" + projection + + path.write_text(_normalize_newlines(new_content), encoding="utf-8") + updated.append(path) + + return AgentProjectionResult(memory_path, updated) + + +def _read_integration_state(project_root: Path) -> dict[str, Any]: + path = project_root / INTEGRATION_JSON + if not path.exists(): + return normalize_integration_state({}) + try: + data = json.loads(path.read_text(encoding="utf-8")) + except (json.JSONDecodeError, OSError, UnicodeDecodeError): + return normalize_integration_state({}) + return normalize_integration_state(data if isinstance(data, dict) else {}) + + +def _projection_targets(project_root: Path, state: dict[str, Any]) -> list[Path]: + targets: list[Path] = [project_root / "AGENTS.md"] + + try: + from .integrations import get_integration + except Exception: + get_integration = None # type: ignore[assignment] + + for key in installed_integration_keys(state): + integration = get_integration(key) if get_integration else None + context_file = getattr(integration, "context_file", None) + if isinstance(context_file, str) and context_file.strip(): + targets.append(project_root / context_file) + + # Common adapter files. 
They are created when the corresponding + # integration is installed, and refreshed whenever they already exist so + # uninstall/switch operations do not leave stale generated projections. + for key, path in { + "claude": "CLAUDE.md", + "gemini": "GEMINI.md", + "copilot": ".github/copilot-instructions.md", + }.items(): + target = project_root / path + if key in installed_integration_keys(state) or target.exists(): + targets.append(target) + + deduped: list[Path] = [] + seen: set[str] = set() + for path in targets: + rel = path.resolve().as_posix() + if rel in seen: + continue + seen.add(rel) + deduped.append(path) + return deduped + + +def _render_projection( + project_root: Path, + memory_path: Path, + state: dict[str, Any], +) -> str: + installed = installed_integration_keys(state) + default_key = default_integration_key(state) + skills = _scan_skills(project_root) + mcp_configs = _scan_mcp_configs(project_root) + extensions = _scan_extensions(project_root) + + lines = [ + PROJECTION_MARKER_START, + "# Agent Governance Projection", + "", + "Generated from repository state. 
Do not edit this section directly; update", + f"`{AGENT_GOVERNANCE_MEMORY}`, integrations, skills, MCP config, or extensions instead.", + "", + "## Governing Source", + f"- Agent governance SSOT: `{AGENT_GOVERNANCE_MEMORY}`", + "- Project principles: `.specify/memory/constitution.md`", + "- Business semantics: `.specify/memory/uc.md`", + "- Architecture boundaries: `.specify/memory/architecture.md`", + "", + "## Active Integrations", + f"- Default integration: `{default_key or 'none'}`", + f"- Installed integrations: {', '.join(f'`{key}`' for key in installed) if installed else '`none`'}", + "", + "## Active Skills", + ] + + if skills: + for skill in skills: + lines.append(f"- `{skill}`") + else: + lines.append("- `none detected`") + + lines.extend(["", "## MCP Configuration"]) + if mcp_configs: + for config in mcp_configs: + lines.append(f"- `{config}`") + else: + lines.append("- `none detected`") + + lines.extend(["", "## Extensions"]) + if extensions: + for extension in extensions: + lines.append(f"- `{extension}`") + else: + lines.append("- `none detected`") + + lines.extend([ + "", + "## Required Operating Rules", + "- Follow current user instructions first.", + "- Treat `.specify/memory/agent-governance.md` as the source of truth for agent, skill, and MCP behavior.", + "- Treat `.specify/memory/constitution.md` as the source of truth for project principles and quality gates.", + "- Do not edit governance, CI, MCP config, secrets, permissions, or tool settings unless explicitly requested.", + "- Do not overwrite user edits or modify files outside the active task scope.", + "- Report changed files, commands run, validation results, and unresolved risks before handoff.", + "", + f"_Projection source file: `{memory_path.relative_to(project_root).as_posix()}`_", + PROJECTION_MARKER_END, + "", + ]) + return "\n".join(lines) + + +def _upsert_marked_section(content: str, projection: str) -> str: + start = content.find(PROJECTION_MARKER_START) + end = 
content.find(PROJECTION_MARKER_END, start if start != -1 else 0) + if start != -1 and end != -1 and end > start: + end += len(PROJECTION_MARKER_END) + if end < len(content) and content[end] == "\r": + end += 1 + if end < len(content) and content[end] == "\n": + end += 1 + return content[:start] + projection + content[end:] + + if content and not content.endswith("\n"): + content += "\n" + return content + ("\n" if content else "") + projection + + +def _adapter_prelude(path: Path, default_key: str | None, installed: list[str]) -> str: + name = path.name + if name == "AGENTS.md": + return "# Repo Agent Governance\n\nThis file is the repository-level agent governance projection." + if name == "CLAUDE.md": + return "# Claude Instructions\n\nRead `AGENTS.md` first; it is the repository-level agent governance projection." + if name == "GEMINI.md": + return "# Gemini Instructions\n\nRead `AGENTS.md` first; it is the repository-level agent governance projection." + if name == "copilot-instructions.md": + return "# GitHub Copilot Instructions\n\nRead `AGENTS.md` first; it is the repository-level agent governance projection." + installed_text = ", ".join(installed) if installed else "none" + return ( + "# Agent Instructions\n\n" + "Read `AGENTS.md` first; it is the repository-level agent governance projection.\n\n" + f"Default integration: `{default_key or 'none'}`. Installed integrations: `{installed_text}`." 
+ ) + + +def _scan_skills(project_root: Path) -> list[str]: + skills: list[str] = [] + for skill_file in project_root.rglob("SKILL.md"): + if any(part in {".git", "__pycache__", ".venv", "node_modules"} for part in skill_file.parts): + continue + try: + rel = skill_file.relative_to(project_root).as_posix() + except ValueError: + rel = skill_file.as_posix() + skills.append(rel) + return sorted(skills) + + +def _scan_mcp_configs(project_root: Path) -> list[str]: + candidates: list[str] = [] + names = { + ".mcp.json", + "mcp.json", + "mcp.yml", + "mcp.yaml", + "mcp.config.json", + } + for path in project_root.rglob("*"): + if not path.is_file(): + continue + if any(part in {".git", "__pycache__", ".venv", "node_modules"} for part in path.parts): + continue + if path.name in names or "mcp" in path.name.lower(): + try: + candidates.append(path.relative_to(project_root).as_posix()) + except ValueError: + candidates.append(path.as_posix()) + return sorted(candidates) + + +def _scan_extensions(project_root: Path) -> list[str]: + registry = project_root / ".specify" / "extensions.yml" + if not registry.exists(): + return [] + try: + data = yaml.safe_load(registry.read_text(encoding="utf-8")) or {} + except (yaml.YAMLError, OSError, UnicodeDecodeError): + return [".specify/extensions.yml"] + if not isinstance(data, dict): + return [".specify/extensions.yml"] + extensions = data.get("extensions") + if isinstance(extensions, dict): + return sorted(str(key) for key in extensions) + if isinstance(extensions, list): + names = [] + for item in extensions: + if isinstance(item, dict) and item.get("id"): + names.append(str(item["id"])) + elif isinstance(item, str): + names.append(item) + return sorted(names) or [".specify/extensions.yml"] + return [".specify/extensions.yml"] + + +def _normalize_newlines(content: str) -> str: + return content.replace("\r\n", "\n").replace("\r", "\n") diff --git a/src/specify_cli/extensions.py b/src/specify_cli/extensions.py index 
944ee4a06d..c3f3617643 100644 --- a/src/specify_cli/extensions.py +++ b/src/specify_cli/extensions.py @@ -27,6 +27,7 @@ _FALLBACK_CORE_COMMAND_NAMES = frozenset({ "analyze", + "arch", "checklist", "clarify", "constitution", diff --git a/src/specify_cli/integrations/claude/__init__.py b/src/specify_cli/integrations/claude/__init__.py index 88aef85285..b45a4963da 100644 --- a/src/specify_cli/integrations/claude/__init__.py +++ b/src/specify_cli/integrations/claude/__init__.py @@ -23,6 +23,7 @@ # Mapping of command template stem → argument-hint text shown inline # when a user invokes the slash command in Claude Code. ARGUMENT_HINTS: dict[str, str] = { + "arch": "Optional architecture scenario or 4+1 design focus", "specify": "Describe the feature you want to specify", "plan": "Optional guidance for the planning phase", "tasks": "Optional task generation constraints", diff --git a/templates/agent-governance-template.md b/templates/agent-governance-template.md new file mode 100644 index 0000000000..71c6545676 --- /dev/null +++ b/templates/agent-governance-template.md @@ -0,0 +1,67 @@ +# Agent Governance + + + +## Authority Order + +1. Current user instruction +2. This agent governance file +3. `.specify/memory/constitution.md` +4. `.specify/memory/architecture.md` +5. `.specify/memory/uc.md` +6. Active feature artifacts under `specs//` +7. Skill-local `SKILL.md` +8. Tool/MCP defaults + +## Source Of Truth + +- Project principles: `.specify/memory/constitution.md` +- Business semantics: `.specify/memory/uc.md` +- Architecture boundaries: `.specify/memory/architecture.md` +- Feature work: `specs//` +- Agent operations: `.specify/memory/agent-governance.md` +- Skill contracts: each `SKILL.md` +- MCP permissions: MCP configuration and allowlists + +## Write Boundaries + +- Do not edit governance, CI, MCP config, secrets, permissions, or tool settings unless explicitly requested. +- Do not modify files outside the active task scope. +- Do not overwrite user edits. 
+- Do not rewrite generated files unless the owning workflow requires it. + +## Skill Contract + +Each skill must declare: + +- purpose +- trigger +- allowed read paths +- allowed write paths +- forbidden paths +- outputs +- validation command + +## MCP Policy + +- MCP tools are read-only by default. +- Mutating MCP calls require explicit user intent. +- External writes must report target, action, and result. +- Secrets and tokens must never be logged or written to repo files. + +## Validation + +Before handoff, report: + +- changed files +- commands run +- tests/validation result +- unresolved risks + diff --git a/templates/architecture-development-template.md b/templates/architecture-development-template.md new file mode 100644 index 0000000000..d9f8c6976c --- /dev/null +++ b/templates/architecture-development-template.md @@ -0,0 +1,39 @@ +# Development View + +**Input**: `.specify/memory/architecture-logical-view.md`, `.specify/memory/architecture-process-view.md` + +**Purpose**: Derive architecture-level components, package boundary intent, contract/artifact semantics, and dependency rules from logical and process views. 
+ +## Architecture-Level Components + +| Component / Capability Package | Responsibility | Input / Output Boundary | Collaborators | Explicitly Must Not Own | Source View Evidence | +|--------------------------------|----------------|-------------------------|---------------|--------------------------|----------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Package Boundary Intent + +| Package / Boundary | Abstraction Level | Owned Concepts | May Depend On | Must Not Depend On | Evolution Rule | +|--------------------|-------------------|----------------|---------------|--------------------|----------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Contracts and Artifacts + +| Contract / Artifact | Semantics | Producer | Consumer | Lifecycle | Architecture Consequence | +|---------------------|-----------|----------|----------|-----------|--------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Dependency Rules + +| Rule | Allowed Direction | Forbidden Direction | Reason | Risk If Violated | +|------|-------------------|---------------------|--------|------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Development View Gaps + +| Gap | Affected Component / Boundary | Why It Matters | +|-----|-------------------------------|----------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Prohibited Content + +Do not write source file paths, concrete package trees, classes, functions, implementation tasks, framework-specific wiring, or code generation notes here. 
diff --git a/templates/architecture-logical-template.md b/templates/architecture-logical-template.md new file mode 100644 index 0000000000..057874c3e1 --- /dev/null +++ b/templates/architecture-logical-template.md @@ -0,0 +1,39 @@ +# Logical View + +**Input**: `.specify/memory/architecture-scenario-view.md` + +**Purpose**: Derive capability boundaries, domain objects, states, relationships, and invariants from the scenario view. + +## Capability Boundaries + +| Capability / Boundary | Responsibility | Input | Output | Explicitly Does Not Own | Scenario Source | +|-----------------------|----------------|-------|--------|--------------------------|-----------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Domain Objects and Relationships + +| Object | Meaning | Owning Capability | Key Relationships | Fact Source | Invariants | +|--------|---------|-------------------|-------------------|-------------|------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## State and Lifecycle + +| Object / Flow | State | Entered When | Exited When | Forbidden Transition | Responsible Boundary | +|---------------|-------|--------------|-------------|----------------------|----------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Logical Decisions + +| Decision | Scope | Owner / Boundary | Affected Objects or Flows | Consequence | +|----------|-------|------------------|---------------------------|-------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Logical Gaps + +| Gap | Affected Capability / Object | Why It Matters | +|-----|------------------------------|----------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## 
Prohibited Content + +Do not write classes, DTOs, database tables, fields, method names, endpoints, schemas, or implementation data structures here. diff --git a/templates/architecture-physical-template.md b/templates/architecture-physical-template.md new file mode 100644 index 0000000000..9271d3f76c --- /dev/null +++ b/templates/architecture-physical-template.md @@ -0,0 +1,39 @@ +# Physical View + +**Input**: `.specify/memory/architecture-process-view.md`, `.specify/memory/architecture-development-view.md` + +**Purpose**: Derive deployment, hosting, external system, fact-source, observability, and operational boundaries from process and development views. + +## Deployment and Hosting Boundaries + +| Runtime / Hosting Unit | Carries | Boundary | Depends On | Release / Migration Impact | +|------------------------|---------|----------|------------|----------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## External System Collaboration + +| External System | Purpose | Exchanged Content | Authoritative Fact | Failure Impact | Isolation / Substitute Boundary | +|-----------------|---------|-------------------|--------------------|----------------|---------------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Fact Sources and Observability + +| Fact / Event | Authoritative Source | Observable Location | Consumers | Traceability Requirement | +|--------------|----------------------|---------------------|-----------|--------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Operations and Release Boundaries + +| Operational Concern | Responsible Boundary | Trigger | Affected Views | Architecture Consequence | +|---------------------|----------------------|---------|----------------|--------------------------| +| NEEDS 
ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Physical View Gaps + +| Gap | Affected Deployment / External Boundary | Why It Matters | +|-----|-----------------------------------------|----------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Prohibited Content + +Do not write Kubernetes YAML, cloud resource manifests, machine sizes, service SKUs, deployment scripts, runbooks, or concrete infrastructure configuration here. diff --git a/templates/architecture-process-template.md b/templates/architecture-process-template.md new file mode 100644 index 0000000000..143a085c21 --- /dev/null +++ b/templates/architecture-process-template.md @@ -0,0 +1,39 @@ +# Process View + +**Input**: `.specify/memory/architecture-scenario-view.md`, `.specify/memory/architecture-logical-view.md` + +**Purpose**: Derive runtime collaboration, handoffs, approvals, receipts, state advancement, and failure closure from scenario paths and logical boundaries. 
+ +## Main Runtime Links + +| Runtime Link | Trigger | Source | Target | Transferred Content / Fact | Completion Condition | +|--------------|---------|--------|--------|----------------------------|----------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Handoffs and Approvals + +| Handoff / Approval | From | To | Meaning | Accepted Path | Rejected / Returned Path | +|--------------------|------|----|---------|---------------|--------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Receipts and User Participation + +| Receipt / Participation Point | Sender | Receiver | Content | User Action | Architecture Consequence | +|-------------------------------|--------|----------|---------|-------------|--------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Failure, Degradation, and Closure + +| Failure / Branch | Detection Boundary | Responsible Boundary | Degradation or Compensation | User-Visible Result | Closure Condition | +|------------------|--------------------|----------------------|-----------------------------|---------------------|-------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Process Gaps + +| Gap | Affected Runtime Link / Scenario | Why It Matters | +|-----|----------------------------------|----------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Prohibited Content + +Do not write call stacks, queue names, retry counts, thread/process details, endpoint sequences, workflow engine configuration, or orchestration code here. 
diff --git a/templates/architecture-scenario-template.md b/templates/architecture-scenario-template.md new file mode 100644 index 0000000000..359c45244b --- /dev/null +++ b/templates/architecture-scenario-template.md @@ -0,0 +1,37 @@ +# Scenario View + +**Purpose**: Produce the UC semantics for the architecture workflow. This view is the source for the logical, process, development, and physical views. + +## Actors and Participants + +| Actor / Participant | Goal | Responsibility | Boundary | +|---------------------|------|----------------|----------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Use Cases + +| Use Case | Actor | Goal | Preconditions | Scope Boundary | +|----------|-------|------|---------------|----------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Scenario Paths + +| Scenario | Main Path | Successful Outcome | Alternative / Failure Branches | +|----------|-----------|--------------------|--------------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Acceptance Semantics + +| Acceptance Scenario | Observable Result | Must Hold | Not Covered | +|---------------------|-------------------|-----------|-------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Scenario Gaps + +| Gap | Affected Scenario | Why It Matters | +|-----|-------------------|----------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Prohibited Content + +Do not write architecture components, class designs, APIs, database tables, implementation tasks, test strategy, deployment scripts, or framework choices here. 
diff --git a/templates/architecture-template.md b/templates/architecture-template.md new file mode 100644 index 0000000000..9f2395bd94 --- /dev/null +++ b/templates/architecture-template.md @@ -0,0 +1,48 @@ +# Architecture Synthesis: [PROJECT] + +**Input Views**: +- Scenario: `.specify/memory/architecture-scenario-view.md` +- Logical: `.specify/memory/architecture-logical-view.md` +- Process: `.specify/memory/architecture-process-view.md` +- Development: `.specify/memory/architecture-development-view.md` +- Physical: `.specify/memory/architecture-physical-view.md` + +**Note**: This synthesis is filled in by the `__SPECKIT_COMMAND_ARCH__` command after the five 4+1 view files are updated. + +## View Index + +| View | File | Purpose | Current Status | +|------|------|---------|----------------| +| Scenario | `.specify/memory/architecture-scenario-view.md` | UC-producing actor, use case, path, branch, and acceptance semantics | NEEDS ARCH UPDATE | +| Logical | `.specify/memory/architecture-logical-view.md` | Capability boundaries, domain objects, states, and invariants | NEEDS ARCH UPDATE | +| Process | `.specify/memory/architecture-process-view.md` | Runtime links, handoffs, approvals, receipts, failure closure | NEEDS ARCH UPDATE | +| Development | `.specify/memory/architecture-development-view.md` | Architecture-level components, package boundaries, contracts, dependencies | NEEDS ARCH UPDATE | +| Physical | `.specify/memory/architecture-physical-view.md` | Deployment, external systems, fact sources, observability, operations | NEEDS ARCH UPDATE | + +## Architecture Axis + +[Summarize the central design forces that connect the five views: primary scenario flow, authority boundary, fact-source model, collaboration model, deployment constraint, or failure-closure model.] 
+ +## Cross-View Mapping + +| Stable Concept | Scenario View | Logical View | Process View | Development View | Physical View | Architecture Consequence | +|----------------|---------------|--------------|--------------|------------------|---------------|--------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Key Architecture Conclusions + +| Conclusion | Affected Views | Boundary/Owner | Consequence | +|------------|----------------|----------------|-------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Cross-Cutting Constraints + +| Constraint | Source | Affected Views | Scope | Architecture Consequence | +|------------|--------|----------------|-------|--------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | + +## Open Risks and Review Triggers + +| Risk or Trigger | Missing Evidence / Change Condition | Affected Views | Required Architecture Review | +|-----------------|-------------------------------------|----------------|------------------------------| +| NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | NEEDS ARCH UPDATE | diff --git a/templates/commands/agent.md b/templates/commands/agent.md new file mode 100644 index 0000000000..53db5b78b9 --- /dev/null +++ b/templates/commands/agent.md @@ -0,0 +1,63 @@ +--- +description: Create or update agent governance and refresh agent instruction projections. +--- + +## User Input + +```text +$ARGUMENTS +``` + +You **MUST** consider the user input before proceeding (if not empty). + +## Outline + +You are updating `.specify/memory/agent-governance.md`, the source of truth for how AI agents, skills, MCP tools, and integration adapters operate in this repository. 
+ +**Note**: If `.specify/memory/agent-governance.md` does not exist yet, copy `.specify/templates/agent-governance-template.md` first. + +Follow this execution flow: + +1. Load `.specify/memory/agent-governance.md`. +2. Load supporting context if present: + - `.specify/memory/constitution.md` for project principles and quality gates. + - `.specify/memory/architecture.md` for architecture boundaries. + - `.specify/memory/uc.md` for business semantics. + - `.specify/integration.json` for installed/default integrations. + - Any `SKILL.md` files for skill-local contracts. + - MCP configuration files such as `.mcp.json`, `mcp.json`, `mcp.yml`, or `mcp.yaml`. + - `.specify/extensions.yml` for enabled extensions. +3. Update agent governance: + - Keep the authority order explicit. + - Keep source-of-truth boundaries between constitution, architecture, UC, skills, MCP, and feature artifacts. + - Keep write boundaries testable and concrete. + - Require explicit user intent for mutating MCP calls and external writes. + - Preserve user-authored repo-specific rules unless they conflict with higher authority. +4. Refresh projections: + - `AGENTS.md` + - active integration context files such as `CLAUDE.md`, `GEMINI.md`, `.github/copilot-instructions.md`, and other registered `context_file` paths. + - Preserve content outside the generated projection markers. +5. Produce a Sync Impact Report in `.specify/memory/agent-governance.md`: + - Active/default integration + - Installed integrations + - Skills scanned + - MCP config files scanned + - Projection files refreshed + - Follow-up TODOs + +## Validation + +- No projection file should duplicate long governance text outside the generated projection markers. +- `AGENTS.md` is the repo-level agent governance projection. +- Agent-specific files are adapters that point back to `AGENTS.md`. 
+- Do not modify `.specify/memory/constitution.md`, `.specify/memory/architecture.md`, `.specify/memory/uc.md`, feature specs, plans, tasks, source code, tests, CI, MCP config, or secrets unless the user explicitly requested that separate change. + +## Output + +Report: + +- Whether `.specify/memory/agent-governance.md` was created or updated. +- Projection files refreshed. +- Skills and MCP config files detected. +- Any unresolved governance risks. + diff --git a/templates/commands/arch.md b/templates/commands/arch.md new file mode 100644 index 0000000000..632ea76a30 --- /dev/null +++ b/templates/commands/arch.md @@ -0,0 +1,156 @@ +--- +description: Execute the 4+1 architecture workflow and generate architecture view artifacts. +scripts: + sh: scripts/bash/setup-arch.sh --json + ps: scripts/powershell/setup-arch.ps1 -Json +--- + +## User Input + +```text +$ARGUMENTS +``` + +You **MUST** consider the user input before proceeding (if not empty). + +## Goal + +Generate or update the project-level 4+1 architecture artifacts: + +- Main synthesis: `.specify/memory/architecture.md` +- Scenario view: `.specify/memory/architecture-scenario-view.md` +- Logical view: `.specify/memory/architecture-logical-view.md` +- Process view: `.specify/memory/architecture-process-view.md` +- Development view: `.specify/memory/architecture-development-view.md` +- Physical view: `.specify/memory/architecture-physical-view.md` + +The scenario view is the entry point. It produces the UC semantics for this architecture pass: actors, goals, use cases, scenario paths, branches, and acceptance meaning. The other four views are derived from the scenario view. + +## Operating Boundaries + +- Write only the six architecture artifacts listed above. +- Do not require `.specify/memory/uc.md`. If it exists, read it only as supporting reference, not as a hard prerequisite or sole source of truth. 
+- Do not modify `.specify/memory/uc.md`, `.specify/memory/constitution.md`, feature specs, plans, tasks, source code, tests, or root `docs/`. +- Stay at abstract architecture-design level. +- Do not write concrete classes, files, functions, endpoints, DTO fields, database tables, framework selections, library choices, UI component details, deployment manifests, task breakdowns, test strategy, validation anchors, code notes, deployment scripts, or runbooks. +- If evidence is insufficient, record a specific gap in the affected view instead of inventing business facts, components, interfaces, modules, deployment units, or numeric metrics. + +## Outline + +1. **Setup**: Run `{SCRIPT}` from repo root and parse JSON for `ARCH_FILE`, `ARCH_DIR`, `SCENARIO_VIEW`, `LOGICAL_VIEW`, `PROCESS_VIEW`, `DEVELOPMENT_VIEW`, and `PHYSICAL_VIEW`. + +2. **Load context**: + - Read all six architecture artifacts created by setup. + - Read `.specify/memory/uc.md` if present as optional scenario background. + - Read the five view templates under `.specify/templates/`. + +3. **Execute architecture workflow**: + - Phase 0: Fill `SCENARIO_VIEW`. + - Phase 1: Fill `LOGICAL_VIEW` from `SCENARIO_VIEW`. + - Phase 2: Fill `PROCESS_VIEW` from `SCENARIO_VIEW` and `LOGICAL_VIEW`. + - Phase 3: Fill `DEVELOPMENT_VIEW` from `LOGICAL_VIEW` and `PROCESS_VIEW`. + - Phase 4: Fill `PHYSICAL_VIEW` from `PROCESS_VIEW` and `DEVELOPMENT_VIEW`. + - Phase 5: Update `ARCH_FILE` as a synthesis and index over the five views. + +4. **Stop and report**: Report the six updated paths and any explicit unresolved architecture gaps. 
+ +## Phases + +### Phase 0: Scenario View + +**Output**: `.specify/memory/architecture-scenario-view.md` + +Create or update the UC-producing scenario view: + +- Actors and external participants +- Use cases and goals +- Preconditions and scope boundaries +- Main scenario paths +- Alternative and failure branches +- Acceptance semantics +- Open scenario questions + +This phase is authoritative for scenario semantics inside the architecture workflow. Do not defer UC creation to a separate command. + +### Phase 1: Logical View + +**Input**: `.specify/memory/architecture-scenario-view.md` +**Output**: `.specify/memory/architecture-logical-view.md` + +Derive: + +- System capability boundaries +- Domain objects and relationships +- Object ownership and fact sources +- State lifecycle and invariants +- Governance or decision boundaries that are architectural, not organizational process notes + +Do not write class models, DTOs, database tables, field lists, method names, endpoint names, or implementation data structures. + +### Phase 2: Process View + +**Input**: `.specify/memory/architecture-scenario-view.md`, `.specify/memory/architecture-logical-view.md` +**Output**: `.specify/memory/architecture-process-view.md` + +Derive: + +- Main runtime links +- Handoffs and approvals +- Receipts and user participation points +- State advancement across scenario paths +- Failure, degradation, compensation, and closure + +Do not write call stacks, queue names, retry counts, thread/process details, endpoint sequences, or implementation orchestration code. 
+ +### Phase 3: Development View + +**Input**: `.specify/memory/architecture-logical-view.md`, `.specify/memory/architecture-process-view.md` +**Output**: `.specify/memory/architecture-development-view.md` + +Derive: + +- Architecture-level components or capability packages +- Package boundary intent +- Contract and artifact semantics +- Dependency direction and forbidden crossings +- Component responsibility, collaborators, and input/output boundary + +Do not write source file paths, classes, functions, module-by-module implementation tasks, or framework-specific wiring. + +### Phase 4: Physical View + +**Input**: `.specify/memory/architecture-process-view.md`, `.specify/memory/architecture-development-view.md` +**Output**: `.specify/memory/architecture-physical-view.md` + +Derive: + +- Deployment and hosting boundaries +- External system collaboration +- Fact-source placement +- Observability and operational boundaries +- Release or runtime ownership constraints + +Do not write Kubernetes YAML, cloud resource manifests, machine sizes, concrete service SKUs, deployment scripts, or runbooks. + +### Phase 5: Architecture Synthesis + +**Input**: all five view files +**Output**: `.specify/memory/architecture.md` + +Update the main synthesis file: + +- View index with links to all five view files +- Architecture axis and central design forces +- Cross-view mapping table +- Key boundaries and constraints +- Open risks and architecture review triggers + +Do not copy every detail from the view files. Summarize the architecture conclusions that connect multiple views. + +## Quality Bar + +- Scenario view must contain enough UC semantics for the other four views to derive from it. +- Every non-placeholder conclusion must be traceable to a scenario, object, runtime link, component boundary, deployment boundary, or stated constraint. +- Use stable names consistently across all five views and the synthesis file. 
+- Keep uncertainty specific: record what is unknown, which view it affects, and which architecture conclusion cannot yet be made. +- Remove generic statements such as "scalable", "secure", "observable", or "modular" unless they name owner, affected view, scope, and architecture consequence. diff --git a/tests/integrations/test_integration_base_markdown.py b/tests/integrations/test_integration_base_markdown.py index 0b74a6f1a9..ecfeefeb4b 100644 --- a/tests/integrations/test_integration_base_markdown.py +++ b/tests/integrations/test_integration_base_markdown.py @@ -252,7 +252,7 @@ def test_init_options_includes_context_file(self, tmp_path): # -- Complete file inventory ------------------------------------------ COMMAND_STEMS = [ - "analyze", "checklist", "clarify", "constitution", + "analyze", "arch", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", ] @@ -274,14 +274,20 @@ def _expected_files(self, script_variant: str) -> list[str]: if script_variant == "sh": for name in ["check-prerequisites.sh", "common.sh", "create-new-feature.sh", - "setup-plan.sh", "setup-tasks.sh"]: + "setup-arch.sh", "setup-plan.sh", "setup-tasks.sh"]: files.append(f".specify/scripts/bash/{name}") else: for name in ["check-prerequisites.ps1", "common.ps1", "create-new-feature.ps1", - "setup-plan.ps1", "setup-tasks.ps1"]: + "setup-arch.ps1", "setup-plan.ps1", "setup-tasks.ps1"]: files.append(f".specify/scripts/powershell/{name}") - for name in ["checklist-template.md", + for name in ["architecture-development-template.md", + "architecture-logical-template.md", + "architecture-physical-template.md", + "architecture-process-template.md", + "architecture-scenario-template.md", + "architecture-template.md", + "checklist-template.md", "constitution-template.md", "plan-template.md", "spec-template.md", "tasks-template.md"]: files.append(f".specify/templates/{name}") diff --git a/tests/integrations/test_integration_base_skills.py 
b/tests/integrations/test_integration_base_skills.py index 89140de1c3..9b2484f3dc 100644 --- a/tests/integrations/test_integration_base_skills.py +++ b/tests/integrations/test_integration_base_skills.py @@ -100,7 +100,7 @@ def test_skill_directory_structure(self, tmp_path): skill_files = [f for f in created if "scripts" not in f.parts] expected_commands = { - "analyze", "checklist", "clarify", "constitution", + "analyze", "arch", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", } @@ -359,7 +359,7 @@ def test_options_include_skills_flag(self): # -- Complete file inventory ------------------------------------------ _SKILL_COMMANDS = [ - "analyze", "checklist", "clarify", "constitution", + "analyze", "arch", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", ] @@ -386,6 +386,7 @@ def _expected_files(self, script_variant: str) -> list[str]: ".specify/scripts/bash/check-prerequisites.sh", ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", + ".specify/scripts/bash/setup-arch.sh", ".specify/scripts/bash/setup-plan.sh", ".specify/scripts/bash/setup-tasks.sh", ] @@ -394,11 +395,18 @@ def _expected_files(self, script_variant: str) -> list[str]: ".specify/scripts/powershell/check-prerequisites.ps1", ".specify/scripts/powershell/common.ps1", ".specify/scripts/powershell/create-new-feature.ps1", + ".specify/scripts/powershell/setup-arch.ps1", ".specify/scripts/powershell/setup-plan.ps1", ".specify/scripts/powershell/setup-tasks.ps1", ] # Templates files += [ + ".specify/templates/architecture-development-template.md", + ".specify/templates/architecture-logical-template.md", + ".specify/templates/architecture-physical-template.md", + ".specify/templates/architecture-process-template.md", + ".specify/templates/architecture-scenario-template.md", + ".specify/templates/architecture-template.md", ".specify/templates/checklist-template.md", 
".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", diff --git a/tests/integrations/test_integration_base_toml.py b/tests/integrations/test_integration_base_toml.py index 56862e534c..689878a3da 100644 --- a/tests/integrations/test_integration_base_toml.py +++ b/tests/integrations/test_integration_base_toml.py @@ -484,6 +484,7 @@ def test_init_options_includes_context_file(self, tmp_path): COMMAND_STEMS = [ "analyze", + "arch", "checklist", "clarify", "constitution", @@ -515,6 +516,7 @@ def _expected_files(self, script_variant: str) -> list[str]: "check-prerequisites.sh", "common.sh", "create-new-feature.sh", + "setup-arch.sh", "setup-plan.sh", "setup-tasks.sh", ]: @@ -524,12 +526,19 @@ def _expected_files(self, script_variant: str) -> list[str]: "check-prerequisites.ps1", "common.ps1", "create-new-feature.ps1", + "setup-arch.ps1", "setup-plan.ps1", "setup-tasks.ps1", ]: files.append(f".specify/scripts/powershell/{name}") for name in [ + "architecture-development-template.md", + "architecture-logical-template.md", + "architecture-physical-template.md", + "architecture-process-template.md", + "architecture-scenario-template.md", + "architecture-template.md", "checklist-template.md", "constitution-template.md", "plan-template.md", diff --git a/tests/integrations/test_integration_base_yaml.py b/tests/integrations/test_integration_base_yaml.py index 956c7a796f..3111b6b78a 100644 --- a/tests/integrations/test_integration_base_yaml.py +++ b/tests/integrations/test_integration_base_yaml.py @@ -363,6 +363,7 @@ def test_init_options_includes_context_file(self, tmp_path): COMMAND_STEMS = [ "analyze", + "arch", "checklist", "clarify", "constitution", @@ -394,6 +395,7 @@ def _expected_files(self, script_variant: str) -> list[str]: "check-prerequisites.sh", "common.sh", "create-new-feature.sh", + "setup-arch.sh", "setup-plan.sh", "setup-tasks.sh", ]: @@ -403,12 +405,19 @@ def _expected_files(self, script_variant: str) -> list[str]: 
"check-prerequisites.ps1", "common.ps1", "create-new-feature.ps1", + "setup-arch.ps1", "setup-plan.ps1", "setup-tasks.ps1", ]: files.append(f".specify/scripts/powershell/{name}") for name in [ + "architecture-development-template.md", + "architecture-logical-template.md", + "architecture-physical-template.md", + "architecture-process-template.md", + "architecture-scenario-template.md", + "architecture-template.md", "checklist-template.md", "constitution-template.md", "plan-template.md", diff --git a/tests/integrations/test_integration_copilot.py b/tests/integrations/test_integration_copilot.py index c6e9259b09..e7d47cfc68 100644 --- a/tests/integrations/test_integration_copilot.py +++ b/tests/integrations/test_integration_copilot.py @@ -125,9 +125,9 @@ def test_directory_structure(self, tmp_path): agents_dir = tmp_path / ".github" / "agents" assert agents_dir.is_dir() agent_files = sorted(agents_dir.glob("speckit.*.agent.md")) - assert len(agent_files) == 9 + assert len(agent_files) == 10 expected_commands = { - "analyze", "checklist", "clarify", "constitution", + "analyze", "arch", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", } actual_commands = {f.name.removeprefix("speckit.").removesuffix(".agent.md") for f in agent_files} @@ -179,6 +179,7 @@ def test_complete_file_inventory_sh(self, tmp_path): actual = sorted(p.relative_to(project).as_posix() for p in project.rglob("*") if p.is_file()) expected = sorted([ ".github/agents/speckit.analyze.agent.md", + ".github/agents/speckit.arch.agent.md", ".github/agents/speckit.checklist.agent.md", ".github/agents/speckit.clarify.agent.md", ".github/agents/speckit.constitution.agent.md", @@ -188,6 +189,7 @@ def test_complete_file_inventory_sh(self, tmp_path): ".github/agents/speckit.tasks.agent.md", ".github/agents/speckit.taskstoissues.agent.md", ".github/prompts/speckit.analyze.prompt.md", + ".github/prompts/speckit.arch.prompt.md", 
".github/prompts/speckit.checklist.prompt.md", ".github/prompts/speckit.clarify.prompt.md", ".github/prompts/speckit.constitution.prompt.md", @@ -205,8 +207,15 @@ def test_complete_file_inventory_sh(self, tmp_path): ".specify/scripts/bash/check-prerequisites.sh", ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", + ".specify/scripts/bash/setup-arch.sh", ".specify/scripts/bash/setup-plan.sh", ".specify/scripts/bash/setup-tasks.sh", + ".specify/templates/architecture-development-template.md", + ".specify/templates/architecture-logical-template.md", + ".specify/templates/architecture-physical-template.md", + ".specify/templates/architecture-process-template.md", + ".specify/templates/architecture-scenario-template.md", + ".specify/templates/architecture-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -239,6 +248,7 @@ def test_complete_file_inventory_ps(self, tmp_path): actual = sorted(p.relative_to(project).as_posix() for p in project.rglob("*") if p.is_file()) expected = sorted([ ".github/agents/speckit.analyze.agent.md", + ".github/agents/speckit.arch.agent.md", ".github/agents/speckit.checklist.agent.md", ".github/agents/speckit.clarify.agent.md", ".github/agents/speckit.constitution.agent.md", @@ -248,6 +258,7 @@ def test_complete_file_inventory_ps(self, tmp_path): ".github/agents/speckit.tasks.agent.md", ".github/agents/speckit.taskstoissues.agent.md", ".github/prompts/speckit.analyze.prompt.md", + ".github/prompts/speckit.arch.prompt.md", ".github/prompts/speckit.checklist.prompt.md", ".github/prompts/speckit.clarify.prompt.md", ".github/prompts/speckit.constitution.prompt.md", @@ -265,8 +276,15 @@ def test_complete_file_inventory_ps(self, tmp_path): ".specify/scripts/powershell/check-prerequisites.ps1", ".specify/scripts/powershell/common.ps1", ".specify/scripts/powershell/create-new-feature.ps1", + 
".specify/scripts/powershell/setup-arch.ps1", ".specify/scripts/powershell/setup-plan.ps1", ".specify/scripts/powershell/setup-tasks.ps1", + ".specify/templates/architecture-development-template.md", + ".specify/templates/architecture-logical-template.md", + ".specify/templates/architecture-physical-template.md", + ".specify/templates/architecture-process-template.md", + ".specify/templates/architecture-scenario-template.md", + ".specify/templates/architecture-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -286,7 +304,7 @@ class TestCopilotSkillsMode: """Tests for Copilot integration in --skills mode.""" _SKILL_COMMANDS = [ - "analyze", "checklist", "clarify", "constitution", + "analyze", "arch", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", ] @@ -615,9 +633,16 @@ def test_complete_file_inventory_skills_sh(self, tmp_path): ".specify/scripts/bash/check-prerequisites.sh", ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", + ".specify/scripts/bash/setup-arch.sh", ".specify/scripts/bash/setup-plan.sh", ".specify/scripts/bash/setup-tasks.sh", # Templates + ".specify/templates/architecture-development-template.md", + ".specify/templates/architecture-logical-template.md", + ".specify/templates/architecture-physical-template.md", + ".specify/templates/architecture-process-template.md", + ".specify/templates/architecture-scenario-template.md", + ".specify/templates/architecture-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -724,4 +749,4 @@ def test_init_skills_next_steps_show_skill_syntax(self, tmp_path): # Must NOT show the dotted /speckit.plan form assert "/speckit.plan" not in result.output, ( f"Should not show /speckit.plan in skills mode:\n{result.output}" - ) \ No newline at end of file + 
) diff --git a/tests/integrations/test_integration_generic.py b/tests/integrations/test_integration_generic.py index 4f515a01d2..9ce5964e04 100644 --- a/tests/integrations/test_integration_generic.py +++ b/tests/integrations/test_integration_generic.py @@ -257,6 +257,7 @@ def test_complete_file_inventory_sh(self, tmp_path): expected = sorted([ "AGENTS.md", ".myagent/commands/speckit.analyze.md", + ".myagent/commands/speckit.arch.md", ".myagent/commands/speckit.checklist.md", ".myagent/commands/speckit.clarify.md", ".myagent/commands/speckit.constitution.md", @@ -273,8 +274,15 @@ def test_complete_file_inventory_sh(self, tmp_path): ".specify/scripts/bash/check-prerequisites.sh", ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", + ".specify/scripts/bash/setup-arch.sh", ".specify/scripts/bash/setup-plan.sh", ".specify/scripts/bash/setup-tasks.sh", + ".specify/templates/architecture-development-template.md", + ".specify/templates/architecture-logical-template.md", + ".specify/templates/architecture-physical-template.md", + ".specify/templates/architecture-process-template.md", + ".specify/templates/architecture-scenario-template.md", + ".specify/templates/architecture-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -313,6 +321,7 @@ def test_complete_file_inventory_ps(self, tmp_path): expected = sorted([ "AGENTS.md", ".myagent/commands/speckit.analyze.md", + ".myagent/commands/speckit.arch.md", ".myagent/commands/speckit.checklist.md", ".myagent/commands/speckit.clarify.md", ".myagent/commands/speckit.constitution.md", @@ -329,8 +338,15 @@ def test_complete_file_inventory_ps(self, tmp_path): ".specify/scripts/powershell/check-prerequisites.ps1", ".specify/scripts/powershell/common.ps1", ".specify/scripts/powershell/create-new-feature.ps1", + ".specify/scripts/powershell/setup-arch.ps1", ".specify/scripts/powershell/setup-plan.ps1", 
".specify/scripts/powershell/setup-tasks.ps1", + ".specify/templates/architecture-development-template.md", + ".specify/templates/architecture-logical-template.md", + ".specify/templates/architecture-physical-template.md", + ".specify/templates/architecture-process-template.md", + ".specify/templates/architecture-scenario-template.md", + ".specify/templates/architecture-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", diff --git a/tests/test_agent_projection.py b/tests/test_agent_projection.py new file mode 100644 index 0000000000..cac2e14ba9 --- /dev/null +++ b/tests/test_agent_projection.py @@ -0,0 +1,81 @@ +import json +import shutil +from pathlib import Path + +from specify_cli.agent_projection import ( + AGENT_GOVERNANCE_MEMORY, + PROJECTION_MARKER_START, + ensure_agent_governance_from_template, + refresh_agent_projection, +) + + +REPO_ROOT = Path(__file__).resolve().parent.parent + + +def _copy_template(project: Path, name: str) -> None: + dest = project / ".specify" / "templates" / name + dest.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(REPO_ROOT / "templates" / name, dest) + + +def test_ensure_agent_governance_from_template(tmp_path): + _copy_template(tmp_path, "agent-governance-template.md") + + result = ensure_agent_governance_from_template(tmp_path) + + assert result == tmp_path / AGENT_GOVERNANCE_MEMORY + content = result.read_text(encoding="utf-8") + assert "# Agent Governance" in content + assert "## Authority Order" in content + + +def test_refresh_agent_projection_creates_repo_and_agent_adapters(tmp_path): + _copy_template(tmp_path, "agent-governance-template.md") + (tmp_path / ".specify" / "integration.json").parent.mkdir(parents=True, exist_ok=True) + (tmp_path / ".specify" / "integration.json").write_text( + json.dumps( + { + "integration": "gemini", + "default_integration": "gemini", + "installed_integrations": ["gemini", "copilot"], + 
"integration_settings": {}, + } + ), + encoding="utf-8", + ) + (tmp_path / ".gemini" / "commands" / "speckit-test" / "SKILL.md").parent.mkdir( + parents=True, + exist_ok=True, + ) + (tmp_path / ".gemini" / "commands" / "speckit-test" / "SKILL.md").write_text( + "# Test Skill\n", + encoding="utf-8", + ) + (tmp_path / ".mcp.json").write_text("{}", encoding="utf-8") + + result = refresh_agent_projection(tmp_path) + + assert result.memory_path == tmp_path / AGENT_GOVERNANCE_MEMORY + assert (tmp_path / "AGENTS.md").exists() + assert (tmp_path / "GEMINI.md").exists() + assert (tmp_path / ".github" / "copilot-instructions.md").exists() + agents = (tmp_path / "AGENTS.md").read_text(encoding="utf-8") + assert PROJECTION_MARKER_START in agents + assert "Default integration: `gemini`" in agents + assert "`.gemini/commands/speckit-test/SKILL.md`" in agents + assert "`.mcp.json`" in agents + + +def test_refresh_agent_projection_preserves_user_content(tmp_path): + _copy_template(tmp_path, "agent-governance-template.md") + agents = tmp_path / "AGENTS.md" + agents.write_text("# Custom Rules\n\nKeep this.\n", encoding="utf-8") + + refresh_agent_projection(tmp_path) + + content = agents.read_text(encoding="utf-8") + assert "# Custom Rules" in content + assert "Keep this." 
in content + assert PROJECTION_MARKER_START in content + diff --git a/tests/test_arch_templates.py b/tests/test_arch_templates.py new file mode 100644 index 0000000000..a76dc10d33 --- /dev/null +++ b/tests/test_arch_templates.py @@ -0,0 +1,76 @@ +"""Quality guards for 4+1 architecture templates and command.""" + +from pathlib import Path + + +PROJECT_ROOT = Path(__file__).resolve().parent.parent +TEMPLATES = PROJECT_ROOT / "templates" + + +def _read_template(name: str) -> str: + return (TEMPLATES / name).read_text(encoding="utf-8") + + +def test_arch_command_is_phase_based_and_does_not_require_uc_command(): + content = _read_template("commands/arch.md") + + assert "scripts:" in content + assert "setup-arch.sh --json" in content + assert "setup-arch.ps1 -Json" in content + for phase in [ + "Phase 0: Scenario View", + "Phase 1: Logical View", + "Phase 2: Process View", + "Phase 3: Development View", + "Phase 4: Physical View", + "Phase 5: Architecture Synthesis", + ]: + assert phase in content + assert "Do not require `.specify/memory/uc.md`" in content + assert "__SPECKIT_COMMAND_UC__" not in content + assert "Read `.specify/memory/constitution.md`" not in content + assert ".specify/memory/architecture/" not in content + + +def test_architecture_synthesis_references_five_view_files(): + content = _read_template("architecture-template.md") + + for filename in [ + "architecture-scenario-view.md", + "architecture-logical-view.md", + "architecture-process-view.md", + "architecture-development-view.md", + "architecture-physical-view.md", + ]: + assert f".specify/memory/{filename}" in content + assert "Cross-View Mapping" in content + assert "Key Architecture Conclusions" in content + assert ".specify/memory/architecture/" not in content + + +def test_init_next_steps_place_arch_before_constitution(): + init_source = (PROJECT_ROOT / "src" / "specify_cli" / "__init__.py").read_text(encoding="utf-8") + + arch_index = init_source.index("_display_cmd('arch')") + 
constitution_index = init_source.index("_display_cmd('constitution')") + + assert arch_index < constitution_index + + +def test_view_templates_define_inputs_and_reject_implementation_detail(): + scenario = _read_template("architecture-scenario-template.md") + logical = _read_template("architecture-logical-template.md") + process = _read_template("architecture-process-template.md") + development = _read_template("architecture-development-template.md") + physical = _read_template("architecture-physical-template.md") + + assert "Produce the UC semantics" in scenario + assert "Do not write architecture components" in scenario + assert "**Input**: `.specify/memory/architecture-scenario-view.md`" in logical + assert "Do not write classes, DTOs, database tables" in logical + assert "**Input**: `.specify/memory/architecture-scenario-view.md`, `.specify/memory/architecture-logical-view.md`" in process + assert "Do not write call stacks, queue names, retry counts" in process + assert "**Input**: `.specify/memory/architecture-logical-view.md`, `.specify/memory/architecture-process-view.md`" in development + assert "Do not write source file paths, concrete package trees" in development + assert "**Input**: `.specify/memory/architecture-process-view.md`, `.specify/memory/architecture-development-view.md`" in physical + assert "Do not write Kubernetes YAML, cloud resource manifests" in physical diff --git a/tests/test_setup_arch.py b/tests/test_setup_arch.py new file mode 100644 index 0000000000..3f9a5e3e2b --- /dev/null +++ b/tests/test_setup_arch.py @@ -0,0 +1,166 @@ +"""Tests for setup-arch project-level architecture artifact initialization.""" + +import json +import os +import shutil +import subprocess +import sys +from pathlib import Path + +import pytest + +from tests.conftest import requires_bash + + +PROJECT_ROOT = Path(__file__).resolve().parent.parent +COMMON_SH = PROJECT_ROOT / "scripts" / "bash" / "common.sh" +SETUP_ARCH_SH = PROJECT_ROOT / "scripts" / "bash" / 
"setup-arch.sh" +COMMON_PS = PROJECT_ROOT / "scripts" / "powershell" / "common.ps1" +SETUP_ARCH_PS = PROJECT_ROOT / "scripts" / "powershell" / "setup-arch.ps1" +ARCH_TEMPLATES = [ + "architecture-template.md", + "architecture-scenario-template.md", + "architecture-logical-template.md", + "architecture-process-template.md", + "architecture-development-template.md", + "architecture-physical-template.md", +] + +HAS_PWSH = shutil.which("pwsh") is not None +_POWERSHELL = shutil.which("powershell.exe") or shutil.which("powershell") + + +def _install_bash_scripts(repo: Path) -> None: + d = repo / ".specify" / "scripts" / "bash" + d.mkdir(parents=True, exist_ok=True) + shutil.copy(COMMON_SH, d / "common.sh") + shutil.copy(SETUP_ARCH_SH, d / "setup-arch.sh") + + +def _install_ps_scripts(repo: Path) -> None: + d = repo / ".specify" / "scripts" / "powershell" + d.mkdir(parents=True, exist_ok=True) + shutil.copy(COMMON_PS, d / "common.ps1") + shutil.copy(SETUP_ARCH_PS, d / "setup-arch.ps1") + + +def _install_templates(repo: Path) -> None: + d = repo / ".specify" / "templates" + d.mkdir(parents=True, exist_ok=True) + for name in ARCH_TEMPLATES: + shutil.copy(PROJECT_ROOT / "templates" / name, d / name) + + +def _clean_env() -> dict[str, str]: + env = os.environ.copy() + for key in list(env): + if key.startswith("SPECIFY_"): + env.pop(key) + return env + + +def _powershell_script_arg(exe: str, script: Path) -> str: + if sys.platform != "win32" and str(exe).endswith("powershell.exe") and shutil.which("wslpath"): + result = subprocess.run( + ["wslpath", "-w", str(script)], + capture_output=True, + text=True, + check=True, + ) + return result.stdout.strip() + return str(script) + + +@pytest.fixture +def arch_repo(tmp_path: Path) -> Path: + repo = tmp_path / "proj" + repo.mkdir() + (repo / ".specify").mkdir() + _install_templates(repo) + _install_bash_scripts(repo) + _install_ps_scripts(repo) + return repo + + +def _json_from_output(output: str) -> dict[str, str]: + for line in 
reversed(output.strip().splitlines()): + line = line.strip() + if line.startswith("{") and line.endswith("}"): + return json.loads(line) + raise AssertionError(f"No JSON object found in output:\n{output}") + + +def _assert_arch_json(repo: Path, data: dict[str, str], *, exact_paths: bool = True) -> None: + expected = { + "ARCH_FILE": repo / ".specify" / "memory" / "architecture.md", + "ARCH_DIR": repo / ".specify" / "memory", + "SCENARIO_VIEW": repo / ".specify" / "memory" / "architecture-scenario-view.md", + "LOGICAL_VIEW": repo / ".specify" / "memory" / "architecture-logical-view.md", + "PROCESS_VIEW": repo / ".specify" / "memory" / "architecture-process-view.md", + "DEVELOPMENT_VIEW": repo / ".specify" / "memory" / "architecture-development-view.md", + "PHYSICAL_VIEW": repo / ".specify" / "memory" / "architecture-physical-view.md", + } + assert set(data) == set(expected) + for key, path in expected.items(): + if exact_paths: + assert Path(data[key]) == path + else: + normalized = data[key].replace("\\", "/") + assert normalized.endswith(path.relative_to(repo).as_posix()) + assert path.is_file() if key != "ARCH_DIR" else path.is_dir() + + +@requires_bash +def test_setup_arch_bash_creates_all_artifacts_and_json(arch_repo: Path) -> None: + script = arch_repo / ".specify" / "scripts" / "bash" / "setup-arch.sh" + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=arch_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + data = _json_from_output(result.stdout) + _assert_arch_json(arch_repo, data) + assert "Scenario View" in (arch_repo / ".specify" / "memory" / "architecture-scenario-view.md").read_text(encoding="utf-8") + + +@requires_bash +def test_setup_arch_bash_preserves_existing_files(arch_repo: Path) -> None: + existing = arch_repo / ".specify" / "memory" / "architecture-scenario-view.md" + existing.parent.mkdir(parents=True) + existing.write_text("# 
Custom Scenario\n", encoding="utf-8") + + script = arch_repo / ".specify" / "scripts" / "bash" / "setup-arch.sh" + result = subprocess.run( + ["bash", str(script), "--json"], + cwd=arch_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + assert existing.read_text(encoding="utf-8") == "# Custom Scenario\n" + + +@pytest.mark.skipif(not (HAS_PWSH or _POWERSHELL), reason="no PowerShell available") +def test_setup_arch_powershell_creates_all_artifacts_and_json(arch_repo: Path) -> None: + script = arch_repo / ".specify" / "scripts" / "powershell" / "setup-arch.ps1" + exe = "pwsh" if HAS_PWSH else _POWERSHELL + result = subprocess.run( + [exe, "-NoProfile", "-ExecutionPolicy", "Bypass", "-File", _powershell_script_arg(exe, script), "-Json"], + cwd=arch_repo, + capture_output=True, + text=True, + check=False, + env=_clean_env(), + ) + + assert result.returncode == 0, result.stderr + result.stdout + data = _json_from_output(result.stdout) + _assert_arch_json(arch_repo, data, exact_paths=False) From 57f509248c4da0fef9bb3f651cca968a2ebdade2 Mon Sep 17 00:00:00 2001 From: bigben <245982990@qq.com> Date: Tue, 12 May 2026 14:57:40 +0800 Subject: [PATCH 2/5] Add agent governance projection support --- src/specify_cli/__init__.py | 49 +++ src/specify_cli/agent_projection.py | 329 ++++++++++++++++++ src/specify_cli/agents.py | 39 ++- src/specify_cli/extensions.py | 1 + src/specify_cli/integrations/base.py | 28 +- .../integrations/claude/__init__.py | 5 +- templates/agent-governance-template.md | 72 ++++ templates/commands/governance.md | 63 ++++ templates/commands/implement.md | 10 + .../test_integration_base_markdown.py | 7 +- .../test_integration_base_skills.py | 26 +- .../test_integration_base_toml.py | 4 + .../test_integration_base_yaml.py | 4 + .../integrations/test_integration_copilot.py | 21 +- .../integrations/test_integration_generic.py | 6 + tests/test_agent_projection.py | 112 
++++++ 16 files changed, 745 insertions(+), 31 deletions(-) create mode 100644 src/specify_cli/agent_projection.py create mode 100644 templates/agent-governance-template.md create mode 100644 templates/commands/governance.md create mode 100644 tests/test_agent_projection.py diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 325692900e..3c4c2a0e36 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -74,6 +74,10 @@ install_shared_infra as _install_shared_infra_impl, refresh_shared_templates as _refresh_shared_templates_impl, ) +from .agent_projection import ( + ensure_agent_governance_from_template as _ensure_agent_governance_from_template, + refresh_agent_projection as _refresh_agent_projection, +) # For cross-platform keyboard input import readchar @@ -906,6 +910,46 @@ def ensure_constitution_from_template(project_path: Path, tracker: StepTracker | console.print(f"[yellow]Warning: Could not initialize constitution: {e}[/yellow]") +def ensure_agent_governance_from_template(project_path: Path, tracker: StepTracker | None = None) -> None: + """Copy agent-governance template to memory if it doesn't exist.""" + try: + result = _ensure_agent_governance_from_template(project_path) + except Exception as e: + if tracker: + tracker.add("agent-governance", "Agent governance setup") + tracker.error("agent-governance", str(e)) + else: + console.print(f"[yellow]Warning: Could not initialize agent governance: {e}[/yellow]") + return + + if tracker: + tracker.add("agent-governance", "Agent governance setup") + if result is None: + tracker.error("agent-governance", "template not found") + else: + tracker.complete("agent-governance", "available") + + +def refresh_agent_projection(project_path: Path, tracker: StepTracker | None = None) -> None: + """Refresh generated agent governance projections.""" + try: + result = _refresh_agent_projection(project_path) + except Exception as e: + if tracker: + tracker.add("agent-projection", 
"Agent governance projection") + tracker.error("agent-projection", str(e)) + else: + console.print(f"[yellow]Warning: Could not refresh agent projection: {e}[/yellow]") + return + + if tracker: + tracker.add("agent-projection", "Agent governance projection") + if result.memory_path is None: + tracker.skip("agent-projection", "agent-governance template missing") + else: + tracker.complete("agent-projection", f"{len(result.projection_paths)} file(s) refreshed") + + INIT_OPTIONS_FILE = ".specify/init-options.json" @@ -951,6 +995,7 @@ def _get_skills_dir(project_path: Path, selected_ai: str) -> Path: # Constants kept for backward compatibility with presets and extensions. DEFAULT_SKILLS_DIR = ".agents/skills" SKILL_DESCRIPTIONS = { + "governance": "Create or update agent governance and refresh agent instruction projections.", "specify": "Create or update feature specifications from natural language descriptions.", "plan": "Generate technical implementation plans from feature specifications.", "tasks": "Break down implementation plans into actionable task lists.", @@ -1339,6 +1384,8 @@ def init( tracker.complete("shared-infra", f"scripts ({selected_script}) + templates") ensure_constitution_from_template(project_path, tracker=tracker) + ensure_agent_governance_from_template(project_path, tracker=tracker) + refresh_agent_projection(project_path, tracker=tracker) if not no_git: tracker.start("git") @@ -1975,6 +2022,7 @@ def _write_integration_json( installed_integrations=installed_integrations, settings=integration_settings, ) + refresh_agent_projection(project_root) def _clear_init_options_for_integration(project_root: Path, integration_key: str) -> None: @@ -1993,6 +2041,7 @@ def _remove_integration_json(project_root: Path) -> None: path = project_root / INTEGRATION_JSON if path.exists(): path.unlink() + refresh_agent_projection(project_root) _MANIFEST_READ_ERRORS = (ValueError, FileNotFoundError, OSError, UnicodeDecodeError) diff --git 
a/src/specify_cli/agent_projection.py b/src/specify_cli/agent_projection.py new file mode 100644 index 0000000000..5353b5b932 --- /dev/null +++ b/src/specify_cli/agent_projection.py @@ -0,0 +1,329 @@ +"""Agent governance memory and projection helpers.""" + +from __future__ import annotations + +import json +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +import yaml + +from .integration_state import ( + INTEGRATION_JSON, + default_integration_key, + installed_integration_keys, + normalize_integration_state, +) + + +AGENT_GOVERNANCE_MEMORY = ".specify/memory/agent-governance.md" +AGENT_GOVERNANCE_TEMPLATE = ".specify/templates/agent-governance-template.md" + +PROJECTION_MARKER_START = "<!-- SPECKIT:AGENT-PROJECTION:START -->" +PROJECTION_MARKER_END = "<!-- SPECKIT:AGENT-PROJECTION:END -->" + + +@dataclass(frozen=True) +class AgentProjectionResult: + """Files updated by an agent projection refresh.""" + + memory_path: Path | None + projection_paths: list[Path] + + +def ensure_agent_governance_from_template(project_root: Path) -> Path | None: + """Copy agent-governance template to memory if missing.""" + memory_path = project_root / AGENT_GOVERNANCE_MEMORY + if memory_path.exists(): + return memory_path + + template_path = project_root / AGENT_GOVERNANCE_TEMPLATE + if not template_path.exists(): + return None + + memory_path.parent.mkdir(parents=True, exist_ok=True) + memory_path.write_bytes(template_path.read_bytes()) + return memory_path + + +def refresh_agent_projection(project_root: Path) -> AgentProjectionResult: + """Refresh repo-level and agent-specific governance projections. + + The source of truth is ``.specify/memory/agent-governance.md`` plus the + repository's current integration, skill, MCP, and extension state. Existing + text outside the generated projection markers is preserved. 
+ """ + memory_path = ensure_agent_governance_from_template(project_root) + if memory_path is None: + return AgentProjectionResult(None, []) + + state = _read_integration_state(project_root) + installed = installed_integration_keys(state) + default_key = default_integration_key(state) + projection_paths = _projection_targets(project_root, state) + projection = _render_projection(project_root, memory_path, state) + updated: list[Path] = [] + + for path in projection_paths: + content = _adapter_prelude(path, default_key, installed) + if path.exists(): + existing = path.read_text(encoding="utf-8-sig") + new_content = _upsert_marked_section(existing, projection) + if new_content == existing: + continue + else: + path.parent.mkdir(parents=True, exist_ok=True) + new_content = content + "\n" + projection + + path.write_text(_normalize_newlines(new_content), encoding="utf-8") + updated.append(path) + + return AgentProjectionResult(memory_path, updated) + + +def _read_integration_state(project_root: Path) -> dict[str, Any]: + path = project_root / INTEGRATION_JSON + if not path.exists(): + return normalize_integration_state({}) + try: + data = json.loads(path.read_text(encoding="utf-8")) + except (json.JSONDecodeError, OSError, UnicodeDecodeError): + return normalize_integration_state({}) + return normalize_integration_state(data if isinstance(data, dict) else {}) + + +def _projection_targets(project_root: Path, state: dict[str, Any]) -> list[Path]: + targets: list[Path] = [project_root / "AGENTS.md"] + + try: + from .integrations import get_integration + except Exception: + get_integration = None # type: ignore[assignment] + + for key in installed_integration_keys(state): + integration = get_integration(key) if get_integration else None + context_file = getattr(integration, "context_file", None) + if isinstance(context_file, str) and context_file.strip(): + targets.append(project_root / context_file) + + # Common adapter files. 
They are created when the corresponding + # integration is installed, and refreshed whenever they already exist so + # uninstall/switch operations do not leave stale generated projections. + for key, path in { + "claude": "CLAUDE.md", + "gemini": "GEMINI.md", + "copilot": ".github/copilot-instructions.md", + }.items(): + target = project_root / path + if key in installed_integration_keys(state) or target.exists(): + targets.append(target) + + deduped: list[Path] = [] + seen: set[str] = set() + for path in targets: + rel = path.resolve().as_posix() + if rel in seen: + continue + seen.add(rel) + deduped.append(path) + return deduped + + +def _render_projection( + project_root: Path, + memory_path: Path, + state: dict[str, Any], +) -> str: + installed = installed_integration_keys(state) + default_key = default_integration_key(state) + skills = _scan_skills(project_root) + mcp_configs = _scan_mcp_configs(project_root) + extensions = _scan_extensions(project_root) + governance_body = _read_governance_body(memory_path) + + lines = [ + PROJECTION_MARKER_START, + "# Repository Agent Governance Projection", + "", + "Generated from repository state. 
Do not edit this section directly; update", + f"`{AGENT_GOVERNANCE_MEMORY}`, integrations, skills, MCP config, or extensions instead.", + "", + "## Governing Source", + f"- Repository-level agent governance SSOT: `{AGENT_GOVERNANCE_MEMORY}`", + "- Project principles SSOT: `.specify/memory/constitution.md`", + "- Feature work SSOT: `specs/<feature>/`", + "", + "## Repository Agent Governance", + governance_body or "- No repository-level agent governance rules found.", + "", + "## Active Integrations", + f"- Default integration: `{default_key or 'none'}`", + f"- Installed integrations: {', '.join(f'`{key}`' for key in installed) if installed else '`none`'}", + "", + "## Active Skills", + ] + + if skills: + for skill in skills: + lines.append(f"- `{skill}`") + else: + lines.append("- `none detected`") + + lines.extend(["", "## MCP Configuration"]) + if mcp_configs: + for config in mcp_configs: + lines.append(f"- `{config}`") + else: + lines.append("- `none detected`") + + lines.extend(["", "## Extensions"]) + if extensions: + for extension in extensions: + lines.append(f"- `{extension}`") + else: + lines.append("- `none detected`") + + lines.extend([ + "", + "## Required Operating Rules", + "- Follow current user instructions first.", + "- Treat `.specify/memory/agent-governance.md` as the source of truth for repository-level agent, skill, MCP, and integration behavior.", + "- Treat `.specify/memory/constitution.md` as the source of truth for Specify project principles and quality gates.", + "- Keep governance domains separate: agent governance, constitution, and feature artifacts keep their own authority.", + "- Agent code writes are allowed only while executing the generated Spec Kit implement command or integration-equivalent implement skill/alias.", + "- Before writing code, tests, build configuration, migrations, runtime assets, or other implementation files, verify the active change has `spec.md`, `plan.md`, and `tasks.md` under `specs/<feature>/`.", + "- For bug fixes, 
refactors, and small code changes, create or update the required spec artifacts first; do not bypass the code-write gate.", + "- Do not edit governance, CI, MCP config, secrets, permissions, or tool settings unless explicitly requested.", + "- Do not overwrite user edits or modify files outside the active task scope.", + "- Report changed files, commands run, validation results, and unresolved risks before handoff.", + "", + f"_Projection source file: `{memory_path.relative_to(project_root).as_posix()}`_", + PROJECTION_MARKER_END, + "", + ]) + return "\n".join(lines) + + +def _read_governance_body(memory_path: Path) -> str: + """Return the user-governed content from agent-governance memory.""" + try: + content = memory_path.read_text(encoding="utf-8-sig") + except (OSError, UnicodeDecodeError): + return "" + + lines = content.replace("\r\n", "\n").replace("\r", "\n").splitlines() + filtered: list[str] = [] + in_sync_report = False + for line in lines: + stripped = line.strip() + if stripped == "": + in_sync_report = False + continue + filtered.append(line) + + body = "\n".join(filtered).strip() + if body.startswith("# "): + body = body.split("\n", 1)[1].strip() if "\n" in body else "" + return body + + +def _upsert_marked_section(content: str, projection: str) -> str: + start = content.find(PROJECTION_MARKER_START) + end = content.find(PROJECTION_MARKER_END, start if start != -1 else 0) + if start != -1 and end != -1 and end > start: + end += len(PROJECTION_MARKER_END) + if end < len(content) and content[end] == "\r": + end += 1 + if end < len(content) and content[end] == "\n": + end += 1 + return content[:start] + projection + content[end:] + + if content and not content.endswith("\n"): + content += "\n" + return content + ("\n" if content else "") + projection + + +def _adapter_prelude(path: Path, default_key: str | None, installed: list[str]) -> str: + name = path.name + if name == "AGENTS.md": + return "# Repository Agent Governance\n\nThis file is governed by 
the Spec Kit governance command. Preserve user-authored instructions outside the generated Spec Kit projection markers." + if name == "CLAUDE.md": + return "# Claude Instructions\n\nRead `AGENTS.md` first; it is the repository-level agent governance projection governed by the Spec Kit governance command." + if name == "GEMINI.md": + return "# Gemini Instructions\n\nRead `AGENTS.md` first; it is the repository-level agent governance projection governed by the Spec Kit governance command." + if name == "copilot-instructions.md": + return "# GitHub Copilot Instructions\n\nRead `AGENTS.md` first; it is the repository-level agent governance projection governed by the Spec Kit governance command." + installed_text = ", ".join(installed) if installed else "none" + return ( + "# Agent Instructions\n\n" + "Read `AGENTS.md` first; it is the repository-level agent governance projection governed by the Spec Kit governance command.\n\n" + f"Default integration: `{default_key or 'none'}`. Installed integrations: `{installed_text}`." 
+ ) + + +def _scan_skills(project_root: Path) -> list[str]: + skills: list[str] = [] + for skill_file in project_root.rglob("SKILL.md"): + if any(part in {".git", "__pycache__", ".venv", "node_modules"} for part in skill_file.parts): + continue + try: + rel = skill_file.relative_to(project_root).as_posix() + except ValueError: + rel = skill_file.as_posix() + skills.append(rel) + return sorted(skills) + + +def _scan_mcp_configs(project_root: Path) -> list[str]: + candidates: list[str] = [] + names = { + ".mcp.json", + "mcp.json", + "mcp.yml", + "mcp.yaml", + "mcp.config.json", + } + for path in project_root.rglob("*"): + if not path.is_file(): + continue + if any(part in {".git", "__pycache__", ".venv", "node_modules"} for part in path.parts): + continue + if path.name in names or "mcp" in path.name.lower(): + try: + candidates.append(path.relative_to(project_root).as_posix()) + except ValueError: + candidates.append(path.as_posix()) + return sorted(candidates) + + +def _scan_extensions(project_root: Path) -> list[str]: + registry = project_root / ".specify" / "extensions.yml" + if not registry.exists(): + return [] + try: + data = yaml.safe_load(registry.read_text(encoding="utf-8")) or {} + except (yaml.YAMLError, OSError, UnicodeDecodeError): + return [".specify/extensions.yml"] + if not isinstance(data, dict): + return [".specify/extensions.yml"] + extensions = data.get("extensions") + if isinstance(extensions, dict): + return sorted(str(key) for key in extensions) + if isinstance(extensions, list): + names = [] + for item in extensions: + if isinstance(item, dict) and item.get("id"): + names.append(str(item["id"])) + elif isinstance(item, str): + names.append(item) + return sorted(names) or [".specify/extensions.yml"] + return [".specify/extensions.yml"] + + +def _normalize_newlines(content: str) -> str: + return content.replace("\r\n", "\n").replace("\r", "\n") diff --git a/src/specify_cli/agents.py b/src/specify_cli/agents.py index 4d78d5ac41..e700eba3e2 
100644 --- a/src/specify_cli/agents.py +++ b/src/specify_cli/agents.py @@ -112,7 +112,7 @@ def render_frontmatter(fm: dict) -> str: return "" yaml_str = yaml.dump( - fm, default_flow_style=False, sort_keys=False, allow_unicode=True + fm, default_flow_style=False, sort_keys=False, allow_unicode=True, width=1000 ) return f"---\n{yaml_str}---\n" @@ -285,8 +285,8 @@ def render_skill_command( Technical debt note: Spec-kit currently has multiple SKILL.md generators (template packaging, init-time conversion, and extension/preset overrides). Keep the skill - frontmatter keys aligned (name/description/compatibility/metadata, with - metadata.author and metadata.source subkeys) to avoid drift across agents. + frontmatter keys aligned (name/description/purpose/trigger/boundaries/ + outputs/validation/compatibility/metadata) to avoid drift across agents. """ if not isinstance(frontmatter, dict): frontmatter = {} @@ -316,9 +316,42 @@ def build_skill_frontmatter( source: str, ) -> dict: """Build consistent SKILL.md frontmatter across all skill generators.""" + is_implement_skill = skill_name == "speckit-implement" + allowed_write_paths = [ + ".specify/**", + "specs/**", + ] + if is_implement_skill: + allowed_write_paths.extend([ + "**", + ]) skill_frontmatter = { "name": skill_name, "description": description, + "purpose": description, + "trigger": f"Invoke this skill for the `{skill_name}` Spec Kit workflow.", + "allowed-read-paths": [ + ".specify/**", + "specs/**", + "templates/**", + "scripts/**", + ], + "allowed-write-paths": allowed_write_paths, + "forbidden-paths": [ + ".git/**", + "**/.env*", + "**/secrets/**", + "**/*secret*", + "**/*token*", + ], + "outputs": [ + ( + "Implementation files, completed tasks.md checkboxes, validation results, and handoff summary" + if is_implement_skill + else "Workflow-specific spec artifacts and handoff summary" + ), + ], + "validation-command": "Run the validation commands required by the active Spec Kit workflow.", "compatibility": 
"Requires spec-kit project structure with .specify/ directory", "metadata": { "author": "github-spec-kit", diff --git a/src/specify_cli/extensions.py b/src/specify_cli/extensions.py index 944ee4a06d..924361ad74 100644 --- a/src/specify_cli/extensions.py +++ b/src/specify_cli/extensions.py @@ -26,6 +26,7 @@ from packaging.specifiers import SpecifierSet, InvalidSpecifier _FALLBACK_CORE_COMMAND_NAMES = frozenset({ + "governance", "analyze", "checklist", "clarify", diff --git a/src/specify_cli/integrations/base.py b/src/specify_cli/integrations/base.py index 7ce107caec..7294889842 100644 --- a/src/specify_cli/integrations/base.py +++ b/src/specify_cli/integrations/base.py @@ -1483,24 +1483,18 @@ def setup( if not description: description = f"Spec Kit: {command_name} workflow" - # Build SKILL.md with manually formatted frontmatter to match - # the release packaging script output exactly (double-quoted - # values, no yaml.safe_dump quoting differences). - def _quote(v: str) -> str: - escaped = v.replace("\\", "\\\\").replace('"', '\\"') - return f'"{escaped}"' - - skill_content = ( - f"---\n" - f"name: {_quote(skill_name)}\n" - f"description: {_quote(description)}\n" - f"compatibility: {_quote('Requires spec-kit project structure with .specify/ directory')}\n" - f"metadata:\n" - f" author: {_quote('github-spec-kit')}\n" - f" source: {_quote('templates/commands/' + src_file.name)}\n" - f"---\n" - f"{processed_body}" + from specify_cli.agents import CommandRegistrar + + skill_frontmatter = CommandRegistrar.build_skill_frontmatter( + self.key, + skill_name, + description, + f"templates/commands/{src_file.name}", ) + frontmatter_text = yaml.safe_dump( + skill_frontmatter, sort_keys=False, width=1000 + ).strip() + skill_content = f"---\n{frontmatter_text}\n---\n{processed_body}" # Write speckit-/SKILL.md skill_dir = skills_dir / skill_name diff --git a/src/specify_cli/integrations/claude/__init__.py b/src/specify_cli/integrations/claude/__init__.py index 
88aef85285..8b888df6c8 100644 --- a/src/specify_cli/integrations/claude/__init__.py +++ b/src/specify_cli/integrations/claude/__init__.py @@ -30,6 +30,7 @@ "analyze": "Optional focus areas for analysis", "clarify": "Optional areas to clarify in the spec", "constitution": "Principles or values for the project constitution", + "governance": "Optional agent governance rules or projection scope", "checklist": "Domain or focus area for the checklist", "taskstoissues": "Optional filter or label for GitHub issues", } @@ -113,7 +114,9 @@ def _render_skill(self, template_name: str, frontmatter: dict[str, Any], body: s skill_frontmatter = self._build_skill_fm( skill_name, description, f"templates/commands/{template_name}.md" ) - frontmatter_text = yaml.safe_dump(skill_frontmatter, sort_keys=False).strip() + frontmatter_text = yaml.safe_dump( + skill_frontmatter, sort_keys=False, width=1000 + ).strip() return f"---\n{frontmatter_text}\n---\n\n{body.strip()}\n" def _build_skill_fm(self, name: str, description: str, source: str) -> dict: diff --git a/templates/agent-governance-template.md b/templates/agent-governance-template.md new file mode 100644 index 0000000000..ba80f3845f --- /dev/null +++ b/templates/agent-governance-template.md @@ -0,0 +1,72 @@ +# Repository Agent Governance + +This file is the source of truth for repository-level agent collaboration and generated agent instruction projections such as `AGENTS.md` and active integration context files. + +It does not define project principles, architecture decisions, or feature requirements. Those remain governed by their own source files. + + + +## Authority Order + +1. Current user instruction +2. This repository agent governance file +3. User-authored repository instructions preserved outside generated projection markers +4. `.specify/memory/constitution.md` +5. Active feature artifacts under `specs//` +6. Skill-local `SKILL.md` +7. 
Tool/MCP defaults + +## Source Of Truth + +- Project principles: `.specify/memory/constitution.md` +- Feature work: `specs//` +- Repository-level agent governance: `.specify/memory/agent-governance.md` +- Agent instruction projections: `AGENTS.md` and active integration context files +- Skill contracts: each `SKILL.md` +- MCP permissions: MCP configuration and allowlists + +## Write Boundaries + +- Agent code writes are allowed only while executing the generated Spec Kit implement command or integration-equivalent implement skill/alias, such as `/speckit.implement` or `/speckit-implement`. +- Before any agent writes source code, tests, build configuration, migrations, runtime assets, or other implementation files, the active change MUST have `spec.md`, `plan.md`, and `tasks.md` under `specs//`. +- Bug fixes, refactors, and small code changes are not exceptions. If the required spec artifacts do not exist, first create or update the spec artifacts through the Spec Kit workflow, then stop before implementation. +- Direct user requests to "just edit code" or similar are treated as requests to run the required spec workflow; they are not permission to bypass the code-write gate. +- Do not edit governance, CI, MCP config, secrets, permissions, or tool settings unless explicitly requested. +- Do not modify files outside the active task scope. +- Do not overwrite user edits. +- Do not rewrite generated files unless the owning workflow requires it. + +## Skill Contract + +Each skill must declare: + +- purpose +- trigger +- allowed read paths +- allowed write paths +- forbidden paths +- outputs +- validation command + +## MCP Policy + +- MCP tools are read-only by default. +- Mutating MCP calls require explicit user intent. +- External writes must report target, action, and result. +- Secrets and tokens must never be logged or written to repo files. 
+ +## Validation + +Before handoff, report: + +- changed files +- commands run +- tests/validation result +- unresolved risks diff --git a/templates/commands/governance.md b/templates/commands/governance.md new file mode 100644 index 0000000000..d2fa3bdd58 --- /dev/null +++ b/templates/commands/governance.md @@ -0,0 +1,63 @@ +--- +description: Create or update agent governance and refresh agent instruction projections. +--- + +## User Input + +```text +$ARGUMENTS +``` + +You **MUST** consider the user input before proceeding (if not empty). + +## Outline + +You are updating repository-level agent governance for this project. The source file is `.specify/memory/agent-governance.md`. Generated agent instruction projections such as `AGENTS.md` and active integration context files are owned by `__SPECKIT_COMMAND_GOVERNANCE__`. + +This command governs agent collaboration, skill usage, MCP/tool permissions, and integration adapter behavior. It MUST NOT redefine project principles or feature requirements. + +**Note**: If `.specify/memory/agent-governance.md` does not exist yet, copy `.specify/templates/agent-governance-template.md` first. + +Follow this execution flow: + +1. Load `.specify/memory/agent-governance.md`. +2. Load supporting context if present: + - `.specify/memory/constitution.md` for project principles and quality gates. + - `.specify/integration.json` for installed/default integrations. + - Any `SKILL.md` files for skill-local contracts. + - MCP configuration files such as `.mcp.json`, `mcp.json`, `mcp.yml`, or `mcp.yaml`. + - `.specify/extensions.yml` for enabled extensions. +3. Update agent governance: + - Keep the authority order explicit. + - Keep source-of-truth boundaries between agent governance, constitution, skills, MCP, and feature artifacts. + - Keep write boundaries testable and concrete. + - Require explicit user intent for mutating MCP calls and external writes. 
+ - Preserve user-authored repo-specific rules unless they conflict with higher authority. +4. Refresh projections: + - `AGENTS.md` + - active integration context files such as `CLAUDE.md`, `GEMINI.md`, `.github/copilot-instructions.md`, and other registered `context_file` paths. + - Preserve content outside `` and ``. +5. Produce a Sync Impact Report in `.specify/memory/agent-governance.md`: + - Active/default integration + - Installed integrations + - Skills scanned + - MCP config files scanned + - Projection files refreshed + - Follow-up TODOs + +## Validation + +- No projection file should duplicate long governance text outside the generated projection markers. +- `AGENTS.md` is the repo-level agent governance projection owned by `__SPECKIT_COMMAND_GOVERNANCE__`. +- Agent-specific files are adapters that point back to `AGENTS.md`. +- Specify governance files keep their own authority and must not be rewritten by this command unless the user explicitly requests that separate change. +- Do not modify `.specify/memory/constitution.md`, feature specs, plans, tasks, source code, tests, CI, MCP config, or secrets unless the user explicitly requested that separate change. + +## Output + +Report: + +- Whether `.specify/memory/agent-governance.md` was created or updated. +- Projection files refreshed. +- Skills and MCP config files detected. +- Any unresolved governance risks. diff --git a/templates/commands/implement.md b/templates/commands/implement.md index 52a042161f..7c8dfc6bea 100644 --- a/templates/commands/implement.md +++ b/templates/commands/implement.md @@ -13,6 +13,16 @@ $ARGUMENTS You **MUST** consider the user input before proceeding (if not empty). +## Code-Write Authority + +This command is the only Spec Kit workflow that may write implementation files. +Implementation files include source code, tests, build configuration, migrations, runtime assets, and other files that change repository behavior. 
+ +- Do not write implementation files unless this command is active. +- Bug fixes, refactors, and one-line changes must still enter through this command before implementation files are changed. +- Before writing implementation files, confirm the active feature directory contains `spec.md`, `plan.md`, and `tasks.md`. If any are missing, stop and instruct the user to run the required Spec Kit workflow commands first. +- Writes to `.specify/`, `specs//`, and generated agent command/context files remain governed by their owning Spec Kit workflows. + ## Pre-Execution Checks **Check for extension hooks (before implementation)**: diff --git a/tests/integrations/test_integration_base_markdown.py b/tests/integrations/test_integration_base_markdown.py index 0b74a6f1a9..26ed89726a 100644 --- a/tests/integrations/test_integration_base_markdown.py +++ b/tests/integrations/test_integration_base_markdown.py @@ -252,7 +252,7 @@ def test_init_options_includes_context_file(self, tmp_path): # -- Complete file inventory ------------------------------------------ COMMAND_STEMS = [ - "analyze", "checklist", "clarify", "constitution", + "governance", "analyze", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", ] @@ -281,11 +281,14 @@ def _expected_files(self, script_variant: str) -> list[str]: "setup-plan.ps1", "setup-tasks.ps1"]: files.append(f".specify/scripts/powershell/{name}") - for name in ["checklist-template.md", + for name in ["agent-governance-template.md", + "checklist-template.md", "constitution-template.md", "plan-template.md", "spec-template.md", "tasks-template.md"]: files.append(f".specify/templates/{name}") + files.append("AGENTS.md") + files.append(".specify/memory/agent-governance.md") files.append(".specify/memory/constitution.md") # Bundled workflow files.append(".specify/workflows/speckit/workflow.yml") diff --git a/tests/integrations/test_integration_base_skills.py b/tests/integrations/test_integration_base_skills.py 
index 89140de1c3..1670518505 100644 --- a/tests/integrations/test_integration_base_skills.py +++ b/tests/integrations/test_integration_base_skills.py @@ -100,7 +100,7 @@ def test_skill_directory_structure(self, tmp_path): skill_files = [f for f in created if "scripts" not in f.parts] expected_commands = { - "analyze", "checklist", "clarify", "constitution", + "governance", "analyze", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", } @@ -114,7 +114,7 @@ def test_skill_directory_structure(self, tmp_path): assert actual_commands == expected_commands def test_skill_frontmatter_structure(self, tmp_path): - """SKILL.md must have name, description, compatibility, metadata.""" + """SKILL.md must have governance contract frontmatter.""" i = get_integration(self.KEY) m = IntegrationManifest(self.KEY, tmp_path) created = i.setup(tmp_path, m) @@ -127,8 +127,23 @@ def test_skill_frontmatter_structure(self, tmp_path): fm = yaml.safe_load(parts[1]) assert "name" in fm, f"{f} frontmatter missing 'name'" assert "description" in fm, f"{f} frontmatter missing 'description'" + assert "purpose" in fm, f"{f} frontmatter missing 'purpose'" + assert "trigger" in fm, f"{f} frontmatter missing 'trigger'" + assert "allowed-read-paths" in fm, f"{f} frontmatter missing 'allowed-read-paths'" + assert "allowed-write-paths" in fm, f"{f} frontmatter missing 'allowed-write-paths'" + assert "forbidden-paths" in fm, f"{f} frontmatter missing 'forbidden-paths'" + assert "outputs" in fm, f"{f} frontmatter missing 'outputs'" + assert "validation-command" in fm, f"{f} frontmatter missing 'validation-command'" assert "compatibility" in fm, f"{f} frontmatter missing 'compatibility'" assert "metadata" in fm, f"{f} frontmatter missing 'metadata'" + assert ".specify/**" in fm["allowed-read-paths"] + assert ".git/**" in fm["forbidden-paths"] + if fm["name"] == "speckit-implement": + assert "**" in fm["allowed-write-paths"] + assert "Implementation files, 
completed tasks.md checkboxes, validation results, and handoff summary" in fm["outputs"] + else: + assert fm["allowed-write-paths"] == [".specify/**", "specs/**"] + assert "Workflow-specific spec artifacts and handoff summary" in fm["outputs"] assert fm["metadata"]["author"] == "github-spec-kit" assert "source" in fm["metadata"] @@ -359,7 +374,7 @@ def test_options_include_skills_flag(self): # -- Complete file inventory ------------------------------------------ _SKILL_COMMANDS = [ - "analyze", "checklist", "clarify", "constitution", + "governance", "analyze", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", ] @@ -378,6 +393,7 @@ def _expected_files(self, script_variant: str) -> list[str]: ".specify/integration.json", f".specify/integrations/{self.KEY}.manifest.json", ".specify/integrations/speckit.manifest.json", + ".specify/memory/agent-governance.md", ".specify/memory/constitution.md", ] # Script variant @@ -399,6 +415,7 @@ def _expected_files(self, script_variant: str) -> list[str]: ] # Templates files += [ + ".specify/templates/agent-governance-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -413,7 +430,8 @@ def _expected_files(self, script_variant: str) -> list[str]: # Agent context file (if set) if i.context_file: files.append(i.context_file) - return sorted(files) + files.append("AGENTS.md") + return sorted(set(files)) def test_complete_file_inventory_sh(self, tmp_path): """Every file produced by specify init --integration --script sh.""" diff --git a/tests/integrations/test_integration_base_toml.py b/tests/integrations/test_integration_base_toml.py index 56862e534c..1efb37b7c3 100644 --- a/tests/integrations/test_integration_base_toml.py +++ b/tests/integrations/test_integration_base_toml.py @@ -483,6 +483,7 @@ def test_init_options_includes_context_file(self, tmp_path): # -- Complete file inventory 
------------------------------------------ COMMAND_STEMS = [ + "governance", "analyze", "checklist", "clarify", @@ -530,6 +531,7 @@ def _expected_files(self, script_variant: str) -> list[str]: files.append(f".specify/scripts/powershell/{name}") for name in [ + "agent-governance-template.md", "checklist-template.md", "constitution-template.md", "plan-template.md", @@ -538,6 +540,8 @@ def _expected_files(self, script_variant: str) -> list[str]: ]: files.append(f".specify/templates/{name}") + files.append("AGENTS.md") + files.append(".specify/memory/agent-governance.md") files.append(".specify/memory/constitution.md") # Bundled workflow files.append(".specify/workflows/speckit/workflow.yml") diff --git a/tests/integrations/test_integration_base_yaml.py b/tests/integrations/test_integration_base_yaml.py index 956c7a796f..3b2b9f9ff8 100644 --- a/tests/integrations/test_integration_base_yaml.py +++ b/tests/integrations/test_integration_base_yaml.py @@ -362,6 +362,7 @@ def test_init_options_includes_context_file(self, tmp_path): # -- Complete file inventory ------------------------------------------ COMMAND_STEMS = [ + "governance", "analyze", "checklist", "clarify", @@ -409,6 +410,7 @@ def _expected_files(self, script_variant: str) -> list[str]: files.append(f".specify/scripts/powershell/{name}") for name in [ + "agent-governance-template.md", "checklist-template.md", "constitution-template.md", "plan-template.md", @@ -417,6 +419,8 @@ def _expected_files(self, script_variant: str) -> list[str]: ]: files.append(f".specify/templates/{name}") + files.append("AGENTS.md") + files.append(".specify/memory/agent-governance.md") files.append(".specify/memory/constitution.md") # Bundled workflow files.append(".specify/workflows/speckit/workflow.yml") diff --git a/tests/integrations/test_integration_copilot.py b/tests/integrations/test_integration_copilot.py index c6e9259b09..39a9dd047b 100644 --- a/tests/integrations/test_integration_copilot.py +++ 
b/tests/integrations/test_integration_copilot.py @@ -125,9 +125,9 @@ def test_directory_structure(self, tmp_path): agents_dir = tmp_path / ".github" / "agents" assert agents_dir.is_dir() agent_files = sorted(agents_dir.glob("speckit.*.agent.md")) - assert len(agent_files) == 9 + assert len(agent_files) == 10 expected_commands = { - "analyze", "checklist", "clarify", "constitution", + "governance", "analyze", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", } actual_commands = {f.name.removeprefix("speckit.").removesuffix(".agent.md") for f in agent_files} @@ -178,7 +178,9 @@ def test_complete_file_inventory_sh(self, tmp_path): assert result.exit_code == 0 actual = sorted(p.relative_to(project).as_posix() for p in project.rglob("*") if p.is_file()) expected = sorted([ + "AGENTS.md", ".github/agents/speckit.analyze.agent.md", + ".github/agents/speckit.governance.agent.md", ".github/agents/speckit.checklist.agent.md", ".github/agents/speckit.clarify.agent.md", ".github/agents/speckit.constitution.agent.md", @@ -188,6 +190,7 @@ def test_complete_file_inventory_sh(self, tmp_path): ".github/agents/speckit.tasks.agent.md", ".github/agents/speckit.taskstoissues.agent.md", ".github/prompts/speckit.analyze.prompt.md", + ".github/prompts/speckit.governance.prompt.md", ".github/prompts/speckit.checklist.prompt.md", ".github/prompts/speckit.clarify.prompt.md", ".github/prompts/speckit.constitution.prompt.md", @@ -207,11 +210,13 @@ def test_complete_file_inventory_sh(self, tmp_path): ".specify/scripts/bash/create-new-feature.sh", ".specify/scripts/bash/setup-plan.sh", ".specify/scripts/bash/setup-tasks.sh", + ".specify/templates/agent-governance-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", ".specify/templates/spec-template.md", ".specify/templates/tasks-template.md", + ".specify/memory/agent-governance.md", 
".specify/memory/constitution.md", ".specify/workflows/speckit/workflow.yml", ".specify/workflows/workflow-registry.json", @@ -238,7 +243,9 @@ def test_complete_file_inventory_ps(self, tmp_path): assert result.exit_code == 0 actual = sorted(p.relative_to(project).as_posix() for p in project.rglob("*") if p.is_file()) expected = sorted([ + "AGENTS.md", ".github/agents/speckit.analyze.agent.md", + ".github/agents/speckit.governance.agent.md", ".github/agents/speckit.checklist.agent.md", ".github/agents/speckit.clarify.agent.md", ".github/agents/speckit.constitution.agent.md", @@ -248,6 +255,7 @@ def test_complete_file_inventory_ps(self, tmp_path): ".github/agents/speckit.tasks.agent.md", ".github/agents/speckit.taskstoissues.agent.md", ".github/prompts/speckit.analyze.prompt.md", + ".github/prompts/speckit.governance.prompt.md", ".github/prompts/speckit.checklist.prompt.md", ".github/prompts/speckit.clarify.prompt.md", ".github/prompts/speckit.constitution.prompt.md", @@ -267,11 +275,13 @@ def test_complete_file_inventory_ps(self, tmp_path): ".specify/scripts/powershell/create-new-feature.ps1", ".specify/scripts/powershell/setup-plan.ps1", ".specify/scripts/powershell/setup-tasks.ps1", + ".specify/templates/agent-governance-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", ".specify/templates/spec-template.md", ".specify/templates/tasks-template.md", + ".specify/memory/agent-governance.md", ".specify/memory/constitution.md", ".specify/workflows/speckit/workflow.yml", ".specify/workflows/workflow-registry.json", @@ -286,7 +296,7 @@ class TestCopilotSkillsMode: """Tests for Copilot integration in --skills mode.""" _SKILL_COMMANDS = [ - "analyze", "checklist", "clarify", "constitution", + "governance", "analyze", "checklist", "clarify", "constitution", "implement", "plan", "specify", "tasks", "taskstoissues", ] @@ -604,6 +614,7 @@ def 
test_complete_file_inventory_skills_sh(self, tmp_path): expected = sorted([ # Skill files *[f".github/skills/speckit-{cmd}/SKILL.md" for cmd in self._SKILL_COMMANDS], + "AGENTS.md", # Context file ".github/copilot-instructions.md", # Integration metadata @@ -618,11 +629,13 @@ def test_complete_file_inventory_skills_sh(self, tmp_path): ".specify/scripts/bash/setup-plan.sh", ".specify/scripts/bash/setup-tasks.sh", # Templates + ".specify/templates/agent-governance-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", ".specify/templates/spec-template.md", ".specify/templates/tasks-template.md", + ".specify/memory/agent-governance.md", ".specify/memory/constitution.md", # Bundled workflow ".specify/workflows/speckit/workflow.yml", @@ -724,4 +737,4 @@ def test_init_skills_next_steps_show_skill_syntax(self, tmp_path): # Must NOT show the dotted /speckit.plan form assert "/speckit.plan" not in result.output, ( f"Should not show /speckit.plan in skills mode:\n{result.output}" - ) \ No newline at end of file + ) diff --git a/tests/integrations/test_integration_generic.py b/tests/integrations/test_integration_generic.py index 4f515a01d2..5902756fd8 100644 --- a/tests/integrations/test_integration_generic.py +++ b/tests/integrations/test_integration_generic.py @@ -257,6 +257,7 @@ def test_complete_file_inventory_sh(self, tmp_path): expected = sorted([ "AGENTS.md", ".myagent/commands/speckit.analyze.md", + ".myagent/commands/speckit.governance.md", ".myagent/commands/speckit.checklist.md", ".myagent/commands/speckit.clarify.md", ".myagent/commands/speckit.constitution.md", @@ -269,12 +270,14 @@ def test_complete_file_inventory_sh(self, tmp_path): ".specify/integration.json", ".specify/integrations/generic.manifest.json", ".specify/integrations/speckit.manifest.json", + ".specify/memory/agent-governance.md", ".specify/memory/constitution.md", 
".specify/scripts/bash/check-prerequisites.sh", ".specify/scripts/bash/common.sh", ".specify/scripts/bash/create-new-feature.sh", ".specify/scripts/bash/setup-plan.sh", ".specify/scripts/bash/setup-tasks.sh", + ".specify/templates/agent-governance-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", @@ -313,6 +316,7 @@ def test_complete_file_inventory_ps(self, tmp_path): expected = sorted([ "AGENTS.md", ".myagent/commands/speckit.analyze.md", + ".myagent/commands/speckit.governance.md", ".myagent/commands/speckit.checklist.md", ".myagent/commands/speckit.clarify.md", ".myagent/commands/speckit.constitution.md", @@ -325,12 +329,14 @@ def test_complete_file_inventory_ps(self, tmp_path): ".specify/integration.json", ".specify/integrations/generic.manifest.json", ".specify/integrations/speckit.manifest.json", + ".specify/memory/agent-governance.md", ".specify/memory/constitution.md", ".specify/scripts/powershell/check-prerequisites.ps1", ".specify/scripts/powershell/common.ps1", ".specify/scripts/powershell/create-new-feature.ps1", ".specify/scripts/powershell/setup-plan.ps1", ".specify/scripts/powershell/setup-tasks.ps1", + ".specify/templates/agent-governance-template.md", ".specify/templates/checklist-template.md", ".specify/templates/constitution-template.md", ".specify/templates/plan-template.md", diff --git a/tests/test_agent_projection.py b/tests/test_agent_projection.py new file mode 100644 index 0000000000..83d5f0cef7 --- /dev/null +++ b/tests/test_agent_projection.py @@ -0,0 +1,112 @@ +import json +import shutil +from pathlib import Path + +from specify_cli.agent_projection import ( + AGENT_GOVERNANCE_MEMORY, + PROJECTION_MARKER_START, + ensure_agent_governance_from_template, + refresh_agent_projection, +) + + +REPO_ROOT = Path(__file__).resolve().parent.parent + + +def _copy_template(project: Path, name: str) -> None: + dest = project / ".specify" / "templates" / name 
+ dest.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(REPO_ROOT / "templates" / name, dest) + + +def test_ensure_agent_governance_from_template(tmp_path): + _copy_template(tmp_path, "agent-governance-template.md") + + result = ensure_agent_governance_from_template(tmp_path) + + assert result == tmp_path / AGENT_GOVERNANCE_MEMORY + content = result.read_text(encoding="utf-8") + assert "# Repository Agent Governance" in content + assert "## Authority Order" in content + assert "Agent code writes are allowed only while executing the generated Spec Kit implement command" in content + assert "/speckit.implement" in content + assert "/speckit-implement" in content + assert "Bug fixes, refactors, and small code changes are not exceptions" in content + assert content.index("2. This repository agent governance file") < content.index( + "3. User-authored repository instructions preserved outside generated projection markers" + ) + + +def test_refresh_agent_projection_creates_repo_and_agent_adapters(tmp_path): + _copy_template(tmp_path, "agent-governance-template.md") + (tmp_path / ".specify" / "integration.json").parent.mkdir(parents=True, exist_ok=True) + (tmp_path / ".specify" / "integration.json").write_text( + json.dumps( + { + "integration": "gemini", + "default_integration": "gemini", + "installed_integrations": ["gemini", "copilot"], + "integration_settings": {}, + } + ), + encoding="utf-8", + ) + (tmp_path / ".gemini" / "commands" / "speckit-test" / "SKILL.md").parent.mkdir( + parents=True, + exist_ok=True, + ) + (tmp_path / ".gemini" / "commands" / "speckit-test" / "SKILL.md").write_text( + "# Test Skill\n", + encoding="utf-8", + ) + (tmp_path / ".mcp.json").write_text("{}", encoding="utf-8") + + result = refresh_agent_projection(tmp_path) + + assert result.memory_path == tmp_path / AGENT_GOVERNANCE_MEMORY + assert (tmp_path / "AGENTS.md").exists() + assert (tmp_path / "GEMINI.md").exists() + assert (tmp_path / ".github" / 
"copilot-instructions.md").exists() + agents = (tmp_path / "AGENTS.md").read_text(encoding="utf-8") + assert PROJECTION_MARKER_START in agents + assert "Default integration: `gemini`" in agents + assert "Feature work SSOT: `specs//`" in agents + assert "Agent code writes are allowed only while executing the generated Spec Kit implement command" in agents + assert "verify the active change has `spec.md`, `plan.md`, and `tasks.md`" in agents + assert "Architecture SSOT: artifacts produced by `/speckit.arch`" not in agents + assert "Scenario semantics: `/speckit.arch` scenario view" not in agents + assert "Business semantics SSOT: `.specify/memory/uc.md`" not in agents + assert "`.gemini/commands/speckit-test/SKILL.md`" in agents + assert "`.mcp.json`" in agents + + +def test_refresh_agent_projection_preserves_user_content(tmp_path): + _copy_template(tmp_path, "agent-governance-template.md") + agents = tmp_path / "AGENTS.md" + agents.write_text("# Custom Rules\n\nKeep this.\n", encoding="utf-8") + + refresh_agent_projection(tmp_path) + + content = agents.read_text(encoding="utf-8") + assert "# Custom Rules" in content + assert "Keep this." in content + assert PROJECTION_MARKER_START in content + + +def test_refresh_agent_projection_projects_governance_memory_rules(tmp_path): + _copy_template(tmp_path, "agent-governance-template.md") + memory = ensure_agent_governance_from_template(tmp_path) + assert memory is not None + content = memory.read_text(encoding="utf-8") + memory.write_text( + content + "\n## Repository-Specific Rules\n\n- Always report MCP writes before execution.\n", + encoding="utf-8", + ) + + refresh_agent_projection(tmp_path) + + agents = (tmp_path / "AGENTS.md").read_text(encoding="utf-8") + assert "## Repository Agent Governance" in agents + assert "## Repository-Specific Rules" in agents + assert "- Always report MCP writes before execution." 
in agents + assert "Sync Impact Report" not in agents From 31e0037ac264fb38daf1d7441ebbc7026165c7d3 Mon Sep 17 00:00:00 2001 From: bigben <245982990@qq.com> Date: Wed, 13 May 2026 11:37:47 +0800 Subject: [PATCH 3/5] Add orchestrated implement workflow --- .../speckit.orchestrated.implement.md | 17 + extensions/orchestrated/extension.yml | 24 + pyproject.toml | 2 + src/specify_cli/__init__.py | 133 ++++-- src/specify_cli/extensions.py | 10 +- src/specify_cli/workflows/__init__.py | 2 + .../steps/speckit_task_shards/__init__.py | 436 ++++++++++++++++++ tests/extensions/orchestrated/__init__.py | 1 + .../test_orchestrated_extension.py | 62 +++ tests/integrations/test_cli.py | 41 ++ tests/test_workflows.py | 201 ++++++++ workflows/catalog.json | 9 + .../workflow.yml | 42 ++ 13 files changed, 939 insertions(+), 41 deletions(-) create mode 100644 extensions/orchestrated/commands/speckit.orchestrated.implement.md create mode 100644 extensions/orchestrated/extension.yml create mode 100644 src/specify_cli/workflows/steps/speckit_task_shards/__init__.py create mode 100644 tests/extensions/orchestrated/__init__.py create mode 100644 tests/extensions/orchestrated/test_orchestrated_extension.py create mode 100644 workflows/speckit-orchestrated-implement/workflow.yml diff --git a/extensions/orchestrated/commands/speckit.orchestrated.implement.md b/extensions/orchestrated/commands/speckit.orchestrated.implement.md new file mode 100644 index 0000000000..11c494cea2 --- /dev/null +++ b/extensions/orchestrated/commands/speckit.orchestrated.implement.md @@ -0,0 +1,17 @@ +--- +description: Execute the implementation plan by splitting tasks.md into workflow handoff shards +--- + +## User Input + +```text +$ARGUMENTS +``` + +Run the orchestrated implementation workflow from the repository root: + +```sh +specify workflow run speckit-orchestrated-implement -i integration=__AGENT__ -i args="$ARGUMENTS" +``` + +Wait for the workflow to complete. 
If it fails while building handoff shards, report the error and do not run `speckit.implement` manually. If a shard fails during fan-out, report the failing shard and preserve the generated handoff files for resume or debugging. diff --git a/extensions/orchestrated/extension.yml b/extensions/orchestrated/extension.yml new file mode 100644 index 0000000000..dfcff98350 --- /dev/null +++ b/extensions/orchestrated/extension.yml @@ -0,0 +1,24 @@ +schema_version: "1.0" + +extension: + id: orchestrated + name: "Orchestrated Implementation" + version: "1.0.0" + description: "Split implementation tasks into handoff shards and run speckit.implement for each shard" + author: spec-kit-core + repository: https://github.com/github/spec-kit + license: MIT + +requires: + speckit_version: ">=0.8.9.dev0" + +provides: + commands: + - name: speckit.orchestrated.implement + file: commands/speckit.orchestrated.implement.md + description: "Run implementation through workflow-generated handoff shards" + +tags: + - "orchestration" + - "implementation" + - "workflow" diff --git a/pyproject.toml b/pyproject.toml index c488cdd37d..30d27a71da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,8 +47,10 @@ packages = ["src/specify_cli"] "scripts/powershell" = "specify_cli/core_pack/scripts/powershell" # Bundled extensions (installable via `specify extension add `) "extensions/git" = "specify_cli/core_pack/extensions/git" +"extensions/orchestrated" = "specify_cli/core_pack/extensions/orchestrated" # Bundled workflows (auto-installed during `specify init`) "workflows/speckit" = "specify_cli/core_pack/workflows/speckit" +"workflows/speckit-orchestrated-implement" = "specify_cli/core_pack/workflows/speckit-orchestrated-implement" # Bundled presets (installable via `specify preset add ` or `specify init --preset `) "presets/lean" = "specify_cli/core_pack/presets/lean" diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index ccc8fbaa09..4e6f56c969 100644 --- 
def _install_bundled_workflows(project_path: Path) -> str:
    """Install the default bundled workflows and return a tracker summary.

    For each workflow id in ``DEFAULT_BUNDLED_WORKFLOWS``, copies the bundled
    ``workflow.yml`` into ``.specify/workflows/<id>`` and records it in the
    project's workflow registry.  Workflows that are missing from the bundle
    or already installed are reported but do not abort the loop.

    Args:
        project_path: Root of the target project.

    Returns:
        A semicolon-joined, per-workflow summary for the init progress
        tracker (e.g. ``"speckit installed; foo already installed"``), or
        ``"none"`` when there was nothing to report.
    """
    # Defensive local import: the code this helper replaced imported shutil
    # function-locally (`import shutil as _shutil`), which suggests there may
    # be no module-level `shutil` import in this file.  A local import is
    # harmless if one exists and prevents a NameError if it does not.
    import shutil

    # Deferred imports keep CLI startup cheap; these are only needed by init.
    from .workflows.catalog import WorkflowRegistry
    from .workflows.engine import WorkflowDefinition

    wf_registry = WorkflowRegistry(project_path)
    messages: list[str] = []

    for workflow_id in DEFAULT_BUNDLED_WORKFLOWS:
        bundled_wf = _locate_bundled_workflow(workflow_id)
        if not bundled_wf:
            messages.append(f"{workflow_id} not found")
            continue
        if wf_registry.is_installed(workflow_id):
            messages.append(f"{workflow_id} already installed")
            continue

        dest_wf = project_path / ".specify" / "workflows" / workflow_id
        dest_wf.mkdir(parents=True, exist_ok=True)
        shutil.copy2(bundled_wf / "workflow.yml", dest_wf / "workflow.yml")

        # Re-read the copied file so the registry entry reflects exactly what
        # was installed, not what the bundle claims.
        definition = WorkflowDefinition.from_yaml(dest_wf / "workflow.yml")
        wf_registry.add(
            workflow_id,
            {
                "name": definition.name,
                "version": definition.version,
                "description": definition.description,
                "source": "bundled",
            },
        )
        messages.append(f"{workflow_id} installed")

    return "; ".join(messages) if messages else "none"


def _install_bundled_extension(project_path: Path, extension_id: str) -> str:
    """Install a bundled extension if needed and return a tracker summary.

    Args:
        project_path: Root of the target project.
        extension_id: Id of the extension shipped in the package bundle.

    Returns:
        One of exactly three strings — ``"bundled extension not found"``,
        ``"extension already installed"``, or ``"extension installed"``.
        Callers in ``init`` compare against these literals, so they must
        not be reworded.
    """
    from .extensions import ExtensionManager

    bundled_path = _locate_bundled_extension(extension_id)
    if not bundled_path:
        return "bundled extension not found"

    manager = ExtensionManager(project_path)
    if manager.registry.is_installed(extension_id):
        return "extension already installed"

    manager.install_from_directory(bundled_path, get_speckit_version())
    return "extension installed"
+ tracker.start("orchestrated") try: - bundled_wf = _locate_bundled_workflow("speckit") - if bundled_wf: - from .workflows.catalog import WorkflowRegistry - from .workflows.engine import WorkflowDefinition - wf_registry = WorkflowRegistry(project_path) - if wf_registry.is_installed("speckit"): - tracker.complete("workflow", "already installed") - else: - import shutil as _shutil - dest_wf = project_path / ".specify" / "workflows" / "speckit" - dest_wf.mkdir(parents=True, exist_ok=True) - _shutil.copy2( - bundled_wf / "workflow.yml", - dest_wf / "workflow.yml", - ) - definition = WorkflowDefinition.from_yaml(dest_wf / "workflow.yml") - wf_registry.add("speckit", { - "name": definition.name, - "version": definition.version, - "description": definition.description, - "source": "bundled", - }) - tracker.complete("workflow", "speckit installed") + orchestrated_message = _install_bundled_extension( + project_path, + "orchestrated", + ) + if orchestrated_message == "bundled extension not found": + tracker.error("orchestrated", orchestrated_message) else: - tracker.skip("workflow", "bundled workflow not found") + tracker.complete("orchestrated", orchestrated_message) + except Exception as ext_err: + sanitized_ext = str(ext_err).replace('\n', ' ').strip() + tracker.error( + "orchestrated", + f"extension install failed: {sanitized_ext[:120]}", + ) + + # Install bundled workflows + tracker.start("workflow") + try: + tracker.complete("workflow", _install_bundled_workflows(project_path)) except Exception as wf_err: sanitized_wf = str(wf_err).replace('\n', ' ').strip() tracker.error("workflow", f"install failed: {sanitized_wf[:120]}") @@ -1977,6 +2018,20 @@ def get_speckit_version() -> str: with open(pyproject_path, "rb") as f: data = tomllib.load(f) return data.get("project", {}).get("version", "unknown") + except Exception: + # Fall back to a small regex parser for environments where this + # module is invoked with Python < 3.11 and tomllib is unavailable. 
def version_satisfies(current: str, required: str) -> bool:
    """Return True when *current* satisfies the *required* specifier set.

    Pre-releases get a leniency pass: when the exact pre-release version is
    rejected by the specifier, the check is retried with its base version
    (e.g. ``1.2.0rc1`` falls back to ``1.2.0``).  Malformed versions or
    specifiers never raise; they simply yield ``False``.
    """
    try:
        parsed = pkg_version.Version(current)
        spec = SpecifierSet(required)
        stripped = pkg_version.Version(parsed.base_version)
    except (pkg_version.InvalidVersion, InvalidSpecifier):
        return False
    if parsed in spec:
        return True
    # Pre-release fallback: judge the release by its base version only.
    return parsed.is_prerelease and stripped in spec
-61,6 +62,7 @@ def _register_builtin_steps() -> None: _register_step(IfThenStep()) _register_step(PromptStep()) _register_step(ShellStep()) + _register_step(SpeckitTaskShardsStep()) _register_step(SwitchStep()) _register_step(WhileStep()) diff --git a/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py b/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py new file mode 100644 index 0000000000..4f15f299a1 --- /dev/null +++ b/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py @@ -0,0 +1,436 @@ +"""Spec Kit task shard step. + +Builds conservative implementation handoff shards from the active feature's +``tasks.md`` so a workflow can fan out into repeated ``speckit.implement`` calls. +""" + +from __future__ import annotations + +import json +import os +import re +import subprocess +from dataclasses import dataclass +from pathlib import Path, PurePosixPath +from typing import Any + +from specify_cli.workflows.base import StepBase, StepContext, StepResult, StepStatus +from specify_cli.workflows.expressions import evaluate_expression + + +_TASK_RE = re.compile(r"^\s*-\s+\[[ xX]\]\s+(?P[A-Za-z]+\d{3,})\b(?P.*)$") +_HEADING_RE = re.compile(r"^\s{0,3}#{2,6}\s+(?P.+?)\s*$") +_BACKTICK_RE = re.compile(r"`([^`]+)`") +_PATH_TOKEN_RE = re.compile( + r"(?<![\w./-])([A-Za-z0-9_.-]+(?:/[A-Za-z0-9_.-]+)+|[A-Za-z0-9_.-]+\.[A-Za-z0-9_.-]+)(?![\w./-])" +) + + +@dataclass +class ParsedTask: + task_id: str + text: str + phase: str + parallel: bool + paths: list[str] + + +@dataclass +class TaskShard: + shard_id: str + tasks: list[ParsedTask] + + @property + def task_ids(self) -> list[str]: + return [task.task_id for task in self.tasks] + + @property + def paths(self) -> list[str]: + seen: dict[str, None] = {} + for task in self.tasks: + for path in task.paths: + seen.setdefault(path, None) + return list(seen) + + +class SpeckitTaskShardsStep(StepBase): + """Generate handoff files from the active feature's ``tasks.md``.""" + + type_key = 
class SpeckitTaskShardsStep(StepBase):
    """Generate handoff files from the active feature's ``tasks.md``.

    The step parses the feature's task checklist, groups the tasks into
    conservative "shards" (each parallel ``[P]`` task alone, sequential
    runs kept together), writes one JSON handoff contract per shard, and
    returns the shard descriptors so a fan-out step can dispatch one
    ``speckit.implement`` call per shard.
    """

    # Step type referenced from workflow YAML (``type: speckit-task-shards``).
    type_key = "speckit-task-shards"

    def execute(self, config: dict[str, Any], context: StepContext) -> StepResult:
        """Build shards for the active feature and emit handoff JSON files.

        Returns a COMPLETED result whose ``output["items"]`` lists one
        descriptor per shard, or a FAILED result with a human-readable
        ``error`` when resolution, parsing, or sharding fails.
        """
        # Resolve templated input values ({{ ... }}) against the run context.
        input_data = config.get("input", {})
        resolved_input: dict[str, Any] = {}
        for key, value in input_data.items():
            resolved_input[key] = evaluate_expression(value, context)

        args = str(resolved_input.get("args", "") or "")
        try:
            # NOTE(review): a falsy max_shards (0, "", None) silently falls
            # back to 8 here, so the `< 1` guard below only fires for
            # negative values — confirm this leniency is intended.
            max_shards = int(resolved_input.get("max_shards", 8) or 8)
        except (TypeError, ValueError):
            return self._failed("max_shards must be a positive integer.", resolved_input)
        if max_shards < 1:
            return self._failed("max_shards must be a positive integer.", resolved_input)

        project_root = Path(context.project_root or ".").resolve()
        try:
            feature_dir = self._resolve_feature_dir(project_root)
            self._require_feature_files(feature_dir)
            tasks = self._parse_tasks(feature_dir / "tasks.md")
            shards = self._build_shards(tasks, max_shards)
            items = self._write_handoffs(
                project_root,
                feature_dir,
                shards,
                args,
                context.run_id or "manual",
            )
        except ValueError as exc:
            # All helper-level validation failures surface as ValueError.
            return self._failed(str(exc), resolved_input)

        return StepResult(
            status=StepStatus.COMPLETED,
            output={
                "input": resolved_input,
                "feature_dir": str(feature_dir),
                "tasks_path": str(feature_dir / "tasks.md"),
                "item_count": len(items),
                "items": items,
            },
        )

    def validate(self, config: dict[str, Any]) -> list[str]:
        """Extend base validation: 'input', when present, must be a mapping."""
        errors = super().validate(config)
        input_data = config.get("input", {})
        if input_data is not None and not isinstance(input_data, dict):
            errors.append(
                f"speckit-task-shards step {config.get('id', '?')!r}: 'input' must be a mapping."
            )
        return errors

    @staticmethod
    def _failed(error: str, input_data: dict[str, Any]) -> StepResult:
        """Build a FAILED StepResult that still carries the resolved input."""
        return StepResult(
            status=StepStatus.FAILED,
            error=error,
            output={"input": input_data, "error": error, "items": []},
        )

    @classmethod
    def _resolve_feature_dir(cls, project_root: Path) -> Path:
        """Locate the active feature directory.

        Resolution order: ``.specify/feature.json`` → the
        ``SPECIFY_FEATURE_DIRECTORY`` environment variable → a ``specs/``
        directory matched from the current git branch name.

        Raises:
            ValueError: when no source yields a feature, or feature.json
                cannot be parsed.
        """
        feature_json = project_root / ".specify" / "feature.json"
        if feature_json.is_file():
            try:
                raw = json.loads(feature_json.read_text(encoding="utf-8"))
            except (json.JSONDecodeError, OSError) as exc:
                raise ValueError(f"Failed to parse .specify/feature.json: {exc}") from exc
            feature_value = raw.get("feature_directory") if isinstance(raw, dict) else None
            if feature_value:
                return cls._normalize_feature_dir(project_root, str(feature_value))

        env_feature = os.environ.get("SPECIFY_FEATURE_DIRECTORY", "").strip()
        if env_feature:
            return cls._normalize_feature_dir(project_root, env_feature)

        branch = cls._current_branch(project_root)
        if not branch:
            raise ValueError(
                "Unable to resolve active feature: no .specify/feature.json, "
                "SPECIFY_FEATURE_DIRECTORY, or git branch is available."
            )
        return cls._find_feature_dir_by_prefix(project_root, branch)

    @staticmethod
    def _normalize_feature_dir(project_root: Path, value: str) -> Path:
        """Return *value* as an absolute, resolved path (relative to root)."""
        path = Path(value)
        if not path.is_absolute():
            path = project_root / path
        return path.resolve()

    @staticmethod
    def _current_branch(project_root: Path) -> str | None:
        """Return the short git branch name, or None when unavailable.

        Returns None for detached HEAD, missing git, non-zero exit, or a
        5-second timeout.  A remote-style prefix (anything before the last
        '/') is stripped from the branch name.
        """
        try:
            proc = subprocess.run(
                ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                cwd=project_root,
                capture_output=True,
                text=True,
                timeout=5,
            )
        except (OSError, subprocess.TimeoutExpired):
            return None
        if proc.returncode != 0:
            return None
        branch = proc.stdout.strip()
        if branch == "HEAD":
            # Detached HEAD: no usable branch name.
            return None
        if "/" in branch:
            # NOTE(review): this also shortens plain "feature/login"-style
            # branch names to "login" — confirm that is the intent.
            branch = branch.rsplit("/", 1)[1]
        return branch or None

    @classmethod
    def _find_feature_dir_by_prefix(cls, project_root: Path, branch: str) -> Path:
        """Map a branch name to its ``specs/`` directory.

        Branches named ``YYYYMMDD-HHMMSS-*`` or ``NNN-*`` are matched by
        prefix glob; anything else maps to ``specs/<branch>`` verbatim.

        Raises:
            ValueError: when the prefix matches more than one directory.
        """
        specs_dir = project_root / "specs"
        prefix = ""
        timestamp = re.match(r"^(\d{8}-\d{6})-", branch)
        sequential = re.match(r"^(\d{3,})-", branch)
        if timestamp:
            prefix = timestamp.group(1)
        elif sequential:
            prefix = sequential.group(1)
        else:
            return (specs_dir / branch).resolve()

        matches = sorted(path for path in specs_dir.glob(f"{prefix}-*") if path.is_dir())
        if not matches:
            # No prefixed directory; fall back to the literal branch name.
            return (specs_dir / branch).resolve()
        if len(matches) > 1:
            names = ", ".join(path.name for path in matches)
            raise ValueError(
                f"Multiple spec directories found with prefix {prefix!r}: {names}."
            )
        return matches[0].resolve()

    @staticmethod
    def _require_feature_files(feature_dir: Path) -> None:
        """Raise ValueError unless spec.md, plan.md, and tasks.md all exist."""
        if not feature_dir.is_dir():
            raise ValueError(f"Feature directory not found: {feature_dir}")
        missing = [
            name
            for name in ("spec.md", "plan.md", "tasks.md")
            if not (feature_dir / name).is_file()
        ]
        if missing:
            raise ValueError(
                f"Feature directory {feature_dir} is missing required file(s): "
                + ", ".join(missing)
            )

    @classmethod
    def _parse_tasks(cls, tasks_path: Path) -> list[ParsedTask]:
        """Parse checklist lines from *tasks_path* into ParsedTask records.

        Headings set the current phase for subsequent tasks.  Parallel
        ``[P]`` tasks must declare at least one explicit path so write
        conflicts can be checked.

        Raises:
            ValueError: no tasks found, a [P] task lacks paths, or two [P]
                tasks in the same phase declare overlapping paths.
        """
        current_phase = "Tasks"
        tasks: list[ParsedTask] = []
        for line in tasks_path.read_text(encoding="utf-8").splitlines():
            heading = _HEADING_RE.match(line)
            if heading:
                current_phase = heading.group("title").strip()
                continue

            match = _TASK_RE.match(line)
            if not match:
                continue

            task_id = match.group("id")
            text = line.strip()
            body = match.group("body")
            parallel = "[P]" in body
            paths = cls._extract_paths(body)
            if parallel and not paths:
                raise ValueError(
                    f"Parallel task {task_id} must declare at least one explicit path."
                )
            tasks.append(
                ParsedTask(
                    task_id=task_id,
                    text=text,
                    phase=current_phase,
                    parallel=parallel,
                    paths=paths,
                )
            )

        if not tasks:
            raise ValueError(f"No implementation tasks found in {tasks_path}.")
        cls._validate_parallel_conflicts(tasks)
        return tasks

    @classmethod
    def _extract_paths(cls, text: str) -> list[str]:
        """Collect normalized candidate paths from a task body.

        Backticked spans are split on whitespace; bare path-like tokens are
        picked up as a fallback.  Order is first-seen, duplicates dropped.
        """
        candidates: list[str] = []
        for raw in _BACKTICK_RE.findall(text):
            candidates.extend(raw.split())
        candidates.extend(match.group(1) for match in _PATH_TOKEN_RE.finditer(text))

        paths: dict[str, None] = {}
        for candidate in candidates:
            normalized = cls._normalize_task_path(candidate)
            if normalized:
                paths.setdefault(normalized, None)
        return list(paths)

    @staticmethod
    def _normalize_task_path(raw: str) -> str | None:
        """Normalize one candidate into a relative POSIX path, or None.

        Rejects URLs, '.'/'..' and any '..' segment, and bare words that
        contain neither a slash nor a dotted filename.  Leading slashes are
        stripped so absolute-looking paths become project-relative.
        """
        value = raw.strip().strip(".,;:()[]{}")
        if not value or value.startswith(("http://", "https://")):
            return None
        value = value.replace("\\", "/")
        # Wrapping in slashes lets one substring test catch '..' segments
        # at the start, middle, or end of the path.
        if value in {".", ".."} or "/../" in f"/{value}/":
            return None
        if value.startswith("/"):
            value = value.lstrip("/")
        if not ("/" in value or "." in PurePosixPath(value).name):
            return None
        return str(PurePosixPath(value))

    @classmethod
    def _validate_parallel_conflicts(cls, tasks: list[ParsedTask]) -> None:
        """Raise ValueError when two [P] tasks in one phase share a path."""
        by_phase: dict[str, list[ParsedTask]] = {}
        for task in tasks:
            if task.parallel:
                by_phase.setdefault(task.phase, []).append(task)

        # Pairwise check within each phase; phases never conflict across.
        for phase, phase_tasks in by_phase.items():
            for idx, left in enumerate(phase_tasks):
                for right in phase_tasks[idx + 1 :]:
                    overlap = cls._overlap(left.paths, right.paths)
                    if overlap:
                        raise ValueError(
                            f"Parallel tasks {left.task_id} and {right.task_id} in "
                            f"{phase!r} write overlapping path {overlap!r}."
                        )

    @classmethod
    def _build_shards(cls, tasks: list[ParsedTask], max_shards: int) -> list[TaskShard]:
        """Group tasks into at most *max_shards* shards, preserving order.

        Each [P] task becomes its own group; runs of sequential tasks stay
        together.  If there are too many groups, adjacent non-overlapping
        groups are merged until the cap is met.

        Raises:
            ValueError: when the cap cannot be met without merging groups
                whose declared write paths overlap.
        """
        groups: list[list[ParsedTask]] = []
        current: list[ParsedTask] = []

        for task in tasks:
            if task.parallel:
                # Flush the pending sequential run before the isolated task.
                if current:
                    groups.append(current)
                    current = []
                groups.append([task])
            else:
                current.append(task)
        if current:
            groups.append(current)

        while len(groups) > max_shards:
            merge_index = cls._find_merge_candidate(groups)
            if merge_index is None:
                raise ValueError(
                    f"Unable to cap handoff shards at {max_shards} without merging "
                    "groups that declare overlapping write paths."
                )
            groups[merge_index] = groups[merge_index] + groups[merge_index + 1]
            del groups[merge_index + 1]

        # Zero-pad shard ids so lexical sort matches execution order.
        width = max(2, len(str(len(groups))))
        return [
            TaskShard(f"shard-{idx + 1:0{width}d}", group)
            for idx, group in enumerate(groups)
        ]

    @classmethod
    def _find_merge_candidate(cls, groups: list[list[ParsedTask]]) -> int | None:
        """Return the first index whose group may merge with its successor.

        Adjacent groups are only mergeable when their path sets do not
        overlap; returns None when every adjacent pair overlaps.
        """
        for idx in range(len(groups) - 1):
            left_paths = cls._group_paths(groups[idx])
            right_paths = cls._group_paths(groups[idx + 1])
            if not cls._overlap(left_paths, right_paths):
                return idx
        return None

    @staticmethod
    def _group_paths(tasks: list[ParsedTask]) -> list[str]:
        """Union of the tasks' paths, de-duplicated in first-seen order."""
        paths: dict[str, None] = {}
        for task in tasks:
            for path in task.paths:
                paths.setdefault(path, None)
        return list(paths)

    @staticmethod
    def _overlap(left_paths: list[str], right_paths: list[str]) -> str | None:
        """Return a conflicting path between the two sets, or None.

        Two paths conflict when equal or when one is a directory prefix of
        the other (component-wise); the shorter (containing) path is the
        one reported.
        """
        for left in left_paths:
            left_parts = PurePosixPath(left).parts
            for right in right_paths:
                right_parts = PurePosixPath(right).parts
                if left == right:
                    return left
                min_len = min(len(left_parts), len(right_parts))
                if left_parts[:min_len] == right_parts[:min_len]:
                    return left if len(left_parts) <= len(right_parts) else right
        return None

    @classmethod
    def _write_handoffs(
        cls,
        project_root: Path,
        feature_dir: Path,
        shards: list[TaskShard],
        original_args: str,
        run_id: str,
    ) -> list[dict[str, Any]]:
        """Write one handoff JSON per shard and return shard descriptors.

        Files land under ``<feature>/handoffs/orchestrated/<run_id>/``;
        each descriptor carries the shard id, file path, task ids, and the
        argument string for the implement command.
        """
        handoff_dir = feature_dir / "handoffs" / "orchestrated" / run_id
        handoff_dir.mkdir(parents=True, exist_ok=True)

        items: list[dict[str, Any]] = []
        for shard in shards:
            handoff_path = handoff_dir / f"{shard.shard_id}.json"
            payload = cls._handoff_payload(project_root, feature_dir, shard)
            # sort_keys keeps the JSON stable across runs for diffing.
            handoff_path.write_text(
                json.dumps(payload, indent=2, sort_keys=True) + "\n",
                encoding="utf-8",
            )
            shard_args = cls._handoff_args(original_args, handoff_path, shard)
            items.append(
                {
                    "shard_id": shard.shard_id,
                    "handoff_path": str(handoff_path),
                    "task_ids": shard.task_ids,
                    "args": shard_args,
                }
            )
        return items

    @classmethod
    def _handoff_payload(
        cls,
        project_root: Path,
        feature_dir: Path,
        shard: TaskShard,
    ) -> dict[str, Any]:
        """Build the v1 handoff contract dict for one shard.

        Context refs always include spec/plan/tasks; data-model, research,
        quickstart, and the contracts directory are added when present.
        """
        feature_ref = cls._display_path(project_root, feature_dir)
        context_refs = [
            cls._display_path(project_root, feature_dir / name)
            for name in ("spec.md", "plan.md", "tasks.md")
        ]
        for optional_name in ("data-model.md", "research.md", "quickstart.md"):
            optional_path = feature_dir / optional_name
            if optional_path.is_file():
                context_refs.append(cls._display_path(project_root, optional_path))
        contracts_dir = feature_dir / "contracts"
        if contracts_dir.is_dir():
            context_refs.append(cls._display_path(project_root, contracts_dir))

        return {
            "contract_type": "speckit.orchestrated.implement.handoff.v1",
            "shard_id": shard.shard_id,
            "feature_dir": feature_ref,
            "task_ids": shard.task_ids,
            "task_text": [task.text for task in shard.tasks],
            # dict.fromkeys de-duplicates while preserving order.
            "allowed_read_paths": list(dict.fromkeys([feature_ref, *context_refs])),
            "allowed_write_paths": shard.paths,
            "required_context_refs": context_refs,
            "validation_commands": [],
            "forbidden_actions": [
                "Do not modify tasks outside task_ids.",
                "Do not modify paths outside allowed_write_paths unless the task explicitly requires a generated adjacent file.",
                "Do not revert user changes or unrelated work.",
            ],
        }

    @staticmethod
    def _handoff_args(original_args: str, handoff_path: Path, shard: TaskShard) -> str:
        """Compose the implement-command argument string for one shard."""
        prefix = f"{original_args.strip()} " if original_args.strip() else ""
        task_ids = ", ".join(shard.task_ids)
        return (
            f"{prefix}Use orchestrated handoff JSON {handoff_path}. "
            f"Execute only task IDs: {task_ids}."
        )

    @staticmethod
    def _display_path(project_root: Path, path: Path) -> str:
        """Render *path* relative to the project root when possible."""
        try:
            return str(path.resolve().relative_to(project_root))
        except ValueError:
            # Outside the project tree: fall back to the path as given.
            return str(path)
def test_orchestrated_command_registers_for_markdown_agent(tmp_path):
    """Installing the extension projects its command into a markdown agent dir."""
    from specify_cli.extensions import ExtensionManager

    project_dir = tmp_path / "project"
    project_dir.mkdir()
    workflows_dir = project_dir / ".windsurf" / "workflows"
    workflows_dir.mkdir(parents=True)

    ExtensionManager(project_dir).install_from_directory(
        PROJECT_ROOT / "extensions" / "orchestrated",
        "0.8.9",
    )

    projected = workflows_dir / "speckit.orchestrated.implement.md"
    assert projected.exists()

    body = projected.read_text(encoding="utf-8")
    assert "specify workflow run speckit-orchestrated-implement" in body
    assert "__AGENT__" not in body
    assert "-i integration=windsurf" in body
+ orchestrated_dir = project / ".specify" / "extensions" / "orchestrated" + assert orchestrated_dir.exists(), "orchestrated extension should be installed" + + workflows_dir = project / ".specify" / "workflows" + assert (workflows_dir / "speckit" / "workflow.yml").exists() + assert ( + workflows_dir / "speckit-orchestrated-implement" / "workflow.yml" + ).exists() + def test_no_git_emits_deprecation_warning(self, tmp_path): """Using --no-git emits a visible deprecation warning.""" from typer.testing import CliRunner @@ -864,6 +874,37 @@ def test_git_extension_commands_registered(self, tmp_path): git_skills = [f for f in claude_skills.iterdir() if f.name.startswith("speckit-git-")] assert len(git_skills) > 0, "no git extension commands registered" + def test_orchestrated_extension_commands_registered(self, tmp_path): + """Orchestrated extension command is registered with the agent during init.""" + from typer.testing import CliRunner + from specify_cli import app + + project = tmp_path / "orchestrated-cmds" + project.mkdir() + old_cwd = os.getcwd() + try: + os.chdir(project) + runner = CliRunner() + result = runner.invoke(app, [ + "init", "--here", "--ai", "claude", "--script", "sh", + "--no-git", "--ignore-agent-tools", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + + assert result.exit_code == 0, f"init failed: {result.output}" + + skill = ( + project + / ".claude" + / "skills" + / "speckit-orchestrated-implement" + / "SKILL.md" + ) + assert skill.exists(), "orchestrated command skill was not registered" + content = skill.read_text(encoding="utf-8") + assert "specify workflow run speckit-orchestrated-implement" in content + class TestSharedInfraCommandRefs: """Verify _install_shared_infra resolves __SPECKIT_COMMAND_*__ in page templates.""" diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 4c042fc7d5..b319ca836c 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -1089,6 +1089,121 @@ def 
class TestSpeckitTaskShardsStep:
    """Test the Spec Kit task sharding step."""

    def _write_feature(self, project_dir: Path, tasks: str, feature_name: str = "001-demo") -> Path:
        """Create a minimal feature fixture (spec/plan/tasks + feature.json).

        The ``tasks`` string is written verbatim as ``tasks.md``, so its
        exact markdown formatting is load-bearing for the parser under test.
        Assumes the ``project_dir`` fixture already contains a ``.specify``
        directory — TODO confirm against the fixture definition.
        """
        feature_dir = project_dir / "specs" / feature_name
        feature_dir.mkdir(parents=True, exist_ok=True)
        (project_dir / ".specify" / "feature.json").write_text(
            json.dumps({"feature_directory": f"specs/{feature_name}"}),
            encoding="utf-8",
        )
        (feature_dir / "spec.md").write_text("# Spec\n", encoding="utf-8")
        (feature_dir / "plan.md").write_text("# Plan\n", encoding="utf-8")
        (feature_dir / "tasks.md").write_text(tasks, encoding="utf-8")
        return feature_dir

    def test_execute_generates_handoff_items(self, project_dir):
        """Four tasks ([P] tasks isolated) yield four shards with handoff JSON."""
        from specify_cli.workflows.base import StepContext, StepStatus
        from specify_cli.workflows.steps.speckit_task_shards import SpeckitTaskShardsStep

        feature_dir = self._write_feature(
            project_dir,
            """
# Tasks

## Phase 1: Setup
- [ ] T001 Create project scaffolding in `pyproject.toml`
- [ ] T002 [P] Add model in `src/models/user.py`
- [ ] T003 [P] Add tests in `tests/test_user.py`
- [ ] T004 Wire service in `src/services/user_service.py`
""",
        )

        step = SpeckitTaskShardsStep()
        result = step.execute(
            {"id": "build-shards", "input": {"args": "--fast", "max_shards": 4}},
            StepContext(project_root=str(project_dir), run_id="testrun"),
        )

        assert result.status == StepStatus.COMPLETED
        assert result.output["feature_dir"] == str(feature_dir.resolve())
        # T001 / T002[P] / T003[P] / T004 each land in their own shard.
        assert result.output["item_count"] == 4
        first = result.output["items"][0]
        assert first["shard_id"] == "shard-01"
        assert first["task_ids"] == ["T001"]
        # Original CLI args are prefixed onto the generated handoff args.
        assert "--fast Use orchestrated handoff JSON" in first["args"]
        handoff = Path(first["handoff_path"])
        assert handoff.exists()
        data = json.loads(handoff.read_text(encoding="utf-8"))
        assert data["contract_type"] == "speckit.orchestrated.implement.handoff.v1"
        assert data["task_ids"] == ["T001"]
        assert "specs/001-demo/spec.md" in data["required_context_refs"]

    def test_missing_tasks_fails(self, project_dir):
        """A feature without tasks.md fails with an error naming the file."""
        from specify_cli.workflows.base import StepContext, StepStatus
        from specify_cli.workflows.steps.speckit_task_shards import SpeckitTaskShardsStep

        # Build the fixture by hand (not via _write_feature) so tasks.md
        # is deliberately absent.
        feature_dir = project_dir / "specs" / "001-demo"
        feature_dir.mkdir(parents=True)
        (project_dir / ".specify" / "feature.json").write_text(
            json.dumps({"feature_directory": "specs/001-demo"}),
            encoding="utf-8",
        )
        (feature_dir / "spec.md").write_text("# Spec\n", encoding="utf-8")
        (feature_dir / "plan.md").write_text("# Plan\n", encoding="utf-8")

        result = SpeckitTaskShardsStep().execute(
            {"id": "build-shards", "input": {}},
            StepContext(project_root=str(project_dir), run_id="testrun"),
        )

        assert result.status == StepStatus.FAILED
        assert "tasks.md" in result.error

    def test_parallel_task_without_path_fails(self, project_dir):
        """A [P] task that declares no explicit path is rejected."""
        from specify_cli.workflows.base import StepContext, StepStatus
        from specify_cli.workflows.steps.speckit_task_shards import SpeckitTaskShardsStep

        self._write_feature(
            project_dir,
            """
# Tasks
- [ ] T001 [P] Add isolated unit tests
""",
        )

        result = SpeckitTaskShardsStep().execute(
            {"id": "build-shards", "input": {}},
            StepContext(project_root=str(project_dir), run_id="testrun"),
        )

        assert result.status == StepStatus.FAILED
        assert "must declare at least one explicit path" in result.error

    def test_parallel_write_conflict_fails(self, project_dir):
        """Two [P] tasks in one phase writing the same file are rejected."""
        from specify_cli.workflows.base import StepContext, StepStatus
        from specify_cli.workflows.steps.speckit_task_shards import SpeckitTaskShardsStep

        self._write_feature(
            project_dir,
            """
# Tasks
- [ ] T001 [P] Add repository in `src/app.py`
- [ ] T002 [P] Add service in `src/app.py`
""",
        )

        result = SpeckitTaskShardsStep().execute(
            {"id": "build-shards", "input": {}},
            StepContext(project_root=str(project_dir), run_id="testrun"),
        )

        assert result.status == StepStatus.FAILED
        assert "write overlapping path" in result.error
import WorkflowEngine, WorkflowDefinition + from specify_cli.workflows.steps.command import CommandStep + + feature_dir = project_dir / "specs" / "001-demo" + feature_dir.mkdir(parents=True, exist_ok=True) + (project_dir / ".specify" / "feature.json").write_text( + json.dumps({"feature_directory": "specs/001-demo"}), + encoding="utf-8", + ) + (feature_dir / "spec.md").write_text("# Spec\n", encoding="utf-8") + (feature_dir / "plan.md").write_text("# Plan\n", encoding="utf-8") + (feature_dir / "tasks.md").write_text( + """ +# Tasks +- [ ] T001 [P] Add model in `src/model.py` +- [ ] T002 [P] Add tests in `tests/test_model.py` +""", + encoding="utf-8", + ) + + calls = [] + + def fake_dispatch(command, integration_key, model, args, context): + calls.append( + { + "command": command, + "integration": integration_key, + "args": args, + } + ) + return {"exit_code": 0, "stdout": "", "stderr": ""} + + monkeypatch.setattr( + CommandStep, + "_try_dispatch", + staticmethod(fake_dispatch), + ) + + workflow_path = ( + Path(__file__).resolve().parent.parent + / "workflows" + / "speckit-orchestrated-implement" + / "workflow.yml" + ) + definition = WorkflowDefinition.from_yaml(workflow_path) + state = WorkflowEngine(project_dir).execute( + definition, + {"integration": "claude", "args": "--fast", "max_shards": "4"}, + ) + + assert state.status == RunStatus.COMPLETED + assert [call["command"] for call in calls] == [ + "speckit.implement", + "speckit.implement", + ] + assert all(call["integration"] == "claude" for call in calls) + assert all("Use orchestrated handoff JSON" in call["args"] for call in calls) + assert all("--fast" in call["args"] for call in calls) + def test_switch_workflow(self, project_dir): """Test switch step type in a workflow.""" from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition diff --git a/workflows/catalog.json b/workflows/catalog.json index 967120afb0..a011421ef9 100644 --- a/workflows/catalog.json +++ b/workflows/catalog.json @@ 
-11,6 +11,15 @@ "version": "1.0.0", "url": "https://raw.githubusercontent.com/github/spec-kit/main/workflows/speckit/workflow.yml", "tags": ["sdd", "full-cycle"] + }, + "speckit-orchestrated-implement": { + "id": "speckit-orchestrated-implement", + "name": "Orchestrated Implementation", + "description": "Builds task handoff shards, then runs speckit.implement once per shard", + "author": "GitHub", + "version": "1.0.0", + "url": "https://raw.githubusercontent.com/github/spec-kit/main/workflows/speckit-orchestrated-implement/workflow.yml", + "tags": ["sdd", "implementation", "orchestration"] } } } diff --git a/workflows/speckit-orchestrated-implement/workflow.yml b/workflows/speckit-orchestrated-implement/workflow.yml new file mode 100644 index 0000000000..268b9601ce --- /dev/null +++ b/workflows/speckit-orchestrated-implement/workflow.yml @@ -0,0 +1,42 @@ +schema_version: "1.0" +workflow: + id: "speckit-orchestrated-implement" + name: "Orchestrated Implementation" + version: "1.0.0" + author: "GitHub" + description: "Builds task handoff shards, then runs speckit.implement once per shard" + +requires: + speckit_version: ">=0.8.9" + +inputs: + integration: + type: string + default: "copilot" + prompt: "Integration to use for shard execution" + args: + type: string + default: "" + prompt: "Additional implementation arguments" + max_shards: + type: number + default: 8 + prompt: "Maximum number of handoff shards" + +steps: + - id: build-shards + type: speckit-task-shards + input: + args: "{{ inputs.args }}" + max_shards: "{{ inputs.max_shards }}" + + - id: implement-shards + type: fan-out + items: "{{ steps.build-shards.output.items }}" + max_concurrency: "{{ inputs.max_shards }}" + step: + id: implement + command: speckit.implement + integration: "{{ inputs.integration }}" + input: + args: "{{ item.args }}" From 7abf52f2bb37bcd9c9a34ca180848e9f986a238b Mon Sep 17 00:00:00 2001 From: bigben <245982990@qq.com> Date: Wed, 13 May 2026 18:22:17 +0800 Subject: [PATCH 4/5] 
Migrate implement workflow to preset override --- .../speckit.orchestrated.implement.md | 17 ---- extensions/orchestrated/extension.yml | 24 ------ presets/catalog.json | 21 +++++ .../implement/commands/speckit.implement.md | 27 ++++++ presets/implement/preset.yml | 25 ++++++ pyproject.toml | 4 +- src/specify_cli/__init__.py | 76 ++++++++++++----- src/specify_cli/presets.py | 5 +- .../steps/speckit_task_shards/__init__.py | 6 +- tests/extensions/orchestrated/__init__.py | 1 - .../test_orchestrated_extension.py | 62 -------------- tests/integrations/test_cli.py | 84 ++++++++++++++++--- tests/test_presets.py | 44 ++++++++++ tests/test_workflows.py | 24 +++--- workflows/catalog.json | 10 +-- .../workflow.yml | 4 +- 16 files changed, 274 insertions(+), 160 deletions(-) delete mode 100644 extensions/orchestrated/commands/speckit.orchestrated.implement.md delete mode 100644 extensions/orchestrated/extension.yml create mode 100644 presets/implement/commands/speckit.implement.md create mode 100644 presets/implement/preset.yml delete mode 100644 tests/extensions/orchestrated/__init__.py delete mode 100644 tests/extensions/orchestrated/test_orchestrated_extension.py rename workflows/{speckit-orchestrated-implement => speckit-implement}/workflow.yml (92%) diff --git a/extensions/orchestrated/commands/speckit.orchestrated.implement.md b/extensions/orchestrated/commands/speckit.orchestrated.implement.md deleted file mode 100644 index 11c494cea2..0000000000 --- a/extensions/orchestrated/commands/speckit.orchestrated.implement.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -description: Execute the implementation plan by splitting tasks.md into workflow handoff shards ---- - -## User Input - -```text -$ARGUMENTS -``` - -Run the orchestrated implementation workflow from the repository root: - -```sh -specify workflow run speckit-orchestrated-implement -i integration=__AGENT__ -i args="$ARGUMENTS" -``` - -Wait for the workflow to complete. 
If it fails while building handoff shards, report the error and do not run `speckit.implement` manually. If a shard fails during fan-out, report the failing shard and preserve the generated handoff files for resume or debugging. diff --git a/extensions/orchestrated/extension.yml b/extensions/orchestrated/extension.yml deleted file mode 100644 index dfcff98350..0000000000 --- a/extensions/orchestrated/extension.yml +++ /dev/null @@ -1,24 +0,0 @@ -schema_version: "1.0" - -extension: - id: orchestrated - name: "Orchestrated Implementation" - version: "1.0.0" - description: "Split implementation tasks into handoff shards and run speckit.implement for each shard" - author: spec-kit-core - repository: https://github.com/github/spec-kit - license: MIT - -requires: - speckit_version: ">=0.8.9.dev0" - -provides: - commands: - - name: speckit.orchestrated.implement - file: commands/speckit.orchestrated.implement.md - description: "Run implementation through workflow-generated handoff shards" - -tags: - - "orchestration" - - "implementation" - - "workflow" diff --git a/presets/catalog.json b/presets/catalog.json index f272617926..7de3b7074f 100644 --- a/presets/catalog.json +++ b/presets/catalog.json @@ -3,6 +3,27 @@ "updated_at": "2026-04-24T00:00:00Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.json", "presets": { + "implement": { + "name": "Implement Workflow", + "id": "implement", + "version": "1.0.0", + "description": "Runs the implementation command through task handoff shards", + "author": "github", + "repository": "https://github.com/github/spec-kit", + "license": "MIT", + "bundled": true, + "requires": { + "speckit_version": ">=0.8.9.dev0" + }, + "provides": { + "commands": 1, + "templates": 0 + }, + "tags": [ + "implementation", + "workflow" + ] + }, "lean": { "name": "Lean Workflow", "id": "lean", diff --git a/presets/implement/commands/speckit.implement.md b/presets/implement/commands/speckit.implement.md new file mode 
100644 index 0000000000..aaca7c077c --- /dev/null +++ b/presets/implement/commands/speckit.implement.md @@ -0,0 +1,27 @@ +--- +description: Execute the implementation plan by splitting tasks.md into workflow handoff shards +--- + +## User Input + +```text +$ARGUMENTS +``` + +If the user input references a handoff JSON file, execute that handoff directly: + +1. Read the handoff JSON file. +2. Load only the listed `required_context_refs` plus any files needed inside `allowed_read_paths`. +3. Execute only the listed `task_ids`, respecting `allowed_write_paths` and `forbidden_actions`. +4. Mark only the completed listed tasks in `tasks.md`. +5. Run any `validation_commands` from the handoff, plus focused validation for changed files. + +Do not run `specify workflow run` while executing a handoff JSON. + +Otherwise, run the implementation workflow from the repository root: + +```sh +specify workflow run speckit-implement -i integration=__AGENT__ -i args="$ARGUMENTS" +``` + +Wait for the workflow to complete. If it fails while building handoff shards, report the error and do not run `speckit.implement` manually. If a shard fails during fan-out, report the failing shard and preserve the generated handoff files for resume or debugging. 
diff --git a/presets/implement/preset.yml b/presets/implement/preset.yml new file mode 100644 index 0000000000..4b3ab970c9 --- /dev/null +++ b/presets/implement/preset.yml @@ -0,0 +1,25 @@ +schema_version: "1.0" + +preset: + id: "implement" + name: "Implement Workflow" + version: "1.0.0" + description: "Runs the implementation command through task handoff shards" + author: "github" + repository: "https://github.com/github/spec-kit" + license: "MIT" + +requires: + speckit_version: ">=0.8.9.dev0" + +provides: + templates: + - type: "command" + name: "speckit.implement" + file: "commands/speckit.implement.md" + description: "Execute implementation through workflow-generated task handoffs" + replaces: "speckit.implement" + +tags: + - "implementation" + - "workflow" diff --git a/pyproject.toml b/pyproject.toml index 30d27a71da..2f4c8b215f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,11 +47,11 @@ packages = ["src/specify_cli"] "scripts/powershell" = "specify_cli/core_pack/scripts/powershell" # Bundled extensions (installable via `specify extension add <name>`) "extensions/git" = "specify_cli/core_pack/extensions/git" -"extensions/orchestrated" = "specify_cli/core_pack/extensions/orchestrated" # Bundled workflows (auto-installed during `specify init`) "workflows/speckit" = "specify_cli/core_pack/workflows/speckit" -"workflows/speckit-orchestrated-implement" = "specify_cli/core_pack/workflows/speckit-orchestrated-implement" +"workflows/speckit-implement" = "specify_cli/core_pack/workflows/speckit-implement" # Bundled presets (installable via `specify preset add <name>` or `specify init --preset <name>`) +"presets/implement" = "specify_cli/core_pack/presets/implement" "presets/lean" = "specify_cli/core_pack/presets/lean" [project.optional-dependencies] diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 4e6f56c969..835cfc8e54 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -95,7 +95,9 @@ def 
_build_agent_config() -> dict[str, dict[str, Any]]: AGENT_CONFIG = _build_agent_config() DEFAULT_INIT_INTEGRATION = "copilot" -DEFAULT_BUNDLED_WORKFLOWS = ("speckit", "speckit-orchestrated-implement") +DEFAULT_BUNDLED_WORKFLOWS = ("speckit", "speckit-implement") +DEFAULT_BUNDLED_PRESETS = ("implement",) +DEFAULT_BUNDLED_PRESET_PRIORITY = 20 AI_ASSISTANT_ALIASES = { "kiro": "kiro-cli", @@ -778,6 +780,43 @@ def _install_bundled_extension(project_path: Path, extension_id: str) -> str: return "extension installed" +def _install_default_bundled_presets( + project_path: Path, + *, + skip: set[str] | None = None, +) -> str: + """Install default bundled presets and return a tracker summary.""" + from .presets import PresetManager + + manager = PresetManager(project_path) + speckit_ver = get_speckit_version() + skip = skip or set() + messages: list[str] = [] + + for preset_id in DEFAULT_BUNDLED_PRESETS: + if preset_id in skip: + messages.append(f"{preset_id} skipped") + continue + bundled_path = _locate_bundled_preset(preset_id) + if not bundled_path: + messages.append(f"{preset_id} not found") + continue + if manager.registry.is_installed(preset_id): + messages.append(f"{preset_id} already installed") + continue + + manager.install_from_directory( + bundled_path, + speckit_ver, + priority=DEFAULT_BUNDLED_PRESET_PRIORITY, + ) + messages.append( + f"{preset_id} installed (priority {DEFAULT_BUNDLED_PRESET_PRIORITY})" + ) + + return "; ".join(messages) if messages else "none" + + def _locate_bundled_preset(preset_id: str) -> Path | None: """Return the path to a bundled preset, or None. 
@@ -1392,8 +1431,8 @@ def init( ("chmod", "Ensure scripts executable"), ("constitution", "Constitution setup"), ("git", "Install git extension"), - ("orchestrated", "Install orchestrated extension"), ("workflow", "Install bundled workflow"), + ("preset", "Install default preset"), ("final", "Finalize"), ]: tracker.add(key, label) @@ -1508,25 +1547,6 @@ def init( else: tracker.skip("git", "--no-git flag") - # Install bundled orchestrated extension. This is independent of - # --no-git because it provides a core workflow entry point. - tracker.start("orchestrated") - try: - orchestrated_message = _install_bundled_extension( - project_path, - "orchestrated", - ) - if orchestrated_message == "bundled extension not found": - tracker.error("orchestrated", orchestrated_message) - else: - tracker.complete("orchestrated", orchestrated_message) - except Exception as ext_err: - sanitized_ext = str(ext_err).replace('\n', ' ').strip() - tracker.error( - "orchestrated", - f"extension install failed: {sanitized_ext[:120]}", - ) - # Install bundled workflows tracker.start("workflow") try: @@ -1559,6 +1579,20 @@ def init( init_opts["ai_skills"] = True save_init_options(project_path, init_opts) + tracker.start("preset") + explicit_default_preset = preset in DEFAULT_BUNDLED_PRESETS + try: + tracker.complete( + "preset", + _install_default_bundled_presets( + project_path, + skip={preset} if explicit_default_preset else set(), + ), + ) + except Exception as preset_err: + sanitized_preset = str(preset_err).replace('\n', ' ').strip() + tracker.error("preset", f"install failed: {sanitized_preset[:120]}") + # Install preset if specified if preset: try: diff --git a/src/specify_cli/presets.py b/src/specify_cli/presets.py index 041c832e45..8188836567 100644 --- a/src/specify_cli/presets.py +++ b/src/specify_cli/presets.py @@ -572,7 +572,10 @@ def check_compatibility( try: specifier = SpecifierSet(required) - if current not in specifier: + base_current = 
pkg_version.Version(current.base_version) + if current not in specifier and not ( + current.is_prerelease and base_current in specifier + ): raise PresetCompatibilityError( f"Preset requires spec-kit {required}, " f"but {speckit_version} is installed.\n" diff --git a/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py b/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py index 4f15f299a1..fd50458044 100644 --- a/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py +++ b/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py @@ -360,7 +360,7 @@ def _write_handoffs( original_args: str, run_id: str, ) -> list[dict[str, Any]]: - handoff_dir = feature_dir / "handoffs" / "orchestrated" / run_id + handoff_dir = feature_dir / "handoffs" / "implement" / run_id handoff_dir.mkdir(parents=True, exist_ok=True) items: list[dict[str, Any]] = [] @@ -403,7 +403,7 @@ def _handoff_payload( context_refs.append(cls._display_path(project_root, contracts_dir)) return { - "contract_type": "speckit.orchestrated.implement.handoff.v1", + "contract_type": "speckit.implement.handoff.v1", "shard_id": shard.shard_id, "feature_dir": feature_ref, "task_ids": shard.task_ids, @@ -424,7 +424,7 @@ def _handoff_args(original_args: str, handoff_path: Path, shard: TaskShard) -> s prefix = f"{original_args.strip()} " if original_args.strip() else "" task_ids = ", ".join(shard.task_ids) return ( - f"{prefix}Use orchestrated handoff JSON {handoff_path}. " + f"{prefix}Use handoff JSON {handoff_path}. " f"Execute only task IDs: {task_ids}." 
) diff --git a/tests/extensions/orchestrated/__init__.py b/tests/extensions/orchestrated/__init__.py deleted file mode 100644 index ae25f26e35..0000000000 --- a/tests/extensions/orchestrated/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Tests for the bundled orchestrated extension.""" diff --git a/tests/extensions/orchestrated/test_orchestrated_extension.py b/tests/extensions/orchestrated/test_orchestrated_extension.py deleted file mode 100644 index b375af64fe..0000000000 --- a/tests/extensions/orchestrated/test_orchestrated_extension.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Tests for the bundled orchestrated extension.""" - -from __future__ import annotations - -from pathlib import Path - - -PROJECT_ROOT = Path(__file__).resolve().parents[3] - - -def test_orchestrated_manifest_validates(): - from specify_cli.extensions import ExtensionManifest - - manifest = ExtensionManifest( - PROJECT_ROOT / "extensions" / "orchestrated" / "extension.yml" - ) - - assert manifest.id == "orchestrated" - assert [cmd["name"] for cmd in manifest.commands] == [ - "speckit.orchestrated.implement" - ] - - -def test_orchestrated_command_invokes_workflow(): - command_path = ( - PROJECT_ROOT - / "extensions" - / "orchestrated" - / "commands" - / "speckit.orchestrated.implement.md" - ) - - content = command_path.read_text(encoding="utf-8") - assert "specify workflow run speckit-orchestrated-implement" in content - assert "-i integration=__AGENT__" in content - assert '-i args="$ARGUMENTS"' in content - - -def test_orchestrated_command_registers_for_markdown_agent(tmp_path): - from specify_cli.extensions import ExtensionManager - - project = tmp_path / "project" - project.mkdir() - (project / ".windsurf" / "workflows").mkdir(parents=True) - - manager = ExtensionManager(project) - manager.install_from_directory( - PROJECT_ROOT / "extensions" / "orchestrated", - "0.8.9", - ) - - command_file = ( - project - / ".windsurf" - / "workflows" - / "speckit.orchestrated.implement.md" - ) - assert 
command_file.exists() - content = command_file.read_text(encoding="utf-8") - assert "specify workflow run speckit-orchestrated-implement" in content - assert "__AGENT__" not in content - assert "-i integration=windsurf" in content diff --git a/tests/integrations/test_cli.py b/tests/integrations/test_cli.py index 2cfaace157..d36366e6e2 100644 --- a/tests/integrations/test_cli.py +++ b/tests/integrations/test_cli.py @@ -785,14 +785,13 @@ def test_no_git_skips_extension(self, tmp_path): ext_dir = project / ".specify" / "extensions" / "git" assert not ext_dir.exists(), "git extension should not be installed with --no-git" - # Orchestrated extension is core workflow plumbing and is still installed. - orchestrated_dir = project / ".specify" / "extensions" / "orchestrated" - assert orchestrated_dir.exists(), "orchestrated extension should be installed" + implement_preset_dir = project / ".specify" / "presets" / "implement" + assert implement_preset_dir.exists(), "implement preset should be installed" workflows_dir = project / ".specify" / "workflows" assert (workflows_dir / "speckit" / "workflow.yml").exists() assert ( - workflows_dir / "speckit-orchestrated-implement" / "workflow.yml" + workflows_dir / "speckit-implement" / "workflow.yml" ).exists() def test_no_git_emits_deprecation_warning(self, tmp_path): @@ -874,12 +873,12 @@ def test_git_extension_commands_registered(self, tmp_path): git_skills = [f for f in claude_skills.iterdir() if f.name.startswith("speckit-git-")] assert len(git_skills) > 0, "no git extension commands registered" - def test_orchestrated_extension_commands_registered(self, tmp_path): - """Orchestrated extension command is registered with the agent during init.""" + def test_default_implement_preset_updates_skill_command(self, tmp_path): + """Default implement preset updates the core implement skill during init.""" from typer.testing import CliRunner from specify_cli import app - project = tmp_path / "orchestrated-cmds" + project = tmp_path / 
"implement-skill" project.mkdir() old_cwd = os.getcwd() try: @@ -898,12 +897,77 @@ def test_orchestrated_extension_commands_registered(self, tmp_path): project / ".claude" / "skills" - / "speckit-orchestrated-implement" + / "speckit-implement" / "SKILL.md" ) - assert skill.exists(), "orchestrated command skill was not registered" + assert skill.exists(), "implement command skill was not registered" content = skill.read_text(encoding="utf-8") - assert "specify workflow run speckit-orchestrated-implement" in content + assert "specify workflow run speckit-implement" in content + generated_implement_skills = sorted( + path.name + for path in (project / ".claude" / "skills").iterdir() + if path.name.endswith("implement") + ) + assert generated_implement_skills == ["speckit-implement"] + + def test_default_implement_preset_updates_markdown_command(self, tmp_path): + """Default implement preset updates markdown command integrations.""" + from typer.testing import CliRunner + from specify_cli import app + + project = tmp_path / "implement-markdown" + project.mkdir() + old_cwd = os.getcwd() + try: + os.chdir(project) + runner = CliRunner() + result = runner.invoke(app, [ + "init", "--here", "--ai", "windsurf", "--script", "sh", + "--no-git", "--ignore-agent-tools", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + + assert result.exit_code == 0, f"init failed: {result.output}" + + command = project / ".windsurf" / "workflows" / "speckit.implement.md" + assert command.exists(), "implement command was not registered" + content = command.read_text(encoding="utf-8") + assert "specify workflow run speckit-implement" in content + assert "-i integration=windsurf" in content + generated_implement_commands = sorted( + path.name + for path in (project / ".windsurf" / "workflows").iterdir() + if path.name.endswith("implement.md") + ) + assert generated_implement_commands == ["speckit.implement.md"] + + def test_explicit_preset_wins_over_default_implement_preset(self, 
tmp_path): + """A user-selected preset has higher priority than the default implement preset.""" + from typer.testing import CliRunner + from specify_cli import app + + project = tmp_path / "lean-wins" + project.mkdir() + old_cwd = os.getcwd() + try: + os.chdir(project) + runner = CliRunner() + result = runner.invoke(app, [ + "init", "--here", "--ai", "claude", "--script", "sh", + "--no-git", "--ignore-agent-tools", "--preset", "lean", + ], catch_exceptions=False) + finally: + os.chdir(old_cwd) + + assert result.exit_code == 0, f"init failed: {result.output}" + assert (project / ".specify" / "presets" / "implement").exists() + assert (project / ".specify" / "presets" / "lean").exists() + + skill = project / ".claude" / "skills" / "speckit-implement" / "SKILL.md" + content = skill.read_text(encoding="utf-8") + assert "## Outline" in content + assert "specify workflow run speckit-implement" not in content class TestSharedInfraCommandRefs: diff --git a/tests/test_presets.py b/tests/test_presets.py index 52566cbedc..6001bd7763 100644 --- a/tests/test_presets.py +++ b/tests/test_presets.py @@ -1922,6 +1922,7 @@ def test_url_cache_expired(self, project_dir): SELF_TEST_PRESET_DIR = Path(__file__).parent.parent / "presets" / "self-test" +IMPLEMENT_PRESET_DIR = Path(__file__).parent.parent / "presets" / "implement" SELF_TEST_WRAP_WARNING = ( r"Cannot compose command 'speckit\.wrap-test': no base layer\. " r"Stale command files may remain\." 
@@ -1936,6 +1937,49 @@ def test_url_cache_expired(self, project_dir): ] +class TestImplementPreset: + """Tests for the bundled implement preset.""" + + def test_manifest_valid(self): + manifest = PresetManifest(IMPLEMENT_PRESET_DIR / "preset.yml") + + assert manifest.id == "implement" + assert manifest.name == "Implement Workflow" + assert [t["name"] for t in manifest.templates] == ["speckit.implement"] + assert manifest.templates[0]["replaces"] == "speckit.implement" + + def test_command_invokes_workflow(self): + command_path = IMPLEMENT_PRESET_DIR / "commands" / "speckit.implement.md" + + content = command_path.read_text(encoding="utf-8") + assert "specify workflow run speckit-implement" in content + assert "-i integration=__AGENT__" in content + assert '-i args="$ARGUMENTS"' in content + assert "If the user input references a handoff JSON file" in content + assert "Do not run `specify workflow run` while executing a handoff JSON" in content + + def test_catalog_contains_implement_preset(self): + catalog_path = Path(__file__).parent.parent / "presets" / "catalog.json" + data = json.loads(catalog_path.read_text(encoding="utf-8")) + + preset = data["presets"]["implement"] + assert preset["bundled"] is True + assert preset["provides"]["commands"] == 1 + + def test_install_resolves_implement_command(self, project_dir): + manager = PresetManager(project_dir) + manager.install_from_directory(IMPLEMENT_PRESET_DIR, "0.8.9.dev0") + + resolver = PresetResolver(project_dir) + result = resolver.resolve("speckit.implement", "command") + + assert result is not None + assert "presets/implement" in result.as_posix() + assert "specify workflow run speckit-implement" in result.read_text( + encoding="utf-8" + ) + + def install_self_test_preset(manager: PresetManager, speckit_version: str = "0.1.5") -> PresetManifest: """Install self-test while filtering its intentionally missing wrap base.""" with warnings.catch_warnings(): diff --git a/tests/test_workflows.py 
b/tests/test_workflows.py index b319ca836c..221bd8e038 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -1133,11 +1133,11 @@ def test_execute_generates_handoff_items(self, project_dir): first = result.output["items"][0] assert first["shard_id"] == "shard-01" assert first["task_ids"] == ["T001"] - assert "--fast Use orchestrated handoff JSON" in first["args"] + assert "--fast Use handoff JSON" in first["args"] handoff = Path(first["handoff_path"]) assert handoff.exists() data = json.loads(handoff.read_text(encoding="utf-8")) - assert data["contract_type"] == "speckit.orchestrated.implement.handoff.v1" + assert data["contract_type"] == "speckit.implement.handoff.v1" assert data["task_ids"] == ["T001"] assert "specs/001-demo/spec.md" in data["required_context_refs"] @@ -1398,19 +1398,19 @@ def test_invalid_step_type(self): errors = validate_workflow(definition) assert any("invalid type" in e.lower() for e in errors) - def test_bundled_orchestrated_workflow_validates(self): + def test_bundled_implement_workflow_validates(self): from specify_cli.workflows.engine import WorkflowDefinition, validate_workflow workflow_path = ( Path(__file__).resolve().parent.parent / "workflows" - / "speckit-orchestrated-implement" + / "speckit-implement" / "workflow.yml" ) definition = WorkflowDefinition.from_yaml(workflow_path) errors = validate_workflow(definition) assert errors == [] - assert definition.id == "speckit-orchestrated-implement" + assert definition.id == "speckit-implement" def test_nested_step_validation(self): from specify_cli.workflows.engine import WorkflowDefinition, validate_workflow @@ -1878,14 +1878,14 @@ def test_get_catalog_configs(self, project_dir): assert configs[0]["name"] == "default" assert isinstance(configs[0]["install_allowed"], bool) - def test_bundled_catalog_contains_orchestrated_workflow(self): + def test_bundled_catalog_contains_implement_workflow(self): catalog_path = Path(__file__).resolve().parent.parent / "workflows" / 
"catalog.json" data = json.loads(catalog_path.read_text(encoding="utf-8")) - workflow = data["workflows"]["speckit-orchestrated-implement"] - assert workflow["name"] == "Orchestrated Implementation" + workflow = data["workflows"]["speckit-implement"] + assert workflow["name"] == "Implementation" assert workflow["url"].endswith( - "/workflows/speckit-orchestrated-implement/workflow.yml" + "/workflows/speckit-implement/workflow.yml" ) @@ -1942,7 +1942,7 @@ def test_full_sequential_workflow(self, project_dir): assert "echo-partial" not in state.step_results assert "plan" in state.step_results - def test_orchestrated_workflow_fans_out_to_implement(self, project_dir, monkeypatch): + def test_implement_workflow_fans_out_to_implement(self, project_dir, monkeypatch): """The bundled workflow dispatches speckit.implement once per shard.""" from specify_cli.workflows.base import RunStatus from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition @@ -1986,7 +1986,7 @@ def fake_dispatch(command, integration_key, model, args, context): workflow_path = ( Path(__file__).resolve().parent.parent / "workflows" - / "speckit-orchestrated-implement" + / "speckit-implement" / "workflow.yml" ) definition = WorkflowDefinition.from_yaml(workflow_path) @@ -2001,7 +2001,7 @@ def fake_dispatch(command, integration_key, model, args, context): "speckit.implement", ] assert all(call["integration"] == "claude" for call in calls) - assert all("Use orchestrated handoff JSON" in call["args"] for call in calls) + assert all("Use handoff JSON" in call["args"] for call in calls) assert all("--fast" in call["args"] for call in calls) def test_switch_workflow(self, project_dir): diff --git a/workflows/catalog.json b/workflows/catalog.json index a011421ef9..250c638df3 100644 --- a/workflows/catalog.json +++ b/workflows/catalog.json @@ -12,14 +12,14 @@ "url": "https://raw.githubusercontent.com/github/spec-kit/main/workflows/speckit/workflow.yml", "tags": ["sdd", "full-cycle"] }, - 
"speckit-orchestrated-implement": { - "id": "speckit-orchestrated-implement", - "name": "Orchestrated Implementation", + "speckit-implement": { + "id": "speckit-implement", + "name": "Implementation", "description": "Builds task handoff shards, then runs speckit.implement once per shard", "author": "GitHub", "version": "1.0.0", - "url": "https://raw.githubusercontent.com/github/spec-kit/main/workflows/speckit-orchestrated-implement/workflow.yml", - "tags": ["sdd", "implementation", "orchestration"] + "url": "https://raw.githubusercontent.com/github/spec-kit/main/workflows/speckit-implement/workflow.yml", + "tags": ["sdd", "implementation"] } } } diff --git a/workflows/speckit-orchestrated-implement/workflow.yml b/workflows/speckit-implement/workflow.yml similarity index 92% rename from workflows/speckit-orchestrated-implement/workflow.yml rename to workflows/speckit-implement/workflow.yml index 268b9601ce..83c6cc58d5 100644 --- a/workflows/speckit-orchestrated-implement/workflow.yml +++ b/workflows/speckit-implement/workflow.yml @@ -1,7 +1,7 @@ schema_version: "1.0" workflow: - id: "speckit-orchestrated-implement" - name: "Orchestrated Implementation" + id: "speckit-implement" + name: "Implementation" version: "1.0.0" author: "GitHub" description: "Builds task handoff shards, then runs speckit.implement once per shard" From 96a5d11b14c331e3ce78fd7113cdc2715d26cd11 Mon Sep 17 00:00:00 2001 From: bigben <245982990@qq.com> Date: Fri, 15 May 2026 17:11:35 +0800 Subject: [PATCH 5/5] Clarify integration setup instructions --- .devcontainer/devcontainer.json | 0 .gitattributes | 0 .github/CODEOWNERS | 0 .github/ISSUE_TEMPLATE/agent_request.yml | 0 .github/ISSUE_TEMPLATE/bug_report.yml | 0 .github/ISSUE_TEMPLATE/config.yml | 0 .../ISSUE_TEMPLATE/extension_submission.yml | 0 .github/ISSUE_TEMPLATE/feature_request.yml | 0 .github/ISSUE_TEMPLATE/preset_submission.yml | 0 .github/PULL_REQUEST_TEMPLATE.md | 0 .github/dependabot.yml | 0 .github/workflows/RELEASE-PROCESS.md | 0 
.github/workflows/catalog-assign.yml | 0 .github/workflows/codeql.yml | 0 .github/workflows/docs.yml | 0 .github/workflows/lint.yml | 0 .github/workflows/release-trigger.yml | 0 .github/workflows/release.yml | 0 .github/workflows/stale.yml | 0 .github/workflows/test.yml | 0 .gitignore | 0 .markdownlint-cli2.jsonc | 0 .zenodo.json | 0 AGENTS.md | 0 CHANGELOG.md | 0 CITATION.cff | 0 CODE_OF_CONDUCT.md | 0 CONTRIBUTING.md | 0 DEVELOPMENT.md | 0 EOF | 0 LICENSE | 0 README.md | 0 SECURITY.md | 0 SUPPORT.md | 0 docs/.gitignore | 0 docs/README.md | 0 docs/community/friends.md | 0 docs/community/presets.md | 0 docs/community/walkthroughs.md | 0 docs/concepts/sdd.md | 0 docs/docfx.json | 0 docs/index.md | 0 docs/install/uv.md | 0 docs/installation.md | 0 docs/local-development.md | 0 docs/quickstart.md | 0 docs/reference/authentication.md | 0 docs/reference/core.md | 0 docs/reference/extensions.md | 0 docs/reference/integrations.md | 0 docs/reference/overview.md | 0 docs/reference/presets.md | 0 docs/reference/workflows.md | 0 docs/template/public/main.css | 0 docs/toc.yml | 0 docs/upgrade.md | 0 extensions/EXTENSION-API-REFERENCE.md | 0 extensions/EXTENSION-DEVELOPMENT-GUIDE.md | 0 extensions/EXTENSION-PUBLISHING-GUIDE.md | 0 extensions/EXTENSION-USER-GUIDE.md | 0 extensions/README.md | 0 extensions/RFC-EXTENSION-SYSTEM.md | 0 extensions/catalog.community.json | 0 extensions/catalog.json | 0 extensions/git/README.md | 0 extensions/git/commands/speckit.git.commit.md | 0 .../git/commands/speckit.git.feature.md | 0 .../git/commands/speckit.git.initialize.md | 0 extensions/git/commands/speckit.git.remote.md | 0 .../git/commands/speckit.git.validate.md | 0 extensions/git/config-template.yml | 0 extensions/git/extension.yml | 0 extensions/git/git-config.yml | 0 .../git/scripts/powershell/auto-commit.ps1 | 0 .../scripts/powershell/create-new-feature.ps1 | 0 .../git/scripts/powershell/git-common.ps1 | 0 .../scripts/powershell/initialize-repo.ps1 | 0 
extensions/selftest/commands/selftest.md | 0 extensions/selftest/extension.yml | 0 extensions/template/.gitignore | 0 extensions/template/CHANGELOG.md | 0 extensions/template/EXAMPLE-README.md | 0 extensions/template/LICENSE | 0 extensions/template/README.md | 0 extensions/template/commands/example.md | 0 extensions/template/config-template.yml | 0 extensions/template/extension.yml | 0 integrations/CONTRIBUTING.md | 0 integrations/README.md | 0 integrations/catalog.community.json | 0 integrations/catalog.json | 0 media/bootstrap-claude-code.gif | Bin media/logo_large.webp | Bin media/logo_small.webp | Bin media/spec-kit-video-header.jpg | Bin media/specify_cli.gif | Bin newsletters/2026-April.md | 0 newsletters/2026-February.md | 0 newsletters/2026-March.md | 0 presets/ARCHITECTURE.md | 0 presets/PUBLISHING.md | 0 presets/README.md | 0 presets/catalog.community.json | 0 presets/catalog.json | 21 - .../implement/commands/speckit.implement.md | 27 -- presets/implement/preset.yml | 25 - presets/lean/README.md | 0 presets/lean/commands/speckit.constitution.md | 0 presets/lean/commands/speckit.implement.md | 0 presets/lean/commands/speckit.plan.md | 0 presets/lean/commands/speckit.specify.md | 0 presets/lean/commands/speckit.tasks.md | 0 presets/lean/preset.yml | 0 presets/scaffold/README.md | 0 .../commands/speckit.myext.myextcmd.md | 0 presets/scaffold/commands/speckit.specify.md | 0 presets/scaffold/preset.yml | 0 presets/scaffold/templates/myext-template.md | 0 presets/scaffold/templates/spec-template.md | 0 presets/self-test/commands/speckit.specify.md | 0 .../self-test/commands/speckit.wrap-test.md | 0 presets/self-test/preset.yml | 0 .../templates/agent-file-template.md | 0 .../self-test/templates/checklist-template.md | 0 .../templates/constitution-template.md | 0 presets/self-test/templates/plan-template.md | 0 presets/self-test/templates/spec-template.md | 0 presets/self-test/templates/tasks-template.md | 0 pyproject.toml | 1 - 
scripts/bash/check-prerequisites.sh | 0 scripts/bash/common.sh | 0 scripts/bash/create-new-feature.sh | 0 scripts/bash/setup-plan.sh | 0 scripts/bash/setup-tasks.sh | 0 scripts/powershell/check-prerequisites.ps1 | 0 scripts/powershell/common.ps1 | 0 scripts/powershell/create-new-feature.ps1 | 0 scripts/powershell/setup-plan.ps1 | 0 scripts/powershell/setup-tasks.ps1 | 0 spec-driven.md | 0 spec-kit.code-workspace | 0 src/specify_cli/__init__.py | 107 ++++- src/specify_cli/_github_http.py | 0 src/specify_cli/agent_projection.py | 0 src/specify_cli/agents.py | 0 src/specify_cli/authentication/__init__.py | 0 .../authentication/azure_devops.py | 0 src/specify_cli/authentication/base.py | 0 src/specify_cli/authentication/config.py | 0 src/specify_cli/authentication/github.py | 0 src/specify_cli/authentication/http.py | 0 src/specify_cli/catalogs.py | 0 src/specify_cli/extensions.py | 150 +++++- src/specify_cli/integration_runtime.py | 0 src/specify_cli/integration_state.py | 0 src/specify_cli/integrations/__init__.py | 0 src/specify_cli/integrations/agy/__init__.py | 0 src/specify_cli/integrations/amp/__init__.py | 0 .../integrations/auggie/__init__.py | 0 src/specify_cli/integrations/base.py | 0 src/specify_cli/integrations/bob/__init__.py | 0 src/specify_cli/integrations/catalog.py | 0 .../integrations/claude/__init__.py | 0 .../integrations/codebuddy/__init__.py | 0 .../integrations/codex/__init__.py | 0 .../integrations/copilot/__init__.py | 0 .../integrations/cursor_agent/__init__.py | 0 .../integrations/devin/__init__.py | 0 .../integrations/forge/__init__.py | 0 .../integrations/gemini/__init__.py | 0 .../integrations/generic/__init__.py | 0 .../integrations/goose/__init__.py | 0 .../integrations/iflow/__init__.py | 0 .../integrations/junie/__init__.py | 0 .../integrations/kilocode/__init__.py | 0 src/specify_cli/integrations/kimi/__init__.py | 0 .../integrations/kiro_cli/__init__.py | 0 .../integrations/lingma/__init__.py | 0 
src/specify_cli/integrations/manifest.py | 0 .../integrations/opencode/__init__.py | 0 src/specify_cli/integrations/pi/__init__.py | 0 .../integrations/qodercli/__init__.py | 0 src/specify_cli/integrations/qwen/__init__.py | 0 src/specify_cli/integrations/roo/__init__.py | 0 src/specify_cli/integrations/shai/__init__.py | 0 .../integrations/tabnine/__init__.py | 0 src/specify_cli/integrations/trae/__init__.py | 0 src/specify_cli/integrations/vibe/__init__.py | 0 .../integrations/windsurf/__init__.py | 0 src/specify_cli/presets.py | 145 ++++++ src/specify_cli/shared_infra.py | 0 src/specify_cli/workflows/__init__.py | 2 - src/specify_cli/workflows/base.py | 0 src/specify_cli/workflows/catalog.py | 0 src/specify_cli/workflows/engine.py | 129 ++++-- src/specify_cli/workflows/expressions.py | 2 + src/specify_cli/workflows/steps/__init__.py | 0 .../workflows/steps/command/__init__.py | 0 .../workflows/steps/do_while/__init__.py | 0 .../workflows/steps/fan_in/__init__.py | 0 .../workflows/steps/fan_out/__init__.py | 13 +- .../workflows/steps/gate/__init__.py | 0 .../workflows/steps/if_then/__init__.py | 0 .../workflows/steps/prompt/__init__.py | 0 .../workflows/steps/shell/__init__.py | 13 + .../steps/speckit_task_shards/__init__.py | 436 ------------------ .../workflows/steps/switch/__init__.py | 0 .../workflows/steps/while_loop/__init__.py | 0 templates/agent-governance-template.md | 0 .../architecture-development-template.md | 0 templates/architecture-logical-template.md | 0 templates/architecture-physical-template.md | 0 templates/architecture-process-template.md | 0 templates/architecture-scenario-template.md | 0 templates/architecture-template.md | 0 templates/checklist-template.md | 0 templates/commands/agent.md | 0 templates/commands/analyze.md | 0 templates/commands/arch.md | 0 templates/commands/checklist.md | 0 templates/commands/clarify.md | 0 templates/commands/constitution.md | 0 templates/commands/governance.md | 0 templates/commands/plan.md | 0 
templates/commands/specify.md | 0 templates/commands/tasks.md | 0 templates/commands/taskstoissues.md | 0 templates/constitution-template.md | 0 templates/plan-template.md | 0 templates/spec-template.md | 0 templates/tasks-template.md | 0 templates/vscode-settings.json | 0 tests/__init__.py | 0 tests/auth_helpers.py | 0 tests/conftest.py | 68 +++ tests/extensions/__init__.py | 0 tests/extensions/git/__init__.py | 0 tests/extensions/git/test_git_extension.py | 0 tests/hooks/.specify/extensions.yml | 0 tests/hooks/TESTING.md | 0 tests/hooks/plan.md | 0 tests/hooks/spec.md | 0 tests/hooks/tasks.md | 0 tests/integrations/__init__.py | 0 tests/integrations/conftest.py | 0 tests/integrations/test_base.py | 0 tests/integrations/test_cli.py | 17 +- tests/integrations/test_integration_agy.py | 0 tests/integrations/test_integration_amp.py | 0 tests/integrations/test_integration_auggie.py | 0 .../test_integration_base_markdown.py | 5 + .../test_integration_base_skills.py | 5 + .../test_integration_base_toml.py | 5 + .../test_integration_base_yaml.py | 5 + tests/integrations/test_integration_bob.py | 0 .../integrations/test_integration_catalog.py | 0 tests/integrations/test_integration_claude.py | 0 .../test_integration_codebuddy.py | 0 tests/integrations/test_integration_codex.py | 0 .../integrations/test_integration_copilot.py | 0 .../test_integration_cursor_agent.py | 0 tests/integrations/test_integration_devin.py | 0 tests/integrations/test_integration_forge.py | 0 tests/integrations/test_integration_gemini.py | 0 .../integrations/test_integration_generic.py | 0 tests/integrations/test_integration_goose.py | 0 tests/integrations/test_integration_iflow.py | 0 tests/integrations/test_integration_junie.py | 0 .../integrations/test_integration_kilocode.py | 0 tests/integrations/test_integration_kimi.py | 0 .../integrations/test_integration_kiro_cli.py | 0 tests/integrations/test_integration_lingma.py | 0 .../integrations/test_integration_opencode.py | 0 
tests/integrations/test_integration_pi.py | 0 .../integrations/test_integration_qodercli.py | 0 tests/integrations/test_integration_qwen.py | 0 tests/integrations/test_integration_roo.py | 0 tests/integrations/test_integration_shai.py | 0 tests/integrations/test_integration_state.py | 0 .../test_integration_subcommand.py | 0 .../integrations/test_integration_tabnine.py | 0 tests/integrations/test_integration_trae.py | 0 tests/integrations/test_integration_vibe.py | 0 .../integrations/test_integration_windsurf.py | 0 tests/integrations/test_manifest.py | 0 tests/integrations/test_registry.py | 0 tests/test_agent_config_consistency.py | 0 tests/test_agent_projection.py | 0 tests/test_arch_templates.py | 0 tests/test_authentication.py | 0 tests/test_branch_numbering.py | 0 tests/test_check_tool.py | 0 tests/test_cli_version.py | 0 tests/test_extension_skills.py | 0 tests/test_extensions.py | 73 ++- tests/test_github_http.py | 0 tests/test_merge.py | 0 tests/test_presets.py | 134 ++++-- tests/test_registrar_path_traversal.py | 0 tests/test_setup_arch.py | 0 tests/test_setup_plan_feature_json.py | 0 tests/test_setup_tasks.py | 0 tests/test_timestamp_branches.py | 0 tests/test_upgrade.py | 0 tests/test_workflows.py | 262 +++++------ workflows/ARCHITECTURE.md | 0 workflows/PUBLISHING.md | 0 workflows/README.md | 0 workflows/catalog.community.json | 0 workflows/catalog.json | 9 - workflows/speckit-implement/workflow.yml | 42 -- workflows/speckit/workflow.yml | 0 312 files changed, 913 insertions(+), 783 deletions(-) mode change 100644 => 100755 .devcontainer/devcontainer.json mode change 100644 => 100755 .gitattributes mode change 100644 => 100755 .github/CODEOWNERS mode change 100644 => 100755 .github/ISSUE_TEMPLATE/agent_request.yml mode change 100644 => 100755 .github/ISSUE_TEMPLATE/bug_report.yml mode change 100644 => 100755 .github/ISSUE_TEMPLATE/config.yml mode change 100644 => 100755 .github/ISSUE_TEMPLATE/extension_submission.yml mode change 100644 => 100755 
.github/ISSUE_TEMPLATE/feature_request.yml mode change 100644 => 100755 .github/ISSUE_TEMPLATE/preset_submission.yml mode change 100644 => 100755 .github/PULL_REQUEST_TEMPLATE.md mode change 100644 => 100755 .github/dependabot.yml mode change 100644 => 100755 .github/workflows/RELEASE-PROCESS.md mode change 100644 => 100755 .github/workflows/catalog-assign.yml mode change 100644 => 100755 .github/workflows/codeql.yml mode change 100644 => 100755 .github/workflows/docs.yml mode change 100644 => 100755 .github/workflows/lint.yml mode change 100644 => 100755 .github/workflows/release-trigger.yml mode change 100644 => 100755 .github/workflows/release.yml mode change 100644 => 100755 .github/workflows/stale.yml mode change 100644 => 100755 .github/workflows/test.yml mode change 100644 => 100755 .gitignore mode change 100644 => 100755 .markdownlint-cli2.jsonc mode change 100644 => 100755 .zenodo.json mode change 100644 => 100755 AGENTS.md mode change 100644 => 100755 CHANGELOG.md mode change 100644 => 100755 CITATION.cff mode change 100644 => 100755 CODE_OF_CONDUCT.md mode change 100644 => 100755 CONTRIBUTING.md mode change 100644 => 100755 DEVELOPMENT.md mode change 100644 => 100755 EOF mode change 100644 => 100755 LICENSE mode change 100644 => 100755 README.md mode change 100644 => 100755 SECURITY.md mode change 100644 => 100755 SUPPORT.md mode change 100644 => 100755 docs/.gitignore mode change 100644 => 100755 docs/README.md mode change 100644 => 100755 docs/community/friends.md mode change 100644 => 100755 docs/community/presets.md mode change 100644 => 100755 docs/community/walkthroughs.md mode change 100644 => 100755 docs/concepts/sdd.md mode change 100644 => 100755 docs/docfx.json mode change 100644 => 100755 docs/index.md mode change 100644 => 100755 docs/install/uv.md mode change 100644 => 100755 docs/installation.md mode change 100644 => 100755 docs/local-development.md mode change 100644 => 100755 docs/quickstart.md mode change 100644 => 100755 
docs/reference/authentication.md mode change 100644 => 100755 docs/reference/core.md mode change 100644 => 100755 docs/reference/extensions.md mode change 100644 => 100755 docs/reference/integrations.md mode change 100644 => 100755 docs/reference/overview.md mode change 100644 => 100755 docs/reference/presets.md mode change 100644 => 100755 docs/reference/workflows.md mode change 100644 => 100755 docs/template/public/main.css mode change 100644 => 100755 docs/toc.yml mode change 100644 => 100755 docs/upgrade.md mode change 100644 => 100755 extensions/EXTENSION-API-REFERENCE.md mode change 100644 => 100755 extensions/EXTENSION-DEVELOPMENT-GUIDE.md mode change 100644 => 100755 extensions/EXTENSION-PUBLISHING-GUIDE.md mode change 100644 => 100755 extensions/EXTENSION-USER-GUIDE.md mode change 100644 => 100755 extensions/README.md mode change 100644 => 100755 extensions/RFC-EXTENSION-SYSTEM.md mode change 100644 => 100755 extensions/catalog.community.json mode change 100644 => 100755 extensions/catalog.json mode change 100644 => 100755 extensions/git/README.md mode change 100644 => 100755 extensions/git/commands/speckit.git.commit.md mode change 100644 => 100755 extensions/git/commands/speckit.git.feature.md mode change 100644 => 100755 extensions/git/commands/speckit.git.initialize.md mode change 100644 => 100755 extensions/git/commands/speckit.git.remote.md mode change 100644 => 100755 extensions/git/commands/speckit.git.validate.md mode change 100644 => 100755 extensions/git/config-template.yml mode change 100644 => 100755 extensions/git/extension.yml mode change 100644 => 100755 extensions/git/git-config.yml mode change 100644 => 100755 extensions/git/scripts/powershell/auto-commit.ps1 mode change 100644 => 100755 extensions/git/scripts/powershell/create-new-feature.ps1 mode change 100644 => 100755 extensions/git/scripts/powershell/git-common.ps1 mode change 100644 => 100755 extensions/git/scripts/powershell/initialize-repo.ps1 mode change 100644 => 100755 
extensions/selftest/commands/selftest.md mode change 100644 => 100755 extensions/selftest/extension.yml mode change 100644 => 100755 extensions/template/.gitignore mode change 100644 => 100755 extensions/template/CHANGELOG.md mode change 100644 => 100755 extensions/template/EXAMPLE-README.md mode change 100644 => 100755 extensions/template/LICENSE mode change 100644 => 100755 extensions/template/README.md mode change 100644 => 100755 extensions/template/commands/example.md mode change 100644 => 100755 extensions/template/config-template.yml mode change 100644 => 100755 extensions/template/extension.yml mode change 100644 => 100755 integrations/CONTRIBUTING.md mode change 100644 => 100755 integrations/README.md mode change 100644 => 100755 integrations/catalog.community.json mode change 100644 => 100755 integrations/catalog.json mode change 100644 => 100755 media/bootstrap-claude-code.gif mode change 100644 => 100755 media/logo_large.webp mode change 100644 => 100755 media/logo_small.webp mode change 100644 => 100755 media/spec-kit-video-header.jpg mode change 100644 => 100755 media/specify_cli.gif mode change 100644 => 100755 newsletters/2026-April.md mode change 100644 => 100755 newsletters/2026-February.md mode change 100644 => 100755 newsletters/2026-March.md mode change 100644 => 100755 presets/ARCHITECTURE.md mode change 100644 => 100755 presets/PUBLISHING.md mode change 100644 => 100755 presets/README.md mode change 100644 => 100755 presets/catalog.community.json delete mode 100644 presets/implement/commands/speckit.implement.md delete mode 100644 presets/implement/preset.yml mode change 100644 => 100755 presets/lean/README.md mode change 100644 => 100755 presets/lean/commands/speckit.constitution.md mode change 100644 => 100755 presets/lean/commands/speckit.implement.md mode change 100644 => 100755 presets/lean/commands/speckit.plan.md mode change 100644 => 100755 presets/lean/commands/speckit.specify.md mode change 100644 => 100755 
presets/lean/commands/speckit.tasks.md mode change 100644 => 100755 presets/lean/preset.yml mode change 100644 => 100755 presets/scaffold/README.md mode change 100644 => 100755 presets/scaffold/commands/speckit.myext.myextcmd.md mode change 100644 => 100755 presets/scaffold/commands/speckit.specify.md mode change 100644 => 100755 presets/scaffold/preset.yml mode change 100644 => 100755 presets/scaffold/templates/myext-template.md mode change 100644 => 100755 presets/scaffold/templates/spec-template.md mode change 100644 => 100755 presets/self-test/commands/speckit.specify.md mode change 100644 => 100755 presets/self-test/commands/speckit.wrap-test.md mode change 100644 => 100755 presets/self-test/preset.yml mode change 100644 => 100755 presets/self-test/templates/agent-file-template.md mode change 100644 => 100755 presets/self-test/templates/checklist-template.md mode change 100644 => 100755 presets/self-test/templates/constitution-template.md mode change 100644 => 100755 presets/self-test/templates/plan-template.md mode change 100644 => 100755 presets/self-test/templates/spec-template.md mode change 100644 => 100755 presets/self-test/templates/tasks-template.md mode change 100644 => 100755 scripts/bash/check-prerequisites.sh mode change 100644 => 100755 scripts/bash/common.sh mode change 100644 => 100755 scripts/bash/create-new-feature.sh mode change 100644 => 100755 scripts/bash/setup-plan.sh mode change 100644 => 100755 scripts/bash/setup-tasks.sh mode change 100644 => 100755 scripts/powershell/check-prerequisites.ps1 mode change 100644 => 100755 scripts/powershell/common.ps1 mode change 100644 => 100755 scripts/powershell/create-new-feature.ps1 mode change 100644 => 100755 scripts/powershell/setup-plan.ps1 mode change 100644 => 100755 scripts/powershell/setup-tasks.ps1 mode change 100644 => 100755 spec-driven.md mode change 100644 => 100755 spec-kit.code-workspace mode change 100644 => 100755 src/specify_cli/_github_http.py mode change 100644 => 100755 
src/specify_cli/agent_projection.py mode change 100644 => 100755 src/specify_cli/agents.py mode change 100644 => 100755 src/specify_cli/authentication/__init__.py mode change 100644 => 100755 src/specify_cli/authentication/azure_devops.py mode change 100644 => 100755 src/specify_cli/authentication/base.py mode change 100644 => 100755 src/specify_cli/authentication/config.py mode change 100644 => 100755 src/specify_cli/authentication/github.py mode change 100644 => 100755 src/specify_cli/authentication/http.py mode change 100644 => 100755 src/specify_cli/catalogs.py mode change 100644 => 100755 src/specify_cli/integration_runtime.py mode change 100644 => 100755 src/specify_cli/integration_state.py mode change 100644 => 100755 src/specify_cli/integrations/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/agy/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/amp/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/auggie/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/base.py mode change 100644 => 100755 src/specify_cli/integrations/bob/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/catalog.py mode change 100644 => 100755 src/specify_cli/integrations/claude/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/codebuddy/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/codex/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/copilot/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/cursor_agent/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/devin/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/forge/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/gemini/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/generic/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/goose/__init__.py mode change 
100644 => 100755 src/specify_cli/integrations/iflow/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/junie/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/kilocode/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/kimi/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/kiro_cli/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/lingma/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/manifest.py mode change 100644 => 100755 src/specify_cli/integrations/opencode/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/pi/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/qodercli/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/qwen/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/roo/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/shai/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/tabnine/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/trae/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/vibe/__init__.py mode change 100644 => 100755 src/specify_cli/integrations/windsurf/__init__.py mode change 100644 => 100755 src/specify_cli/shared_infra.py mode change 100644 => 100755 src/specify_cli/workflows/base.py mode change 100644 => 100755 src/specify_cli/workflows/catalog.py mode change 100644 => 100755 src/specify_cli/workflows/steps/__init__.py mode change 100644 => 100755 src/specify_cli/workflows/steps/command/__init__.py mode change 100644 => 100755 src/specify_cli/workflows/steps/do_while/__init__.py mode change 100644 => 100755 src/specify_cli/workflows/steps/fan_in/__init__.py mode change 100644 => 100755 src/specify_cli/workflows/steps/gate/__init__.py mode change 100644 => 100755 src/specify_cli/workflows/steps/if_then/__init__.py mode change 100644 => 100755 
src/specify_cli/workflows/steps/prompt/__init__.py delete mode 100644 src/specify_cli/workflows/steps/speckit_task_shards/__init__.py mode change 100644 => 100755 src/specify_cli/workflows/steps/switch/__init__.py mode change 100644 => 100755 src/specify_cli/workflows/steps/while_loop/__init__.py mode change 100644 => 100755 templates/agent-governance-template.md mode change 100644 => 100755 templates/architecture-development-template.md mode change 100644 => 100755 templates/architecture-logical-template.md mode change 100644 => 100755 templates/architecture-physical-template.md mode change 100644 => 100755 templates/architecture-process-template.md mode change 100644 => 100755 templates/architecture-scenario-template.md mode change 100644 => 100755 templates/architecture-template.md mode change 100644 => 100755 templates/checklist-template.md mode change 100644 => 100755 templates/commands/agent.md mode change 100644 => 100755 templates/commands/analyze.md mode change 100644 => 100755 templates/commands/arch.md mode change 100644 => 100755 templates/commands/checklist.md mode change 100644 => 100755 templates/commands/clarify.md mode change 100644 => 100755 templates/commands/constitution.md mode change 100644 => 100755 templates/commands/governance.md mode change 100644 => 100755 templates/commands/plan.md mode change 100644 => 100755 templates/commands/specify.md mode change 100644 => 100755 templates/commands/tasks.md mode change 100644 => 100755 templates/commands/taskstoissues.md mode change 100644 => 100755 templates/constitution-template.md mode change 100644 => 100755 templates/plan-template.md mode change 100644 => 100755 templates/spec-template.md mode change 100644 => 100755 templates/tasks-template.md mode change 100644 => 100755 templates/vscode-settings.json mode change 100644 => 100755 tests/__init__.py mode change 100644 => 100755 tests/auth_helpers.py mode change 100644 => 100755 tests/extensions/__init__.py mode change 100644 => 100755 
tests/extensions/git/__init__.py mode change 100644 => 100755 tests/extensions/git/test_git_extension.py mode change 100644 => 100755 tests/hooks/.specify/extensions.yml mode change 100644 => 100755 tests/hooks/TESTING.md mode change 100644 => 100755 tests/hooks/plan.md mode change 100644 => 100755 tests/hooks/spec.md mode change 100644 => 100755 tests/hooks/tasks.md mode change 100644 => 100755 tests/integrations/__init__.py mode change 100644 => 100755 tests/integrations/conftest.py mode change 100644 => 100755 tests/integrations/test_base.py mode change 100644 => 100755 tests/integrations/test_integration_agy.py mode change 100644 => 100755 tests/integrations/test_integration_amp.py mode change 100644 => 100755 tests/integrations/test_integration_auggie.py mode change 100644 => 100755 tests/integrations/test_integration_bob.py mode change 100644 => 100755 tests/integrations/test_integration_catalog.py mode change 100644 => 100755 tests/integrations/test_integration_claude.py mode change 100644 => 100755 tests/integrations/test_integration_codebuddy.py mode change 100644 => 100755 tests/integrations/test_integration_codex.py mode change 100644 => 100755 tests/integrations/test_integration_copilot.py mode change 100644 => 100755 tests/integrations/test_integration_cursor_agent.py mode change 100644 => 100755 tests/integrations/test_integration_devin.py mode change 100644 => 100755 tests/integrations/test_integration_forge.py mode change 100644 => 100755 tests/integrations/test_integration_gemini.py mode change 100644 => 100755 tests/integrations/test_integration_generic.py mode change 100644 => 100755 tests/integrations/test_integration_goose.py mode change 100644 => 100755 tests/integrations/test_integration_iflow.py mode change 100644 => 100755 tests/integrations/test_integration_junie.py mode change 100644 => 100755 tests/integrations/test_integration_kilocode.py mode change 100644 => 100755 tests/integrations/test_integration_kimi.py mode change 100644 => 
100755 tests/integrations/test_integration_kiro_cli.py mode change 100644 => 100755 tests/integrations/test_integration_lingma.py mode change 100644 => 100755 tests/integrations/test_integration_opencode.py mode change 100644 => 100755 tests/integrations/test_integration_pi.py mode change 100644 => 100755 tests/integrations/test_integration_qodercli.py mode change 100644 => 100755 tests/integrations/test_integration_qwen.py mode change 100644 => 100755 tests/integrations/test_integration_roo.py mode change 100644 => 100755 tests/integrations/test_integration_shai.py mode change 100644 => 100755 tests/integrations/test_integration_state.py mode change 100644 => 100755 tests/integrations/test_integration_subcommand.py mode change 100644 => 100755 tests/integrations/test_integration_tabnine.py mode change 100644 => 100755 tests/integrations/test_integration_trae.py mode change 100644 => 100755 tests/integrations/test_integration_vibe.py mode change 100644 => 100755 tests/integrations/test_integration_windsurf.py mode change 100644 => 100755 tests/integrations/test_manifest.py mode change 100644 => 100755 tests/integrations/test_registry.py mode change 100644 => 100755 tests/test_agent_config_consistency.py mode change 100644 => 100755 tests/test_agent_projection.py mode change 100644 => 100755 tests/test_arch_templates.py mode change 100644 => 100755 tests/test_authentication.py mode change 100644 => 100755 tests/test_branch_numbering.py mode change 100644 => 100755 tests/test_check_tool.py mode change 100644 => 100755 tests/test_cli_version.py mode change 100644 => 100755 tests/test_extension_skills.py mode change 100644 => 100755 tests/test_github_http.py mode change 100644 => 100755 tests/test_merge.py mode change 100644 => 100755 tests/test_registrar_path_traversal.py mode change 100644 => 100755 tests/test_setup_arch.py mode change 100644 => 100755 tests/test_setup_plan_feature_json.py mode change 100644 => 100755 tests/test_setup_tasks.py mode change 100644 => 
100755 tests/test_timestamp_branches.py mode change 100644 => 100755 tests/test_upgrade.py mode change 100644 => 100755 workflows/ARCHITECTURE.md mode change 100644 => 100755 workflows/PUBLISHING.md mode change 100644 => 100755 workflows/README.md mode change 100644 => 100755 workflows/catalog.community.json delete mode 100644 workflows/speckit-implement/workflow.yml mode change 100644 => 100755 workflows/speckit/workflow.yml diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json old mode 100644 new mode 100755 diff --git a/.gitattributes b/.gitattributes old mode 100644 new mode 100755 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS old mode 100644 new mode 100755 diff --git a/.github/ISSUE_TEMPLATE/agent_request.yml b/.github/ISSUE_TEMPLATE/agent_request.yml old mode 100644 new mode 100755 diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml old mode 100644 new mode 100755 diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml old mode 100644 new mode 100755 diff --git a/.github/ISSUE_TEMPLATE/extension_submission.yml b/.github/ISSUE_TEMPLATE/extension_submission.yml old mode 100644 new mode 100755 diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml old mode 100644 new mode 100755 diff --git a/.github/ISSUE_TEMPLATE/preset_submission.yml b/.github/ISSUE_TEMPLATE/preset_submission.yml old mode 100644 new mode 100755 diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md old mode 100644 new mode 100755 diff --git a/.github/dependabot.yml b/.github/dependabot.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/RELEASE-PROCESS.md b/.github/workflows/RELEASE-PROCESS.md old mode 100644 new mode 100755 diff --git a/.github/workflows/catalog-assign.yml b/.github/workflows/catalog-assign.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/codeql.yml 
b/.github/workflows/codeql.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/release-trigger.yml b/.github/workflows/release-trigger.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml old mode 100644 new mode 100755 diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc old mode 100644 new mode 100755 diff --git a/.zenodo.json b/.zenodo.json old mode 100644 new mode 100755 diff --git a/AGENTS.md b/AGENTS.md old mode 100644 new mode 100755 diff --git a/CHANGELOG.md b/CHANGELOG.md old mode 100644 new mode 100755 diff --git a/CITATION.cff b/CITATION.cff old mode 100644 new mode 100755 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md old mode 100644 new mode 100755 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md old mode 100644 new mode 100755 diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md old mode 100644 new mode 100755 diff --git a/EOF b/EOF old mode 100644 new mode 100755 diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/README.md b/README.md old mode 100644 new mode 100755 diff --git a/SECURITY.md b/SECURITY.md old mode 100644 new mode 100755 diff --git a/SUPPORT.md b/SUPPORT.md old mode 100644 new mode 100755 diff --git a/docs/.gitignore b/docs/.gitignore old mode 100644 new mode 100755 diff --git a/docs/README.md b/docs/README.md old mode 100644 new mode 100755 diff --git a/docs/community/friends.md b/docs/community/friends.md old mode 100644 new mode 100755 diff --git 
a/docs/community/presets.md b/docs/community/presets.md old mode 100644 new mode 100755 diff --git a/docs/community/walkthroughs.md b/docs/community/walkthroughs.md old mode 100644 new mode 100755 diff --git a/docs/concepts/sdd.md b/docs/concepts/sdd.md old mode 100644 new mode 100755 diff --git a/docs/docfx.json b/docs/docfx.json old mode 100644 new mode 100755 diff --git a/docs/index.md b/docs/index.md old mode 100644 new mode 100755 diff --git a/docs/install/uv.md b/docs/install/uv.md old mode 100644 new mode 100755 diff --git a/docs/installation.md b/docs/installation.md old mode 100644 new mode 100755 diff --git a/docs/local-development.md b/docs/local-development.md old mode 100644 new mode 100755 diff --git a/docs/quickstart.md b/docs/quickstart.md old mode 100644 new mode 100755 diff --git a/docs/reference/authentication.md b/docs/reference/authentication.md old mode 100644 new mode 100755 diff --git a/docs/reference/core.md b/docs/reference/core.md old mode 100644 new mode 100755 diff --git a/docs/reference/extensions.md b/docs/reference/extensions.md old mode 100644 new mode 100755 diff --git a/docs/reference/integrations.md b/docs/reference/integrations.md old mode 100644 new mode 100755 diff --git a/docs/reference/overview.md b/docs/reference/overview.md old mode 100644 new mode 100755 diff --git a/docs/reference/presets.md b/docs/reference/presets.md old mode 100644 new mode 100755 diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md old mode 100644 new mode 100755 diff --git a/docs/template/public/main.css b/docs/template/public/main.css old mode 100644 new mode 100755 diff --git a/docs/toc.yml b/docs/toc.yml old mode 100644 new mode 100755 diff --git a/docs/upgrade.md b/docs/upgrade.md old mode 100644 new mode 100755 diff --git a/extensions/EXTENSION-API-REFERENCE.md b/extensions/EXTENSION-API-REFERENCE.md old mode 100644 new mode 100755 diff --git a/extensions/EXTENSION-DEVELOPMENT-GUIDE.md 
b/extensions/EXTENSION-DEVELOPMENT-GUIDE.md old mode 100644 new mode 100755 diff --git a/extensions/EXTENSION-PUBLISHING-GUIDE.md b/extensions/EXTENSION-PUBLISHING-GUIDE.md old mode 100644 new mode 100755 diff --git a/extensions/EXTENSION-USER-GUIDE.md b/extensions/EXTENSION-USER-GUIDE.md old mode 100644 new mode 100755 diff --git a/extensions/README.md b/extensions/README.md old mode 100644 new mode 100755 diff --git a/extensions/RFC-EXTENSION-SYSTEM.md b/extensions/RFC-EXTENSION-SYSTEM.md old mode 100644 new mode 100755 diff --git a/extensions/catalog.community.json b/extensions/catalog.community.json old mode 100644 new mode 100755 diff --git a/extensions/catalog.json b/extensions/catalog.json old mode 100644 new mode 100755 diff --git a/extensions/git/README.md b/extensions/git/README.md old mode 100644 new mode 100755 diff --git a/extensions/git/commands/speckit.git.commit.md b/extensions/git/commands/speckit.git.commit.md old mode 100644 new mode 100755 diff --git a/extensions/git/commands/speckit.git.feature.md b/extensions/git/commands/speckit.git.feature.md old mode 100644 new mode 100755 diff --git a/extensions/git/commands/speckit.git.initialize.md b/extensions/git/commands/speckit.git.initialize.md old mode 100644 new mode 100755 diff --git a/extensions/git/commands/speckit.git.remote.md b/extensions/git/commands/speckit.git.remote.md old mode 100644 new mode 100755 diff --git a/extensions/git/commands/speckit.git.validate.md b/extensions/git/commands/speckit.git.validate.md old mode 100644 new mode 100755 diff --git a/extensions/git/config-template.yml b/extensions/git/config-template.yml old mode 100644 new mode 100755 diff --git a/extensions/git/extension.yml b/extensions/git/extension.yml old mode 100644 new mode 100755 diff --git a/extensions/git/git-config.yml b/extensions/git/git-config.yml old mode 100644 new mode 100755 diff --git a/extensions/git/scripts/powershell/auto-commit.ps1 b/extensions/git/scripts/powershell/auto-commit.ps1 old mode 
100644 new mode 100755 diff --git a/extensions/git/scripts/powershell/create-new-feature.ps1 b/extensions/git/scripts/powershell/create-new-feature.ps1 old mode 100644 new mode 100755 diff --git a/extensions/git/scripts/powershell/git-common.ps1 b/extensions/git/scripts/powershell/git-common.ps1 old mode 100644 new mode 100755 diff --git a/extensions/git/scripts/powershell/initialize-repo.ps1 b/extensions/git/scripts/powershell/initialize-repo.ps1 old mode 100644 new mode 100755 diff --git a/extensions/selftest/commands/selftest.md b/extensions/selftest/commands/selftest.md old mode 100644 new mode 100755 diff --git a/extensions/selftest/extension.yml b/extensions/selftest/extension.yml old mode 100644 new mode 100755 diff --git a/extensions/template/.gitignore b/extensions/template/.gitignore old mode 100644 new mode 100755 diff --git a/extensions/template/CHANGELOG.md b/extensions/template/CHANGELOG.md old mode 100644 new mode 100755 diff --git a/extensions/template/EXAMPLE-README.md b/extensions/template/EXAMPLE-README.md old mode 100644 new mode 100755 diff --git a/extensions/template/LICENSE b/extensions/template/LICENSE old mode 100644 new mode 100755 diff --git a/extensions/template/README.md b/extensions/template/README.md old mode 100644 new mode 100755 diff --git a/extensions/template/commands/example.md b/extensions/template/commands/example.md old mode 100644 new mode 100755 diff --git a/extensions/template/config-template.yml b/extensions/template/config-template.yml old mode 100644 new mode 100755 diff --git a/extensions/template/extension.yml b/extensions/template/extension.yml old mode 100644 new mode 100755 diff --git a/integrations/CONTRIBUTING.md b/integrations/CONTRIBUTING.md old mode 100644 new mode 100755 diff --git a/integrations/README.md b/integrations/README.md old mode 100644 new mode 100755 diff --git a/integrations/catalog.community.json b/integrations/catalog.community.json old mode 100644 new mode 100755 diff --git 
a/integrations/catalog.json b/integrations/catalog.json old mode 100644 new mode 100755 diff --git a/media/bootstrap-claude-code.gif b/media/bootstrap-claude-code.gif old mode 100644 new mode 100755 diff --git a/media/logo_large.webp b/media/logo_large.webp old mode 100644 new mode 100755 diff --git a/media/logo_small.webp b/media/logo_small.webp old mode 100644 new mode 100755 diff --git a/media/spec-kit-video-header.jpg b/media/spec-kit-video-header.jpg old mode 100644 new mode 100755 diff --git a/media/specify_cli.gif b/media/specify_cli.gif old mode 100644 new mode 100755 diff --git a/newsletters/2026-April.md b/newsletters/2026-April.md old mode 100644 new mode 100755 diff --git a/newsletters/2026-February.md b/newsletters/2026-February.md old mode 100644 new mode 100755 diff --git a/newsletters/2026-March.md b/newsletters/2026-March.md old mode 100644 new mode 100755 diff --git a/presets/ARCHITECTURE.md b/presets/ARCHITECTURE.md old mode 100644 new mode 100755 diff --git a/presets/PUBLISHING.md b/presets/PUBLISHING.md old mode 100644 new mode 100755 diff --git a/presets/README.md b/presets/README.md old mode 100644 new mode 100755 diff --git a/presets/catalog.community.json b/presets/catalog.community.json old mode 100644 new mode 100755 diff --git a/presets/catalog.json b/presets/catalog.json index 7de3b7074f..f272617926 100644 --- a/presets/catalog.json +++ b/presets/catalog.json @@ -3,27 +3,6 @@ "updated_at": "2026-04-24T00:00:00Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.json", "presets": { - "implement": { - "name": "Implement Workflow", - "id": "implement", - "version": "1.0.0", - "description": "Runs the implementation command through task handoff shards", - "author": "github", - "repository": "https://github.com/github/spec-kit", - "license": "MIT", - "bundled": true, - "requires": { - "speckit_version": ">=0.8.9.dev0" - }, - "provides": { - "commands": 1, - "templates": 0 - }, - "tags": [ - 
"implementation", - "workflow" - ] - }, "lean": { "name": "Lean Workflow", "id": "lean", diff --git a/presets/implement/commands/speckit.implement.md b/presets/implement/commands/speckit.implement.md deleted file mode 100644 index aaca7c077c..0000000000 --- a/presets/implement/commands/speckit.implement.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: Execute the implementation plan by splitting tasks.md into workflow handoff shards ---- - -## User Input - -```text -$ARGUMENTS -``` - -If the user input references a handoff JSON file, execute that handoff directly: - -1. Read the handoff JSON file. -2. Load only the listed `required_context_refs` plus any files needed inside `allowed_read_paths`. -3. Execute only the listed `task_ids`, respecting `allowed_write_paths` and `forbidden_actions`. -4. Mark only the completed listed tasks in `tasks.md`. -5. Run any `validation_commands` from the handoff, plus focused validation for changed files. - -Do not run `specify workflow run` while executing a handoff JSON. - -Otherwise, run the implementation workflow from the repository root: - -```sh -specify workflow run speckit-implement -i integration=__AGENT__ -i args="$ARGUMENTS" -``` - -Wait for the workflow to complete. If it fails while building handoff shards, report the error and do not run `speckit.implement` manually. If a shard fails during fan-out, report the failing shard and preserve the generated handoff files for resume or debugging. 
diff --git a/presets/implement/preset.yml b/presets/implement/preset.yml deleted file mode 100644 index 4b3ab970c9..0000000000 --- a/presets/implement/preset.yml +++ /dev/null @@ -1,25 +0,0 @@ -schema_version: "1.0" - -preset: - id: "implement" - name: "Implement Workflow" - version: "1.0.0" - description: "Runs the implementation command through task handoff shards" - author: "github" - repository: "https://github.com/github/spec-kit" - license: "MIT" - -requires: - speckit_version: ">=0.8.9.dev0" - -provides: - templates: - - type: "command" - name: "speckit.implement" - file: "commands/speckit.implement.md" - description: "Execute implementation through workflow-generated task handoffs" - replaces: "speckit.implement" - -tags: - - "implementation" - - "workflow" diff --git a/presets/lean/README.md b/presets/lean/README.md old mode 100644 new mode 100755 diff --git a/presets/lean/commands/speckit.constitution.md b/presets/lean/commands/speckit.constitution.md old mode 100644 new mode 100755 diff --git a/presets/lean/commands/speckit.implement.md b/presets/lean/commands/speckit.implement.md old mode 100644 new mode 100755 diff --git a/presets/lean/commands/speckit.plan.md b/presets/lean/commands/speckit.plan.md old mode 100644 new mode 100755 diff --git a/presets/lean/commands/speckit.specify.md b/presets/lean/commands/speckit.specify.md old mode 100644 new mode 100755 diff --git a/presets/lean/commands/speckit.tasks.md b/presets/lean/commands/speckit.tasks.md old mode 100644 new mode 100755 diff --git a/presets/lean/preset.yml b/presets/lean/preset.yml old mode 100644 new mode 100755 diff --git a/presets/scaffold/README.md b/presets/scaffold/README.md old mode 100644 new mode 100755 diff --git a/presets/scaffold/commands/speckit.myext.myextcmd.md b/presets/scaffold/commands/speckit.myext.myextcmd.md old mode 100644 new mode 100755 diff --git a/presets/scaffold/commands/speckit.specify.md b/presets/scaffold/commands/speckit.specify.md old mode 100644 new mode 
100755 diff --git a/presets/scaffold/preset.yml b/presets/scaffold/preset.yml old mode 100644 new mode 100755 diff --git a/presets/scaffold/templates/myext-template.md b/presets/scaffold/templates/myext-template.md old mode 100644 new mode 100755 diff --git a/presets/scaffold/templates/spec-template.md b/presets/scaffold/templates/spec-template.md old mode 100644 new mode 100755 diff --git a/presets/self-test/commands/speckit.specify.md b/presets/self-test/commands/speckit.specify.md old mode 100644 new mode 100755 diff --git a/presets/self-test/commands/speckit.wrap-test.md b/presets/self-test/commands/speckit.wrap-test.md old mode 100644 new mode 100755 diff --git a/presets/self-test/preset.yml b/presets/self-test/preset.yml old mode 100644 new mode 100755 diff --git a/presets/self-test/templates/agent-file-template.md b/presets/self-test/templates/agent-file-template.md old mode 100644 new mode 100755 diff --git a/presets/self-test/templates/checklist-template.md b/presets/self-test/templates/checklist-template.md old mode 100644 new mode 100755 diff --git a/presets/self-test/templates/constitution-template.md b/presets/self-test/templates/constitution-template.md old mode 100644 new mode 100755 diff --git a/presets/self-test/templates/plan-template.md b/presets/self-test/templates/plan-template.md old mode 100644 new mode 100755 diff --git a/presets/self-test/templates/spec-template.md b/presets/self-test/templates/spec-template.md old mode 100644 new mode 100755 diff --git a/presets/self-test/templates/tasks-template.md b/presets/self-test/templates/tasks-template.md old mode 100644 new mode 100755 diff --git a/pyproject.toml b/pyproject.toml index e368d4f1e0..33edf77287 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,6 @@ packages = ["src/specify_cli"] "extensions/git" = "specify_cli/core_pack/extensions/git" # Bundled workflows (auto-installed during `specify init`) "workflows/speckit" = "specify_cli/core_pack/workflows/speckit" 
-"workflows/speckit-implement" = "specify_cli/core_pack/workflows/speckit-implement" # Bundled presets (installable via `specify preset add <name>` or `specify init --preset <name>`) "presets/implement" = "specify_cli/core_pack/presets/implement" "presets/lean" = "specify_cli/core_pack/presets/lean" diff --git a/scripts/bash/check-prerequisites.sh b/scripts/bash/check-prerequisites.sh old mode 100644 new mode 100755 diff --git a/scripts/bash/common.sh b/scripts/bash/common.sh old mode 100644 new mode 100755 diff --git a/scripts/bash/create-new-feature.sh b/scripts/bash/create-new-feature.sh old mode 100644 new mode 100755 diff --git a/scripts/bash/setup-plan.sh b/scripts/bash/setup-plan.sh old mode 100644 new mode 100755 diff --git a/scripts/bash/setup-tasks.sh b/scripts/bash/setup-tasks.sh old mode 100644 new mode 100755 diff --git a/scripts/powershell/check-prerequisites.ps1 b/scripts/powershell/check-prerequisites.ps1 old mode 100644 new mode 100755 diff --git a/scripts/powershell/common.ps1 b/scripts/powershell/common.ps1 old mode 100644 new mode 100755 diff --git a/scripts/powershell/create-new-feature.ps1 b/scripts/powershell/create-new-feature.ps1 old mode 100644 new mode 100755 diff --git a/scripts/powershell/setup-plan.ps1 b/scripts/powershell/setup-plan.ps1 old mode 100644 new mode 100755 diff --git a/scripts/powershell/setup-tasks.ps1 b/scripts/powershell/setup-tasks.ps1 old mode 100644 new mode 100755 diff --git a/spec-driven.md b/spec-driven.md old mode 100644 new mode 100755 diff --git a/spec-kit.code-workspace b/spec-kit.code-workspace old mode 100644 new mode 100755 diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 835cfc8e54..0400200ea1 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -95,9 +95,15 @@ def _build_agent_config() -> dict[str, dict[str, Any]]: AGENT_CONFIG = _build_agent_config() DEFAULT_INIT_INTEGRATION = "copilot" -DEFAULT_BUNDLED_WORKFLOWS = ("speckit", "speckit-implement") 
-DEFAULT_BUNDLED_PRESETS = ("implement",) +DEFAULT_BUNDLED_WORKFLOWS = ("speckit",) +DEFAULT_BUNDLED_PRESETS: tuple[str, ...] = () DEFAULT_BUNDLED_PRESET_PRIORITY = 20 +DEFAULT_IMPLEMENT_PRESET_ID = "implement" +DEFAULT_IMPLEMENT_PRESET_URL = ( + "https://github.com/bigsmartben/spec-kit-implement-preset/" + "archive/refs/tags/v1.0.0.zip" +) +DEFAULT_IMPLEMENT_PRESET_SOURCE_ENV = "SPECKIT_IMPLEMENT_PRESET_SOURCE" AI_ASSISTANT_ALIASES = { "kiro": "kiro-cli", @@ -780,6 +786,75 @@ def _install_bundled_extension(project_path: Path, extension_id: str) -> str: return "extension installed" +def _install_default_implement_preset( + project_path: Path, + *, + priority: int = DEFAULT_BUNDLED_PRESET_PRIORITY, +) -> str: + """Install the default external implement preset.""" + from .presets import PresetManager + + manager = PresetManager(project_path) + if manager.registry.is_installed(DEFAULT_IMPLEMENT_PRESET_ID): + return "implement already installed" + + source = os.environ.get( + DEFAULT_IMPLEMENT_PRESET_SOURCE_ENV, + DEFAULT_IMPLEMENT_PRESET_URL, + ).strip() + if not source: + raise RuntimeError( + f"{DEFAULT_IMPLEMENT_PRESET_SOURCE_ENV} is empty; " + "cannot install the default implement preset." 
+ ) + + source_path = Path(source).expanduser() + if source_path.is_dir(): + manager.install_from_directory( + source_path.resolve(), + get_speckit_version(), + priority=priority, + ) + return ( + "implement installed from local source " + f"(priority {priority})" + ) + if source_path.is_file(): + manager.install_from_zip( + source_path.resolve(), + get_speckit_version(), + priority=priority, + ) + return ( + "implement installed from local archive " + f"(priority {priority})" + ) + + download_dir = project_path / ".specify" / "presets" / ".cache" / "downloads" + download_dir.mkdir(parents=True, exist_ok=True) + zip_path = download_dir / "implement-default.zip" + + try: + from specify_cli.authentication.http import open_url as _open_url + + with _open_url(source, timeout=60) as response: + zip_path.write_bytes(response.read()) + manager.install_from_zip( + zip_path, + get_speckit_version(), + priority=priority, + ) + return f"implement installed (priority {priority})" + except Exception as exc: + raise RuntimeError( + "Failed to install required default implement preset from " + f"{source}. Set {DEFAULT_IMPLEMENT_PRESET_SOURCE_ENV} to a " + "local preset directory for offline/dev installs." 
+ ) from exc + finally: + zip_path.unlink(missing_ok=True) + + def _install_default_bundled_presets( project_path: Path, *, @@ -943,6 +1018,7 @@ def ensure_executable_scripts(project_path: Path, tracker: StepTracker | None = scan_roots = [ project_path / ".specify" / "scripts", project_path / ".specify" / "extensions", + project_path / ".specify" / "presets", ] failures: list[str] = [] updated = 0 @@ -1580,18 +1656,21 @@ def init( save_init_options(project_path, init_opts) tracker.start("preset") - explicit_default_preset = preset in DEFAULT_BUNDLED_PRESETS + explicit_default_preset = preset == DEFAULT_IMPLEMENT_PRESET_ID try: - tracker.complete( - "preset", - _install_default_bundled_presets( - project_path, - skip={preset} if explicit_default_preset else set(), - ), - ) + preset_messages: list[str] = [] + if explicit_default_preset: + preset_messages.append("implement skipped") + else: + preset_messages.append(_install_default_implement_preset(project_path)) + bundled_message = _install_default_bundled_presets(project_path) + if bundled_message != "none": + preset_messages.append(bundled_message) + tracker.complete("preset", "; ".join(preset_messages)) except Exception as preset_err: sanitized_preset = str(preset_err).replace('\n', ' ').strip() tracker.error("preset", f"install failed: {sanitized_preset[:120]}") + raise # Install preset if specified if preset: @@ -1603,11 +1682,13 @@ def init( # Try local directory first, then bundled, then catalog local_path = Path(preset).resolve() if local_path.is_dir() and (local_path / "preset.yml").exists(): - preset_manager.install_from_directory(local_path, speckit_ver) + preset_manager.install_from_directory(local_path, speckit_ver, priority=10) + elif preset == DEFAULT_IMPLEMENT_PRESET_ID: + _install_default_implement_preset(project_path, priority=10) else: bundled_path = _locate_bundled_preset(preset) if bundled_path: - preset_manager.install_from_directory(bundled_path, speckit_ver) + 
preset_manager.install_from_directory(bundled_path, speckit_ver, priority=10) else: preset_catalog = PresetCatalog(project_path) pack_info = preset_catalog.get_pack_info(preset) @@ -1627,7 +1708,7 @@ def init( zip_path = None try: zip_path = preset_catalog.download_pack(preset) - preset_manager.install_from_zip(zip_path, speckit_ver) + preset_manager.install_from_zip(zip_path, speckit_ver, priority=10) except PresetError as preset_err: console.print(f"[yellow]Warning:[/yellow] Failed to install preset '{preset}': {preset_err}") finally: diff --git a/src/specify_cli/_github_http.py b/src/specify_cli/_github_http.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/agent_projection.py b/src/specify_cli/agent_projection.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/agents.py b/src/specify_cli/agents.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/authentication/__init__.py b/src/specify_cli/authentication/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/authentication/azure_devops.py b/src/specify_cli/authentication/azure_devops.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/authentication/base.py b/src/specify_cli/authentication/base.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/authentication/config.py b/src/specify_cli/authentication/config.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/authentication/github.py b/src/specify_cli/authentication/github.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/authentication/http.py b/src/specify_cli/authentication/http.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/catalogs.py b/src/specify_cli/catalogs.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/extensions.py b/src/specify_cli/extensions.py index 554151b2bb..4a892fd58f 100644 --- a/src/specify_cli/extensions.py +++ b/src/specify_cli/extensions.py @@ -201,23 +201,29 @@ def _validate(self): # Validate 
provides section provides = self.data["provides"] commands = provides.get("commands", []) + workflows = provides.get("workflows", []) hooks = self.data.get("hooks") if "commands" in provides and not isinstance(commands, list): raise ValidationError( "Invalid provides.commands: expected a list" ) + if "workflows" in provides and not isinstance(workflows, list): + raise ValidationError( + "Invalid provides.workflows: expected a list" + ) if "hooks" in self.data and not isinstance(hooks, dict): raise ValidationError( "Invalid hooks: expected a mapping" ) has_commands = bool(commands) + has_workflows = bool(workflows) has_hooks = bool(hooks) - if not has_commands and not has_hooks: + if not has_commands and not has_workflows and not has_hooks: raise ValidationError( - "Extension must provide at least one command or hook" + "Extension must provide at least one command, workflow, or hook" ) # Validate hook values (if present) @@ -276,6 +282,33 @@ def _validate(self): f"Aliases for command '{cmd['name']}' must be strings" ) + for workflow in workflows: + if not isinstance(workflow, dict): + raise ValidationError( + "Each workflow entry in 'provides.workflows' must be a mapping" + ) + if "id" not in workflow or "file" not in workflow: + raise ValidationError("Workflow missing 'id' or 'file'") + workflow_id = workflow["id"] + if not isinstance(workflow_id, str) or not re.match( + r"^[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$", workflow_id + ): + raise ValidationError( + f"Invalid workflow id '{workflow_id}': " + "must be lowercase alphanumeric with hyphens" + ) + file_path = workflow["file"] + if not isinstance(file_path, str): + raise ValidationError( + f"Invalid workflow file for '{workflow_id}': expected a string" + ) + normalized = os.path.normpath(file_path) + if os.path.isabs(normalized) or normalized.startswith(".."): + raise ValidationError( + f"Invalid workflow file path '{file_path}': " + "must be a relative path within the extension directory" + ) + # Rewrite any hook 
command references that pointed at a renamed command or # an alias-form ref (ext.cmd → speckit.ext.cmd). Always emit a warning when # the reference is changed so extension authors know to update the manifest. @@ -355,6 +388,11 @@ def commands(self) -> List[Dict[str, Any]]: """Get list of provided commands.""" return self.data.get("provides", {}).get("commands", []) + @property + def workflows(self) -> List[Dict[str, Any]]: + """Get list of provided workflows.""" + return self.data.get("provides", {}).get("workflows", []) + @property def hooks(self) -> Dict[str, Any]: """Get hook definitions.""" @@ -1132,6 +1170,102 @@ def check_compatibility( return True + def _register_extension_workflows( + self, + manifest: ExtensionManifest, + extension_dir: Path, + ) -> List[str]: + """Install workflow definitions provided by an extension.""" + if not manifest.workflows: + return [] + + from .workflows.catalog import WorkflowRegistry + from .workflows.engine import ( + WorkflowDefinition, + validate_workflow, + ) + + registry = WorkflowRegistry(self.project_root) + registered: List[str] = [] + workflows_root = self.project_root / ".specify" / "workflows" + ext_root = extension_dir.resolve() + + for wf_info in manifest.workflows: + workflow_id = wf_info["id"] + file_rel = wf_info["file"] + source_path = (ext_root / file_rel).resolve() + try: + source_path.relative_to(ext_root) + except ValueError as exc: + raise ValidationError( + f"Workflow '{workflow_id}' file escapes extension directory" + ) from exc + if not source_path.is_file(): + raise ValidationError( + f"Workflow '{workflow_id}' file not found: {file_rel}" + ) + + definition = WorkflowDefinition.from_yaml(source_path) + errors = validate_workflow(definition) + if errors: + raise ValidationError( + f"Workflow '{workflow_id}' is invalid:\n- " + + "\n- ".join(errors) + ) + if definition.id != workflow_id: + raise ValidationError( + f"Workflow manifest id '{workflow_id}' does not match " + f"workflow.yml id 
'{definition.id}'" + ) + + existing = registry.get(workflow_id) + if existing and existing.get("source") != f"extension:{manifest.id}": + raise ValidationError( + f"Workflow '{workflow_id}' is already installed from " + f"{existing.get('source', 'another source')}" + ) + + dest_dir = workflows_root / workflow_id + dest_dir.mkdir(parents=True, exist_ok=True) + shutil.copy2(source_path, dest_dir / "workflow.yml") + registry.add( + workflow_id, + { + "name": definition.name, + "version": definition.version, + "description": definition.description, + "source": f"extension:{manifest.id}", + "extension_id": manifest.id, + }, + ) + registered.append(workflow_id) + + return registered + + def _unregister_extension_workflows( + self, + extension_id: str, + workflow_ids: List[str], + ) -> None: + """Remove extension-owned workflow definitions from the project.""" + if not workflow_ids: + return + + from .workflows.catalog import WorkflowRegistry + + registry = WorkflowRegistry(self.project_root) + workflows_root = self.project_root / ".specify" / "workflows" + source_tag = f"extension:{extension_id}" + + for workflow_id in workflow_ids: + metadata = registry.get(workflow_id) + if metadata and metadata.get("source") != source_tag: + continue + workflow_dir = workflows_root / workflow_id + if workflow_dir.exists(): + shutil.rmtree(workflow_dir) + registry.remove(workflow_id) + def install_from_directory( self, source_dir: Path, @@ -1196,6 +1330,9 @@ def install_from_directory( # was used during project initialisation (feature parity). registered_skills = self._register_extension_skills(manifest, dest_dir) + # Install extension-provided workflow definitions. 
+ registered_workflows = self._register_extension_workflows(manifest, dest_dir) + # Register hooks hook_executor = HookExecutor(self.project_root) hook_executor.register_hooks(manifest) @@ -1209,6 +1346,7 @@ def install_from_directory( "priority": priority, "registered_commands": registered_commands, "registered_skills": registered_skills, + "registered_workflows": registered_workflows, }) return manifest @@ -1290,11 +1428,16 @@ def remove(self, extension_id: str, keep_config: bool = False) -> bool: metadata = self.registry.get(extension_id) registered_commands = metadata.get("registered_commands", {}) if metadata else {} raw_skills = metadata.get("registered_skills", []) if metadata else [] + raw_workflows = metadata.get("registered_workflows", []) if metadata else [] # Normalize: must be a list of plain strings to avoid corrupted-registry errors if isinstance(raw_skills, list): registered_skills = [s for s in raw_skills if isinstance(s, str)] else: registered_skills = [] + if isinstance(raw_workflows, list): + registered_workflows = [w for w in raw_workflows if isinstance(w, str)] + else: + registered_workflows = [] extension_dir = self.extensions_dir / extension_id @@ -1306,6 +1449,9 @@ def remove(self, extension_id: str, keep_config: bool = False) -> bool: # Unregister agent skills self._unregister_extension_skills(registered_skills, extension_id) + # Unregister extension-owned workflows + self._unregister_extension_workflows(extension_id, registered_workflows) + if keep_config: # Preserve config files, only remove non-config files if extension_dir.exists(): diff --git a/src/specify_cli/integration_runtime.py b/src/specify_cli/integration_runtime.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integration_state.py b/src/specify_cli/integration_state.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/__init__.py b/src/specify_cli/integrations/__init__.py old mode 100644 new mode 100755 diff --git 
a/src/specify_cli/integrations/agy/__init__.py b/src/specify_cli/integrations/agy/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/amp/__init__.py b/src/specify_cli/integrations/amp/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/auggie/__init__.py b/src/specify_cli/integrations/auggie/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/base.py b/src/specify_cli/integrations/base.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/bob/__init__.py b/src/specify_cli/integrations/bob/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/catalog.py b/src/specify_cli/integrations/catalog.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/claude/__init__.py b/src/specify_cli/integrations/claude/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/codebuddy/__init__.py b/src/specify_cli/integrations/codebuddy/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/codex/__init__.py b/src/specify_cli/integrations/codex/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/copilot/__init__.py b/src/specify_cli/integrations/copilot/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/cursor_agent/__init__.py b/src/specify_cli/integrations/cursor_agent/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/devin/__init__.py b/src/specify_cli/integrations/devin/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/forge/__init__.py b/src/specify_cli/integrations/forge/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/gemini/__init__.py b/src/specify_cli/integrations/gemini/__init__.py old mode 100644 new mode 100755 diff --git 
a/src/specify_cli/integrations/generic/__init__.py b/src/specify_cli/integrations/generic/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/goose/__init__.py b/src/specify_cli/integrations/goose/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/iflow/__init__.py b/src/specify_cli/integrations/iflow/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/junie/__init__.py b/src/specify_cli/integrations/junie/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/kilocode/__init__.py b/src/specify_cli/integrations/kilocode/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/kimi/__init__.py b/src/specify_cli/integrations/kimi/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/kiro_cli/__init__.py b/src/specify_cli/integrations/kiro_cli/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/lingma/__init__.py b/src/specify_cli/integrations/lingma/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/manifest.py b/src/specify_cli/integrations/manifest.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/opencode/__init__.py b/src/specify_cli/integrations/opencode/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/pi/__init__.py b/src/specify_cli/integrations/pi/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/qodercli/__init__.py b/src/specify_cli/integrations/qodercli/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/qwen/__init__.py b/src/specify_cli/integrations/qwen/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/roo/__init__.py b/src/specify_cli/integrations/roo/__init__.py old mode 100644 new mode 100755 diff --git 
a/src/specify_cli/integrations/shai/__init__.py b/src/specify_cli/integrations/shai/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/tabnine/__init__.py b/src/specify_cli/integrations/tabnine/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/trae/__init__.py b/src/specify_cli/integrations/trae/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/vibe/__init__.py b/src/specify_cli/integrations/vibe/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/integrations/windsurf/__init__.py b/src/specify_cli/integrations/windsurf/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/presets.py b/src/specify_cli/presets.py index 8188836567..7f82907ca9 100644 --- a/src/specify_cli/presets.py +++ b/src/specify_cli/presets.py @@ -200,6 +200,11 @@ def _validate(self): raise PresetValidationError( "Preset must provide at least one template" ) + workflows = provides.get("workflows", []) + if "workflows" in provides and not isinstance(workflows, list): + raise PresetValidationError( + "Invalid provides.workflows: expected a list" + ) # Validate templates for tmpl in provides["templates"]: @@ -260,6 +265,33 @@ def _validate(self): "must be lowercase alphanumeric with hyphens only" ) + for workflow in workflows: + if not isinstance(workflow, dict): + raise PresetValidationError( + "Each workflow entry in 'provides.workflows' must be a mapping" + ) + if "id" not in workflow or "file" not in workflow: + raise PresetValidationError("Workflow missing 'id' or 'file'") + workflow_id = workflow["id"] + if not isinstance(workflow_id, str) or not re.match( + r"^[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$", workflow_id + ): + raise PresetValidationError( + f"Invalid workflow id '{workflow_id}': " + "must be lowercase alphanumeric with hyphens" + ) + file_path = workflow["file"] + if not isinstance(file_path, str): + raise PresetValidationError( + f"Invalid 
workflow file for '{workflow_id}': expected a string" + ) + normalized = os.path.normpath(file_path) + if os.path.isabs(normalized) or normalized.startswith(".."): + raise PresetValidationError( + f"Invalid workflow file path '{file_path}': " + "must be a relative path within the preset directory" + ) + @property def id(self) -> str: """Get preset ID.""" @@ -295,6 +327,11 @@ def templates(self) -> List[Dict[str, Any]]: """Get list of provided templates.""" return self.data["provides"]["templates"] + @property + def workflows(self) -> List[Dict[str, Any]]: + """Get list of provided workflows.""" + return self.data.get("provides", {}).get("workflows", []) + @property def tags(self) -> List[str]: """Get preset tags.""" @@ -1482,6 +1519,99 @@ def _unregister_skills(self, skill_names: List[str], preset_dir: Path) -> None: # No core or extension template — remove the skill entirely shutil.rmtree(skill_subdir) + def _register_preset_workflows( + self, + manifest: PresetManifest, + preset_dir: Path, + ) -> List[str]: + """Install workflow definitions provided by a preset.""" + if not manifest.workflows: + return [] + + from .workflows.catalog import WorkflowRegistry + from .workflows.engine import WorkflowDefinition, validate_workflow + + registry = WorkflowRegistry(self.project_root) + registered: List[str] = [] + workflows_root = self.project_root / ".specify" / "workflows" + preset_root = preset_dir.resolve() + + for wf_info in manifest.workflows: + workflow_id = wf_info["id"] + file_rel = wf_info["file"] + source_path = (preset_root / file_rel).resolve() + try: + source_path.relative_to(preset_root) + except ValueError as exc: + raise PresetValidationError( + f"Workflow '{workflow_id}' file escapes preset directory" + ) from exc + if not source_path.is_file(): + raise PresetValidationError( + f"Workflow '{workflow_id}' file not found: {file_rel}" + ) + + definition = WorkflowDefinition.from_yaml(source_path) + errors = validate_workflow(definition) + if errors: + raise 
PresetValidationError( + f"Workflow '{workflow_id}' is invalid:\n- " + + "\n- ".join(errors) + ) + if definition.id != workflow_id: + raise PresetValidationError( + f"Workflow manifest id '{workflow_id}' does not match " + f"workflow.yml id '{definition.id}'" + ) + + existing = registry.get(workflow_id) + if existing and existing.get("source") != f"preset:{manifest.id}": + raise PresetValidationError( + f"Workflow '{workflow_id}' is already installed from " + f"{existing.get('source', 'another source')}" + ) + + dest_dir = workflows_root / workflow_id + dest_dir.mkdir(parents=True, exist_ok=True) + shutil.copy2(source_path, dest_dir / "workflow.yml") + registry.add( + workflow_id, + { + "name": definition.name, + "version": definition.version, + "description": definition.description, + "source": f"preset:{manifest.id}", + "preset_id": manifest.id, + }, + ) + registered.append(workflow_id) + + return registered + + def _unregister_preset_workflows( + self, + preset_id: str, + workflow_ids: List[str], + ) -> None: + """Remove preset-owned workflow definitions from the project.""" + if not workflow_ids: + return + + from .workflows.catalog import WorkflowRegistry + + registry = WorkflowRegistry(self.project_root) + workflows_root = self.project_root / ".specify" / "workflows" + source_tag = f"preset:{preset_id}" + + for workflow_id in workflow_ids: + metadata = registry.get(workflow_id) + if not metadata or metadata.get("source") != source_tag: + continue + workflow_dir = workflows_root / workflow_id + if workflow_dir.exists(): + shutil.rmtree(workflow_dir) + registry.remove(workflow_id) + def install_from_directory( self, source_dir: Path, @@ -1537,7 +1667,13 @@ def install_from_directory( registered_commands: Dict[str, List[str]] = {} registered_skills: List[str] = [] + registered_workflows: List[str] = [] try: + registered_workflows = self._register_preset_workflows(manifest, dest_dir) + self.registry.update(manifest.id, { + "registered_workflows": 
registered_workflows, + }) + # Register command overrides with AI agents and persist the result # immediately so cleanup can recover even if installation stops # before later phases complete. @@ -1563,6 +1699,8 @@ def install_from_directory( self._unregister_commands(registered_commands) if registered_skills: self._unregister_skills(registered_skills, dest_dir) + if registered_workflows: + self._unregister_preset_workflows(manifest.id, registered_workflows) try: if dest_dir.exists(): shutil.rmtree(dest_dir) @@ -1673,6 +1811,11 @@ def remove(self, pack_id: str) -> bool: # Restore original skills when preset is removed registered_skills = metadata.get("registered_skills", []) if metadata else [] registered_commands = metadata.get("registered_commands", {}) if metadata else {} + raw_workflows = metadata.get("registered_workflows", []) if metadata else [] + if isinstance(raw_workflows, list): + registered_workflows = [w for w in raw_workflows if isinstance(w, str)] + else: + registered_workflows = [] pack_dir = self.presets_dir / pack_id # Collect ALL command names before filtering for reconciliation, @@ -1713,6 +1856,8 @@ def remove(self, pack_id: str) -> bool: if registered_commands: self._unregister_commands(registered_commands) + self._unregister_preset_workflows(pack_id, registered_workflows) + if pack_dir.exists(): shutil.rmtree(pack_dir) diff --git a/src/specify_cli/shared_infra.py b/src/specify_cli/shared_infra.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/__init__.py b/src/specify_cli/workflows/__init__.py index 6366bf2659..13782f620b 100644 --- a/src/specify_cli/workflows/__init__.py +++ b/src/specify_cli/workflows/__init__.py @@ -50,7 +50,6 @@ def _register_builtin_steps() -> None: from .steps.if_then import IfThenStep from .steps.prompt import PromptStep from .steps.shell import ShellStep - from .steps.speckit_task_shards import SpeckitTaskShardsStep from .steps.switch import SwitchStep from .steps.while_loop import WhileStep @@ 
-62,7 +61,6 @@ def _register_builtin_steps() -> None: _register_step(IfThenStep()) _register_step(PromptStep()) _register_step(ShellStep()) - _register_step(SpeckitTaskShardsStep()) _register_step(SwitchStep()) _register_step(WhileStep()) diff --git a/src/specify_cli/workflows/base.py b/src/specify_cli/workflows/base.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/catalog.py b/src/specify_cli/workflows/catalog.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/engine.py b/src/specify_cli/workflows/engine.py index d6a73bbeb0..06b3fe0f71 100644 --- a/src/specify_cli/workflows/engine.py +++ b/src/specify_cli/workflows/engine.py @@ -13,6 +13,8 @@ import json import re import uuid +import copy +from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import datetime, timezone from pathlib import Path from typing import Any @@ -662,37 +664,42 @@ def _execute_steps( items = result.output.get("items", []) template = result.output.get("step_template", {}) if template and items: - fan_out_results = [] - for item_idx, item_val in enumerate(result.output["items"]): - context.item = item_val - # Per-item ID: parentId:templateId:index - item_step = dict(template) - base_id = item_step.get("id", "item") - item_step["id"] = f"{step_id}:{base_id}:{item_idx}" - self._execute_steps( - [item_step], context, state, registry, - step_offset=-1, - ) - # Collect per-item result for fan-in - item_result = context.steps.get(item_step["id"], {}) - fan_out_results.append(item_result.get("output", {})) - if state.status in ( - RunStatus.PAUSED, - RunStatus.FAILED, - RunStatus.ABORTED, - ): - break - context.item = None + fan_out_results, fan_out_step_results = self._execute_fan_out( + step_id, + template, + items, + int(result.output.get("max_concurrency", 1) or 1), + context, + registry, + ) # Preserve original output and add collected results fan_out_output = dict(result.output) fan_out_output["results"] = fan_out_results + 
fan_out_output["failed_count"] = sum( + 1 + for _item_step_id, item_result in fan_out_step_results + if item_result.get("status") == StepStatus.FAILED.value + ) context.steps[step_id]["output"] = fan_out_output state.step_results[step_id]["output"] = fan_out_output - if state.status in ( - RunStatus.PAUSED, - RunStatus.FAILED, - RunStatus.ABORTED, - ): + + for item_step_id, item_step_result in fan_out_step_results: + context.steps[item_step_id] = item_step_result + state.step_results[item_step_id] = item_step_result + + if fan_out_output["failed_count"]: + state.status = RunStatus.FAILED + state.append_log( + { + "event": "step_failed", + "step_id": step_id, + "error": ( + f"{fan_out_output['failed_count']} fan-out " + "item(s) failed." + ), + } + ) + state.save() return else: # Empty items or no template — normalize output @@ -700,6 +707,76 @@ def _execute_steps( context.steps[step_id]["output"] = result.output state.step_results[step_id]["output"] = result.output + def _execute_fan_out( + self, + parent_step_id: str, + template: dict[str, Any], + items: list[Any], + max_concurrency: int, + context: StepContext, + registry: dict[str, Any], + ) -> tuple[list[dict[str, Any]], list[tuple[str, dict[str, Any]]]]: + """Execute a fan-out nested step template with bounded concurrency.""" + max_workers = max(1, min(max_concurrency, len(items))) + + def run_item(item_idx: int, item_val: Any) -> tuple[int, str, dict[str, Any]]: + item_step = copy.deepcopy(template) + base_id = item_step.get("id", "item") + item_step["id"] = f"{parent_step_id}:{base_id}:{item_idx}" + item_context = StepContext( + inputs=copy.deepcopy(context.inputs), + steps=copy.deepcopy(context.steps), + item=copy.deepcopy(item_val), + fan_in=copy.deepcopy(context.fan_in), + default_integration=context.default_integration, + default_model=context.default_model, + default_options=copy.deepcopy(context.default_options), + project_root=context.project_root, + run_id=context.run_id, + ) + item_state = RunState( 
+ run_id=f"{context.run_id or 'run'}-{parent_step_id}-{item_idx}", + workflow_id=f"{parent_step_id}:fan-out", + project_root=self.project_root, + ) + item_state.status = RunStatus.RUNNING + self._execute_steps( + [item_step], + item_context, + item_state, + registry, + step_offset=-1, + ) + item_result = item_context.steps.get(item_step["id"], {}) + return item_idx, item_step["id"], item_result + + ordered: list[tuple[str, dict[str, Any]] | None] = [None] * len(items) + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = { + executor.submit(run_item, item_idx, item_val): item_idx + for item_idx, item_val in enumerate(items) + } + for future in as_completed(futures): + item_idx = futures[future] + try: + item_idx, item_step_id, item_result = future.result() + except Exception as exc: + base_id = template.get("id", "item") + item_step_id = f"{parent_step_id}:{base_id}:{item_idx}" + item_result = { + "integration": context.default_integration, + "model": context.default_model, + "options": copy.deepcopy(context.default_options), + "input": {"item": copy.deepcopy(items[item_idx])}, + "output": {"error": str(exc)}, + "status": StepStatus.FAILED.value, + } + ordered[item_idx] = (item_step_id, item_result) + + step_results = [item for item in ordered if item is not None] + outputs = [item_result.get("output", {}) for _step_id, item_result in step_results] + return outputs, step_results + def _resolve_inputs( self, definition: WorkflowDefinition, diff --git a/src/specify_cli/workflows/expressions.py b/src/specify_cli/workflows/expressions.py index eb39a31e79..a4c4d534b8 100644 --- a/src/specify_cli/workflows/expressions.py +++ b/src/specify_cli/workflows/expressions.py @@ -102,6 +102,8 @@ def _build_namespace(context: Any) -> dict[str, Any]: ns["item"] = context.item if hasattr(context, "fan_in"): ns["fan_in"] = context.fan_in or {} + if hasattr(context, "run_id"): + ns["run_id"] = context.run_id return ns diff --git 
a/src/specify_cli/workflows/steps/__init__.py b/src/specify_cli/workflows/steps/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/steps/command/__init__.py b/src/specify_cli/workflows/steps/command/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/steps/do_while/__init__.py b/src/specify_cli/workflows/steps/do_while/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/steps/fan_in/__init__.py b/src/specify_cli/workflows/steps/fan_in/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/steps/fan_out/__init__.py b/src/specify_cli/workflows/steps/fan_out/__init__.py index c2fff1face..3a96a86689 100644 --- a/src/specify_cli/workflows/steps/fan_out/__init__.py +++ b/src/specify_cli/workflows/steps/fan_out/__init__.py @@ -12,9 +12,8 @@ class FanOutStep(StepBase): """Dispatch a step template for each item in a collection. The engine executes the nested ``step:`` template once per item, - setting ``context.item`` for each iteration. Execution is - currently sequential; ``max_concurrency`` is accepted but not - enforced. + setting ``context.item`` for each iteration. ``max_concurrency`` is + evaluated here and enforced by the workflow engine. 
""" type_key = "fan-out" @@ -26,6 +25,14 @@ def execute(self, config: dict[str, Any], context: StepContext) -> StepResult: items = [] max_concurrency = config.get("max_concurrency", 1) + if isinstance(max_concurrency, str) and "{{" in max_concurrency: + max_concurrency = evaluate_expression(max_concurrency, context) + try: + max_concurrency = int(max_concurrency) + except (TypeError, ValueError): + max_concurrency = 1 + if max_concurrency < 1: + max_concurrency = 1 step_template = config.get("step", {}) return StepResult( diff --git a/src/specify_cli/workflows/steps/gate/__init__.py b/src/specify_cli/workflows/steps/gate/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/steps/if_then/__init__.py b/src/specify_cli/workflows/steps/if_then/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/steps/prompt/__init__.py b/src/specify_cli/workflows/steps/prompt/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/steps/shell/__init__.py b/src/specify_cli/workflows/steps/shell/__init__.py index 73ac99530a..4ae35260eb 100644 --- a/src/specify_cli/workflows/steps/shell/__init__.py +++ b/src/specify_cli/workflows/steps/shell/__init__.py @@ -3,6 +3,7 @@ from __future__ import annotations import subprocess +import json from typing import Any from specify_cli.workflows.base import StepBase, StepContext, StepResult, StepStatus @@ -49,6 +50,18 @@ def execute(self, config: dict[str, Any], context: StepContext) -> StepResult: error=f"Shell command exited with code {proc.returncode}.", output=output, ) + if config.get("json_output") or config.get("output_format") == "json": + try: + parsed = json.loads(proc.stdout or "{}") + except json.JSONDecodeError as exc: + return StepResult( + status=StepStatus.FAILED, + error=f"Shell command did not produce valid JSON: {exc}", + output=output, + ) + output["json"] = parsed + if isinstance(parsed, dict): + output.update(parsed) return StepResult( 
status=StepStatus.COMPLETED, output=output, diff --git a/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py b/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py deleted file mode 100644 index fd50458044..0000000000 --- a/src/specify_cli/workflows/steps/speckit_task_shards/__init__.py +++ /dev/null @@ -1,436 +0,0 @@ -"""Spec Kit task shard step. - -Builds conservative implementation handoff shards from the active feature's -``tasks.md`` so a workflow can fan out into repeated ``speckit.implement`` calls. -""" - -from __future__ import annotations - -import json -import os -import re -import subprocess -from dataclasses import dataclass -from pathlib import Path, PurePosixPath -from typing import Any - -from specify_cli.workflows.base import StepBase, StepContext, StepResult, StepStatus -from specify_cli.workflows.expressions import evaluate_expression - - -_TASK_RE = re.compile(r"^\s*-\s+\[[ xX]\]\s+(?P<id>[A-Za-z]+\d{3,})\b(?P<body>.*)$") -_HEADING_RE = re.compile(r"^\s{0,3}#{2,6}\s+(?P<title>.+?)\s*$") -_BACKTICK_RE = re.compile(r"`([^`]+)`") -_PATH_TOKEN_RE = re.compile( - r"(?<![\w./-])([A-Za-z0-9_.-]+(?:/[A-Za-z0-9_.-]+)+|[A-Za-z0-9_.-]+\.[A-Za-z0-9_.-]+)(?![\w./-])" -) - - -@dataclass -class ParsedTask: - task_id: str - text: str - phase: str - parallel: bool - paths: list[str] - - -@dataclass -class TaskShard: - shard_id: str - tasks: list[ParsedTask] - - @property - def task_ids(self) -> list[str]: - return [task.task_id for task in self.tasks] - - @property - def paths(self) -> list[str]: - seen: dict[str, None] = {} - for task in self.tasks: - for path in task.paths: - seen.setdefault(path, None) - return list(seen) - - -class SpeckitTaskShardsStep(StepBase): - """Generate handoff files from the active feature's ``tasks.md``.""" - - type_key = "speckit-task-shards" - - def execute(self, config: dict[str, Any], context: StepContext) -> StepResult: - input_data = config.get("input", {}) - resolved_input: dict[str, Any] = {} - for 
key, value in input_data.items(): - resolved_input[key] = evaluate_expression(value, context) - - args = str(resolved_input.get("args", "") or "") - try: - max_shards = int(resolved_input.get("max_shards", 8) or 8) - except (TypeError, ValueError): - return self._failed("max_shards must be a positive integer.", resolved_input) - if max_shards < 1: - return self._failed("max_shards must be a positive integer.", resolved_input) - - project_root = Path(context.project_root or ".").resolve() - try: - feature_dir = self._resolve_feature_dir(project_root) - self._require_feature_files(feature_dir) - tasks = self._parse_tasks(feature_dir / "tasks.md") - shards = self._build_shards(tasks, max_shards) - items = self._write_handoffs( - project_root, - feature_dir, - shards, - args, - context.run_id or "manual", - ) - except ValueError as exc: - return self._failed(str(exc), resolved_input) - - return StepResult( - status=StepStatus.COMPLETED, - output={ - "input": resolved_input, - "feature_dir": str(feature_dir), - "tasks_path": str(feature_dir / "tasks.md"), - "item_count": len(items), - "items": items, - }, - ) - - def validate(self, config: dict[str, Any]) -> list[str]: - errors = super().validate(config) - input_data = config.get("input", {}) - if input_data is not None and not isinstance(input_data, dict): - errors.append( - f"speckit-task-shards step {config.get('id', '?')!r}: 'input' must be a mapping." 
- ) - return errors - - @staticmethod - def _failed(error: str, input_data: dict[str, Any]) -> StepResult: - return StepResult( - status=StepStatus.FAILED, - error=error, - output={"input": input_data, "error": error, "items": []}, - ) - - @classmethod - def _resolve_feature_dir(cls, project_root: Path) -> Path: - feature_json = project_root / ".specify" / "feature.json" - if feature_json.is_file(): - try: - raw = json.loads(feature_json.read_text(encoding="utf-8")) - except (json.JSONDecodeError, OSError) as exc: - raise ValueError(f"Failed to parse .specify/feature.json: {exc}") from exc - feature_value = raw.get("feature_directory") if isinstance(raw, dict) else None - if feature_value: - return cls._normalize_feature_dir(project_root, str(feature_value)) - - env_feature = os.environ.get("SPECIFY_FEATURE_DIRECTORY", "").strip() - if env_feature: - return cls._normalize_feature_dir(project_root, env_feature) - - branch = cls._current_branch(project_root) - if not branch: - raise ValueError( - "Unable to resolve active feature: no .specify/feature.json, " - "SPECIFY_FEATURE_DIRECTORY, or git branch is available." 
- ) - return cls._find_feature_dir_by_prefix(project_root, branch) - - @staticmethod - def _normalize_feature_dir(project_root: Path, value: str) -> Path: - path = Path(value) - if not path.is_absolute(): - path = project_root / path - return path.resolve() - - @staticmethod - def _current_branch(project_root: Path) -> str | None: - try: - proc = subprocess.run( - ["git", "rev-parse", "--abbrev-ref", "HEAD"], - cwd=project_root, - capture_output=True, - text=True, - timeout=5, - ) - except (OSError, subprocess.TimeoutExpired): - return None - if proc.returncode != 0: - return None - branch = proc.stdout.strip() - if branch == "HEAD": - return None - if "/" in branch: - branch = branch.rsplit("/", 1)[1] - return branch or None - - @classmethod - def _find_feature_dir_by_prefix(cls, project_root: Path, branch: str) -> Path: - specs_dir = project_root / "specs" - prefix = "" - timestamp = re.match(r"^(\d{8}-\d{6})-", branch) - sequential = re.match(r"^(\d{3,})-", branch) - if timestamp: - prefix = timestamp.group(1) - elif sequential: - prefix = sequential.group(1) - else: - return (specs_dir / branch).resolve() - - matches = sorted(path for path in specs_dir.glob(f"{prefix}-*") if path.is_dir()) - if not matches: - return (specs_dir / branch).resolve() - if len(matches) > 1: - names = ", ".join(path.name for path in matches) - raise ValueError( - f"Multiple spec directories found with prefix {prefix!r}: {names}." 
- ) - return matches[0].resolve() - - @staticmethod - def _require_feature_files(feature_dir: Path) -> None: - if not feature_dir.is_dir(): - raise ValueError(f"Feature directory not found: {feature_dir}") - missing = [ - name - for name in ("spec.md", "plan.md", "tasks.md") - if not (feature_dir / name).is_file() - ] - if missing: - raise ValueError( - f"Feature directory {feature_dir} is missing required file(s): " - + ", ".join(missing) - ) - - @classmethod - def _parse_tasks(cls, tasks_path: Path) -> list[ParsedTask]: - current_phase = "Tasks" - tasks: list[ParsedTask] = [] - for line in tasks_path.read_text(encoding="utf-8").splitlines(): - heading = _HEADING_RE.match(line) - if heading: - current_phase = heading.group("title").strip() - continue - - match = _TASK_RE.match(line) - if not match: - continue - - task_id = match.group("id") - text = line.strip() - body = match.group("body") - parallel = "[P]" in body - paths = cls._extract_paths(body) - if parallel and not paths: - raise ValueError( - f"Parallel task {task_id} must declare at least one explicit path." 
- ) - tasks.append( - ParsedTask( - task_id=task_id, - text=text, - phase=current_phase, - parallel=parallel, - paths=paths, - ) - ) - - if not tasks: - raise ValueError(f"No implementation tasks found in {tasks_path}.") - cls._validate_parallel_conflicts(tasks) - return tasks - - @classmethod - def _extract_paths(cls, text: str) -> list[str]: - candidates: list[str] = [] - for raw in _BACKTICK_RE.findall(text): - candidates.extend(raw.split()) - candidates.extend(match.group(1) for match in _PATH_TOKEN_RE.finditer(text)) - - paths: dict[str, None] = {} - for candidate in candidates: - normalized = cls._normalize_task_path(candidate) - if normalized: - paths.setdefault(normalized, None) - return list(paths) - - @staticmethod - def _normalize_task_path(raw: str) -> str | None: - value = raw.strip().strip(".,;:()[]{}") - if not value or value.startswith(("http://", "https://")): - return None - value = value.replace("\\", "/") - if value in {".", ".."} or "/../" in f"/{value}/": - return None - if value.startswith("/"): - value = value.lstrip("/") - if not ("/" in value or "." in PurePosixPath(value).name): - return None - return str(PurePosixPath(value)) - - @classmethod - def _validate_parallel_conflicts(cls, tasks: list[ParsedTask]) -> None: - by_phase: dict[str, list[ParsedTask]] = {} - for task in tasks: - if task.parallel: - by_phase.setdefault(task.phase, []).append(task) - - for phase, phase_tasks in by_phase.items(): - for idx, left in enumerate(phase_tasks): - for right in phase_tasks[idx + 1 :]: - overlap = cls._overlap(left.paths, right.paths) - if overlap: - raise ValueError( - f"Parallel tasks {left.task_id} and {right.task_id} in " - f"{phase!r} write overlapping path {overlap!r}." 
- ) - - @classmethod - def _build_shards(cls, tasks: list[ParsedTask], max_shards: int) -> list[TaskShard]: - groups: list[list[ParsedTask]] = [] - current: list[ParsedTask] = [] - - for task in tasks: - if task.parallel: - if current: - groups.append(current) - current = [] - groups.append([task]) - else: - current.append(task) - if current: - groups.append(current) - - while len(groups) > max_shards: - merge_index = cls._find_merge_candidate(groups) - if merge_index is None: - raise ValueError( - f"Unable to cap handoff shards at {max_shards} without merging " - "groups that declare overlapping write paths." - ) - groups[merge_index] = groups[merge_index] + groups[merge_index + 1] - del groups[merge_index + 1] - - width = max(2, len(str(len(groups)))) - return [ - TaskShard(f"shard-{idx + 1:0{width}d}", group) - for idx, group in enumerate(groups) - ] - - @classmethod - def _find_merge_candidate(cls, groups: list[list[ParsedTask]]) -> int | None: - for idx in range(len(groups) - 1): - left_paths = cls._group_paths(groups[idx]) - right_paths = cls._group_paths(groups[idx + 1]) - if not cls._overlap(left_paths, right_paths): - return idx - return None - - @staticmethod - def _group_paths(tasks: list[ParsedTask]) -> list[str]: - paths: dict[str, None] = {} - for task in tasks: - for path in task.paths: - paths.setdefault(path, None) - return list(paths) - - @staticmethod - def _overlap(left_paths: list[str], right_paths: list[str]) -> str | None: - for left in left_paths: - left_parts = PurePosixPath(left).parts - for right in right_paths: - right_parts = PurePosixPath(right).parts - if left == right: - return left - min_len = min(len(left_parts), len(right_parts)) - if left_parts[:min_len] == right_parts[:min_len]: - return left if len(left_parts) <= len(right_parts) else right - return None - - @classmethod - def _write_handoffs( - cls, - project_root: Path, - feature_dir: Path, - shards: list[TaskShard], - original_args: str, - run_id: str, - ) -> list[dict[str, 
Any]]: - handoff_dir = feature_dir / "handoffs" / "implement" / run_id - handoff_dir.mkdir(parents=True, exist_ok=True) - - items: list[dict[str, Any]] = [] - for shard in shards: - handoff_path = handoff_dir / f"{shard.shard_id}.json" - payload = cls._handoff_payload(project_root, feature_dir, shard) - handoff_path.write_text( - json.dumps(payload, indent=2, sort_keys=True) + "\n", - encoding="utf-8", - ) - shard_args = cls._handoff_args(original_args, handoff_path, shard) - items.append( - { - "shard_id": shard.shard_id, - "handoff_path": str(handoff_path), - "task_ids": shard.task_ids, - "args": shard_args, - } - ) - return items - - @classmethod - def _handoff_payload( - cls, - project_root: Path, - feature_dir: Path, - shard: TaskShard, - ) -> dict[str, Any]: - feature_ref = cls._display_path(project_root, feature_dir) - context_refs = [ - cls._display_path(project_root, feature_dir / name) - for name in ("spec.md", "plan.md", "tasks.md") - ] - for optional_name in ("data-model.md", "research.md", "quickstart.md"): - optional_path = feature_dir / optional_name - if optional_path.is_file(): - context_refs.append(cls._display_path(project_root, optional_path)) - contracts_dir = feature_dir / "contracts" - if contracts_dir.is_dir(): - context_refs.append(cls._display_path(project_root, contracts_dir)) - - return { - "contract_type": "speckit.implement.handoff.v1", - "shard_id": shard.shard_id, - "feature_dir": feature_ref, - "task_ids": shard.task_ids, - "task_text": [task.text for task in shard.tasks], - "allowed_read_paths": list(dict.fromkeys([feature_ref, *context_refs])), - "allowed_write_paths": shard.paths, - "required_context_refs": context_refs, - "validation_commands": [], - "forbidden_actions": [ - "Do not modify tasks outside task_ids.", - "Do not modify paths outside allowed_write_paths unless the task explicitly requires a generated adjacent file.", - "Do not revert user changes or unrelated work.", - ], - } - - @staticmethod - def 
_handoff_args(original_args: str, handoff_path: Path, shard: TaskShard) -> str: - prefix = f"{original_args.strip()} " if original_args.strip() else "" - task_ids = ", ".join(shard.task_ids) - return ( - f"{prefix}Use handoff JSON {handoff_path}. " - f"Execute only task IDs: {task_ids}." - ) - - @staticmethod - def _display_path(project_root: Path, path: Path) -> str: - try: - return str(path.resolve().relative_to(project_root)) - except ValueError: - return str(path) diff --git a/src/specify_cli/workflows/steps/switch/__init__.py b/src/specify_cli/workflows/steps/switch/__init__.py old mode 100644 new mode 100755 diff --git a/src/specify_cli/workflows/steps/while_loop/__init__.py b/src/specify_cli/workflows/steps/while_loop/__init__.py old mode 100644 new mode 100755 diff --git a/templates/agent-governance-template.md b/templates/agent-governance-template.md old mode 100644 new mode 100755 diff --git a/templates/architecture-development-template.md b/templates/architecture-development-template.md old mode 100644 new mode 100755 diff --git a/templates/architecture-logical-template.md b/templates/architecture-logical-template.md old mode 100644 new mode 100755 diff --git a/templates/architecture-physical-template.md b/templates/architecture-physical-template.md old mode 100644 new mode 100755 diff --git a/templates/architecture-process-template.md b/templates/architecture-process-template.md old mode 100644 new mode 100755 diff --git a/templates/architecture-scenario-template.md b/templates/architecture-scenario-template.md old mode 100644 new mode 100755 diff --git a/templates/architecture-template.md b/templates/architecture-template.md old mode 100644 new mode 100755 diff --git a/templates/checklist-template.md b/templates/checklist-template.md old mode 100644 new mode 100755 diff --git a/templates/commands/agent.md b/templates/commands/agent.md old mode 100644 new mode 100755 diff --git a/templates/commands/analyze.md b/templates/commands/analyze.md old mode 
100644 new mode 100755 diff --git a/templates/commands/arch.md b/templates/commands/arch.md old mode 100644 new mode 100755 diff --git a/templates/commands/checklist.md b/templates/commands/checklist.md old mode 100644 new mode 100755 diff --git a/templates/commands/clarify.md b/templates/commands/clarify.md old mode 100644 new mode 100755 diff --git a/templates/commands/constitution.md b/templates/commands/constitution.md old mode 100644 new mode 100755 diff --git a/templates/commands/governance.md b/templates/commands/governance.md old mode 100644 new mode 100755 diff --git a/templates/commands/plan.md b/templates/commands/plan.md old mode 100644 new mode 100755 diff --git a/templates/commands/specify.md b/templates/commands/specify.md old mode 100644 new mode 100755 diff --git a/templates/commands/tasks.md b/templates/commands/tasks.md old mode 100644 new mode 100755 diff --git a/templates/commands/taskstoissues.md b/templates/commands/taskstoissues.md old mode 100644 new mode 100755 diff --git a/templates/constitution-template.md b/templates/constitution-template.md old mode 100644 new mode 100755 diff --git a/templates/plan-template.md b/templates/plan-template.md old mode 100644 new mode 100755 diff --git a/templates/spec-template.md b/templates/spec-template.md old mode 100644 new mode 100755 diff --git a/templates/tasks-template.md b/templates/tasks-template.md old mode 100644 new mode 100755 diff --git a/templates/vscode-settings.json b/templates/vscode-settings.json old mode 100644 new mode 100755 diff --git a/tests/__init__.py b/tests/__init__.py old mode 100644 new mode 100755 diff --git a/tests/auth_helpers.py b/tests/auth_helpers.py old mode 100644 new mode 100755 diff --git a/tests/conftest.py b/tests/conftest.py index 0e568a1e2a..30ae32cc59 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,6 +7,7 @@ import sys import pytest +import yaml _ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-?]*[ -/]*[@-~]") @@ -81,3 +82,70 @@ def 
_isolate_auth_config(monkeypatch): # Also clear the per-process cache so tests that unset _config_override # won't see a previously cached real-file result. monkeypatch.setattr(_auth_http, "_config_cache", None) + + +@pytest.fixture(autouse=True) +def _default_implement_preset_source(tmp_path, monkeypatch): + """Use a local external implement preset for tests that run init.""" + if os.environ.get("SPECKIT_IMPLEMENT_PRESET_SOURCE"): + return + + preset_dir = tmp_path / "default-implement-preset" + (preset_dir / "commands").mkdir(parents=True, exist_ok=True) + (preset_dir / "workflows" / "speckit-orchestrated-implement").mkdir( + parents=True, + exist_ok=True, + ) + (preset_dir / "preset.yml").write_text( + yaml.safe_dump( + { + "schema_version": "1.0", + "preset": { + "id": "implement", + "name": "Orchestrated Implement", + "version": "1.0.0", + "description": "Test orchestrated implement preset", + }, + "requires": {"speckit_version": ">=0.1.0"}, + "provides": { + "templates": [ + { + "type": "command", + "name": "speckit.implement", + "file": "commands/speckit.implement.md", + "strategy": "replace", + "replaces": "speckit.implement", + } + ], + "workflows": [ + { + "id": "speckit-orchestrated-implement", + "file": "workflows/speckit-orchestrated-implement/workflow.yml", + } + ], + }, + }, + sort_keys=False, + ), + encoding="utf-8", + ) + (preset_dir / "commands" / "speckit.implement.md").write_text( + "---\ndescription: Test orchestrated implement\n---\n\n" + "Run workflow or handoff shard. 
Use handoff JSON when provided.\n", + encoding="utf-8", + ) + (preset_dir / "workflows" / "speckit-orchestrated-implement" / "workflow.yml").write_text( + """ +schema_version: "1.0" +workflow: + id: "speckit-orchestrated-implement" + name: "Orchestrated Implementation" + version: "1.0.0" +steps: + - id: one + type: shell + run: "echo ok" +""", + encoding="utf-8", + ) + monkeypatch.setenv("SPECKIT_IMPLEMENT_PRESET_SOURCE", str(preset_dir)) diff --git a/tests/extensions/__init__.py b/tests/extensions/__init__.py old mode 100644 new mode 100755 diff --git a/tests/extensions/git/__init__.py b/tests/extensions/git/__init__.py old mode 100644 new mode 100755 diff --git a/tests/extensions/git/test_git_extension.py b/tests/extensions/git/test_git_extension.py old mode 100644 new mode 100755 diff --git a/tests/hooks/.specify/extensions.yml b/tests/hooks/.specify/extensions.yml old mode 100644 new mode 100755 diff --git a/tests/hooks/TESTING.md b/tests/hooks/TESTING.md old mode 100644 new mode 100755 diff --git a/tests/hooks/plan.md b/tests/hooks/plan.md old mode 100644 new mode 100755 diff --git a/tests/hooks/spec.md b/tests/hooks/spec.md old mode 100644 new mode 100755 diff --git a/tests/hooks/tasks.md b/tests/hooks/tasks.md old mode 100644 new mode 100755 diff --git a/tests/integrations/__init__.py b/tests/integrations/__init__.py old mode 100644 new mode 100755 diff --git a/tests/integrations/conftest.py b/tests/integrations/conftest.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_base.py b/tests/integrations/test_base.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_cli.py b/tests/integrations/test_cli.py index d36366e6e2..14e2ba6ad7 100644 --- a/tests/integrations/test_cli.py +++ b/tests/integrations/test_cli.py @@ -23,6 +23,7 @@ def _normalize_cli_output(output: str) -> str: class TestInitIntegrationFlag: + def test_integration_and_ai_mutually_exclusive(self, tmp_path): from typer.testing import CliRunner from specify_cli 
import app @@ -791,8 +792,9 @@ def test_no_git_skips_extension(self, tmp_path): workflows_dir = project / ".specify" / "workflows" assert (workflows_dir / "speckit" / "workflow.yml").exists() assert ( - workflows_dir / "speckit-implement" / "workflow.yml" + workflows_dir / "speckit-orchestrated-implement" / "workflow.yml" ).exists() + assert not (project / ".specify" / "extensions" / "orchestrated").exists() def test_no_git_emits_deprecation_warning(self, tmp_path): """Using --no-git emits a visible deprecation warning.""" @@ -902,13 +904,15 @@ def test_default_implement_preset_updates_skill_command(self, tmp_path): ) assert skill.exists(), "implement command skill was not registered" content = skill.read_text(encoding="utf-8") - assert "specify workflow run speckit-implement" in content + assert "Run workflow or handoff shard" in content + assert "Use handoff JSON" in content generated_implement_skills = sorted( path.name for path in (project / ".claude" / "skills").iterdir() - if path.name.endswith("implement") + if path.name == "speckit-implement" ) assert generated_implement_skills == ["speckit-implement"] + assert not (project / ".claude" / "skills" / "speckit-orchestrated-implement").exists() def test_default_implement_preset_updates_markdown_command(self, tmp_path): """Default implement preset updates markdown command integrations.""" @@ -933,12 +937,12 @@ def test_default_implement_preset_updates_markdown_command(self, tmp_path): command = project / ".windsurf" / "workflows" / "speckit.implement.md" assert command.exists(), "implement command was not registered" content = command.read_text(encoding="utf-8") - assert "specify workflow run speckit-implement" in content - assert "-i integration=windsurf" in content + assert "Run workflow or handoff shard" in content + assert "Use handoff JSON" in content generated_implement_commands = sorted( path.name for path in (project / ".windsurf" / "workflows").iterdir() - if path.name.endswith("implement.md") + if 
path.name == "speckit.implement.md" ) assert generated_implement_commands == ["speckit.implement.md"] @@ -968,6 +972,7 @@ def test_explicit_preset_wins_over_default_implement_preset(self, tmp_path): content = skill.read_text(encoding="utf-8") assert "## Outline" in content assert "specify workflow run speckit-implement" not in content + assert "orchestrated.implement" not in content class TestSharedInfraCommandRefs: diff --git a/tests/integrations/test_integration_agy.py b/tests/integrations/test_integration_agy.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_amp.py b/tests/integrations/test_integration_amp.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_auggie.py b/tests/integrations/test_integration_auggie.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_base_markdown.py b/tests/integrations/test_integration_base_markdown.py index e34c1c7d66..65cc18fe60 100644 --- a/tests/integrations/test_integration_base_markdown.py +++ b/tests/integrations/test_integration_base_markdown.py @@ -297,6 +297,11 @@ def _expected_files(self, script_variant: str) -> list[str]: files.append(".specify/memory/agent-governance.md") files.append(".specify/memory/constitution.md") # Bundled workflow + files.append(".specify/presets/.registry") + files.append(".specify/presets/implement/commands/speckit.implement.md") + files.append(".specify/presets/implement/preset.yml") + files.append(".specify/presets/implement/workflows/speckit-orchestrated-implement/workflow.yml") + files.append(".specify/workflows/speckit-orchestrated-implement/workflow.yml") files.append(".specify/workflows/speckit/workflow.yml") files.append(".specify/workflows/workflow-registry.json") diff --git a/tests/integrations/test_integration_base_skills.py b/tests/integrations/test_integration_base_skills.py index 65b51757fb..85d4dcdf5e 100644 --- a/tests/integrations/test_integration_base_skills.py +++ 
b/tests/integrations/test_integration_base_skills.py @@ -432,6 +432,11 @@ def _expected_files(self, script_variant: str) -> list[str]: ] # Bundled workflow files += [ + ".specify/presets/.registry", + ".specify/presets/implement/commands/speckit.implement.md", + ".specify/presets/implement/preset.yml", + ".specify/presets/implement/workflows/speckit-orchestrated-implement/workflow.yml", + ".specify/workflows/speckit-orchestrated-implement/workflow.yml", ".specify/workflows/speckit/workflow.yml", ".specify/workflows/workflow-registry.json", ] diff --git a/tests/integrations/test_integration_base_toml.py b/tests/integrations/test_integration_base_toml.py index 94f56c125e..628f0cbdc3 100644 --- a/tests/integrations/test_integration_base_toml.py +++ b/tests/integrations/test_integration_base_toml.py @@ -554,6 +554,11 @@ def _expected_files(self, script_variant: str) -> list[str]: files.append(".specify/memory/agent-governance.md") files.append(".specify/memory/constitution.md") # Bundled workflow + files.append(".specify/presets/.registry") + files.append(".specify/presets/implement/commands/speckit.implement.md") + files.append(".specify/presets/implement/preset.yml") + files.append(".specify/presets/implement/workflows/speckit-orchestrated-implement/workflow.yml") + files.append(".specify/workflows/speckit-orchestrated-implement/workflow.yml") files.append(".specify/workflows/speckit/workflow.yml") files.append(".specify/workflows/workflow-registry.json") diff --git a/tests/integrations/test_integration_base_yaml.py b/tests/integrations/test_integration_base_yaml.py index d2d81247a8..a2249514d0 100644 --- a/tests/integrations/test_integration_base_yaml.py +++ b/tests/integrations/test_integration_base_yaml.py @@ -433,6 +433,11 @@ def _expected_files(self, script_variant: str) -> list[str]: files.append(".specify/memory/agent-governance.md") files.append(".specify/memory/constitution.md") # Bundled workflow + files.append(".specify/presets/.registry") + 
files.append(".specify/presets/implement/commands/speckit.implement.md") + files.append(".specify/presets/implement/preset.yml") + files.append(".specify/presets/implement/workflows/speckit-orchestrated-implement/workflow.yml") + files.append(".specify/workflows/speckit-orchestrated-implement/workflow.yml") files.append(".specify/workflows/speckit/workflow.yml") files.append(".specify/workflows/workflow-registry.json") diff --git a/tests/integrations/test_integration_bob.py b/tests/integrations/test_integration_bob.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_catalog.py b/tests/integrations/test_integration_catalog.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_claude.py b/tests/integrations/test_integration_claude.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_codebuddy.py b/tests/integrations/test_integration_codebuddy.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_codex.py b/tests/integrations/test_integration_codex.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_copilot.py b/tests/integrations/test_integration_copilot.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_cursor_agent.py b/tests/integrations/test_integration_cursor_agent.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_devin.py b/tests/integrations/test_integration_devin.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_forge.py b/tests/integrations/test_integration_forge.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_gemini.py b/tests/integrations/test_integration_gemini.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_generic.py b/tests/integrations/test_integration_generic.py old mode 100644 new mode 100755 diff --git 
a/tests/integrations/test_integration_goose.py b/tests/integrations/test_integration_goose.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_iflow.py b/tests/integrations/test_integration_iflow.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_junie.py b/tests/integrations/test_integration_junie.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_kilocode.py b/tests/integrations/test_integration_kilocode.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_kimi.py b/tests/integrations/test_integration_kimi.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_kiro_cli.py b/tests/integrations/test_integration_kiro_cli.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_lingma.py b/tests/integrations/test_integration_lingma.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_opencode.py b/tests/integrations/test_integration_opencode.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_pi.py b/tests/integrations/test_integration_pi.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_qodercli.py b/tests/integrations/test_integration_qodercli.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_qwen.py b/tests/integrations/test_integration_qwen.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_roo.py b/tests/integrations/test_integration_roo.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_shai.py b/tests/integrations/test_integration_shai.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_state.py b/tests/integrations/test_integration_state.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_subcommand.py 
b/tests/integrations/test_integration_subcommand.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_tabnine.py b/tests/integrations/test_integration_tabnine.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_trae.py b/tests/integrations/test_integration_trae.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_vibe.py b/tests/integrations/test_integration_vibe.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_integration_windsurf.py b/tests/integrations/test_integration_windsurf.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_manifest.py b/tests/integrations/test_manifest.py old mode 100644 new mode 100755 diff --git a/tests/integrations/test_registry.py b/tests/integrations/test_registry.py old mode 100644 new mode 100755 diff --git a/tests/test_agent_config_consistency.py b/tests/test_agent_config_consistency.py old mode 100644 new mode 100755 diff --git a/tests/test_agent_projection.py b/tests/test_agent_projection.py old mode 100644 new mode 100755 diff --git a/tests/test_arch_templates.py b/tests/test_arch_templates.py old mode 100644 new mode 100755 diff --git a/tests/test_authentication.py b/tests/test_authentication.py old mode 100644 new mode 100755 diff --git a/tests/test_branch_numbering.py b/tests/test_branch_numbering.py old mode 100644 new mode 100755 diff --git a/tests/test_check_tool.py b/tests/test_check_tool.py old mode 100644 new mode 100755 diff --git a/tests/test_cli_version.py b/tests/test_cli_version.py old mode 100644 new mode 100755 diff --git a/tests/test_extension_skills.py b/tests/test_extension_skills.py old mode 100644 new mode 100755 diff --git a/tests/test_extensions.py b/tests/test_extensions.py index 1434ba309d..63a3038e89 100644 --- a/tests/test_extensions.py +++ b/tests/test_extensions.py @@ -381,7 +381,10 @@ def test_no_commands_no_hooks(self, temp_dir, valid_manifest_data): with 
open(manifest_path, 'w') as f: yaml.dump(valid_manifest_data, f) - with pytest.raises(ValidationError, match="must provide at least one command or hook"): + with pytest.raises( + ValidationError, + match="must provide at least one command, workflow, or hook", + ): ExtensionManifest(manifest_path) def test_hooks_only_extension(self, temp_dir, valid_manifest_data): @@ -851,6 +854,74 @@ def test_install_accepts_free_form_alias(self, temp_dir, project_dir): assert manifest.commands[0]["aliases"] == ["speckit.shortcut"] assert manifest.warnings == [] + def test_install_and_remove_extension_workflow(self, temp_dir, project_dir): + """Extensions can provide workflow definitions tracked by workflow registry.""" + import yaml + from specify_cli.workflows.catalog import WorkflowRegistry + + ext_dir = temp_dir / "workflow-ext" + ext_dir.mkdir() + (ext_dir / "commands").mkdir() + (ext_dir / "workflows" / "demo-flow").mkdir(parents=True) + + manifest_data = { + "schema_version": "1.0", + "extension": { + "id": "workflow-ext", + "name": "Workflow Extension", + "version": "1.0.0", + "description": "Test", + }, + "requires": {"speckit_version": ">=0.1.0"}, + "provides": { + "commands": [ + { + "name": "speckit.workflow-ext.run", + "file": "commands/run.md", + } + ], + "workflows": [ + { + "id": "demo-flow", + "file": "workflows/demo-flow/workflow.yml", + } + ], + }, + } + + (ext_dir / "extension.yml").write_text(yaml.dump(manifest_data), encoding="utf-8") + (ext_dir / "commands" / "run.md").write_text( + "---\ndescription: Test\n---\n\nBody", + encoding="utf-8", + ) + (ext_dir / "workflows" / "demo-flow" / "workflow.yml").write_text( + """ +schema_version: "1.0" +workflow: + id: "demo-flow" + name: "Demo Flow" + version: "1.0.0" +steps: + - id: one + type: shell + run: "echo ok" +""", + encoding="utf-8", + ) + + manager = ExtensionManager(project_dir) + manager.install_from_directory(ext_dir, "0.1.0", register_commands=False) + + workflow_path = project_dir / ".specify" / 
"workflows" / "demo-flow" / "workflow.yml" + assert workflow_path.exists() + metadata = WorkflowRegistry(project_dir).get("demo-flow") + assert metadata["source"] == "extension:workflow-ext" + ext_metadata = manager.registry.get("workflow-ext") + assert ext_metadata["registered_workflows"] == ["demo-flow"] + + assert manager.remove("workflow-ext") + assert not workflow_path.exists() + assert WorkflowRegistry(project_dir).get("demo-flow") is None def test_install_rejects_namespace_squatting(self, temp_dir, project_dir): """Install should reject commands and aliases outside the extension namespace.""" import yaml diff --git a/tests/test_github_http.py b/tests/test_github_http.py old mode 100644 new mode 100755 diff --git a/tests/test_merge.py b/tests/test_merge.py old mode 100644 new mode 100755 diff --git a/tests/test_presets.py b/tests/test_presets.py index 6001bd7763..b233ade59f 100644 --- a/tests/test_presets.py +++ b/tests/test_presets.py @@ -1922,7 +1922,6 @@ def test_url_cache_expired(self, project_dir): SELF_TEST_PRESET_DIR = Path(__file__).parent.parent / "presets" / "self-test" -IMPLEMENT_PRESET_DIR = Path(__file__).parent.parent / "presets" / "implement" SELF_TEST_WRAP_WARNING = ( r"Cannot compose command 'speckit\.wrap-test': no base layer\. " r"Stale command files may remain\." 
@@ -1937,47 +1936,130 @@ def test_url_cache_expired(self, project_dir): ] -class TestImplementPreset: - """Tests for the bundled implement preset.""" +def create_implement_preset_fixture(root: Path) -> Path: + """Create a minimal external implement preset with a workflow.""" + preset_dir = root / "implement-preset" + (preset_dir / "commands").mkdir(parents=True) + (preset_dir / "workflows" / "speckit-orchestrated-implement").mkdir( + parents=True + ) + (preset_dir / "commands" / "speckit.implement.md").write_text( + "---\ndescription: Orchestrated implement\n---\n\n" + "specify workflow run speckit-orchestrated-implement\n" + "Use handoff JSON when provided.\n", + encoding="utf-8", + ) + (preset_dir / "workflows" / "speckit-orchestrated-implement" / "workflow.yml").write_text( + """ +schema_version: "1.0" +workflow: + id: "speckit-orchestrated-implement" + name: "Orchestrated Implementation" + version: "1.0.0" +steps: + - id: one + type: shell + run: "echo ok" +""", + encoding="utf-8", + ) + (preset_dir / "preset.yml").write_text( + yaml.safe_dump( + { + "schema_version": "1.0", + "preset": { + "id": "implement", + "name": "Orchestrated Implement", + "version": "1.0.0", + "description": "External implement preset", + }, + "requires": {"speckit_version": ">=0.1.0"}, + "provides": { + "templates": [ + { + "type": "command", + "name": "speckit.implement", + "file": "commands/speckit.implement.md", + "strategy": "replace", + "replaces": "speckit.implement", + } + ], + "workflows": [ + { + "id": "speckit-orchestrated-implement", + "file": "workflows/speckit-orchestrated-implement/workflow.yml", + } + ], + }, + }, + sort_keys=False, + ), + encoding="utf-8", + ) + return preset_dir - def test_manifest_valid(self): - manifest = PresetManifest(IMPLEMENT_PRESET_DIR / "preset.yml") - assert manifest.id == "implement" - assert manifest.name == "Implement Workflow" - assert [t["name"] for t in manifest.templates] == ["speckit.implement"] - assert 
manifest.templates[0]["replaces"] == "speckit.implement" +class TestImplementPresetWiring: + """Tests for external implement preset support in core.""" + + def test_core_has_no_bundled_implement_proxy(self): + assert not (Path(__file__).parent.parent / "presets" / "implement").exists() - def test_command_invokes_workflow(self): - command_path = IMPLEMENT_PRESET_DIR / "commands" / "speckit.implement.md" + def test_manifest_accepts_workflows(self, temp_dir): + preset_dir = create_implement_preset_fixture(temp_dir) + manifest = PresetManifest(preset_dir / "preset.yml") - content = command_path.read_text(encoding="utf-8") - assert "specify workflow run speckit-implement" in content - assert "-i integration=__AGENT__" in content - assert '-i args="$ARGUMENTS"' in content - assert "If the user input references a handoff JSON file" in content - assert "Do not run `specify workflow run` while executing a handoff JSON" in content + assert manifest.id == "implement" + assert manifest.templates[0]["name"] == "speckit.implement" + assert manifest.templates[0]["strategy"] == "replace" + assert manifest.workflows[0]["id"] == "speckit-orchestrated-implement" - def test_catalog_contains_implement_preset(self): + def test_catalog_does_not_bundle_implement_preset(self): catalog_path = Path(__file__).parent.parent / "presets" / "catalog.json" data = json.loads(catalog_path.read_text(encoding="utf-8")) - preset = data["presets"]["implement"] - assert preset["bundled"] is True - assert preset["provides"]["commands"] == 1 + assert "implement" not in data["presets"] - def test_install_resolves_implement_command(self, project_dir): + def test_install_resolves_command_and_registers_workflow(self, project_dir, temp_dir): + from specify_cli.workflows.catalog import WorkflowRegistry + + preset_dir = create_implement_preset_fixture(temp_dir) manager = PresetManager(project_dir) - manager.install_from_directory(IMPLEMENT_PRESET_DIR, "0.8.9.dev0") + manager.install_from_directory(preset_dir, 
"0.8.9.dev0") resolver = PresetResolver(project_dir) result = resolver.resolve("speckit.implement", "command") assert result is not None assert "presets/implement" in result.as_posix() - assert "specify workflow run speckit-implement" in result.read_text( - encoding="utf-8" - ) + assert "Use handoff JSON" in result.read_text(encoding="utf-8") + + metadata = WorkflowRegistry(project_dir).get("speckit-orchestrated-implement") + assert metadata is not None + assert metadata["source"] == "preset:implement" + assert ( + project_dir + / ".specify" + / "workflows" + / "speckit-orchestrated-implement" + / "workflow.yml" + ).exists() + + def test_remove_unregisters_preset_workflow(self, project_dir, temp_dir): + from specify_cli.workflows.catalog import WorkflowRegistry + + preset_dir = create_implement_preset_fixture(temp_dir) + manager = PresetManager(project_dir) + manager.install_from_directory(preset_dir, "0.8.9.dev0") + + assert manager.remove("implement") is True + assert WorkflowRegistry(project_dir).get("speckit-orchestrated-implement") is None + assert not ( + project_dir + / ".specify" + / "workflows" + / "speckit-orchestrated-implement" + ).exists() def install_self_test_preset(manager: PresetManager, speckit_version: str = "0.1.5") -> PresetManifest: diff --git a/tests/test_registrar_path_traversal.py b/tests/test_registrar_path_traversal.py old mode 100644 new mode 100755 diff --git a/tests/test_setup_arch.py b/tests/test_setup_arch.py old mode 100644 new mode 100755 diff --git a/tests/test_setup_plan_feature_json.py b/tests/test_setup_plan_feature_json.py old mode 100644 new mode 100755 diff --git a/tests/test_setup_tasks.py b/tests/test_setup_tasks.py old mode 100644 new mode 100755 diff --git a/tests/test_timestamp_branches.py b/tests/test_timestamp_branches.py old mode 100644 new mode 100755 diff --git a/tests/test_upgrade.py b/tests/test_upgrade.py old mode 100644 new mode 100755 diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 
221bd8e038..d0f87d4066 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -1089,121 +1089,6 @@ def test_validate_step_not_mapping(self): assert any("'step' must be a mapping" in e for e in errors) -class TestSpeckitTaskShardsStep: - """Test the Spec Kit task sharding step.""" - - def _write_feature(self, project_dir: Path, tasks: str, feature_name: str = "001-demo") -> Path: - feature_dir = project_dir / "specs" / feature_name - feature_dir.mkdir(parents=True, exist_ok=True) - (project_dir / ".specify" / "feature.json").write_text( - json.dumps({"feature_directory": f"specs/{feature_name}"}), - encoding="utf-8", - ) - (feature_dir / "spec.md").write_text("# Spec\n", encoding="utf-8") - (feature_dir / "plan.md").write_text("# Plan\n", encoding="utf-8") - (feature_dir / "tasks.md").write_text(tasks, encoding="utf-8") - return feature_dir - - def test_execute_generates_handoff_items(self, project_dir): - from specify_cli.workflows.base import StepContext, StepStatus - from specify_cli.workflows.steps.speckit_task_shards import SpeckitTaskShardsStep - - feature_dir = self._write_feature( - project_dir, - """ -# Tasks - -## Phase 1: Setup -- [ ] T001 Create project scaffolding in `pyproject.toml` -- [ ] T002 [P] Add model in `src/models/user.py` -- [ ] T003 [P] Add tests in `tests/test_user.py` -- [ ] T004 Wire service in `src/services/user_service.py` -""", - ) - - step = SpeckitTaskShardsStep() - result = step.execute( - {"id": "build-shards", "input": {"args": "--fast", "max_shards": 4}}, - StepContext(project_root=str(project_dir), run_id="testrun"), - ) - - assert result.status == StepStatus.COMPLETED - assert result.output["feature_dir"] == str(feature_dir.resolve()) - assert result.output["item_count"] == 4 - first = result.output["items"][0] - assert first["shard_id"] == "shard-01" - assert first["task_ids"] == ["T001"] - assert "--fast Use handoff JSON" in first["args"] - handoff = Path(first["handoff_path"]) - assert handoff.exists() - data 
= json.loads(handoff.read_text(encoding="utf-8")) - assert data["contract_type"] == "speckit.implement.handoff.v1" - assert data["task_ids"] == ["T001"] - assert "specs/001-demo/spec.md" in data["required_context_refs"] - - def test_missing_tasks_fails(self, project_dir): - from specify_cli.workflows.base import StepContext, StepStatus - from specify_cli.workflows.steps.speckit_task_shards import SpeckitTaskShardsStep - - feature_dir = project_dir / "specs" / "001-demo" - feature_dir.mkdir(parents=True) - (project_dir / ".specify" / "feature.json").write_text( - json.dumps({"feature_directory": "specs/001-demo"}), - encoding="utf-8", - ) - (feature_dir / "spec.md").write_text("# Spec\n", encoding="utf-8") - (feature_dir / "plan.md").write_text("# Plan\n", encoding="utf-8") - - result = SpeckitTaskShardsStep().execute( - {"id": "build-shards", "input": {}}, - StepContext(project_root=str(project_dir), run_id="testrun"), - ) - - assert result.status == StepStatus.FAILED - assert "tasks.md" in result.error - - def test_parallel_task_without_path_fails(self, project_dir): - from specify_cli.workflows.base import StepContext, StepStatus - from specify_cli.workflows.steps.speckit_task_shards import SpeckitTaskShardsStep - - self._write_feature( - project_dir, - """ -# Tasks -- [ ] T001 [P] Add isolated unit tests -""", - ) - - result = SpeckitTaskShardsStep().execute( - {"id": "build-shards", "input": {}}, - StepContext(project_root=str(project_dir), run_id="testrun"), - ) - - assert result.status == StepStatus.FAILED - assert "must declare at least one explicit path" in result.error - - def test_parallel_write_conflict_fails(self, project_dir): - from specify_cli.workflows.base import StepContext, StepStatus - from specify_cli.workflows.steps.speckit_task_shards import SpeckitTaskShardsStep - - self._write_feature( - project_dir, - """ -# Tasks -- [ ] T001 [P] Add repository in `src/app.py` -- [ ] T002 [P] Add service in `src/app.py` -""", - ) - - result = 
SpeckitTaskShardsStep().execute( - {"id": "build-shards", "input": {}}, - StepContext(project_root=str(project_dir), run_id="testrun"), - ) - - assert result.status == StepStatus.FAILED - assert "write overlapping path" in result.error - - class TestFanInStep: """Test the fan-in step type.""" @@ -1398,20 +1283,6 @@ def test_invalid_step_type(self): errors = validate_workflow(definition) assert any("invalid type" in e.lower() for e in errors) - def test_bundled_implement_workflow_validates(self): - from specify_cli.workflows.engine import WorkflowDefinition, validate_workflow - - workflow_path = ( - Path(__file__).resolve().parent.parent - / "workflows" - / "speckit-implement" - / "workflow.yml" - ) - definition = WorkflowDefinition.from_yaml(workflow_path) - errors = validate_workflow(definition) - assert errors == [] - assert definition.id == "speckit-implement" - def test_nested_step_validation(self): from specify_cli.workflows.engine import WorkflowDefinition, validate_workflow @@ -1878,15 +1749,12 @@ def test_get_catalog_configs(self, project_dir): assert configs[0]["name"] == "default" assert isinstance(configs[0]["install_allowed"], bool) - def test_bundled_catalog_contains_implement_workflow(self): + def test_bundled_catalog_does_not_contain_implement_workflow(self): catalog_path = Path(__file__).resolve().parent.parent / "workflows" / "catalog.json" data = json.loads(catalog_path.read_text(encoding="utf-8")) - workflow = data["workflows"]["speckit-implement"] - assert workflow["name"] == "Implementation" - assert workflow["url"].endswith( - "/workflows/speckit-implement/workflow.yml" - ) + assert "speckit" in data["workflows"] + assert "speckit-implement" not in data["workflows"] # ===== Integration Test ===== @@ -1942,8 +1810,8 @@ def test_full_sequential_workflow(self, project_dir): assert "echo-partial" not in state.step_results assert "plan" in state.step_results - def test_implement_workflow_fans_out_to_implement(self, project_dir, monkeypatch): - 
"""The bundled workflow dispatches speckit.implement once per shard.""" + def test_orchestrated_workflow_fans_out_to_implement_handoff_mode(self, project_dir, monkeypatch): + """The preset workflow dispatches speckit.implement once per handoff shard.""" from specify_cli.workflows.base import RunStatus from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition from specify_cli.workflows.steps.command import CommandStep @@ -1983,13 +1851,60 @@ def fake_dispatch(command, integration_key, model, args, context): staticmethod(fake_dispatch), ) - workflow_path = ( - Path(__file__).resolve().parent.parent - / "workflows" - / "speckit-implement" - / "workflow.yml" + script_dir = project_dir / ".specify" / "presets" / "implement" / "scripts" + script_dir.mkdir(parents=True, exist_ok=True) + script_path = script_dir / "build-task-shards.py" + script_path.write_text( + """#!/usr/bin/env python3 +import json +from pathlib import Path +Path("specs/001-demo/handoffs/implement/test").mkdir(parents=True, exist_ok=True) +print(json.dumps({"items": [ + {"args": "--fast Use handoff JSON specs/001-demo/handoffs/implement/test/shard-01.json"}, + {"args": "--fast Use handoff JSON specs/001-demo/handoffs/implement/test/shard-02.json"} +], "item_count": 2})) +""", + encoding="utf-8", ) - definition = WorkflowDefinition.from_yaml(workflow_path) + script_path.chmod(0o755) + + definition = WorkflowDefinition.from_string(""" +schema_version: "1.0" +workflow: + id: "speckit-orchestrated-implement" + name: "Orchestrated Implementation" + version: "1.0.0" +inputs: + integration: + type: string + default: "copilot" + args: + type: string + default: "" + max_shards: + type: number + default: 8 +steps: + - id: build-shards + type: shell + run: >- + python3 .specify/presets/implement/scripts/build-task-shards.py + --project-root . 
+ --run-id "{{ run_id }}" + --max-shards "{{ inputs.max_shards }}" + --args="{{ inputs.args }}" + json_output: true + - id: implement-shards + type: fan-out + items: "{{ steps.build-shards.output.items }}" + max_concurrency: "{{ inputs.max_shards }}" + step: + id: implement + command: speckit.implement + integration: "{{ inputs.integration }}" + input: + args: "{{ item.args }}" +""") state = WorkflowEngine(project_dir).execute( definition, {"integration": "claude", "args": "--fast", "max_shards": "4"}, @@ -2004,6 +1919,67 @@ def fake_dispatch(command, integration_key, model, args, context): assert all("Use handoff JSON" in call["args"] for call in calls) assert all("--fast" in call["args"] for call in calls) + def test_fan_out_enforces_max_concurrency(self, project_dir, monkeypatch): + """Fan-out executes nested command steps with a bounded concurrency limit.""" + import threading + import time + + from specify_cli.workflows.base import RunStatus + from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition + from specify_cli.workflows.steps.command import CommandStep + + lock = threading.Lock() + active = 0 + max_active = 0 + calls = [] + + def fake_dispatch(command, integration_key, model, args, context): + nonlocal active, max_active + with lock: + active += 1 + max_active = max(max_active, active) + time.sleep(0.05) + with lock: + active -= 1 + calls.append(args) + return {"exit_code": 0, "stdout": "", "stderr": ""} + + monkeypatch.setattr( + CommandStep, + "_try_dispatch", + staticmethod(fake_dispatch), + ) + + definition = WorkflowDefinition.from_string(""" +schema_version: "1.0" +workflow: + id: "fanout-concurrency" + name: "Fanout Concurrency" + version: "1.0.0" +inputs: {} +steps: + - id: seed + type: shell + run: >- + python3 -c 'import json; print(json.dumps({"items": [{"args": "a"}, {"args": "b"}, {"args": "c"}, {"args": "d"}]}))' + json_output: true + - id: parallel + type: fan-out + items: "{{ steps.seed.output.items }}" + 
max_concurrency: 2 + step: + id: impl + command: speckit.implement + integration: claude + input: + args: "{{ item.args }}" +""") + state = WorkflowEngine(project_dir).execute(definition) + + assert state.status == RunStatus.COMPLETED + assert sorted(calls) == ["a", "b", "c", "d"] + assert max_active == 2 + def test_switch_workflow(self, project_dir): """Test switch step type in a workflow.""" from specify_cli.workflows.engine import WorkflowEngine, WorkflowDefinition diff --git a/workflows/ARCHITECTURE.md b/workflows/ARCHITECTURE.md old mode 100644 new mode 100755 diff --git a/workflows/PUBLISHING.md b/workflows/PUBLISHING.md old mode 100644 new mode 100755 diff --git a/workflows/README.md b/workflows/README.md old mode 100644 new mode 100755 diff --git a/workflows/catalog.community.json b/workflows/catalog.community.json old mode 100644 new mode 100755 diff --git a/workflows/catalog.json b/workflows/catalog.json index 250c638df3..967120afb0 100644 --- a/workflows/catalog.json +++ b/workflows/catalog.json @@ -11,15 +11,6 @@ "version": "1.0.0", "url": "https://raw.githubusercontent.com/github/spec-kit/main/workflows/speckit/workflow.yml", "tags": ["sdd", "full-cycle"] - }, - "speckit-implement": { - "id": "speckit-implement", - "name": "Implementation", - "description": "Builds task handoff shards, then runs speckit.implement once per shard", - "author": "GitHub", - "version": "1.0.0", - "url": "https://raw.githubusercontent.com/github/spec-kit/main/workflows/speckit-implement/workflow.yml", - "tags": ["sdd", "implementation"] } } } diff --git a/workflows/speckit-implement/workflow.yml b/workflows/speckit-implement/workflow.yml deleted file mode 100644 index 83c6cc58d5..0000000000 --- a/workflows/speckit-implement/workflow.yml +++ /dev/null @@ -1,42 +0,0 @@ -schema_version: "1.0" -workflow: - id: "speckit-implement" - name: "Implementation" - version: "1.0.0" - author: "GitHub" - description: "Builds task handoff shards, then runs speckit.implement once per shard" 
- -requires: - speckit_version: ">=0.8.9" - -inputs: - integration: - type: string - default: "copilot" - prompt: "Integration to use for shard execution" - args: - type: string - default: "" - prompt: "Additional implementation arguments" - max_shards: - type: number - default: 8 - prompt: "Maximum number of handoff shards" - -steps: - - id: build-shards - type: speckit-task-shards - input: - args: "{{ inputs.args }}" - max_shards: "{{ inputs.max_shards }}" - - - id: implement-shards - type: fan-out - items: "{{ steps.build-shards.output.items }}" - max_concurrency: "{{ inputs.max_shards }}" - step: - id: implement - command: speckit.implement - integration: "{{ inputs.integration }}" - input: - args: "{{ item.args }}" diff --git a/workflows/speckit/workflow.yml b/workflows/speckit/workflow.yml old mode 100644 new mode 100755