diff --git a/.gitignore b/.gitignore index 0e581c4f..fc17a9ad 100644 --- a/.gitignore +++ b/.gitignore @@ -127,6 +127,8 @@ RustAgentDemo/*.a concat_output.txt CLAUDE.md .cargo/ +my-artifact.txt +my-artifact.txt.auths.json # Git worktrees .worktrees/ diff --git a/crates/auths-cli/src/commands/id/claim.rs b/crates/auths-cli/src/commands/id/claim.rs index 42248b5e..025e17ca 100644 --- a/crates/auths-cli/src/commands/id/claim.rs +++ b/crates/auths-cli/src/commands/id/claim.rs @@ -63,12 +63,16 @@ pub fn handle_claim( }; let on_device_code = |code: &auths_core::ports::platform::DeviceCodeResponse| { + println!(); + println!(" Copy this code: {}", style(&code.user_code).bold().cyan()); + println!(" At: {}", style(&code.verification_uri).cyan()); println!(); println!( - " Enter this code: {}", - style(&code.user_code).bold().cyan() + " {}", + style("Press 'enter' to open GitHub after copying the code above").blue() ); - println!(" At: {}", style(&code.verification_uri).cyan()); + // Wait for the user to press Enter before opening the browser. + let _ = std::io::stdin().read_line(&mut String::new()); println!(); if let Err(e) = open::that(&code.verification_uri) { println!( diff --git a/crates/auths-cli/src/commands/org.rs b/crates/auths-cli/src/commands/org.rs index fc616bc3..8749750f 100644 --- a/crates/auths-cli/src/commands/org.rs +++ b/crates/auths-cli/src/commands/org.rs @@ -173,6 +173,17 @@ pub enum OrgSubcommand { #[arg(long, action = ArgAction::SetTrue)] include_revoked: bool, }, + + /// Join an organization using an invite code + Join { + /// Invite code provided by an org admin (e.g. C23BD59F) + #[arg(long)] + code: String, + + /// Registry URL to contact + #[arg(long, default_value = "https://auths-registry.fly.dev")] + registry: String, + }, } /// Handles `org` commands for issuing or revoking member authorizations. 
@@ -943,7 +954,120 @@ pub fn handle_org( Ok(()) } + + OrgSubcommand::Join { code, registry } => handle_join(&code, &registry), + } +} + +/// Handles the `org join` subcommand by looking up and accepting an invite +/// via the registry HTTP API. +fn handle_join(code: &str, registry: &str) -> Result<()> { + let rt = tokio::runtime::Runtime::new()?; + let client = reqwest::Client::new(); + let base = registry.trim_end_matches('/'); + + // 1. Look up invite details. + let details_url = format!("{}/v1/invites/{}", base, code); + let details_resp = rt + .block_on(async { client.get(&details_url).send().await }) + .context("failed to contact registry")?; + + if details_resp.status() == reqwest::StatusCode::NOT_FOUND { + anyhow::bail!( + "Invite code '{}' not found. Check the code and try again.", + code + ); + } + if !details_resp.status().is_success() { + let status = details_resp.status(); + let body = rt.block_on(details_resp.text()).unwrap_or_default(); + anyhow::bail!("Failed to look up invite ({}): {}", status, body); } + + let details: serde_json::Value = rt + .block_on(details_resp.json()) + .context("invalid response from registry")?; + + let org_name = details["display_name"].as_str().unwrap_or("Unknown"); + let role = details["role"].as_str().unwrap_or("member"); + let status = details["status"].as_str().unwrap_or("unknown"); + + if status == "expired" { + anyhow::bail!("This invite has expired. Ask the org admin for a new one."); + } + if status == "accepted" { + anyhow::bail!("This invite has already been accepted."); + } + + println!("Organization: {}", org_name); + println!("Role: {}", role); + println!("Status: {}", status); + println!(); + + // 2. Accept the invite. This requires auth — build a signed bearer token. 
+ let repo_path = layout::resolve_repo_path(None)?; + let identity_storage = RegistryIdentityStorage::new(repo_path.clone()); + let managed_identity = identity_storage + .load_identity() + .context("no local identity found — run `auths init` first")?; + let did = managed_identity.controller_did.to_string(); + + let key_storage = get_platform_keychain()?; + let primary_alias = KeyAlias::new_unchecked("main"); + let (_stored_did, _role, encrypted_key) = key_storage + .load_key(&primary_alias) + .context("failed to load signing key — run `auths init` first")?; + + let passphrase = + rpassword::prompt_password("Enter passphrase: ").context("failed to read passphrase")?; + let pkcs8_bytes = decrypt_keypair(&encrypted_key, &passphrase).context("wrong passphrase")?; + + let pkcs8 = auths_crypto::Pkcs8Der::new(&pkcs8_bytes[..]); + let seed = auths_core::crypto::ssh::extract_seed_from_pkcs8(&pkcs8) + .context("failed to extract seed from key material")?; + + // Create a signed bearer payload: { did, timestamp, signature } + #[allow(clippy::disallowed_methods)] // CLI is the presentation boundary + let timestamp = Utc::now().to_rfc3339(); + let message = format!("{}\n{}", did, timestamp); + let signature = { + use ring::signature::Ed25519KeyPair; + let kp = Ed25519KeyPair::from_seed_unchecked(seed.as_bytes()) + .map_err(|e| anyhow!("invalid key: {e}"))?; + let sig = kp.sign(message.as_bytes()); + use base64::Engine; + base64::engine::general_purpose::STANDARD.encode(sig.as_ref()) + }; + + let bearer_payload = serde_json::json!({ + "did": did, + "timestamp": timestamp, + "signature": signature, + }); + let bearer_token = serde_json::to_string(&bearer_payload)?; + + let accept_url = format!("{}/v1/invites/{}/accept", base, code); + let accept_resp = rt + .block_on(async { + client + .post(&accept_url) + .header("Authorization", format!("Bearer {}", bearer_token)) + .header("Content-Type", "application/json") + .send() + .await + }) + .context("failed to contact registry")?; + 
+ if !accept_resp.status().is_success() { + let status = accept_resp.status(); + let body = rt.block_on(accept_resp.text()).unwrap_or_default(); + anyhow::bail!("Failed to accept invite ({}): {}", status, body); + } + + println!("✅ Successfully joined {} as {}", org_name, role); + println!(" Your DID: {}", did); + + Ok(()) } fn display_dry_run_revoke_member(org: &str, member: &str, invoker_did: &str) -> Result<()> { diff --git a/docs/plans/agent_launch.md b/docs/plans/agent_launch.md deleted file mode 100644 index cc871041..00000000 --- a/docs/plans/agent_launch.md +++ /dev/null @@ -1,720 +0,0 @@ -# Auths Agent Identity: Engineering Roadmap & Market Strategy - -## Executive Summary - -The agentic AI market is projected to grow from $7.55B (2025) to $199B by 2034 (CAGR 43.8%). NIST released its [NCCoE concept paper on AI agent identity](https://www.nccoe.nist.gov/projects/software-and-ai-agent-identity-and-authorization) in February 2026, signaling that agent identity is now a regulatory-grade concern. The [MCP specification](https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization) now mandates OAuth 2.1 for tool authorization. An [IETF draft](https://www.ietf.org/archive/id/draft-oauth-ai-agents-on-behalf-of-user-01.html) formalizes "on behalf of" delegation for AI agents. Auth0 launched a [Token Vault](https://auth0.com/blog/auth0-token-vault-secure-token-exchange-for-ai-agents/) specifically for AI agent credential management. HashiCorp positions [SPIFFE as the "TCP/IP of agent identity"](https://www.hashicorp.com/en/blog/spiffe-securing-the-identity-of-agentic-ai-and-non-human-actors). - -Auths has a structural advantage none of these players possess: **cryptographic delegation chains with capability narrowing from human to agent to sub-agent, verifiable offline, stored in Git**. This document maps the feedback's valuation milestones against the actual codebase state and defines the engineering work to capture this market. 
- ---- - -## Part 1: What Exists vs. What's Missing - -### Already Built (the feedback underestimates this) - -| Capability | Location | Status | -|-----------|----------|--------| -| Agent provisioning API (headless, CI-ready) | `auths-id/src/agent_identity.rs` | Full. `provision_agent_identity()` with persistent/ephemeral storage modes, `AgentProvisioningConfig`, `AgentIdentityBundle`. | -| `SignerType` enum (Human/Agent/Workload) | `auths-verifier/src/core.rs:686-698` | Full. Signed into canonical attestation data — cannot be forged post-signing. | -| Delegation chains with `delegated_by` | `auths-verifier/src/core.rs:676-678` | Full. Field included in signed envelope. | -| Chain verification with capability narrowing | `auths-verifier/src/verifier.rs:138-166` | Full. Set-intersection semantics — capabilities can only narrow, never expand. | -| Policy engine (IsAgent/IsHuman/IsWorkload predicates) | `auths-policy/src/eval.rs:380-411` | Full. Also: `MaxChainDepth`, `WorkloadIssuerIs`, `WorkloadClaimEquals`, `DelegatedBy`. | -| OIDC bridge (attestation chain -> JWT) | `auths-oidc-bridge/src/` | Full. Token exchange, JWKS endpoint, cloud provider auto-detection (AWS/GCP/Azure). | -| GitHub Actions OIDC cross-referencing | `auths-oidc-bridge/src/github_oidc.rs` | Full. Confused-deputy prevention via actor/repo validation. | -| Agent signing daemon (IPC over Unix socket) | `auths-cli/src/commands/agent.rs` | Full. start/stop/status/lock/unlock/install-service (launchd/systemd). | -| Ephemeral agent identities (InMemory mode) | `auths-id/src/agent_identity.rs:62-64` | Full. Stateless containers (Fargate, Docker). Identity dies with process. | -| MCP integration pattern | `docs/architecture/oidc-bridge.md:92-102` | Documented. Agent exchanges attestation chain for JWT, presents to MCP server as Bearer token. | -| SPIFFE comparison & positioning | `docs/architecture/oidc-bridge.md:104-117` | Documented. Auths covers delegation gap SPIFFE doesn't address. 
| - -### Not Yet Built - -| Capability | NIST Alignment | Gap Size | -|-----------|---------------|----------| -| MCP tool server with Auths-native auth | NIST Focus Area 2 (Authorization) | Medium -- need an `auths-mcp-server` crate | -| SPIFFE SVID compatibility layer | NIST scope (SPIFFE referenced) | Medium -- bidirectional translation, not replacement | -| OAuth 2.1 "on behalf of" (IETF draft) | NIST Focus Area 3 (Access Delegation) | Medium -- extend OIDC bridge with `act` claim | -| Automatic chain depth calculation | Internal gap | Small -- ~50 lines in verify.rs | -| OIDC trust registry | NIST Focus Area 2 | Medium -- covered in v2_launch.md | -| Non-repudiation audit logging | NIST Focus Area 4 (Logging) | Large -- new crate | -| Human-in-the-loop approval gates | NIST Focus Area 3 | Medium -- policy + webhook | -| SCIM provisioning API | Enterprise IAM integration | Medium -- REST handlers | -| NGAC graph-based authorization | Beyond current policy engine | Large -- research-grade | -| HSM/hardware key storage | Enterprise compliance | Large -- covered in v2_launch.md | - ---- - -## Part 2: Feedback Corrections - -The feedback structures valuation milestones ($4M -> $500M) around building capabilities that **largely already exist**. Here's what the feedback gets wrong: - -### 1. "Static Service Accounts" as the only competitor - -The landscape has shifted dramatically since the feedback was written. The real competitors in 2026 are: - -- **Auth0 Token Vault**: [Launched](https://auth0.com/blog/auth0-token-vault-secure-token-exchange-for-ai-agents/) with RFC 8693 token exchange for AI agents, 30+ pre-integrated OAuth providers, MCP support. Backed by Okta/Auth0 distribution. However: centralized, no delegation chains, no offline verification. -- **HashiCorp Vault + SPIFFE**: [Positioning SPIFFE for agentic AI](https://www.hashicorp.com/en/blog/spiffe-securing-the-identity-of-agentic-ai-and-non-human-actors). Vault Enterprise 1.21 adds SPIFFE integration. 
However: requires SPIRE server (centralized), no human->agent delegation, no capability narrowing. -- **Google A2A Protocol**: Agent-to-agent communication protocol. No identity primitive -- relies on external identity providers. -- **MCP Authorization Layer**: [MCP spec](https://stackoverflow.blog/2026/01/21/is-that-allowed-authentication-and-authorization-in-model-context-protocol) now requires OAuth 2.1, but "leaves authorization up to the implementer." This is the gap Auths fills. - -### 2. "$10M: Implement SPIFFE/SPIRE Baselines" - -Wrong framing. Auths should not implement SPIFFE -- it should **bridge to SPIFFE**. SPIFFE handles infrastructure-level workload identity (container attestation, node attestation). Auths handles the layer above: who authorized this workload, what can it do, and who is accountable. The two are complementary, not competitive. The OIDC bridge already provides the integration surface. - -### 3. "NGAC (Next Generation Access Control)" - -Premature. The existing `auths-policy` engine is already attribute-based with 30+ evaluation predicates, three-valued logic (Allow/Deny/Indeterminate), and composable expressions. NGAC's graph-based model adds complexity without clear user demand. The right move is to extend the existing engine with context-aware predicates rather than replace it with NGAC. - -### 4. "Decouple Orchestration from Authorization" - -Already done. The SDK's `AuthsContext` injects all dependencies. The `CommitSigningWorkflow` in `auths-sdk/src/workflows/signing.rs` is fully decoupled from CLI concerns. Ports (`AgentSigningPort`, `GitLogProvider`, etc.) abstract all I/O. - -### 5. "Ephemeral State Management" - -Already done. `AgentStorageMode::InMemory` creates a process-lifetime tempdir. Identity is torn down when the process exits (`std::mem::forget(tmp)` prevents premature cleanup but the OS reclaims on exit). 
- ---- - -## Part 3: Revised Epics (NIST-Aligned) - -The NIST NCCoE concept paper defines [four focus areas](https://www.nccoe.nist.gov/projects/software-and-ai-agent-identity-and-authorization). Every epic below maps to one or more: - -1. **Identification**: Distinguishing AI agents from humans, managing agent metadata -2. **Authorization**: OAuth 2.0/2.1, policy-based access control -3. **Access Delegation**: Linking user identities to agents, maintaining accountability -4. **Logging & Transparency**: Linking agent actions to non-human entities - ---- - -### Epic 1: MCP-Native Agent Authorization - -**NIST alignment**: Focus Areas 1, 2 -**Priority: P0** -**Effort: 3-4 weeks** - -MCP is becoming the universal protocol for AI agents accessing tools. The [MCP spec now classifies servers as OAuth Resource Servers](https://stackoverflow.blog/2026/01/21/is-that-allowed-authentication-and-authorization-in-model-context-protocol), but "leaves authorization up to the implementer." Auths fills this gap: MCP gets OAuth tokens, but those tokens carry cryptographic delegation provenance. - -#### 1.1 Create `auths-mcp-server` crate - -A reference MCP tool server that demonstrates Auths-backed authorization. This is the developer on-ramp -- an AI engineer drops it into their agent framework and gets cryptographic identity for free. - -**File to create:** `crates/auths-mcp-server/src/lib.rs` - -```rust -/// MCP tool server middleware that validates Auths-backed JWTs. -/// -/// Extracts the Bearer token from the Authorization header, validates it -/// against the OIDC bridge's JWKS endpoint, and checks capabilities -/// against the requested tool. -/// -/// Args: -/// * `jwks_url`: The OIDC bridge's `/.well-known/jwks.json` endpoint. -/// * `tool_capabilities`: Map of tool names to required capabilities. 
-/// -/// Usage: -/// ```ignore -/// let auth = AuthsToolAuth::new( -/// "https://oidc.example.com/.well-known/jwks.json", -/// HashMap::from([ -/// ("read_file", "fs:read"), -/// ("write_file", "fs:write"), -/// ("deploy", "deploy:staging"), -/// ]), -/// ); -/// ``` -pub struct AuthsToolAuth { - jwks_url: String, - tool_capabilities: HashMap, - jwks_cache: Arc>, -} - -impl AuthsToolAuth { - /// Validate an incoming MCP tool invocation against the agent's capabilities. - /// - /// Args: - /// * `bearer_token`: The JWT from the Authorization header. - /// * `tool_name`: The MCP tool being invoked. - pub async fn authorize_tool_call( - &self, - bearer_token: &str, - tool_name: &str, - ) -> Result { - let claims = self.validate_jwt(bearer_token).await?; - let required_cap = self.tool_capabilities.get(tool_name) - .ok_or(AuthError::UnknownTool(tool_name.to_string()))?; - - if !claims.capabilities.contains(&required_cap.to_string()) { - return Err(AuthError::InsufficientCapabilities { - required: required_cap.to_string(), - granted: claims.capabilities.clone(), - }); - } - - Ok(VerifiedAgent { - did: claims.sub, - keri_prefix: claims.keri_prefix, - signer_type: claims.signer_type(), - capabilities: claims.capabilities, - delegated_by: claims.delegated_by(), - }) - } -} -``` - -**Why this matters**: Every MCP tool server today does ad-hoc authorization (API keys, service accounts). This crate makes it one line to add cryptographic agent identity to any MCP server. The developer experience is: `cargo add auths-mcp-server`, configure tool->capability mapping, done. - -#### 1.2 Agent-side MCP client helper - -**File to create:** `crates/auths-sdk/src/workflows/mcp.rs` - -```rust -/// Acquires an OAuth Bearer token for MCP tool server access. -/// -/// Exchanges the agent's attestation chain for a scoped JWT via the OIDC bridge, -/// then attaches it to outgoing MCP requests. -/// -/// Args: -/// * `bridge_url`: The OIDC bridge endpoint. 
-/// * `chain`: The agent's attestation chain. -/// * `root_public_key`: The root identity's Ed25519 public key. -/// * `requested_capabilities`: Capabilities needed for this MCP session. -/// -/// Usage: -/// ```ignore -/// let token = acquire_mcp_token(&bridge_url, &chain, &root_pk, &["fs:read"]).await?; -/// ``` -pub async fn acquire_mcp_token( - bridge_url: &str, - chain: &[Attestation], - root_public_key: &[u8], - requested_capabilities: &[&str], -) -> Result { - // POST to bridge /token endpoint with attestation chain - // Returns JWT with scoped capabilities - todo!() // Make a full implementation -} -``` - -#### 1.3 Python SDK wrapper for AI frameworks - -Most AI agent frameworks (LangChain, AutoGen, CrewAI, Semantic Kernel) are Python. The `auths-verifier` WASM bindings or C FFI provide the bridge, but a thin Python SDK is the adoption accelerant. - -**File to create:** `sdks/python/auths_agent/auth.py` - -```python -"""Auths agent identity for Python AI frameworks. - -Usage with LangChain: - from auths_agent import AuthsAgentAuth - - auth = AuthsAgentAuth( - bridge_url="https://oidc.example.com", - attestation_chain_path="~/.auths-agent/chain.json", - ) - - # Get Bearer token for MCP tool access - token = auth.get_token(capabilities=["fs:read", "web:search"]) -""" -``` - -**Testing (E2E):** -YOU MUST ADD NEW e2e tests to /Users/bordumb/workspace/repositories/auths-base/auths/tests/e2e -```bash -# 1. Provision agent identity -auths agent provision --name "mcp-test-agent" --capabilities "fs:read,fs:write" - -# 2. Start OIDC bridge -auths-oidc-bridge --config bridge.toml & - -# 3. Start reference MCP server with Auths auth -cargo run -p auths-mcp-server -- --bridge-url http://localhost:3300 & - -# 4. Agent exchanges attestation for JWT and calls MCP tool -curl -X POST http://localhost:8080/mcp/tools/read_file \ - -H "Authorization: Bearer $(auths token exchange --capabilities fs:read)" \ - -d '{"path": "/tmp/test.txt"}' - -# 5. 
Attempt unauthorized tool -- should fail -curl -X POST http://localhost:8080/mcp/tools/deploy \ - -H "Authorization: Bearer $(auths token exchange --capabilities fs:read)" \ - -d '{"env": "production"}' -# Expected: 403 Forbidden -- missing "deploy:staging" capability -``` -YOU MUST ADD NEW e2e tests to /Users/bordumb/workspace/repositories/auths-base/auths/tests/e2e - -Lastly, review /Users/bordumb/workspace/repositories/auths-base/auths/scripts/releases/2_crates.py -And make sure to add the new auths-mcp-server crate in the proper place there - ---- - -### Epic 2: OAuth 2.1 "On Behalf Of" Delegation - -**NIST alignment**: Focus Area 3 (Access Delegation) -**Priority: P0** -**Effort: 2-3 weeks** - -The [IETF draft for OAuth 2.0 "on behalf of" for AI agents](https://www.ietf.org/archive/id/draft-oauth-ai-agents-on-behalf-of-user-01.html) formalizes the pattern Auths already implements cryptographically. The draft introduces `requested_actor` and `actor_token` parameters so authorization servers can issue tokens that explicitly represent "Agent X acting on behalf of User Y." - -Auths' attestation chains already encode this relationship (`delegated_by` + `signer_type`). The gap is surfacing it in the JWT format the IETF draft expects. - -#### 2.1 Add `act` (actor) claim to bridge-issued JWTs - -The IETF draft specifies an `act` claim in issued JWTs that identifies the actor (agent) distinct from the subject (human). Currently, the OIDC bridge issues `sub` as the root KERI DID. The `act` claim should identify the leaf agent. - -**File to modify:** `crates/auths-oidc-bridge/src/token.rs` - -```rust -/// Extended OIDC claims with RFC 8693 actor claim. -pub struct OidcClaims { - // ... existing fields ... - - /// RFC 8693 actor claim -- identifies the agent acting on behalf of the subject. - /// Present when the attestation chain has depth > 0 (delegation occurred). 
- /// - /// Usage: - /// ```ignore - /// // JWT payload: - /// // "sub": "did:keri:EHuman123...", // the human who authorized - /// // "act": { "sub": "did:keri:EAgent456..." } // the agent performing the action - /// ``` - #[serde(skip_serializing_if = "Option::is_none")] - pub act: Option, -} - -/// The actor performing actions on behalf of the subject. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ActorClaim { - /// The actor's DID (the leaf agent in the delegation chain). - pub sub: String, # BIG QUESTION: should this be typed, and not a plain String? - /// The actor's signer type. - #[serde(skip_serializing_if = "Option::is_none")] - pub signer_type: Option, -} -``` - -#### 2.2 Support RFC 8693 token exchange at the bridge - -The OIDC bridge currently accepts attestation chains at `/token`. Extend it to also accept RFC 8693 `urn:ietf:params:oauth:grant-type:token-exchange` requests, where an existing JWT can be exchanged for a narrower-scoped JWT. This enables agent->sub-agent delegation through standard OAuth flows. - -**File to modify:** `crates/auths-oidc-bridge/src/routes.rs` - -Add a new route handler for token exchange: - -```rust -/// RFC 8693 token exchange endpoint. -/// -/// Accepts a subject_token (existing JWT) and an actor_token (agent attestation), -/// and issues a new JWT with narrowed capabilities representing the actor -/// acting on behalf of the original subject. -/// -/// Args: -/// * `grant_type`: Must be `urn:ietf:params:oauth:grant-type:token-exchange` -/// * `subject_token`: The parent agent's JWT -/// * `actor_token`: The sub-agent's attestation chain (JSON) -/// * `scope`: Requested capabilities (subset of parent's) -async fn handle_token_exchange( - State(state): State, - Form(params): Form, -) -> Result, BridgeError> { - // 1. Validate the subject_token JWT - // 2. Verify the actor's attestation chain - // 3. Intersect requested scope with subject_token's capabilities - // 4. 
Issue new JWT with act claim pointing to the sub-agent - todo!() -} -``` - -**Testing:** -YOU MUST ADD NEW e2e tests to /Users/bordumb/workspace/repositories/auths-base/auths/tests/e2e -```bash -# 1. Human's agent gets initial JWT -TOKEN=$(auths token exchange \ - --bridge http://localhost:3300 \ - --capabilities "sign:commit,deploy:staging") - -# 2. Agent delegates to sub-agent via RFC 8693 token exchange -SUB_TOKEN=$(curl -X POST http://localhost:3300/token \ - -d "grant_type=urn:ietf:params:oauth:grant-type:token-exchange" \ - -d "subject_token=$TOKEN" \ - -d "actor_token=$(cat sub-agent-chain.json)" \ - -d "scope=deploy:staging") - -# 3. Verify sub-token has act claim and narrowed scope -echo $SUB_TOKEN | jwt decode -# Expected: sub=did:keri:EHuman, act.sub=did:keri:ESubAgent, capabilities=[deploy:staging] -``` -YOU MUST ADD NEW e2e tests to /Users/bordumb/workspace/repositories/auths-base/auths/tests/e2e - ---- - -### Epic 3: Non-Repudiation Audit Logging - -**NIST alignment**: Focus Area 4 (Logging & Transparency) -**Priority: P1** -**Effort: 3-4 weeks** - -The NIST concept paper's fourth focus area is "Linking specific AI agent actions to their non-human entity to enable effective visibility into system activity." Auths' Git-native storage provides a natural append-only ledger. Every agent action can be logged as a signed Git commit, creating a tamper-evident audit trail traceable to the authorizing human. - -#### 3.1 Create `auths-audit` crate - -**File to create:** `crates/auths-audit/src/lib.rs` - -```rust -/// An audit entry recording an agent's action, cryptographically bound to -/// its attestation chain and the authorizing human's identity. -/// -/// Stored as signed JSON blobs under `refs/auths/audit//`. -/// -/// Args: -/// * `agent_did`: The agent's KERI identity. -/// * `action`: What the agent did (e.g., "tool:read_file", "deploy:staging"). -/// * `target`: What was acted upon (e.g., file path, deployment ID). 
-/// * `attestation_rid`: The attestation RID that authorized this action. -/// * `delegation_chain_root`: The human DID at the root of the delegation chain. -pub struct AuditEntry { - pub timestamp: DateTime, - pub agent_did: String, # BIG QUESTION: should this be strongly typed and not just a plan String? - pub action: String, # BIG QUESTION: should this be strongly typed and not just a plan String? - pub target: String, - pub attestation_rid: String, # BIG QUESTION: should this be strongly typed and not just a plan String? - pub delegation_chain_root: String, # BIG QUESTION: should this be strongly typed and not just a plan String? - pub signer_type: SignerType, - /// Ed25519 signature over the canonical JSON of this entry. - pub signature: Vec, -} -``` - -#### 3.2 Audit middleware for MCP server - -Every tool invocation through `auths-mcp-server` automatically logs an `AuditEntry`. The audit log is a Git ref (`refs/auths/audit/`) -- tamper-evident, replicable, and queryable with standard Git tools. - -#### 3.3 CLI audit query commands - -```bash -# Show all actions by a specific agent -auths audit log --agent did:keri:EAgent456... - -# Show all actions authorized by a specific human -auths audit log --root did:keri:EHuman123... - -# Show all deploy actions in the last 24h -auths audit log --action "deploy:*" --since 24h - -# Export as JSON for SIEM integration -auths audit export --format json --since 7d > audit.json -``` - -**Testing:** -YOU MUST ADD NEW e2e tests to /Users/bordumb/workspace/repositories/auths-base/auths/tests/e2e -```bash -# 1. Agent performs MCP tool call -curl -X POST http://localhost:8080/mcp/tools/read_file \ - -H "Authorization: Bearer $TOKEN" \ - -d '{"path": "/tmp/test.txt"}' - -# 2. Verify audit entry was created -auths audit log --agent did:keri:EAgent456... --since 1m -# Expected: timestamp | agent DID | tool:read_file | /tmp/test.txt | root:did:keri:EHuman123... - -# 3. 
Verify audit entry signature -auths audit verify --entry -# Expected: "Signature valid. Authorized by: did:keri:EHuman123... via delegation chain." -``` -YOU MUST ADD NEW e2e tests to /Users/bordumb/workspace/repositories/auths-base/auths/tests/e2e - -Lastly, review /Users/bordumb/workspace/repositories/auths-base/auths/scripts/releases/2_crates.py -And make sure to add the new auths-mcp-server crate in the proper place there - ---- - -### Epic 4: SPIFFE Bridge (Not Replacement) - -**NIST alignment**: Focus Areas 1, 2 -**Priority: P1** -**Effort: 2-3 weeks** - -SPIFFE handles infrastructure-level identity ("this container is running in Kubernetes namespace X"). Auths handles authorization-level identity ("this agent is authorized by Human Y to do Z"). The bridge connects them: a SPIFFE SVID proves the runtime environment, an Auths attestation proves the authorization chain. Together they answer both "where is this running?" and "who said it could?" - -#### 4.1 SPIFFE SVID verification in the OIDC bridge - -**File to modify:** `crates/auths-oidc-bridge/src/lib.rs` - -Accept a SPIFFE SVID as an optional attestation of the runtime environment during token exchange. The bridge embeds the SPIFFE ID in the issued JWT as a `spiffe_id` claim, binding the cryptographic identity to the physical execution boundary. - -```rust -/// Optional SPIFFE workload attestation submitted alongside the attestation chain. -/// -/// When present, the bridge verifies the X.509 SVID against the SPIFFE trust bundle -/// and embeds the SPIFFE ID in the issued JWT. 
-/// -/// Usage: -/// ```ignore -/// POST /token -/// { -/// "attestation_chain": [...], -/// "root_public_key": "...", -/// "spiffe_svid": "", -/// "spiffe_trust_bundle": "" -/// } -/// ``` -pub struct SpiffeAttestation { - pub svid: Vec, - pub trust_bundle: Vec, -} -``` - -#### 4.2 SVID-to-Attestation translation - -For environments that already use SPIFFE, provide a function to bootstrap an Auths attestation chain from a verified SVID. The SVID proves runtime identity; the Auths chain adds capability scoping and human-traceable delegation. - -**Testing:** -YOU MUST ADD NEW e2e tests to /Users/bordumb/workspace/repositories/auths-base/auths/tests/e2e -```bash -# 1. In a SPIRE-managed environment, get SVID -SVID=$(spire-agent api fetch x509 -socketPath /run/spire/sockets/agent.sock) - -# 2. Exchange SVID + attestation chain for JWT -curl -X POST http://localhost:3300/token \ - -d "{ - \"attestation_chain\": $(cat chain.json), - \"root_public_key\": \"...\", - \"spiffe_svid\": \"$SVID\" - }" - -# 3. Verify JWT contains both KERI and SPIFFE identifiers -echo $TOKEN | jwt decode -# Expected: sub=did:keri:E..., spiffe_id=spiffe://cluster.local/ns/default/sa/agent -``` -YOU MUST ADD NEW e2e tests to /Users/bordumb/workspace/repositories/auths-base/auths/tests/e2e - ---- - -### Epic 5: Human-in-the-Loop Approval Gates - -**NIST alignment**: Focus Area 3 (Access Delegation) -**Priority: P2** -**Effort: 2-3 weeks** - -The NIST concept paper specifically calls out "human-in-the-loop approval to autonomous action" as a spectrum that identity systems must support. The policy engine already has the predicates; what's missing is the execution mechanism that pauses an agent when it hits a policy boundary. - -#### 5.1 Policy-driven approval gates - -Extend the policy engine with a new decision outcome: `RequiresApproval`. When an action exceeds a risk threshold (e.g., deploying to production, deleting data), the policy engine returns this outcome instead of Allow/Deny. 
The MCP server or SDK pauses execution and emits an approval request. - -**File to modify:** `crates/auths-policy/src/decision.rs` - -```rust -pub enum Outcome { - Allow, - Deny, - Indeterminate, - /// Action requires human approval before proceeding. - /// The agent's execution is paused until an approval attestation is received. - RequiresApproval, -} -``` - -#### 5.2 Approval attestation format - -A human approves by issuing a short-lived, single-use attestation: - -```json -{ - "version": 1, - "issuer": "did:keri:EHuman123...", - "subject": "did:keri:EAgent456...", - "capabilities": ["deploy:production"], - "signer_type": "Human", - "expires_at": "2026-03-05T10:35:00Z", - "note": "Approved deploy to production for release v2.1.0" -} -``` - -This attestation is verified by the MCP server/SDK before allowing the action to proceed. The 5-minute expiry ensures the approval is contextual, not blanket. - -#### 5.3 Approval notification channels - -- **CLI**: `auths approve --request ` (for local development) -- **Webhook**: POST to configured URL with approval request payload (for Slack/Teams integration) -- **MCP**: Use MCP's [elicitation](https://blog.modelcontextprotocol.io/posts/2025-11-25-first-mcp-anniversary/) to request user confirmation through the agent's UI - ---- - -### Epic 6: SCIM Provisioning API - -**NIST alignment**: Focus Area 1 (Identification) -**Priority: P2** -**Effort: 2-3 weeks** - -Enterprises manage identities through SCIM (System for Cross-domain Identity Management). Adding SCIM support means IT teams can provision/deprovision agent identities through their existing directory (Okta, Azure AD, Google Workspace) rather than running CLI commands. - -#### 6.1 SCIM resource types for agents - -**File to create:** `crates/auths-scim/src/lib.rs` - -Map Auths agent identities to SCIM resource types: - -```rust -/// SCIM User resource representing an Auths agent identity. 
-/// -/// Maps SCIM standard attributes to Auths identity fields: -/// - `userName` -> agent name -/// - `externalId` -> KERI DID -/// - `active` -> not revoked -/// - `entitlements` -> capabilities -/// - Custom schema extension for delegation chain metadata -pub struct ScimAgentResource { - pub schemas: Vec<String>, - pub id: String, // Auths internal ID - pub external_id: String, // KERI DID # BIG QUESTION: should this be strongly typed and not just a plain String? - pub user_name: String, // Agent name - pub active: bool, // Not revoked - pub entitlements: Vec<String>, // Capabilities -} -``` - -#### 6.2 REST endpoints - -Standard SCIM endpoints (`/scim/v2/Users`, `/scim/v2/Groups`) backed by the Git registry. When an enterprise directory deprovisions an agent, the SCIM handler revokes the agent's attestation. - ---- - -### Epic 7: Context-Aware Dynamic Authorization - -**NIST alignment**: Focus Area 2 (Authorization) -**Priority: P3** -**Effort: 3-4 weeks** - -The feedback proposes NGAC (graph-based authorization). The pragmatic move is to extend the existing policy engine with **context-reactive predicates** rather than replace it with a new graph model. This delivers the same outcome (dynamic authorization based on changing context) with far less complexity. - -#### 7.1 Data classification predicates - -When an agent retrieves a document classified as "Internal Confidential," its ability to call external tools should be automatically restricted. Extend the policy engine: - -**File to modify:** `crates/auths-policy/src/expr.rs` - -```rust -/// Checks if the current data context has a classification at or below the threshold. -/// Example: If context has "confidential" data, deny "webhook:external" capability. -DataClassificationBelow(String), - -/// Checks if the current session has accumulated data from multiple sensitivity levels. -/// Prevents confused deputy attacks where an agent aggregates sensitive data -/// then exfiltrates it through a low-sensitivity channel. 
-NoSensitivityEscalation, -``` - -#### 7.2 Session-scoped capability reduction - -The SDK maintains a session context that narrows available capabilities as the agent interacts with sensitive resources. This is an enforcement mechanism, not just a policy check -- the session's effective capabilities are recomputed after every tool call. - ---- - -## Part 4: Valuation Milestones (Revised) - -The feedback's milestones are directionally correct but miscalibrate what's already built. Here's a recalibrated view: - -### Current State (~$4-6M Seed) - -What exists today is **not** a fragile prototype. It's a production-grade identity primitive with: -- Full delegation chain verification (offline, no server) -- Three-tier signing workflow with platform keychain integration -- OIDC federation to AWS/GCP/Azure -- Policy engine with 30+ predicates -- Mobile FFI (UniFFI), WASM, and C FFI -- Agent provisioning API with ephemeral/persistent modes - -The gap is **ecosystem integration, not core capability**. - -### $10-15M (Post-Seed / Pre-Series A) - -**Deliverables:** Epics 1 + 2 (MCP integration + OAuth 2.1 OBO) - -The product becomes usable by AI engineers without leaving their framework. A LangChain developer adds `auths-agent` to their Python project, calls `auth.get_token()`, and gets cryptographic identity for their agent's MCP tool calls. Every action is traceable to a human principal. - -**Proof point:** Working demo of "Agent signs a commit and deploys to staging, fully traceable to the authorizing human, with capability narrowing enforced at every hop." - -### $20-30M (Series A) - -**Deliverables:** Epics 3 + 4 + 6 (Audit logging + SPIFFE bridge + SCIM) - -Enterprise IT teams can provision agent identities through their existing directory (Okta/Azure AD via SCIM), bind them to SPIFFE runtime attestation, and get tamper-evident audit logs. SOCs can answer "which human authorized this agent to deploy to production at 3am?" 
- -**Proof point:** Integration with a Fortune 500's CI/CD pipeline. Audit logs feed into Splunk/Datadog. - -### $60-100M (Series B) - -**Deliverables:** Epics 5 + 7 (HITL approval gates + dynamic authorization) - -The platform enforces real-time authorization boundaries. An agent that reads a confidential document is automatically blocked from calling external webhooks. High-risk actions (production deploys, data deletion) require human approval via MCP elicitation or Slack webhook. This is the "confused deputy" mitigation that the NIST concept paper calls for. - -**Proof point:** NIST NCCoE demonstration project participation. FedRAMP authorization in progress. - -### $300M+ (Series C / Category Leader) - -**Deliverables:** Protocol standardization + cross-domain federation - -Auths attestation chains become the wire format for agent-to-agent trust across enterprise boundaries. Company A's procurement agent presents an attestation chain to Company B's supply chain agent. Both verify offline. The OIDC bridge handles trust translation. Auths is the protocol layer, not just a product. - -**Proof point:** IETF RFC or NIST SP referencing Auths' delegation chain format. - ---- - -## Part 5: The Structural Moat - -Every competitor in this space has a centralized dependency: - -| Competitor | Central Dependency | What happens when it's down? | -|-----------|-------------------|-------------------------------| -| Auth0 Token Vault | Auth0's servers | All agent auth fails | -| HashiCorp Vault + SPIFFE | SPIRE server | No new SVIDs issued | -| Google A2A | Google's identity services | No agent-to-agent trust | -| Okta/CyberArk NHI | Vendor's cloud | All non-human identity fails | - -Auths has **none**. Verification is a pure computation over the attestation chain and a root public key. No network call. No server dependency. Works in air-gapped environments, on submarines, in disconnected edge deployments. - -The delegation chain is the second moat. 
No other system provides: - -``` -Human (KERI, hardware-backed) - -> Device (Ed25519 dual-signed) - -> AI Agent (scoped: sign:commit, 24h TTL) - -> Sub-Agent (scoped: deploy:staging, 1h TTL) - -> MCP Tool Call (scoped: fs:read, single-use) -``` - -Each link is independently verifiable. Capabilities only narrow. Any link can be revoked without affecting the chain above it. This is the zero-trust delegation model that the NIST concept paper describes but no one else has implemented. - ---- - -## Part 6: Execution Priority Summary - -| # | Epic | NIST Focus | Priority | Effort | Impact | -|---|------|-----------|----------|--------|--------| -| 1 | MCP-native agent authorization | 1, 2 | P0 | 3-4 weeks | Developer on-ramp, framework integration | -| 2 | OAuth 2.1 "on behalf of" | 3 | P0 | 2-3 weeks | Standards compliance, delegation chain in JWT | -| 3 | Non-repudiation audit logging | 4 | P1 | 3-4 weeks | Enterprise SOC requirement | -| 4 | SPIFFE bridge | 1, 2 | P1 | 2-3 weeks | Infrastructure interop | -| 5 | Human-in-the-loop approval | 3 | P2 | 2-3 weeks | Risk management, confused deputy prevention | -| 6 | SCIM provisioning | 1 | P2 | 2-3 weeks | Enterprise directory integration | -| 7 | Context-aware dynamic auth | 2 | P3 | 3-4 weeks | Advanced authorization, data classification | - -**Critical path:** Epics 1 + 2 are the immediate priority. They transform Auths from "identity primitive" to "agent authorization infrastructure" and align directly with the NIST NCCoE demonstration scope. 
- ---- - -## Sources - -- [NIST NCCoE: Software and AI Agent Identity and Authorization](https://www.nccoe.nist.gov/projects/software-and-ai-agent-identity-and-authorization) -- [NIST NCCoE Concept Paper (PDF, Feb 2026)](https://www.nccoe.nist.gov/sites/default/files/2026-02/accelerating-the-adoption-of-software-and-ai-agent-identity-and-authorization-concept-paper.pdf) -- [NIST AI Agent Standards Initiative](https://www.nist.gov/caisi/ai-agent-standards-initiative) -- [IETF Draft: OAuth 2.0 On-Behalf-Of for AI Agents](https://www.ietf.org/archive/id/draft-oauth-ai-agents-on-behalf-of-user-01.html) -- [MCP Authorization Specification (2025-03-26)](https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization) -- [Stack Overflow: Authentication and Authorization in MCP](https://stackoverflow.blog/2026/01/21/is-that-allowed-authentication-and-authorization-in-model-context-protocol) -- [Auth0 Token Vault for AI Agents](https://auth0.com/blog/auth0-token-vault-secure-token-exchange-for-ai-agents/) -- [HashiCorp: SPIFFE for Agentic AI and Non-Human Actors](https://www.hashicorp.com/en/blog/spiffe-securing-the-identity-of-agentic-ai-and-non-human-actors) -- [HashiCorp: Zero Trust for Agentic Systems](https://www.hashicorp.com/en/blog/zero-trust-for-agentic-systems-managing-non-human-identities-at-scale) -- [Red Hat: Zero Trust for Autonomous Agentic AI Systems](https://next.redhat.com/2026/02/26/zero-trust-for-autonomous-agentic-ai-systems-building-more-secure-foundations/) -- [SPIFFE Meets OAuth2: Workload Identity in the Agentic AI Era](https://riptides.io/blog-post/spiffe-meets-oauth2-current-landscape-for-secure-workload-identity-in-the-agentic-ai-era/) -- [Agentic AI Market Size: $199B by 2034, CAGR 43.8%](https://www.precedenceresearch.com/agentic-ai-market) -- [Auth0 MCP Spec Updates (June 2025)](https://auth0.com/blog/mcp-specs-update-all-about-auth/) -- [Securing MCP: OAuth, mTLS, Zero 
Trust](https://dasroot.net/posts/2026/02/securing-model-context-protocol-oauth-mtls-zero-trust/) diff --git a/docs/plans/authsec_competition.md b/docs/plans/authsec_competition.md deleted file mode 100644 index c60f74a9..00000000 --- a/docs/plans/authsec_competition.md +++ /dev/null @@ -1,312 +0,0 @@ -# AuthSec Competitive Analysis - -## Executive Summary - -AuthSec (`authsec.ai`) is an open-source, agent-first identity platform targeting MCP (Model Context Protocol) servers and AI agent infrastructure. It provides centralized authentication (OAuth 2.1, SAML, CIBA) and workload identity (SPIFFE/SPIRE X.509) with a managed SaaS backend. - -**Auths and AuthSec are not direct competitors.** They solve adjacent problems with fundamentally different architectures. AuthSec is a centralized identity platform that bolts auth onto AI agents. Auths is a decentralized identity primitive that makes cryptographic identity infrastructure-free. Where they overlap is in workload identity, capability-scoped delegation, and OIDC token exchange — but their trust models are incompatible at the philosophical level. - -The competitive risk is not technical substitution but **market confusion**: both use similar terminology (attestation, capabilities, workload identity, zero-trust) while meaning very different things. This document disambiguates them. - ---- - -## Part 1: What AuthSec Actually Is - -### 1.1 Architecture - -AuthSec is a **four-service Go/Gin backend** behind `dev.api.authsec.dev`: - -| Service | Purpose | -|---------|---------| -| User Management API (v4.0.0) | Multi-tenant user auth, MFA, directory sync | -| Auth Manager API (v1.1.0) | JWT generation, group management, token validation | -| Client Management API (v0.4.0) | Client CRUD, tenant-specific config | -| WebAuthn & MFA API (v2.0) | WebAuthn registration, TOTP, SMS verification | - -**Stack**: Go/Gin + PostgreSQL + HashiCorp Vault + SPIRE + Prometheus. Frontend: React 19/Vite 6. Mobile: Expo 54/React Native. 
- -All components are MIT-licensed. Self-hosting is supported. The managed service runs at `app.authsec.dev`. - -### 1.2 Identity Model - -AuthSec has a **dual identity plane**: - -**User identity** (humans): OAuth 2.1 with PKCE, SAML 2.0, OIDC federation, CIBA (push-to-mobile), WebAuthn/FIDO2, TOTP. After authentication, a JWT is issued containing `email`, `tenant_id`, `user_id`, `org_id`, `roles`, `groups`, `scopes`, `resources`, `permissions`. This JWT drives all authorization decisions. - -**Workload identity** (agents/services): SPIFFE/SPIRE issues X.509-SVIDs (short-lived certificates) through SPIRE agents. Attestation uses Kubernetes selectors (namespace, service account, pod labels). Certificates auto-rotate every 30 minutes. Workloads present mTLS certificates for service-to-service communication. - -### 1.3 Authorization Model - -Five-dimensional RBAC: - -1. **Roles** — e.g., `admin`, `editor`, `viewer` -2. **Groups** — e.g., `engineering`, `devops` -3. **Scopes** — e.g., `read`, `write`, `deploy` -4. **Resources** — e.g., `production`, `staging` -5. **Permissions** — Composite `resource:action` strings - -Evaluation supports AND/OR logic: `require_all=True` (all dimensions must match) or `require_all=False` (any one match suffices). - -### 1.4 MCP Integration Pattern - -The core value proposition — "3 lines of code" to add auth to an MCP server: - -```python -@protected_by_AuthSec("tool_name", roles=["admin"], scopes=["write"]) -async def my_tool(arguments: dict, session=None) -> list: - user = arguments.get("_user_info") - return [{"type": "text", "text": f"Hello {user['email']}"}] -``` - -**Flow**: Unauthenticated users see only OAuth management tools. After browser-redirect OAuth flow, JWT is decoded and cached. Protected tools become visible based on RBAC evaluation. Each tool call triggers upstream validation via the SDK Manager service. `_user_info` is injected into tool arguments automatically. 
- -### 1.5 CIBA (Client-Initiated Backchannel Authentication) - -Enables "headerless" authentication — no browser redirects. User receives a push notification on the AuthSec mobile authenticator, approves, and the agent receives a JWT. Designed for voice assistants, CLI tools, IoT devices, and desktop apps. - -### 1.6 Delegation - -The `DelegationClient` enables AI agents to act on behalf of users: -- Users grant scoped, time-limited permissions to agents -- Agents pull JWT-SVID delegation tokens -- Tokens contain: permissions list, audience, expiration -- Auto-refresh before expiry - -### 1.7 Secret Management - -`ServiceAccessSDK` wraps HashiCorp Vault for secure credential retrieval. MCP tools access third-party APIs without exposing raw credentials. All secrets are encrypted at rest and retrieved per-session. - ---- - -## Part 2: Head-to-Head Comparison - -### 2.1 Fundamental Architecture - -| Dimension | Auths | AuthSec | -|-----------|-------|---------| -| **Trust model** | Self-certifying (no authority) | Centralized authority (SaaS or self-hosted) | -| **Identity primitive** | `did:keri:E...` (hash of inception event) | UUID/email in PostgreSQL | -| **Infrastructure requirement** | Git (already everywhere) | Go backend + PostgreSQL + Vault + SPIRE + Prometheus | -| **Key management** | Platform keychain (macOS/Linux/Windows) | HashiCorp Vault | -| **Verification model** | Offline, stateless (WASM/FFI embeddable) | Online (requires SDK Manager API call) | -| **Key rotation** | KERI pre-rotation (planned, safe) | SPIRE auto-rotation (30-min SVIDs) | -| **Identity portability** | Fully portable (`did:keri` is forge-agnostic) | Locked to AuthSec instance | -| **Vendor lock-in** | None | High (identity lives in their PostgreSQL) | -| **Offline capability** | Full (200KB WASM verifier) | None (all validation server-side) | - -### 2.2 Workload Identity - -This is the primary area of overlap. 
- -| Dimension | Auths | AuthSec | -|-----------|-------|---------| -| **Workload credential** | Signed attestation chain → OIDC JWT | SPIFFE X.509-SVID → JWT | -| **Attestation** | Cryptographic (dual-signed, chain-verified) | Kubernetes selectors (namespace, service account) | -| **Trust anchor** | Self-certifying identity + optional witness quorum | SPIRE server trust bundle | -| **Capability model** | First-class, intersected along chain, policy-evaluated | RBAC roles/scopes (flat, not chain-intersected) | -| **Cloud provider bridge** | OIDC bridge → AWS STS / GCP WIF / Azure AD | Not built-in (separate integration) | -| **GitHub Actions integration** | OIDC cross-reference (verify OIDC token + KERI identity simultaneously) | Not built-in | -| **TTL enforcement** | Trust registry caps + bridge max TTL + policy | SVID rotation interval (30 min default) | - -**Key insight**: AuthSec's workload identity is **Kubernetes-native** (SPIRE attestation via pod selectors). Auths' workload identity is **Git-native** (attestation chain from identity to device to workload). AuthSec requires Kubernetes. Auths requires Git. - -### 2.3 Authorization / Policy - -| Dimension | Auths | AuthSec | -|-----------|-------|---------| -| **Policy language** | Declarative expression AST (And/Or/Not + 20+ predicates) | Five-dimensional RBAC (roles/groups/scopes/resources/permissions) | -| **Evaluation** | Local, compiled, deterministic | Remote API call to SDK Manager | -| **Predicates** | `HasCapability`, `IssuerIs`, `NotExpired`, `RefMatches`, `PathAllowed`, `IsAgent`, `IsHuman`, `IsWorkload`, `RepoIs`, `EnvIs`, etc. 
| Role membership, scope check, resource check | -| **Composability** | Arbitrary nesting (And/Or/Not) with depth limits | AND/OR at top level only | -| **Audit trail** | `Decision` struct with `reason`, `message`, `policy_hash` | Server-side logging | -| **Policy storage** | JSON/TOML, compiled at startup | Dashboard UI + database | -| **Git ref scoping** | First-class (`RefMatches("refs/heads/main")`) | Not applicable | -| **File path scoping** | First-class (`PathAllowed(["src/**"])`) | Not applicable | - -**Key insight**: Auths' policy engine is a **general-purpose authorization language** with developer-workflow predicates (Git refs, file paths, environments). AuthSec's RBAC is a **standard enterprise access control model** with no developer-workflow awareness. - -### 2.4 Developer Experience - -| Dimension | Auths | AuthSec | -|-----------|-------|---------| -| **Setup** | `cargo install auths && auths init` (30 seconds) | Register on dashboard, create workspace, obtain client_id, configure OAuth callbacks | -| **Git signing** | Native (`auths sign commit`, Git hook integration) | Not applicable | -| **Commit verification** | `auths verify HEAD` or embedded WASM | Not applicable | -| **CI/CD integration** | `auths init --profile ci` (ephemeral identity) | SPIRE agent + Kubernetes deployment | -| **MCP server auth** | Not yet built (but OIDC bridge enables it) | Core use case (decorator pattern) | -| **SDKs** | Rust (primary), WASM (browser/Node), FFI (C/Swift/Kotlin) | Python, TypeScript, Go | -| **Mobile** | Not yet built | Expo/React Native authenticator app | - -### 2.5 Supply Chain & Provenance - -| Dimension | Auths | AuthSec | -|-----------|-------|---------| -| **Artifact signing** | `auths artifact sign ` | Not applicable | -| **Commit signing** | Native (Ed25519 via Git) | Not applicable | -| **Attestation chains** | Core primitive (transitive delegation) | Not applicable | -| **Witness quorum** | Built-in (N-of-M independent receipting) | Not 
applicable | -| **Software provenance** | Core use case (Sigstore-complementary) | Not applicable | - -### 2.6 Maturity & Ecosystem - -| Dimension | Auths | AuthSec | -|-----------|-------|---------| -| **Codebase** | ~80K lines Rust, 18 crates, layered architecture | ~9 repos (Go + Python + TypeScript + React) | -| **GitHub activity** | Active development | 37 commits on main SDK repo, 1 star | -| **License** | (check project) | MIT across all repos | -| **Documentation** | CLAUDE.md, extensive inline docs, plan docs | Docusaurus site (partially broken at time of review) | -| **Community** | Early | Very early | -| **Production readiness** | Pre-launch (hardening in progress) | Pre-launch (v0.x APIs) | - ---- - -## Part 3: Where AuthSec Has Advantages - -### 3.1 MCP Server Integration (Today) - -AuthSec has a **working MCP auth solution now**. The `@protected_by_AuthSec()` decorator pattern is trivial to adopt. Tool visibility control (hiding tools until authenticated) is a genuinely useful UX pattern. Auths does not currently have an MCP SDK. - -**Mitigation**: Auths' OIDC bridge can issue JWTs consumable by MCP servers, but we lack the SDK wrapper and decorator pattern. Building `auths-mcp-sdk` (Python/TypeScript) would close this gap. - -### 3.2 Mobile Authenticator - -AuthSec has a mobile app (Expo/React Native) that supports TOTP, CIBA push notifications, and biometrics. This enables "approve from your phone" flows. Auths has no mobile presence. - -**Mitigation**: Auths' WASM verifier could be embedded in a React Native app. The attestation-based model actually makes this simpler (no server round-trip needed for verification). But the app doesn't exist yet. - -### 3.3 CIBA (Headerless Auth) - -CIBA is genuinely useful for AI agents that can't do browser redirects. Voice assistants, CLI tools, and headless agents need this. 
Auths' model doesn't require browser redirects at all (attestations are bearer proofs), but we haven't marketed this or built the convenience wrappers. - -### 3.4 Enterprise SSO (SAML, Active Directory) - -AuthSec supports SAML 2.0, Active Directory sync (via `ad-agent`), and Entra ID integration. Enterprise buyers expect this. Auths has no SAML or AD integration. - -**Mitigation**: Auths' identity model is orthogonal to SSO — a `did:keri` identity can be linked to an enterprise SSO identity via platform claims. But the integration doesn't exist yet. - -### 3.5 Multi-Tenant Dashboard - -AuthSec has a full React admin dashboard for managing users, roles, clients, and policies. Auths is CLI-only. - -**Mitigation**: The CLI-first approach is a strength for developer adoption but a weakness for enterprise sales. A web dashboard for trust registry and policy management would address this. - ---- - -## Part 4: Where Auths Has Structural Advantages - -### 4.1 No Infrastructure Required - -AuthSec requires Go backend + PostgreSQL + HashiCorp Vault + SPIRE + Prometheus. Auths requires Git. This is not a marginal difference — it's a category difference. Every developer already has Git. Nobody already has a SPIRE deployment. - -**Implication**: Auths can achieve adoption at the individual developer level without any organizational procurement. AuthSec requires an infrastructure decision before a single developer can use it. - -### 4.2 Identity Portability - -AuthSec identities live in their PostgreSQL database. Migrating away means losing your identity. Auths identities are self-certifying — `did:keri:E...` is derived from the inception event hash, not from any platform. Your identity follows you across GitHub, GitLab, Radicle, Forgejo, or any future forge. - -**Implication**: Auths identities survive platform death. AuthSec identities don't. - -### 4.3 Offline Verification - -AuthSec requires an API call to the SDK Manager for every authorization decision. 
Auths' verifier is a 200KB WASM module that runs offline — in browsers, CI runners, edge functions, mobile apps, or embedded systems. No network, no server, no single point of failure. - -**Implication**: Auths can verify identity in air-gapped environments, submarines, Mars rovers, or just someone's laptop with no internet. AuthSec cannot. - -### 4.4 Key Rotation Safety - -AuthSec rotates SPIRE SVIDs every 30 minutes, which protects against compromise but doesn't help with identity continuity — if the SPIRE root CA is compromised, everything is compromised. Auths uses KERI pre-rotation: when creating an identity, you commit to the hash of your next key. If the current key is compromised, the attacker cannot rotate because they lack the pre-image of the next key hash. Rotation is a planned lifecycle event, not an emergency. - -**Implication**: Auths' key compromise model is architecturally superior. Compromise in AuthSec requires trusting a single CA. Compromise in Auths requires breaking pre-rotation commitments AND witness quorum. - -### 4.5 Witness Quorum (Decentralized Accountability) - -Auths' witness model provides independent accountability without centralization. N-of-M independent witnesses must receipt a key event before it's accepted. This prevents an attacker from presenting different key histories to different verifiers. AuthSec has no equivalent — trust is centralized in SPIRE. - -### 4.6 Supply Chain Security - -Auths was built for commit signing, artifact signing, and attestation chains. This is the core use case. AuthSec has no supply chain capabilities — it's an auth platform bolted onto AI agent infrastructure. - -**Implication**: Auths is positioned for SLSA compliance, software provenance, and supply chain integrity. AuthSec is positioned for API access control. - -### 4.7 Policy Expressiveness - -Auths' policy engine has 20+ predicates including developer-workflow-specific ones (`RefMatches`, `PathAllowed`, `EnvIs`, `IsAgent`, `IsHuman`). 
Policies compose arbitrarily with And/Or/Not and compile to deterministic evaluation. AuthSec has five-dimensional RBAC with AND/OR at the top level. - -**Implication**: "Only human signers can modify `refs/heads/main`, but CI workloads can deploy to staging if they have `deploy_staging` capability and the attestation chain has witness quorum" — this is one policy expression in Auths. It's not expressible in AuthSec's RBAC model. - -### 4.8 Cryptographic Attestation Chains - -Auths' attestation chains are self-contained, offline-verifiable proofs of delegated authority. The verification question is "here's the proof" not "ask the server." Each link is dual-signed, capabilities are intersected, and the chain can be verified by anyone with the root public key. - -AuthSec's delegation is JWT-based — you call the server, it gives you a token, the token is opaque to the relying party. If the server is down, delegation doesn't work. - ---- - -## Part 5: Strategic Positioning - -### 5.1 The Market Confusion Risk - -Both projects use overlapping terminology: - -| Term | Auths Meaning | AuthSec Meaning | -|------|--------------|-----------------| -| **Attestation** | Cryptographically signed, chain-verifiable delegation proof | Kubernetes pod selector matching via SPIRE | -| **Capability** | First-class, intersectable, policy-evaluated authorization scope | RBAC scope/permission string | -| **Zero-trust** | No central authority, self-certifying identities, offline verification | Short-lived certificates, mTLS, per-call validation | -| **Workload identity** | Git-native attestation chain + OIDC bridge | SPIFFE X.509-SVID via Kubernetes SPIRE | -| **Policy** | Declarative expression AST with 20+ predicates | Dashboard-configured RBAC rules | - -**Risk**: Enterprise buyers searching for "workload identity" or "agent authentication" may find AuthSec and assume it covers the same ground as Auths. It doesn't — but the terminology overlap makes this non-obvious. 
- -### 5.2 Complementary, Not Competitive - -The two projects are actually complementary: - -- **AuthSec** answers: "How do I add OAuth login to my MCP server?" -- **Auths** answers: "How do I cryptographically prove who wrote this code and what they're authorized to do?" - -A realistic enterprise deployment could use both: -1. AuthSec for user-facing MCP server authentication (OAuth, RBAC, CIBA) -2. Auths for developer identity, commit signing, artifact provenance, and CI/CD capability delegation - -The OIDC bridge is the natural integration point: Auths issues JWTs that AuthSec could consume as a trusted identity provider. - -### 5.3 Where to Win - -| Segment | Winner | Why | -|---------|--------|-----| -| MCP server auth (today) | AuthSec | They have SDKs and decorator patterns | -| Developer commit signing | Auths | AuthSec doesn't do this | -| Supply chain provenance | Auths | AuthSec doesn't do this | -| CI/CD capability delegation | Auths | Attestation chains + trust registry + policy engine | -| Enterprise SSO integration | AuthSec | SAML, AD sync, dashboard | -| Air-gapped / offline environments | Auths | WASM verifier, no server dependency | -| Kubernetes-native workloads | AuthSec | SPIFFE/SPIRE is purpose-built for K8s | -| Git-native developer workflows | Auths | Git refs, commit signing, ref-scoped policy | -| Individual developer adoption | Auths | `cargo install auths && auths init` vs. deploy 5 services | -| Enterprise procurement | AuthSec | Dashboard, multi-tenant, SAML | - -### 5.4 Recommended Response - -1. **Don't compete on MCP auth directly.** Build `auths-mcp-sdk` as a thin wrapper that uses the OIDC bridge, but don't try to replicate AuthSec's OAuth/SAML/CIBA stack. Instead, position Auths as the identity layer that MCP auth platforms (including AuthSec) can trust. - -2. **Lean into supply chain.** AuthSec has zero supply chain capabilities. This is Auths' moat. 
Commit signing, artifact provenance, attestation chains, and SLSA compliance are areas where AuthSec cannot compete without rebuilding from scratch. - -3. **Emphasize infrastructure-free.** "Works with Git you already have" vs. "deploy Go + PostgreSQL + Vault + SPIRE + Prometheus" is a devastating comparison for individual and small-team adoption. - -4. **Publish the OIDC bridge as an integration point.** Position the bridge not as competition to AuthSec but as something AuthSec could consume. "AuthSec authenticates your users. Auths proves what your code is authorized to do." - -5. **Build the trust registry dashboard.** The CLI-only story is fine for developers but insufficient for enterprise security teams. A minimal web UI for trust registry and policy management would close the enterprise gap without building a full SaaS platform. - ---- - -## Part 6: Technical Gaps to Close - -| Gap | Priority | Effort | Notes | -|-----|----------|--------|-------| -| MCP SDK (Python/TypeScript) | P2 | 1-2 weeks | Thin wrapper around OIDC bridge. Not core to mission but closes marketing gap. | -| Enterprise SSO (SAML) | P3 | 2-3 weeks | Only needed for enterprise sales. Could use existing OIDC bridge as adapter. | -| Trust registry web UI | P2 | 1-2 weeks | Minimal React/HTMX dashboard for policy and trust registry management. | -| Mobile verifier app | P3 | 2-3 weeks | WASM verifier in React Native. Enables "verify from phone" use case. | -| CIBA-equivalent flow | P3 | 1 week | Auths doesn't need CIBA (attestations are bearer proofs), but marketing should explain why. | -| Active Directory sync | P4 | 2 weeks | Only for enterprise. Low priority until enterprise pipeline exists. | diff --git a/docs/plans/cli_cleanup.md b/docs/plans/cli_cleanup.md deleted file mode 100644 index 3ccf9cf9..00000000 --- a/docs/plans/cli_cleanup.md +++ /dev/null @@ -1,890 +0,0 @@ -# CLI Cleanup Plan - -## Design Principle - -> **All business logic lives in `auths-sdk`. 
The `auths-cli` is a thin presentation layer.** -> -> SDK workflows return structured results (reports, status enums). CLI calls SDK, then formats and prints the results. CLI never does file I/O, git operations, or config parsing directly — it delegates to SDK. - -## Source Map - -Key files an implementer needs to know about: - -| Area | File | What it does | -|------|------|-------------| -| `auths init` handler | `crates/auths-cli/src/commands/init/mod.rs` | Entry point: `handle_init()` (L141), `run_developer_setup()` (L166) | -| `auths init` helpers | `crates/auths-cli/src/commands/init/helpers.rs` | `write_allowed_signers()` (L99), `set_git_config()` (L137) | -| `auths doctor` handler | `crates/auths-cli/src/commands/doctor.rs` | `handle_doctor()` (L43), `run_checks()` (L97) | -| Doctor fix adapters | `crates/auths-cli/src/adapters/doctor_fixes.rs` | `GitSigningConfigFix::apply()` (L99), `AllowedSignersFix::apply()` (L37) | -| Allowed signers workflow | `crates/auths-sdk/src/workflows/allowed_signers.rs` | `AllowedSigners` struct (L254), `sync()` (L376), `save()` (L302) | -| SDK setup / init | `crates/auths-sdk/src/setup.rs` | `initialize()` (L49) — orchestrates identity creation | -| Registry ref store | `crates/auths-infra-git/src/ref_store.rs` | `GitRefStore` — reads/writes `refs/auths/registry` | -| Identity init | `crates/auths-id/src/identity/initialize.rs` | `initialize_registry_identity()` (L104) | -| Git hooks | `crates/auths-id/src/storage/registry/hooks.rs` | `install_cache_hooks()` (L60), `install_linearity_hook()` (L271) | -| Diagnostics workflow | `crates/auths-sdk/src/workflows/diagnostics.rs` | `DiagnosticsWorkflow` — used by `auths doctor` | -| SSH config workflow | `crates/auths-sdk/src/workflows/ssh_config.rs` | **New.** `SshConfigWorkflow::ensure_config()`, `check_config()` | -| Registry sync workflow | `crates/auths-sdk/src/workflows/registry_sync.rs` | **New.** `RegistrySyncWorkflow::sync_to_repo()` | -| Key backup workflow | 
`crates/auths-sdk/src/workflows/key_backup.rs` | **New.** `KeyBackupWorkflow::export()`, `is_backed_up()` | - -## Execution Order - -Tasks have dependencies. Do them in this order: - -1. **Task 1** (SSH config) — standalone, no deps -2. **Task 3** (repo allowed_signers) — standalone, no deps -3. **Task 6** (auto-push registry on init) — standalone, no deps -4. **Task 2** (pre-push hook) — after task 6 (same area, don't want conflicts) -5. **Task 5** (doctor checks) — after tasks 1, 3, 6 (doctor needs to check what they write) -6. **Task 4** (identity reset) — after tasks 3, 5, 6 (uses all the new cleanup logic) -7. **Task 7** (umbrella: single-command onboarding) — after all above (integration) -8. **Task 8** (pre-rotation backup nudge) — independent, can be done anytime - -## Testing - -For each task, verify by running the end-to-end flow on a **clean machine** (or with `rm -rf ~/.auths`): - -```bash -# 1. Fresh init -auths init - -# 2. Check everything was set up -auths doctor # should pass all checks -cat ~/.ssh/config # should have IgnoreUnknown UseKeychain -cat .auths/allowed_signers # should have the new key -git for-each-ref refs/auths/ # should have registry ref - -# 3. Make a signed commit and push -git commit --allow-empty -m "test: signed commit" -git push origin main # should also push refs/auths/registry - -# 4. Verify the commit -auths verify HEAD # should pass -``` - ---- - -## Tasks - -### 1. SSH config: add `IgnoreUnknown UseKeychain` - -## Problem - -`auths init` writes `UseKeychain yes` to `~/.ssh/config` under a `Host *` block. This is a macOS-specific OpenSSH option. If the user's SSH version doesn't recognize it, **all git+SSH operations fail**: - -``` -/Users/.../.ssh/config: line 7: Bad configuration option: usekeychain -/Users/.../.ssh/config: terminating, 1 bad configuration options -fatal: Could not read from remote repository. -``` - -## Fix - -### 1. 
`auths init` (onboarding) -When writing the SSH config, prepend `IgnoreUnknown UseKeychain` on the same `Host *` block: - -``` -Host * - IgnoreUnknown UseKeychain - AddKeysToAgent yes - UseKeychain yes - IdentityFile ~/.ssh/id_ed25519_... -``` - -This tells SSH to silently skip `UseKeychain` if unsupported, rather than failing. - -### 2. `auths doctor` (diagnostics) -`auths doctor` should check for this condition: -- If `~/.ssh/config` contains `UseKeychain` without a preceding `IgnoreUnknown UseKeychain`, flag it as a warning -- Print the location of the SSH config and suggest adding the directive -- Users who break their auths setup will likely reach for `auths doctor` first, so this is an important diagnostic to surface - -## Implementation - -### Design note - -All business logic goes in **auths-sdk**. The CLI is a thin presentation layer that calls SDK functions and prints output. - -### `auths init` — write SSH config (SDK) - -**File:** `crates/auths-sdk/src/workflows/ssh_config.rs` (new file) - -There is currently **no function that writes `~/.ssh/config`**. The existing `write_allowed_signers()` in CLI helpers (L99) only writes `~/.ssh/allowed_signers`. Create a new SDK workflow: - -```rust -pub struct SshConfigWorkflow; - -impl SshConfigWorkflow { - /// Ensures ~/.ssh/config has IgnoreUnknown UseKeychain and the identity file. - /// Returns a description of what was changed, or None if no change needed. 
- pub fn ensure_config(identity_file: &Path) -> Result<Option<String>> { - let home = dirs::home_dir().context("no home directory")?; - let ssh_dir = home.join(".ssh"); - std::fs::create_dir_all(&ssh_dir)?; - let config_path = ssh_dir.join("config"); - - let existing = std::fs::read_to_string(&config_path).unwrap_or_default(); - - // Skip if IgnoreUnknown UseKeychain already present - if existing.contains("IgnoreUnknown UseKeychain") { - return Ok(None); - } - - let block = format!( - "\nHost *\n IgnoreUnknown UseKeychain\n AddKeysToAgent yes\n UseKeychain yes\n IdentityFile {}\n", - identity_file.display() - ); - - let mut f = std::fs::OpenOptions::new().create(true).append(true).open(&config_path)?; - f.write_all(block.as_bytes())?; - Ok(Some(format!("Added IgnoreUnknown UseKeychain to {}", config_path.display()))) - } - - /// Checks if UseKeychain exists without IgnoreUnknown. Returns diagnostic info. - pub fn check_config() -> Result<CheckResult> { /* ... */ } -} - -Register the module in `crates/auths-sdk/src/workflows/mod.rs`. - -**CLI caller** (`crates/auths-cli/src/commands/init/mod.rs`, post-setup phase ~L236): -```rust -if let Some(msg) = SshConfigWorkflow::ensure_config(&ssh_key_path)?
{ - out.println(&format!("✓ {msg}")); -} -``` - -### `auths doctor` — check SSH config - -**File:** `crates/auths-sdk/src/workflows/diagnostics.rs` - -Add a new check method alongside `check_git_signing_config()` (L72): - -```rust -fn check_ssh_config(&self, checks: &mut Vec<CheckResult>) -> Result<(), DiagnosticError> { - let home = dirs::home_dir().ok_or_else(|| DiagnosticError::ExecutionFailed("no home".into()))?; - let config_path = home.join(".ssh").join("config"); - let content = std::fs::read_to_string(&config_path).unwrap_or_default(); - - let has_usekeychain = content.lines().any(|l| l.trim().eq_ignore_ascii_case("usekeychain yes")); - let has_ignore = content.lines().any(|l| l.trim().starts_with("IgnoreUnknown") && l.contains("UseKeychain")); - - if has_usekeychain && !has_ignore { - checks.push(CheckResult { - name: "ssh_config_usekeychain".into(), - passed: false, - message: Some(format!( - "~/.ssh/config has UseKeychain without IgnoreUnknown UseKeychain. Add 'IgnoreUnknown UseKeychain' to the Host * block in {}", - config_path.display() - )), - config_issues: vec![ConfigIssue::Absent("IgnoreUnknown UseKeychain".into())], - }); - } else { - checks.push(CheckResult { name: "ssh_config_usekeychain".into(), passed: true, message: None, config_issues: vec![] }); - } - Ok(()) -} -``` - -Register in `available_checks()` (L31) and call from `run()` (L61). - -**File:** `crates/auths-cli/src/adapters/doctor_fixes.rs` - -Add `SshConfigFix` implementing `DiagnosticFix` (same pattern as `AllowedSignersFix`). Register it in `build_available_fixes()` in `crates/auths-cli/src/commands/doctor.rs` (L193). - -## Context - -Discovered while dogfooding the `@auths-dev/verify` widget. After wiping and re-creating an identity, `git push` failed due to this SSH config issue. - - -### 2. Pre-push hook to sync `refs/auths/registry` - -## Problem - -After `auths init`, the registry (`refs/auths/registry`) is written to `~/.auths/.git`, not to the current project repo.
Users must manually run: - -```bash -git fetch ~/.auths refs/auths/registry:refs/auths/registry -git push origin refs/auths/registry --force -``` - -This is undiscoverable — nothing in the CLI tells users they need to do this, and downstream tools (e.g., the `@auths-dev/verify` widget) silently fail because the project repo on GitHub has no `refs/auths/registry`. - -## Proposal - -Add a **pre-push Git hook** that automatically syncs `refs/auths/registry` from `~/.auths` into the project repo before pushing. - -### Why pre-push (not pre-commit) - -- Not every commit needs the registry synced — only when pushing to a remote -- Catches all pushes including direct-to-main workflows -- Pre-commit would be too frequent and noisy - -### Suggested behavior - -1. On `git push`, the hook checks if `~/.auths/.git/refs/auths/registry` exists -2. If so, fetch it into the local repo: `git fetch ~/.auths refs/auths/registry:refs/auths/registry` -3. Include `refs/auths/registry` in the push -4. If `~/.auths` has no registry, skip silently (user hasn't run `auths init`) - -### Installation - -The hook could be installed automatically by `auths init` or `auths git setup`, similar to how git signing is configured. - -## Implementation - -**File:** `crates/auths-id/src/storage/registry/hooks.rs` - -Follow the existing pattern from `install_cache_hooks()` (L60) and `install_linearity_hook()` (L271): - -1. Add a constant for the hook marker: -```rust -const REGISTRY_SYNC_MARKER: &str = "# auths-registry-sync"; -``` - -2. Add hook script: - -Does this work for Mac, Linux and Windows? (e.g. 
`$HOME`) -```rust -const REGISTRY_SYNC_HOOK: &str = r#"#!/bin/sh -# auths-registry-sync -# Syncs refs/auths/registry from ~/.auths into this repo before pushing - -AUTHS_HOME="$HOME/.auths" -REGISTRY_REF="refs/auths/registry" - -if [ -d "$AUTHS_HOME/.git" ] && git --git-dir="$AUTHS_HOME/.git" rev-parse --verify "$REGISTRY_REF" >/dev/null 2>&1; then - git fetch "$AUTHS_HOME" "$REGISTRY_REF:$REGISTRY_REF" --quiet 2>/dev/null || true - # Git invokes pre-push with args: <remote name> <remote URL>; the ref updates arrive on stdin - # After the normal push completes, push the registry ref too - REMOTE="$1" - git push "$REMOTE" "$REGISTRY_REF" --force --quiet 2>/dev/null || true -fi -"#; -``` - -3. Add installation function following the same pattern as `install_cache_hooks()`: -```rust -pub fn install_pre_push_hook(repo_path: &Path) -> Result<()> { - let git_dir = find_git_dir(repo_path)?; - let hooks_dir = git_dir.join("hooks"); - std::fs::create_dir_all(&hooks_dir)?; - install_hook(&hooks_dir, "pre-push", REGISTRY_SYNC_HOOK, REGISTRY_SYNC_MARKER)?; - Ok(()) -} -``` - -The `install_hook()` helper (L87) already handles idempotency (checks for marker), appending to existing hooks, and setting `0o755` permissions. - -**Caller:** Add to `run_developer_setup()` in `crates/auths-cli/src/commands/init/mod.rs` (L236, post-setup phase). Only install when running inside a git repo (check `.git` exists in cwd or parents). - -## Context - -Discovered while dogfooding the verify widget (`@auths-dev/verify`) with the [example-verify-badge](https://github.com/auths-dev/example-verify-badge) repo. The widget fetches `refs/auths/registry` from the GitHub API to verify attestations, but the ref was missing from the remote because the manual sync step was not documented or automated. - -### 3. Auto-populate `.auths/allowed_signers` in repo - -## Problem - -After running `auths init`, the user's signing key is added to `~/.ssh/allowed_signers` (global), but the repo's `.auths/allowed_signers` is not created or updated.
This means: - -1. The GitHub Action (`auths-verify-github-action`) can't verify commits because it reads `.auths/allowed_signers` from the repo -2. The user has to manually figure out the correct format (`<principal> namespaces="git" ssh-ed25519 <public-key>`) -3. New contributors have no obvious way to add their key - -## Expected behavior - -`auths init` should: -- Create `.auths/allowed_signers` in the current repo if it doesn't exist -- Append the user's device DID principal + SSH public key in the correct format -- Match the format used in `~/.ssh/allowed_signers` (e.g., `z6Mk...@auths.local namespaces="git" ssh-ed25519 AAAA...`) - -## Implementation - -### Design note - -All business logic goes in **auths-sdk**. The CLI is a thin presentation layer. - -**File:** `crates/auths-sdk/src/workflows/allowed_signers.rs` - -The `AllowedSigners` struct (L254) and `sync()` (L376) already exist and handle the correct format: `<did>@auths.local namespaces="git" ssh-ed25519 <public-key>`. Add a convenience method to the existing workflow: - -```rust -impl AllowedSigners { - /// Sync allowed_signers for a specific repo's .auths/ directory. - /// Creates .auths/allowed_signers if it doesn't exist. - /// Returns the number of signers added. - pub fn sync_repo(repo_root: &Path) -> Result<SyncReport> { - let auths_dir = repo_root.join(".auths"); - std::fs::create_dir_all(&auths_dir)?; - let signers_path = auths_dir.join("allowed_signers"); - - let home = auths_core::paths::auths_home()?; - let storage = RegistryAttestationStorage::new(&home); - let mut signers = AllowedSigners::load(&signers_path) - .unwrap_or_else(|_| AllowedSigners::new(&signers_path)); - let report = signers.sync(&storage)?; - signers.save()?; - Ok(report) - } -} -``` - -This belongs in the SDK because it reuses `AllowedSigners`, `RegistryAttestationStorage`, and `sync()` — all SDK types.
- -**CLI caller** (`crates/auths-cli/src/commands/init/mod.rs`, post-setup phase ~L236): -```rust -if let Ok(repo_root) = detect_repo_root() { - let report = AllowedSigners::sync_repo(&repo_root)?; - out.println(&format!("✓ Wrote {} signer(s) to .auths/allowed_signers", report.added)); -} -``` - -## Context - -Discovered during dogfooding. The example repos had placeholder keys in `.auths/allowed_signers` that had to be manually replaced with real keys before the GitHub Action would pass. - -### 4. Identity reset (`auths init --reset`) - -## Problem - -When a user needs to wipe and recreate their identity (e.g., during development or after key compromise), the process is manual and error-prone: - -1. Must manually `rm -rf ~/.auths` to remove the old identity -2. `auths init --force` creates a new identity but doesn't clean up stale data: - - Old `refs/auths/registry` refs remain in repos with mismatched attestations - - Old entries in `~/.ssh/allowed_signers` accumulate (though this is harmless) - - Old SSH key files remain in `~/.ssh/` - - `.auths/allowed_signers` in repos still references the old key -3. Must manually `git update-ref -d refs/auths/registry` in each repo, then re-push -4. Multiple `auths init` runs can accumulate broken attestations in the registry - -## Expected behavior - -Provide a clean reset path: - -- `auths init --reset` that: - - Removes the old identity from `~/.auths` - - Cleans up `refs/auths/registry` in the current repo - - Updates `~/.ssh/allowed_signers` (removes old entry, adds new) - - Updates `.auths/allowed_signers` in the current repo - - Warns about other repos that may still reference the old identity - -## Implementation - -### Design note - -All business logic goes in **auths-sdk**. The CLI only adds the `--reset` flag and calls SDK. - -### SDK — reset workflow - -**File:** `crates/auths-sdk/src/setup.rs` - -Add `reset()` alongside the existing `initialize()` (L49). 
It's the inverse operation: - -```rust -/// Result of resetting an identity. CLI uses this to display what happened. -pub struct ResetReport { - pub identity_removed: bool, - pub registry_cleaned: bool, - pub global_signers_cleaned: usize, // number of entries removed - pub repo_signers_cleaned: usize, // number of entries removed -} - -/// Wipe the current identity and clean up all artifacts. -/// Call this before `initialize()` to do a full reset+reinit. -pub fn reset(repo_root: Option<&Path>) -> Result<ResetReport> { - let mut report = ResetReport { identity_removed: false, registry_cleaned: false, global_signers_cleaned: 0, repo_signers_cleaned: 0 }; - let home = auths_core::paths::auths_home()?; - - // 1. Remove old identity - if home.exists() { - std::fs::remove_dir_all(&home)?; - report.identity_removed = true; - } - - // 2. Clean refs/auths/registry in current repo - if let Some(root) = repo_root { - let status = Command::new("git") - .current_dir(root) - .args(["update-ref", "-d", "refs/auths/registry"]) - .status(); - report.registry_cleaned = status.map(|s| s.success()).unwrap_or(false); - } - - // 3. Clean old entries from ~/.ssh/allowed_signers - let ssh_signers = dirs::home_dir().unwrap().join(".ssh/allowed_signers"); - if ssh_signers.exists() { - let content = std::fs::read_to_string(&ssh_signers)?; - let original_count = content.lines().count(); - let filtered: Vec<&str> = content.lines() - .filter(|l| !l.contains("@auths.local")) - .collect(); - report.global_signers_cleaned = original_count - filtered.len(); - std::fs::write(&ssh_signers, filtered.join("\n") + "\n")?; - } - - // 4.
Clean .auths/allowed_signers in current repo - if let Some(root) = repo_root { - let repo_signers = root.join(".auths/allowed_signers"); - if repo_signers.exists() { - let content = std::fs::read_to_string(&repo_signers)?; - let original_count = content.lines().count(); - let filtered: Vec<&str> = content.lines() - .filter(|l| !l.contains("@auths.local")) - .collect(); - report.repo_signers_cleaned = original_count - filtered.len(); - std::fs::write(&repo_signers, filtered.join("\n") + "\n")?; - } - } - - Ok(report) -} -``` - -### CLI — thin wrapper - -**File:** `crates/auths-cli/src/commands/init/mod.rs` - -1. Add `--reset` flag to `InitCommand` struct (around L101): -```rust -/// Reset and reinitialize identity (implies --force) -#[clap(long)] -pub reset: bool, -``` - -2. Add reset logic at the top of `handle_init()` (L141), before profile selection: -```rust -if cmd.reset { - cmd.force = true; - let repo_root = detect_repo_root().ok(); - let report = auths_sdk::setup::reset(repo_root.as_deref())?; - - // CLI only does presentation - if report.identity_removed { out.println("Removed old identity."); } - if report.registry_cleaned { out.println("Cleaned refs/auths/registry."); } - if report.global_signers_cleaned > 0 { out.println(&format!("Removed {} old entries from ~/.ssh/allowed_signers.", report.global_signers_cleaned)); } - if report.repo_signers_cleaned > 0 { out.println(&format!("Removed {} old entries from .auths/allowed_signers.", report.repo_signers_cleaned)); } - out.println("Warning: other repos may still reference the old identity. Run 'auths doctor' in each repo."); -} -``` - -After reset, the normal `auths init` flow continues and creates a fresh identity. - -## Context - -During dogfooding, multiple identity recreations left stale attestations in the registry. The widget showed "InvalidSignature" because old attestations referenced a different identity's key. Had to manually `git update-ref -d refs/auths/registry` and re-init to fix. - - -### 5. 
Expand `auths doctor` checks - -## Problem - -`auths doctor` is the natural place users go when things break, but it currently doesn't catch several common issues discovered during dogfooding: - -## Checks to add - -### SSH config -- Detect `UseKeychain` without `IgnoreUnknown UseKeychain` (see #74) -- Verify the SSH identity file referenced in config actually exists -- Check `gpg.format = ssh` and `commit.gpgsign = true` in git config - -### Registry -- Check if `refs/auths/registry` exists in the current repo -- Verify the identity in the registry matches the current active identity -- Warn if the registry has attestations signed by a different identity (stale data from identity recreation) -- Check if registry is pushed to the remote - -### Allowed signers -- Check if `~/.ssh/allowed_signers` exists and contains the current device's key -- Check if `.auths/allowed_signers` exists in the current repo -- Warn if repo's allowed_signers has placeholder/example keys -- Verify format is correct (`<principal> namespaces="git" ssh-ed25519 <public-key>`) - -### Signing -- Verify a test signature can be created and verified (round-trip check) -- Check that `git log --show-signature` works for recent commits - -## Implementation - -### Architecture - -The diagnostics system has three layers: - -1. **Provider traits** (`crates/auths-sdk/src/ports/diagnostics.rs`): `GitDiagnosticProvider` (L64) and `CryptoDiagnosticProvider` (L78) — define what the system can check -2. **Workflow** (`crates/auths-sdk/src/workflows/diagnostics.rs`): `DiagnosticsWorkflow` — orchestrates checks, returns `DiagnosticReport` -3. **Fix adapters** (`crates/auths-cli/src/adapters/doctor_fixes.rs`): Implement `DiagnosticFix` trait — each fix addresses a specific `CheckResult` - -Currently only 3 checks exist: `git_version`, `ssh_keygen`, `git_signing_config`. Add the new ones below.
- -### New checks to add - -For each check, add a method to `DiagnosticsWorkflow` following the pattern of `check_git_signing_config()` (L72): - -**1. `check_ssh_config`** — See Task 1 implementation above. - -**2. `check_ssh_identity_file`** — Verify the SSH key file referenced in `~/.ssh/config` exists: -```rust -// Read ~/.ssh/config, find IdentityFile lines, check each file exists -``` - -**3. `check_registry_exists`** — Check `refs/auths/registry` in current repo: -```rust -fn check_registry(&self, checks: &mut Vec<CheckResult>) -> Result<(), DiagnosticError> { - let output = Command::new("git") - .args(["rev-parse", "--verify", "refs/auths/registry"]) - .output(); - // If fails, push ConfigIssue::Absent("refs/auths/registry") - // Also: compare identity in registry with active identity from ~/.auths -} -``` - -**4. `check_repo_allowed_signers`** — Check `.auths/allowed_signers` exists and has current key: -```rust -// Read .auths/allowed_signers, check for current device DID principal -// Warn if contains placeholder keys (e.g., "ssh-ed25519 AAAA..." with no real principal) -``` - -**5. `check_signing_roundtrip`** — Verify sign + verify works: -```rust -// Create a temp file, sign it with ssh-keygen, verify it — confirms the full chain works -``` - -**6. `check_pre_rotation_backup`** (Task 8) — Gentle nudge about backup. - -### Extending the provider traits - -Some new checks (registry, allowed signers) don't fit neatly into `GitDiagnosticProvider` or `CryptoDiagnosticProvider`. Options: -- Add methods to the existing traits -- Add a new `IdentityDiagnosticProvider` trait -- Keep the checks as standalone methods in `DiagnosticsWorkflow` that use `Command::new("git")` directly (simplest, matches the pattern of `check_git_signing_config`) - -Recommended: keep them as private methods on `DiagnosticsWorkflow` (simplest). Only add new traits if the checks need mocking in tests.
- -### Fix adapters - -For each new check that has a fix, add a struct implementing `DiagnosticFix` in `doctor_fixes.rs` and register it in `build_available_fixes()` (doctor.rs L193). Follow the pattern: - -```rust -pub struct RegistryFix { /* fields */ } - -impl DiagnosticFix for RegistryFix { - fn name(&self) -> &str { "registry_sync" } - fn is_safe(&self) -> bool { true } - fn can_fix(&self, check: &CheckResult) -> bool { check.name == "registry_exists" && !check.passed } - fn apply(&self) -> Result<String> { - // git fetch ~/.auths refs/auths/registry:refs/auths/registry - Ok("Synced registry from ~/.auths".into()) - } -} -``` - -### Updating `available_checks()` - -Update the static slice in `available_checks()` (L31) to include all new check names, and add dispatch branches in `run_single()` (L38). - -## Context - -During dogfooding, every one of these issues was hit. `auths doctor` surfacing them with actionable fix commands would have saved significant debugging time. - -### 6. Auto-push registry on `auths init` - -## Problem - -`auths init` creates the identity and writes attestations to `refs/auths/registry` in `~/.auths/.git`, but the user must manually: - -1. `git fetch ~/.auths refs/auths/registry:refs/auths/registry` — pull registry into the project repo -2. `git push origin refs/auths/registry` — push to remote - -This is non-obvious and undiscoverable. New users don't know the registry exists in `~/.auths`, and the fetch-from-local-path pattern is uncommon. - -## Expected behavior - -After `auths init` (when run inside a git repo): -- Automatically copy `refs/auths/registry` from `~/.auths` into the current repo -- Prompt or auto-push to the remote - -This is related to but distinct from #73 (pre-push hook for ongoing sync). This issue is about the **initial setup** experience. - -## Implementation - -### Design note - -All business logic goes in **auths-sdk**. The CLI is a thin presentation layer.
- -### SDK — registry sync workflow - -**File:** `crates/auths-sdk/src/workflows/registry_sync.rs` (new file) - -```rust -pub struct RegistrySyncReport { - pub fetched: bool, - pub pushed: bool, - pub skipped_reason: Option<String>, -} - -pub struct RegistrySyncWorkflow; - -impl RegistrySyncWorkflow { - /// Sync refs/auths/registry from ~/.auths into the given repo, optionally push to remote. - pub fn sync_to_repo(repo_root: &Path) -> Result<RegistrySyncReport> { - let home = auths_core::paths::auths_home()?; - let mut report = RegistrySyncReport { fetched: false, pushed: false, skipped_reason: None }; - - // Fetch registry from ~/.auths into this repo - let status = Command::new("git") - .current_dir(repo_root) - .args(["fetch", &home.to_string_lossy(), "refs/auths/registry:refs/auths/registry"]) - .status()?; - if !status.success() { - report.skipped_reason = Some("could not fetch registry from ~/.auths".into()); - return Ok(report); - } - report.fetched = true; - - // Push to remote (if remote exists) - let remote_check = Command::new("git") - .current_dir(repo_root) - .args(["remote", "get-url", "origin"]) - .output()?; - if remote_check.status.success() { - let push_status = Command::new("git") - .current_dir(repo_root) - .args(["push", "origin", "refs/auths/registry", "--force"]) - .status()?; - report.pushed = push_status.success(); - } - - Ok(report) - } -} -``` - -Register the module in `crates/auths-sdk/src/workflows/mod.rs`.
- -### CLI — thin wrapper - -**CLI caller** (`crates/auths-cli/src/commands/init/mod.rs`, post-setup phase ~L236): -```rust -if let Ok(repo_root) = detect_repo_root() { - let report = RegistrySyncWorkflow::sync_to_repo(&repo_root)?; - if report.fetched { out.println("✓ Synced refs/auths/registry into this repo."); } - if report.pushed { out.println("✓ Pushed refs/auths/registry to origin."); } - if let Some(reason) = report.skipped_reason { out.println(&format!("⚠ Registry sync skipped: {reason}")); } -} -``` - -**Note:** This is related to Task 2 (pre-push hook) but handles the **initial** sync. Task 2 handles **ongoing** sync on subsequent pushes. Both should be implemented. - -## Context - -During dogfooding, `auths init --force` completed successfully but the widget showed errors because the registry was never pushed to the remote. Required manual git plumbing to fix. - -### 7. Single-command onboarding (`auths init` in a repo) - -## Problem - -The current onboarding flow requires multiple manual steps that aren't documented in sequence: - -1. `auths init` — create identity, configure git signing -2. Manually create/update `.auths/allowed_signers` in the repo -3. Manually fetch registry from `~/.auths` into the project repo -4. Manually push `refs/auths/registry` to the remote -5. Manually add `.github/workflows/verify-commits.yml` -6. Manually fix SSH config if `UseKeychain` breaks - -A first-time user hitting any of these steps without guidance will get stuck. - -## Expected behavior - -`auths init` (when run in a git repo) should handle the full happy path: - -1. Create identity + configure signing (already works) -2. Write `.auths/allowed_signers` with the new key (#77) -3. Copy registry into the repo and push to remote (#80) -4. Fix SSH config issues (#74) -5. Optionally scaffold the CI workflow (or print the command to do so) - -Each step should have clear output showing what was done. If any step fails, `auths doctor` (#79) should catch it. 
- -## Non-goals - -- Don't force GitHub Pages setup (that's for the widget, not core signing) -- Don't require network access for the identity creation itself - -## Implementation - -### Design note - -This is an **integration task**. The CLI orchestrates SDK functions and displays results. All business logic lives in SDK workflows (Tasks 1, 3, 6) and auths-id (Task 2). - -**File:** `crates/auths-cli/src/commands/init/mod.rs` - -Update `run_developer_setup()` (L166) to add new steps after identity creation. The current flow has 5 phases. Add to the POST-SETUP phase (L236): - -```rust -// === POST-SETUP (existing) === -offer_shell_completions(interactive, &out)?; -write_allowed_signers(&config)?; // existing: writes ~/.ssh/allowed_signers - -// === NEW STEPS — CLI calls SDK, then prints results === - -// Task 1: SSH config (SDK: SshConfigWorkflow) -if let Some(msg) = SshConfigWorkflow::ensure_config(&ssh_key_path)? { - out.println(&format!("✓ {msg}")); -} - -// Task 3: Repo allowed_signers (SDK: AllowedSigners::sync_repo) -if let Ok(repo_root) = detect_repo_root() { - let report = AllowedSigners::sync_repo(&repo_root)?; - out.println(&format!("✓ Wrote {} signer(s) to .auths/allowed_signers", report.added)); - - // Task 6: Registry sync (SDK: RegistrySyncWorkflow) - let sync = RegistrySyncWorkflow::sync_to_repo(&repo_root)?; - if sync.fetched { out.println("✓ Synced refs/auths/registry into this repo."); } - if sync.pushed { out.println("✓ Pushed refs/auths/registry to origin."); } - if let Some(reason) = sync.skipped_reason { out.println(&format!("⚠ Registry sync: {reason}")); } - - // Task 2: Pre-push hook (auths-id: install_pre_push_hook) - install_pre_push_hook(&repo_root)?; - out.println("✓ Pre-push hook installed"); -} - -// Optional: print CI workflow instructions -out.println("\nTo add CI verification, create .github/workflows/verify-commits.yml:"); -out.println(" See: https://github.com/marketplace/actions/verify-commit-signatures-with-auths"); -``` - -Each 
step should be wrapped in error handling that warns but doesn't fail the overall init (non-fatal). The init should complete even if, e.g., the user has no remote configured. - -**Output:** Each step prints what it did. On failure, print a warning and suggest running `auths doctor` for diagnosis. - -## Context - -End-to-end dogfooding session: took ~2 hours to get from `auths init` to a working verification badge, mostly due to undocumented manual steps between the init and the verification actually working. - -### 8. Pre-rotation key backup nudge - -## Problem - -KERI pre-rotation is one of the strongest features of the identity model — the next rotation key is committed to at inception, so key compromise doesn't mean identity loss. But currently, users are never prompted to back up or even know about their pre-rotation key. - -We shouldn't surface this during onboarding. The `auths init` flow should stay fast and frictionless — like how `ssh-keygen` lets you skip the passphrase and most tutorials tell you to. Security-conscious users set one later. Same principle: don't front-load complexity that blocks adoption. - -## Proposed behavior - -### 1. `auths doctor` — gentle nudge -After identity creation, `auths doctor` should include a check: -> "You have a pre-rotation key but haven't backed it up. Run `auths key backup` to export it." - -Low severity, informational — not a blocker. - -### 2. `auths key backup` / `auths recovery export` — explicit command -A dedicated command to export the pre-rotation key material when the user is ready. Clear warnings about what it is and how to store it safely. - -### 3. Post-rotation prompt -After a user performs their first key rotation (`auths key rotate`), prompt them: -> "You just rotated keys. Your new pre-rotation commitment is set. Run `auths key backup` to save your recovery key." - -This is the natural moment where pre-rotation becomes concrete and meaningful. - -### 4. 
Enterprise/team docs -For organizations that need formal key ceremony procedures, document the pre-rotation backup as part of team onboarding — but keep it out of the individual developer fast path. - -## Implementation - -### `auths doctor` — backup check - -**File:** `crates/auths-sdk/src/workflows/diagnostics.rs` - -Add `check_pre_rotation_backup()` as a private method on `DiagnosticsWorkflow`: - -```rust -fn check_pre_rotation_backup(&self, checks: &mut Vec<CheckResult>) -> Result<(), DiagnosticError> { - let home = auths_core::paths::auths_home() - .map_err(|e| DiagnosticError::ExecutionFailed(e.to_string()))?; - - // Check if a backup marker file exists (e.g., ~/.auths/.backup_exported) - let backup_marker = home.join(".backup_exported"); - if home.exists() && !backup_marker.exists() { - checks.push(CheckResult { - name: "pre_rotation_backup".into(), - passed: true, // informational, not a failure - message: Some( - "You have a pre-rotation key but haven't backed it up. Run `auths key backup` to export it.".into() - ), - config_issues: vec![], - }); - } - Ok(()) -} -``` - -This is informational only — `passed: true` means it won't fail the doctor run, but the message will be displayed. - -### `auths key backup` — new command - -#### SDK — export logic - -**File:** `crates/auths-sdk/src/workflows/key_backup.rs` (new file) - -```rust -pub struct KeyBackupResult { - pub key_material: Vec<u8>, // the exported pre-rotation private key - pub key_hash: String, // the pre-rotation commitment hash -} - -pub struct KeyBackupWorkflow; - -impl KeyBackupWorkflow { - /// Export the pre-rotation key material. Marks backup as completed. - pub fn export() -> Result<KeyBackupResult> { - let home = auths_core::paths::auths_home()?; - // Read the next_key_hash from state.json (the pre-rotation commitment) - // Export the pre-rotation private key from the keychain - // Touch ~/.auths/.backup_exported as marker - todo!() - } - - /// Check if backup has been performed.
- pub fn is_backed_up() -> Result<bool> { - let home = auths_core::paths::auths_home()?; - Ok(home.join(".backup_exported").exists()) - } -} -``` - -Register the module in `crates/auths-sdk/src/workflows/mod.rs`. - -#### CLI — thin wrapper - -**File:** `crates/auths-cli/src/commands/mod.rs` — register new subcommand -**File:** `crates/auths-cli/src/commands/key/backup.rs` — new file - -```rust -pub fn handle_key_backup() -> Result<()> { - let out = Output::new(); - out.println("⚠ This exports your pre-rotation recovery key."); - out.println(" Store it securely (password manager, hardware token, etc.)."); - out.println(" Anyone with this key can recover your identity after key rotation.\n"); - - let result = KeyBackupWorkflow::export()?; - // Display key material to user - out.println(&format!("Pre-rotation key hash: {}", result.key_hash)); - // ... display key_material in a safe format - Ok(()) -} -``` - -### Post-rotation prompt - -**File:** wherever `auths key rotate` is handled — after successful rotation, print: -``` -You just rotated keys. Your new pre-rotation commitment is set. -Run `auths key backup` to save your recovery key. 
-``` - -## Non-goals - -- Don't require backup during `auths init` -- Don't block any workflow on missing backup -- Don't make the user think about key management before they've signed their first commit diff --git a/docs/plans/fe_be/fe_be_cohesion_plan.md b/docs/plans/fe_be/fe_be_cohesion_plan.md new file mode 100644 index 00000000..5cb0b699 --- /dev/null +++ b/docs/plans/fe_be/fe_be_cohesion_plan.md @@ -0,0 +1,773 @@ +# Frontend-Backend Integration Audit + +**Date:** 2026-03-16 +**Repos audited:** +- `auths-site` — Next.js 16 frontend (`apps/web/src/`) +- `auths-cloud/crates/auths-registry-server` — Axum registry API +- `auths-cloud/crates/auths-auth-server` — Axum auth API +- `auths-cloud/crates/auths-cache` — Redis/Git tiered cache +- `auths` — CLI and core Rust crates + +**Scope:** Full-stack trace of all API endpoints the frontend calls, covering contract alignment, auth piping, SQL schema, error contracts, fixture drift, and naming consistency. + +--- + +## Epic 1: Backend Identity Response Shape Does Not Match Frontend Contract + +Summary: The backend `IdentityResponse::Active` nests key data under `key_state.current_keys` (array of KERI strings), but the frontend expects top-level `public_keys` (array of `{ key_id, algorithm, public_key_hex, created_at }` objects). The frontend's `fetchIdentity()` papers over this with a transformation that fabricates `key_id`, hardcodes `algorithm`, and fabricates `created_at`. Meanwhile `fetchBatchIdentities()` skips the transformation entirely, so `public_keys` is `undefined` on batch-fetched identities. Since there's no backwards compatibility concern, fix the backend to return the shape the frontend needs. 
+ +### Task 1: Flatten `public_keys` into `IdentityResponse::Active` on the backend + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/identity.rs` +**Lines:** ~39–91 + +**Problem:** +Backend returns `key_state: { current_keys: ["Dbase64url..."], next_commitment, last_event_said, is_abandoned }`. Frontend wants `public_keys: [{ key_id, algorithm, public_key_hex, created_at }]` and `is_abandoned` at the top level. The frontend fabricates `key_id` as `"key-{index}"`, hardcodes `algorithm` as `"Ed25519"`, and sets `created_at` to `new Date().toISOString()` (a lie). The `fetchBatchIdentities()` path doesn't do this transformation at all, so batch-fetched identities have no `public_keys`. + +**Current code:** +```rust +#[serde(tag = "status")] +pub enum IdentityResponse { + #[serde(rename = "active")] + Active { + did: String, + sequence: u64, + key_state: KeyStateResponse, + platform_claims: Vec, + artifacts: Vec, + trust_tier: Option, + trust_score: Option, + }, + #[serde(rename = "unclaimed")] + Unclaimed { did: String }, +} +``` + +**Fixed code:** +```rust +#[derive(Debug, Serialize, JsonSchema)] +pub struct PublicKeyResponse { + pub key_id: String, + pub algorithm: String, + pub public_key_hex: String, + pub created_at: String, +} + +#[serde(tag = "status")] +pub enum IdentityResponse { + #[serde(rename = "active")] + Active { + did: String, + sequence: u64, + public_keys: Vec<PublicKeyResponse>, + is_abandoned: bool, + platform_claims: Vec, + artifacts: Vec, + trust_tier: Option, + trust_score: Option, + }, + #[serde(rename = "unclaimed")] + Unclaimed { did: String }, +} +``` + +Construct `public_keys` in the handler by mapping `key_state.current_keys` with their index as `key_id`, the algorithm from KERI key prefix (`"D"` → Ed25519), and `created_at` from `identity_state.updated_at`. Keep `KeyStateResponse` as an internal type if needed for KEL operations, but don't expose it in the identity response. 
+ +**Why:** Eliminates the entire frontend transformation layer. Both `fetchIdentity()` and `fetchBatchIdentities()` become simple pass-throughs. Fixes the batch identity bug and removes fabricated data. + +--- + +### Task 2: Remove frontend `key_state → public_keys` transformation + +**Repo:** `auths-site` +**File:** `apps/web/src/lib/api/registry.ts` +**Lines:** ~350–411 + +**Problem:** +After Task 1, the backend returns `public_keys` directly. The manual transformation in `fetchIdentity()` (lines 370–411) is now dead code that would double-wrap the data. + +**Current code:** +```typescript +const keyState = (data.key_state ?? {}) as Record<string, unknown>; +const currentKeys = Array.isArray(keyState.current_keys) + ? (keyState.current_keys as string[]) + : []; +const public_keys = currentKeys.map((key, i) => ({ + key_id: `key-${i}`, + algorithm: 'Ed25519', + public_key_hex: key, + created_at: new Date().toISOString(), +})); +// ... 40 lines of manual reshaping +``` + +**Fixed code:** +```typescript +export async function fetchIdentity( + did: string, + signal?: AbortSignal, +): Promise<IdentityResponse> { + if (USE_FIXTURES) { + const fixture = await resolveIdentityFixture(did); + if (fixture) return fixture; + } + return registryFetch<IdentityResponse>( + `/v1/identities/${encodeURIComponent(did)}`, + undefined, + signal, + ); +} +``` + +Simple pass-through — the backend now returns the exact shape the frontend needs. + +**Why:** ~40 lines of fragile transformation code replaced by a direct cast. No more fabricated fields. 
+ +### Task 1: Assign default capabilities based on role on invite acceptance + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/invite.rs` +**Lines:** ~222–232 + +**Problem:** +`accept_invite` sets `capabilities` to `'[]'` for all members. Compare with `create_org` which gives the founder `["sign_commit", "sign_release", "manage_members", "rotate_keys"]`. An admin who joins via invite gets zero capabilities. + +**Current code:** +```rust +sqlx::query( + "INSERT INTO org_members (org_did, member_did, role, capabilities, log_sequence, granted_at) \ + VALUES ($1, $2, $3, '[]', 0, NOW()) \ + ON CONFLICT (org_did, member_did) DO NOTHING", +) +.bind(&org_did) +.bind(&identity.did) +.bind(&role) +.execute(pool) +.await +``` + +**Fixed code:** +```rust +let capabilities = match role.as_str() { + "admin" => serde_json::json!(["sign_commit", "sign_release", "manage_members", "rotate_keys"]), + "member" => serde_json::json!(["sign_commit", "sign_release"]), + _ => serde_json::json!([]), +}; + +sqlx::query( + "INSERT INTO org_members (org_did, member_did, role, capabilities, log_sequence, granted_at) \ + VALUES ($1, $2, $3, $4, 0, NOW()) \ + ON CONFLICT (org_did, member_did) DO UPDATE SET role = $3, capabilities = $4, revoked_at = NULL", +) +.bind(&org_did) +.bind(&identity.did) +.bind(&role) +.bind(&capabilities) +.execute(pool) +.await +``` + +**Why:** Without this, every member who joins via invite link has no signing or management capabilities. The `ON CONFLICT DO UPDATE` also fixes the silent-drop issue (see Epic 4). + +--- + +## Epic 3: Log Sequence Race Condition in Org Creation + +Summary: `create_org` calculates the next `log_sequence` via `SELECT COALESCE(MAX(log_sequence), -1) + 1` without any locking. Concurrent org creations get duplicate sequences. The `ON CONFLICT (log_sequence) DO NOTHING` silently drops the second org's activity log entry. 
+ +### Task 1: Use a sequence or advisory lock for log_sequence generation in create_org + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/org.rs` +**Lines:** ~390–410 + +**Problem:** +Two concurrent `POST /v1/orgs` requests both read `MAX(log_sequence)` as N, both try to insert N+1, one succeeds and the other's `ON CONFLICT DO NOTHING` silently drops the log entry. The second org's creation and member addition are never recorded in the activity feed. + +**Current code:** +```rust +let create_seq: i64 = sqlx::query_scalar::<_, i64>( + "SELECT COALESCE(MAX(log_sequence), -1) + 1 FROM log_entries", +) +.fetch_one(pool) +.await +.map_err(|e| ApiError::StorageError(format!("log sequence query failed: {e}")))?; + +sqlx::query( + "INSERT INTO log_entries \ + (log_sequence, entry_type, actor_did, summary, metadata, \ + merkle_included, is_genesis_phase, occurred_at) \ + VALUES ($1, 'org_create', $2, $3, $4, TRUE, FALSE, NOW()) \ + ON CONFLICT (log_sequence) DO NOTHING", +) +``` + +**Fixed code:** +```rust +let create_seq: i64 = sqlx::query_scalar::<_, i64>( + "INSERT INTO log_entries \ + (log_sequence, entry_type, actor_did, summary, metadata, \ + merkle_included, is_genesis_phase, occurred_at) \ + VALUES (\ + (SELECT COALESCE(MAX(log_sequence), -1) + 1 FROM log_entries), \ + 'org_create', $1, $2, $3, TRUE, FALSE, NOW()\ + ) \ + RETURNING log_sequence", +) +.bind(&org_did) +.bind(format!("Organization created: {}", &body.display_name)) +.bind(serde_json::json!({"display_name": &body.display_name})) +.fetch_one(pool) +.await +.map_err(|e| ApiError::StorageError(format!("org create log entry failed: {e}")))?; +``` + +Alternatively, use a Postgres `SEQUENCE` (like the fallback path in `register_identity` already uses `genesis_log_seq`). + +**Why:** Without this fix, concurrent org creations silently lose activity log entries — the transparency feed is incomplete. 
+ +--- + +### Task 2: Same race in member_seq calculation + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/org.rs` +**Lines:** ~413, ~429–442 + +**Problem:** +`member_seq = create_seq + 1` assumes no other entry was inserted between the two writes. If the first entry was dropped due to conflict (pre-fix) or another concurrent writer grabbed `create_seq + 1`, the member addition log entry is also lost. + +**Current code:** +```rust +let member_seq = create_seq + 1; + +sqlx::query( + "INSERT INTO log_entries \ + (log_sequence, entry_type, ...) \ + VALUES ($1, 'org_add_member', ...) \ + ON CONFLICT (log_sequence) DO NOTHING", +) +.bind(member_seq) +``` + +**Fixed code:** +Use the same atomic INSERT...RETURNING pattern as Task 1, or wrap both inserts in a transaction. + +**Why:** Same race condition as Task 1, affecting the member addition log entry. + +--- + +## Epic 4: org_members ON CONFLICT DO NOTHING Silently Drops Updates + +Summary: Both `create_org` and `accept_invite` use `ON CONFLICT (org_did, member_did) DO NOTHING` when inserting members. If the member row already exists (e.g., re-invited after revocation, or partial retry), the update is silently lost. + +### Task 1: Change ON CONFLICT to DO UPDATE in create_org admin insert + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/org.rs` +**Lines:** ~416–426 + +**Problem:** +If an org is partially created (identity_state row exists from a previous attempt), the admin member insert silently does nothing on retry. The admin has no capabilities. 
+ +**Current code:** +```rust +sqlx::query( + "INSERT INTO org_members (org_did, member_did, role, capabilities, log_sequence, granted_at) \ + VALUES ($1, $2, 'admin', $3, $4, NOW()) \ + ON CONFLICT (org_did, member_did) DO NOTHING", +) +``` + +**Fixed code:** +```rust +sqlx::query( + "INSERT INTO org_members (org_did, member_did, role, capabilities, log_sequence, granted_at) \ + VALUES ($1, $2, 'admin', $3, $4, NOW()) \ + ON CONFLICT (org_did, member_did) DO UPDATE SET \ + role = EXCLUDED.role, \ + capabilities = EXCLUDED.capabilities, \ + revoked_at = NULL", +) +``` + +**Why:** Without `DO UPDATE`, a partially-failed org creation followed by a retry leaves the admin with stale or missing capabilities. + +--- + +### Task 2: Change ON CONFLICT to DO UPDATE in accept_invite + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/invite.rs` +**Lines:** ~222–232 + +**Problem:** +If an admin revokes a member and then re-invites them, the `accept_invite` handler's `DO NOTHING` means the member row still has `revoked_at` set — they appear revoked despite accepting a new invite. + +**Current code:** +```rust +sqlx::query( + "INSERT INTO org_members (org_did, member_did, role, capabilities, log_sequence, granted_at) \ + VALUES ($1, $2, $3, '[]', 0, NOW()) \ + ON CONFLICT (org_did, member_did) DO NOTHING", +) +``` + +**Fixed code:** +```rust +sqlx::query( + "INSERT INTO org_members (org_did, member_did, role, capabilities, log_sequence, granted_at) \ + VALUES ($1, $2, $3, $4, 0, NOW()) \ + ON CONFLICT (org_did, member_did) DO UPDATE SET \ + role = EXCLUDED.role, \ + capabilities = EXCLUDED.capabilities, \ + revoked_at = NULL, \ + granted_at = EXCLUDED.granted_at", +) +``` + +**Why:** Without `DO UPDATE`, re-invited members stay revoked and new invite data is silently dropped. 
+ +--- + +## Epic 5: Invite Not Found Returns IDENTITY_NOT_FOUND Error Code + +Summary: The `get_invite` and `accept_invite` handlers return `ApiError::IdentityNotFound("invite not found")` when an invite code is invalid, producing HTTP 404 with error code `IDENTITY_NOT_FOUND`. The error code is semantically wrong. + +### Task 1: Add InviteNotFound error variant or use generic NotFound + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/invite.rs` +**Lines:** 127, 204 + +**Problem:** +Frontend checks `error.status === 404` (correct), but the error code `IDENTITY_NOT_FOUND` is confusing for invite endpoints — it suggests an identity issue, not an invalid invite code. + +**Current code:** +```rust +return Err(ApiError::IdentityNotFound("invite not found".into())); +``` + +**Fixed code:** +```rust +// In error.rs, add: +#[error("invite not found: {0}")] +InviteNotFound(String), +// Map to: status 404, code "INVITE_NOT_FOUND", title "Invite Not Found" + +// In invite.rs: +return Err(ApiError::InviteNotFound(code)); +``` + +**Why:** Error code `IDENTITY_NOT_FOUND` on an invite endpoint is misleading in logs and for API consumers who switch on `code`. No backwards compat concern — just add the correct variant. + +--- + +## Epic 6: Fixture Drift — Identity Shape Does Not Match Real API + +Summary: Fixtures return `ActiveIdentity` with `key_id: "key-laptop-001"` and `created_at: "2024-12-01T..."`, but the real backend (after Epic 1 fix) will return `key_id: "key-0"` and `created_at` from `identity_state.updated_at`. Fixtures should match the real API shape exactly so dev-mode and production behave identically. + +### Task 1: Align fixture identity shapes to match the real backend response + +**Repo:** `auths-site` +**File:** `apps/web/src/lib/api/fixtures.ts` +**Lines:** ~35–80 (SOVEREIGN_IDENTITY and other persona definitions) + +**Problem:** +Fixture `key_id` values like `"key-laptop-001"` don't match the backend's `"key-0"` format. 
Any component that renders or keys on `key_id` behaves differently in dev vs prod. + +**Current code (fixture):** +```typescript +const SOVEREIGN_IDENTITY: ActiveIdentity = { + status: 'active', + did: SOVEREIGN_DID, + public_keys: [ + { + key_id: 'key-laptop-001', + algorithm: 'Ed25519', + public_key_hex: 'aB3d...', + created_at: '2024-12-01T10:00:00Z', + }, + // ... + ], + // ... +}; +``` + +**Fixed code:** +```typescript +const SOVEREIGN_IDENTITY: ActiveIdentity = { + status: 'active', + did: SOVEREIGN_DID, + public_keys: [ + { + key_id: 'key-0', + algorithm: 'Ed25519', + public_key_hex: 'DaB3d...', // KERI-encoded key as backend returns + created_at: '2025-01-15T10:00:00Z', + }, + // ... + ], + // ... +}; +``` + +Apply the same `key_id` format (`"key-0"`, `"key-1"`, `"key-2"`) and KERI-encoded `public_key_hex` values across all 6 persona fixtures. + +**Why:** Fixtures and real API should return identical shapes. No backwards compat concern — just make fixtures match reality. + +--- + +## Epic 7: Naming Inconsistency — display_name vs name + +Summary: The org name field is called `display_name` in most endpoints but `name` in `GET /v1/orgs/{did}/status`. Frontend types mirror this split. While not a runtime bug, this makes the API confusing and error-prone for consumers. + +### Task 1: Rename OrgStatusResponse.name to display_name (backend) + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/org.rs` +**Lines:** ~454–460 + +**Problem:** +`OrgStatusResponse` uses `name` while `create_org` response, `get_invite` response, and the DB column all use `display_name`. 
+ +**Current code:** +```rust +pub struct OrgStatusResponse { + pub org_did: String, + pub name: String, + pub member_count: i64, + pub pending_invites: i64, + pub signing_policy_enabled: bool, +} +``` + +**Fixed code:** +```rust +pub struct OrgStatusResponse { + pub org_did: String, + pub display_name: String, + pub member_count: i64, + pub pending_invites: i64, + pub signing_policy_enabled: bool, +} +``` + +Also update the handler (line 519) to set `display_name: name` instead of `name`. + +**Why:** Inconsistent field naming across related endpoints. API consumers must remember that `POST /v1/orgs` returns `display_name` but `GET /v1/orgs/{did}/status` returns `name` for the same concept. + +--- + +### Task 2: Rename OrgStatusResponse.name to display_name (frontend) + +**Repo:** `auths-site` +**File:** `apps/web/src/lib/api/registry.ts` +**Lines:** ~901–907 + +**Problem:** +Frontend type mirrors the backend's inconsistency. + +**Current code:** +```typescript +export interface OrgStatusResponse { + org_did: string; + name: string; + member_count: number; + pending_invites: number; + signing_policy_enabled: boolean; +} +``` + +**Fixed code:** +```typescript +export interface OrgStatusResponse { + org_did: string; + display_name: string; + member_count: number; + pending_invites: number; + signing_policy_enabled: boolean; +} +``` + +**Why:** Must stay in sync with the backend rename from Task 1. + +--- + +--- + +## Epic 8: Auth Server vs Registry Server Error Response Shape Divergence + +Summary: The auth server (`auths-auth-server`) returns errors as `{ error: string, code: string }`, while the registry server (`auths-registry-server`) returns RFC 9457 Problem Details with `{ type: string, title: string, status: number, detail: string, code: string }`. The frontend error parser handles both via a fallback chain, but two different error contracts across the same product's APIs is a maintenance risk. 
+ +### Task 1: Align auth server error response to RFC 9457 + +**Repo:** `auths-cloud` +**File:** `crates/auths-auth-server/src/error.rs` +**Lines:** entire file + +**Problem:** +Auth server's `ErrorResponse` has `{ error, code }` while registry server's has `{ type, title, status, detail, code }`. The frontend must handle both shapes in its error parsing logic (lines 237–252 of `registry.ts`). The fallback chain works today but is fragile — any change to field order or naming could break one path. + +**Current code:** +```rust +pub struct ErrorResponse { + pub error: String, + pub code: String, +} +``` + +**Fixed code:** +```rust +pub struct ErrorResponse { + #[serde(rename = "type")] + pub error_type: String, + pub title: String, + pub status: u16, + pub detail: String, + pub code: String, +} +``` + +And update `IntoResponse` to construct the full RFC 9457 body: + +```rust +let error_type = format!("urn:auths:error:{}", code.to_lowercase().replace('_', "-")); +let body = ErrorResponse { + error_type, + title: title.to_string(), + status: status.as_u16(), + detail: error_message, + code: code.to_string(), +}; +``` + +**Why:** One error contract across both APIs means the frontend can rely on a single parsing path. + +--- + +### Task 2: Simplify frontend error parser to use only RFC 9457 fields + +**Repo:** `auths-site` +**File:** `apps/web/src/lib/api/registry.ts` +**Lines:** ~237–252 (in `registryFetch`), ~958–965 (in `registryFetchAuth`), ~1006–1013 (in `authFetch`) + +**Problem:** +After Task 1, both servers return RFC 9457 `{ type, title, status, detail, code }`. The frontend's fallback chain (`body.detail` → `body.error` → `body.message`) is now unnecessary — `body.detail` is always present. The `body.error` and `body.message` fallbacks are dead code. 
+ +**Current code:** +```typescript +if (typeof body.detail === 'string') { + message = body.detail; + detail = body.detail; +} else if (typeof body.error === 'string') { + message = body.error; +} else if (typeof body.message === 'string') { + message = body.message; +} +if (typeof body.code === 'string') code = body.code; +if (typeof body.type === 'string') errorType = body.type; +``` + +**Fixed code:** +```typescript +message = body.detail ?? res.statusText; +detail = body.detail; +code = body.code; +errorType = body.type; +``` + +**Why:** No backwards compat concern — both servers now return the same shape. Dead fallback paths are confusion risk. + +--- + +## Epic 9: Signing Policy Check Only Tests Row Existence + +Summary: `get_org_status` determines `signing_policy_enabled` via `SELECT EXISTS(SELECT 1 FROM org_policies WHERE org_did = $1)`. If a policy row exists but its `policy_expr` has `require_signing: false`, the status endpoint still reports `signing_policy_enabled: true`. + +### Task 1: Check policy_expr content, not just row existence + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/routes/org.rs` +**Lines:** ~509–515 + +**Problem:** +An org that created a policy then set `require_signing: false` still has a row in `org_policies`. The `EXISTS` check reports signing as enabled when it's actually disabled. 
+ +**Current code:** +```rust +let signing_policy_enabled: bool = sqlx::query_scalar::<_, bool>( + "SELECT EXISTS(SELECT 1 FROM org_policies WHERE org_did = $1)", +) +.bind(&org_did) +.fetch_one(pool) +.await +.map_err(|e| ApiError::StorageError(format!("policy check failed: {e}")))?; +``` + +**Fixed code:** +```rust +let signing_policy_enabled: bool = sqlx::query_scalar::<_, bool>( + "SELECT COALESCE(\ + (SELECT (policy_expr->>'require_signing')::boolean \ + FROM org_policies WHERE org_did = $1), \ + false\ + )", +) +.bind(&org_did) +.fetch_one(pool) +.await +.map_err(|e| ApiError::StorageError(format!("policy check failed: {e}")))?; +``` + +**Why:** Without this, the dashboard shows "Signing policy: On" even when the org explicitly disabled signing. Misleading for org admins. + +--- + +## Epic 10: Rename `did_prefix` Column to `did` Across All Tables + +Summary: The column `did_prefix` stores the complete DID string (`did:keri:E...`), not just the KERI prefix (`E...`). The name is misleading and invites bugs where developers strip the `did:keri:` scheme before querying, getting zero rows. Since there are zero users, rename the column outright instead of documenting the mismatch. + +### Task 1: Rename `did_prefix` → `did` in existing CREATE TABLE migrations + +**Repo:** `auths-cloud` +**Files:** +- `crates/auths-registry-server/migrations/004_public_registry.sql` +- `crates/auths-registry-server/migrations/005_platform_claims_unique.sql` +- `crates/auths-registry-server/migrations/006_identity_state.sql` +- `crates/auths-registry-server/migrations/007_kel_events.sql` +- `crates/auths-registry-server/migrations/014_backfill_log_entries.sql` +- `crates/auths-registry-server/migrations/018_genesis_log_fallback.sql` + +**Problem:** +`did_prefix` is used as a column name in 4 tables: `identity_state` (PK), `public_registrations`, `platform_claims`, `kel_events`, plus referenced in indexes and backfill migrations. 
The name implies it stores just the prefix portion, but it stores the full DID. Database will be rebuilt from scratch so just fix the DDL directly. + +**Current code (004_public_registry.sql):** +```sql +CREATE TABLE public_registrations ( + ... + did_prefix TEXT NOT NULL, + ... +); +CREATE TABLE platform_claims ( + ... + did_prefix TEXT NOT NULL, + ... +); +CREATE INDEX idx_platform_claims_did ON platform_claims (did_prefix); +``` + +**Fixed code:** +```sql +CREATE TABLE public_registrations ( + ... + did TEXT NOT NULL, + ... +); +CREATE TABLE platform_claims ( + ... + did TEXT NOT NULL, + ... +); +CREATE INDEX idx_platform_claims_did ON platform_claims (did); +``` + +Apply the same `did_prefix` → `did` rename in `005_platform_claims_unique.sql` (unique index), `006_identity_state.sql` (PK column), `007_kel_events.sql` (column + composite PK), `014_backfill_log_entries.sql` (SELECT/INSERT references), and `018_genesis_log_fallback.sql` (SELECT/INSERT references). + +**Why:** Clean column name from day one. No migration needed since the DB is rebuilt from scratch. + +--- + +### Task 2: Update all Rust SQL queries referencing `did_prefix` + +**Repo:** `auths-cloud` +**File:** Multiple files in `crates/auths-registry-server/src/` (~40 references) + +**Problem:** +Every SQL string literal that references `did_prefix` must be updated to `did`. 
Affected files: + +| File | Approx references | +|------|-------------------| +| `routes/identity.rs` | 3 | +| `routes/org.rs` | 5 | +| `routes/invite.rs` | 1 | +| `routes/pubkeys.rs` | 2 | +| `middleware/identity_auth.rs` | 2 | +| `services/registration.rs` | 4 | +| `services/proof_verification.rs` | 2 | +| `sequencer/mod.rs` | 6 | +| `sequencer/validation.rs` | 4 | +| `sequencer/auto_provision.rs` | 2 | + +**Current code (example from identity_auth.rs):** +```rust +"SELECT current_keys FROM identity_state WHERE did_prefix = $1 AND is_abandoned = FALSE" +``` + +**Fixed code:** +```rust +"SELECT current_keys FROM identity_state WHERE did = $1 AND is_abandoned = FALSE" +``` + +Apply the same `did_prefix` → `did` rename in every SQL string literal and Rust variable name across all listed files. + +**Why:** SQL queries must match the renamed column or they will fail at runtime. + +--- + +### Task 3: Rename `did_prefix` Rust variables to `did` where they hold full DIDs + +**Repo:** `auths-cloud` +**File:** `crates/auths-registry-server/src/services/registration.rs` and others + +**Problem:** +Rust code uses `did_prefix` as a variable name for the full DID string (e.g., `let did_prefix = format!("did:keri:{}", prefix);`). After the column rename, keeping the variable name `did_prefix` re-introduces the same confusion at the code level. + +**Current code:** +```rust +let did_prefix = format!("did:keri:{}", prefix); +sqlx::query("INSERT INTO public_registrations (did_prefix) VALUES ($1)") + .bind(&did_prefix) +``` + +**Fixed code:** +```rust +let did = format!("did:keri:{}", prefix); +sqlx::query("INSERT INTO public_registrations (did) VALUES ($1)") + .bind(&did) +``` + +**Why:** Variable names should match column names to avoid the same semantic confusion the column rename is fixing. + +--- + +## Appendix: Items Verified as Correct + +These integration points were audited and found to be working correctly: + +1. 
**Error parsing fallback chain**: The frontend reads `body.detail` → `body.error` → `body.message` in that order. This covers both registry (`detail`) and auth (`error`) server error shapes. + +2. **Error type field**: Registry server uses `#[serde(rename = "type")]` on `error_type`, so it serializes as `"type"` in JSON. Frontend reads `body.type`. Match confirmed. + +3. **Auth challenge → verify flow**: Frontend maps `raw.challenge` → `nonce`, constructs CLI command with `--nonce`, auth server verifies against stored nonce. Signature payload reconstruction uses the same canonical JSON. Flow is correct. + +4. **Session token → registry middleware**: Frontend stores UUID token from verify response, sends as `Bearer {uuid}`, registry middleware parses as UUID, validates against auth server's `/auth/status/{uuid}`. Works correctly. + +5. **Anonymous tier promotion**: Middleware promotes DB tier `"anonymous"` to `"individual"` for signed requests (line 242 of identity_auth.rs), preventing the "same string, different semantics" collision between "unauthenticated" and "unpaid" anonymous. + +6. **OrgPolicyResponse for public org page**: `fetchOrgPolicy` uses unauthenticated `registryFetch` and the backend's `get_policy` is indeed a public endpoint (no auth check). No bug. + +7. **Activity feed types**: Frontend `FeedEntry` fields (`log_sequence`, `entry_type`, `actor_did`, `summary`, `metadata`, `occurred_at`, `merkle_included`, `is_genesis_phase`) match backend `log_entries` columns exactly. + +8. **Artifact query/response**: Frontend `ArtifactEntry` fields match backend `ArtifactEntryResponse` fields. + +9. **Namespace types**: Frontend `NamespaceInfo` and `NamespaceBrowseResponse` match backend response shapes. + +10. **Network stats**: Frontend `NetworkStats` fields match backend stats endpoint. 
diff --git a/docs/plans/fe_be/fe_be_piping_prompt.md b/docs/plans/fe_be/fe_be_piping_prompt.md new file mode 100644 index 00000000..540a34a3 --- /dev/null +++ b/docs/plans/fe_be/fe_be_piping_prompt.md @@ -0,0 +1,94 @@ +# Frontend-Backend Integration Audit Prompt + +## Prompt + +You are a systems integration auditor. Your job is to trace data flow across frontend and backend boundaries, find mismatches, and produce a structured remediation plan. + +### What to analyze + +Given a feature or API surface area I point you to, do a full-stack trace covering: + +1. **Contract alignment** — Do the frontend's request payloads, headers, and query params match what the backend handler actually deserializes? Check struct/interface field names, types, optionality, and casing. + +2. **Response shape alignment** — Do the backend's response bodies match the frontend's TypeScript types? Check every field the frontend reads (including in `.then()` chains, destructuring, and template expressions). + +3. **Auth/middleware piping** — For each endpoint: + - What middleware runs before the handler? + - What does the handler extract from request extensions, headers, or state? + - Is every extractor guaranteed to be populated by the middleware chain, or are there conditional paths that skip insertion? + - Can a valid user request reach the handler without the required extensions? + +4. **Semantic collisions in shared values** — Are there values (enum variants, tier names, status strings) that mean different things in different layers? For example, a database column storing `"anonymous"` to mean "unpaid/no-platform-claim" vs middleware logic using `"anonymous"` to mean "unauthenticated" — same string, completely different semantics. Trace each value from where it's written (DB insert, auto-provisioning) through where it's read (middleware, handler guards) and flag any place the same value carries different meaning. + +5. 
**SQL schema vs INSERT alignment** — For every INSERT statement in the codebase, verify that all NOT NULL columns without defaults are included. Also check `ON CONFLICT` clauses: `DO NOTHING` silently drops data when a row exists — if the intent is to update a field (like `display_name`), it must be `DO UPDATE SET`. + +6. **Error contract** — Does the backend's error response shape (`{ error, detail, code, type }` etc.) match what the frontend's error handling parses? Are status codes correct (401 vs 403 vs 422 vs 500)? + +7. **ID format consistency** — When a value like a DID is stored in the database, is it stored with or without its scheme prefix (e.g., `did:keri:E...` vs `E...`)? Verify that every query binding matches the storage convention. A common bug: one layer strips a prefix before querying, but the DB stores the full value, so the lookup silently returns no rows. + +8. **Fixture/mock drift** — Do hardcoded fixtures or mock responses in the frontend match the real backend response shape? Stale fixtures mask type errors at dev time. + +9. **Naming consistency** — Are domain terms consistent across the stack? (e.g., `name` vs `display_name` vs `org_name` for the same concept) + +### What to produce + +Organize findings as **epics**, each containing **subtasks**. Use this exact format: + +```markdown +## Epic: [Short title describing the integration issue category] + +Summary: [1-2 sentences on what's wrong and why it matters] + +### Task 1: [Specific fix description] + +**Repo:** `` +**File:** `` +**Lines:** `` + +**Problem:** +<1-2 sentences> + +**Current code:** +``` +// the problematic snippet +``` + +**Fixed code:** +``` +// the corrected snippet +``` + +**Why:** <1 sentence on what breaks without this fix> + +--- + +### Task 2: ... +``` + +### Rules + +- Every task must specify the repo name and full file path. These are separate repos, not monorepo packages. +- Include the actual code snippet (current + fixed). Do not describe changes abstractly. 
+- Group related fixes into the same epic (e.g., "rename `org_name` to `display_name`" touches frontend types, fixture data, and component rendering — that's one epic with multiple tasks). +- Order epics by severity: runtime errors > silent data bugs > naming inconsistencies > style. +- For middleware/piping issues, include a short diagram of the middleware chain and mark where the break occurs. +- Do not suggest adding features, refactoring for aesthetics, or improving code style. Only flag things that are broken, will break, or silently produce wrong results. +- If a fixture or mock exists, always check it against the real backend response. Drift here means the dev-mode app works but production doesn't. + +### How I'll use this + +I will give you: +- A feature name or endpoint path (e.g., `POST /v1/orgs`, or "the onboarding wizard") +- The repos involved (e.g., "frontend is in `auths-site`, backend is in `auths-cloud`") +- Optionally, a symptom (e.g., "getting 422 on org creation") + +You then read the relevant source files across both repos, trace the full request-response cycle, and produce the epic/task breakdown above. + +--- + +## Example usage from a previous failure + +> Audit the `POST /v1/orgs` endpoint. +> Frontend: `auths-site/apps/web/src/lib/api/registry.ts` and components under `auths-site/apps/web/src/app/try/org/`. +> Backend: `auths-cloud/crates/auths-registry-server/src/routes/org.rs` and middleware in `auths-cloud/crates/auths-registry-server/src/middleware/`. +> Symptom: 422 Unprocessable Entity when creating an org from the wizard. 
diff --git a/docs/plans/launch_cleaning.md b/docs/plans/launch_cleaning.md deleted file mode 100644 index 352809c8..00000000 --- a/docs/plans/launch_cleaning.md +++ /dev/null @@ -1,463 +0,0 @@ -# Auths Codebase Review -**Version:** `0.0.1-rc.13` · **Lines of code:** ~121K · **Crates:** 22 -**Date:** 2026-03-11 - ---- - -## Section 1: Code Quality Review - -### 1.1 Architecture & Layering - -**Verdict: Mostly sound with notable leakage.** - -The SDK-first design is clearly intentional and mostly respected. `auths-sdk/src/workflows/` is where the real logic lives — signing, rotation, provisioning, audit, etc. — and the CLI delegates to those workflows through a dependency-injected `AuthsContext`. The `ExecutableCommand` trait is implemented consistently across every top-level command (including `WhoamiCommand`, `WitnessCommand`, and the newer commands like `RegistryOverrides`). Port traits in `auths-core/src/ports/` and `auths-sdk/src/ports/` are well-designed and free of implementation leakage. - -**Specific issues:** - -- **`Utc::now()` called directly throughout CLI command handlers** (not just entry points). Examples: `commands/id/identity.rs:9787`, `commands/device/pair/common.rs:5774`, `commands/emergency.rs:8193,8341,8355`, `commands/org.rs:14352,14416,14540,14621,14677,14687`. The `ClockProvider` port exists, and there is even a workspace lint banning `Utc::now()` in the SDK layers, but the CLI commands are exempt from this lint and call `Utc::now()` directly. This makes those code paths untestable without real time passing. - -- **`commands/scim.rs:16435` spawns `auths-scim-server` as a child process** without any path validation. The binary name is hardcoded as a bare string. If `auths-scim-server` is not on `PATH`, the error message "Is it installed?" is the only guidance — no path, no suggestion to install via `cargo install`. 
This is a presentation-layer concern that is fine to keep in the CLI, but the spawn has no timeout and `child.wait()` will block indefinitely. - -- **Business logic in CLI commands that should be in the SDK:** `commands/id/migrate.rs` (~1300 lines) contains substantial identity migration orchestration. `commands/device/pair/common.rs` builds pairing state machines inline. These would benefit from extraction into `auths-sdk/src/workflows/`. - ---- - -### 1.2 Type Safety - -**Verdict: Good foundational work; a gap in the domain boundary.** - -Newtypes exist for the right things: -- `DeviceDID(pub String)` — `auths-verifier/src/types.rs:110819` -- `IdentityDID(pub String)` — `auths-verifier/src/types.rs:110720` -- `KeyAlias(String)` — `auths-core:32796` -- `EmailAddress(String)` — `auths-sdk:92064` -- `Ed25519PublicKey([u8; 32])` and `Ed25519Signature([u8; 64])` — `auths-verifier/src/types.rs` - -**Specific issues:** - -- **Raw `String` used at CLI-to-SDK boundaries for domain types.** In `commands/device/pair/common.rs` and throughout `commands/id/`: - ``` - device_did: String (line 4876) - identity_key_alias: String (line 4862) - controller_did: String (line 9204) - ``` - These fields cross module boundaries without being wrapped in their newtypes. This means validation is deferred past the point where it is cheapest to catch. - -- **`pub` fields on `DeviceDID` and `IdentityDID`** — `DeviceDID(pub String)`. The inner `String` should be private, forcing construction through a validated `::new()` or `::parse()` that checks DID format at creation time. - -- **`AgentSigningAdapter` in `auths-cli/src/adapters/agent.rs:608,612`** — stores `key_alias: String` directly instead of `KeyAlias`. The newtype exists but is not used here. 
- ---- - -### 1.3 DRY / Duplication - -**Three independent implementations of `expand_tilde`:** - -| Location | Signature | Error type | -|---|---|---| -| `auths-cli/src/commands/git.rs:8977` | `pub(crate) fn expand_tilde(path: &Path) -> Result<PathBuf>` | `anyhow::Error` | -| `auths-cli/src/commands/witness.rs:19704` | `fn expand_tilde(path: &std::path::Path) -> Result<PathBuf>` | `anyhow::Error` | -| `auths-storage/src/git/config.rs:56667` | `fn expand_tilde(path: &std::path::Path) -> Result<PathBuf, StorageError>` | `StorageError` | - -The first two are byte-for-byte identical. The third is identical in logic but returns a different error type. There is a natural home for this in `auths-core` or a `auths-cli/src/core/fs.rs` utility (the `core/fs.rs` file already exists but does not contain `expand_tilde`). This is a pre-launch cleanup item. - -**Other duplication:** -- `generate_token_b64()` appears to be defined separately in `commands/scim.rs` and potentially other places — warrants audit. -- `mask_url()` in `commands/scim.rs` is a one-off utility with no shared home. -- JSON response helper `JsonResponse` with `.error()` and `.success()` constructors exists alongside raw `serde_json::json!` construction in several command files. - ---- - -### 1.4 Error Handling - -**Verdict: Structurally good; a class of opaque `String` variants undermines the discipline.** - -The `AuthsErrorInfo` trait (providing `error_code()` and `suggestion()`) is implemented on `AgentError`, `TrustError`, `SetupError`, `DeviceError`, and `AttestationError`. The layering of `anyhow` at the CLI and `thiserror` in the SDK/core is respected — no `anyhow` was found leaking into `auths-sdk`, `auths-id`, `auths-core`, or `auths-verifier`.
- -**Specific issues:** - -- **Opaque `String` error variants** that lose structure and prevent `AuthsErrorInfo` from providing specific codes/suggestions: - ``` - auths-core: SecurityError(String), CryptoError(String), SigningFailed(String) - StorageError(String), GitError(String), InvalidInput(String), Proto(String) - (lines 27259–27295) - auths-sdk: StorageError(String), SigningFailed(String) (lines 87350, 88263) - auths-verifier: InvalidInput(String), CryptoError(String) (lines 107893, 107897) - ``` - Each of these should be a structured variant (e.g., `CryptoError { operation: &'static str, source: ring::error::Unspecified }`) so that `error_code()` can return a stable, documentable string. - -- **`MutexError(String)` at `auths-core:27291`** — mutex poisoning is a programming error, not a user-facing error. It should panic or be mapped to an internal error code, not propagate a `String` to the user. - -- **Errors in `commands/audit.rs` and `commands/org.rs` at lines 4105, 10234, 10332, 10627, 10736** construct JSON with `"created_at": chrono::Utc::now()` inline, mixing side-effectful timestamp generation into serialisation paths. - ---- - -### 1.5 Testing - -**Verdict: Unusually thorough for a solo project; two structural gaps.** - -1,389 unit tests found across the workspace. Fakes exist for all core port traits: `FakeConfigStore`, `FakeAttestationSink`, `FakeAttestationSource`, `FakeIdentityStorage`, `FakeRegistryBackend` (in `auths-id`), plus `FakeAgent`, `FakeAllowedSignersStore`, `FakeGit`, `FakeGitConfig`, `FakeSigner` (in `auths-sdk/src/testing/fakes/`). Contract tests in `auths-sdk/src/testing/contracts/` provide a shared test suite for adapters. - -Fuzz targets exist for `attestation_parse`, `did_parse`, and `verify_chain` in `auths-verifier/fuzz/`. - -**Gaps:** - -1. **CLI integration test coverage is thin — only ~50 lines** use `assert_cmd`/`Command::cargo_bin`. 
For a tool whose primary surface is a CLI, there should be end-to-end tests for at minimum `init`, `sign`, `verify`, `doctor`, and `device pair`. The happy path for the core user journey (`init` → `git commit` → `verify HEAD`) does not appear to have an integration test. - -2. **`Utc::now()` called directly in ~35 CLI command sites** (detailed above). Because these are not injected through `ClockProvider`, time-sensitive logic (expiry checks, token freshness, freeze state) cannot be tested deterministically without mocking system time. This is the most significant testability gap. - ---- - -### 1.6 Security - -**Verdict: Strong fundamentals; three areas need hardening before public launch.** - -**What's working well:** -- `Zeroizing` and `ZeroizeOnDrop` are used consistently on `SecureSeed`, `Ed25519Keypair.secret_key_bytes`, and X25519 shared secrets. -- `validate_passphrase()` validates at the boundary. -- The KEL validation chain in `auths-id/src/keri/validate.rs` calls `verify_event_said()`, `verify_sequence()`, `verify_chain_linkage()`, and `verify_event_signature()` — the full chain is cryptographically verified, not just structurally present. Ed25519 signatures are verified with `ring` (`pk.verify()` at line `53210`). -- `auths-verifier` has fuzz targets. - -**Issues requiring attention before launch:** - -~~**P0 — `verify-options` pass-through in `auths-sign`:**~~ -In `bin/sign.rs`, `args.verify_options` (a `Vec` populated from CLI `--verify-option` flags) is passed directly as arguments to `ssh-keygen` via `.arg("-O").arg(opt)` (lines ~198–199 and ~230–231). While `Command::new` with explicit `.arg()` calls is not shell injection, a crafted `-O` value like `no-touch-required` or a future `ssh-keygen` flag could alter verification semantics. These options should be validated against an allowlist of known-safe `verify-time=` patterns before being passed through. This binary is callable from CI environments with attacker-influenced inputs. 
- -~~**P1 — `DeviceDID` and `IdentityDID` inner values are publicly accessible:**~~ -`DeviceDID(pub String)` and `IdentityDID(pub String)` can be constructed with arbitrary strings without parsing. The DID format (`did:keri:...`) is not validated at construction. A malformed DID that bypasses newtypes can reach storage and the KEL resolver. - -~~**P2 — `commands/emergency.rs:8341` writes `frozen_at: chrono::Utc::now()` into a freeze record:**~~ -This timestamp is written to the git ref store and is later used to compute `expires_description()`. Since the clock is not injected, replay or time-skew attacks on freeze state cannot be tested. This is lower severity but relevant for enterprise audit trail integrity. - ---- - -## Section 2: v0.1.0 Launch Readiness - -### Feature Completeness - -The core end-to-end user journey is implemented: -- `auths init` — guided setup with SSH agent integration, key generation, git config. -- `git commit` → signed via `auths-sign` git-config hook. -- `auths verify HEAD` / `auths verify-commit` — full attestation chain verification. -- GitHub Action — working (per your confirmation). -- `auths doctor` — functional with fix suggestions. -- Device pairing — LAN, online relay, and offline QR modes implemented. -- Key rotation — `auths key rotate` with KEL append. -- Revocation — `auths emergency` with freeze/revoke semantics. - -### API Stability - -CLI flags are well-structured and consistently named. JSON output (`--json`) is present on the main verification paths. However, JSON schemas are generated by `xtask/src/gen_schema.rs` and appear to be in flux — the schema for attestation bundles and verification output should be frozen and versioned before public docs point to them. - -SDK public types in `auths-verifier` are the most stable — these are what the WASM widget, Python SDK, and Node SDK consume. `auths-sdk` public types are less stable and should not be documented as stable external API at v0.1.0. 
- -### Overall Readiness Rating: **7 / 10** - -Blockers before shipping: - -| # | Blocker | Severity | -|---|---|---| -| 1 | `verify-options` allowlist in `auths-sign` | P0 security | -| 2 | `DeviceDID`/`IdentityDID` with `pub` inner field — validate at construction | P0 type safety | -| 3 | End-to-end CLI integration test for core journey (`init` → `sign` → `verify`) | P0 launch confidence | -| 4 | `expand_tilde` triplicate — consolidate before adding a 4th | P1 DRY | -| 5 | `Utc::now()` in ~35 CLI command sites — at minimum the expiry and freeze paths need `ClockProvider` injection | P1 testability | -| 6 | Opaque `String` variants in error enums — replace the 10 identified with structured variants | P1 user experience | -| 7 | `commands/scim.rs` child process spawn — add timeout, better error message | P2 | -| 8 | JSON output schema versioning — freeze `--json` schemas before publishing docs | P2 | - -Items 1–3 are hard blocks. Items 4–6 are strong recommendations. Items 7–8 can be post-launch. - ---- - -## Section 3: Valuation & Product Strategy - -### 3.1 Current Fair Valuation - -**Range: $1.5M – $4M pre-money.** - -Rationale: - -- **Technical depth is real and rare.** A solo KERI-based cryptographic identity system in Rust with a working CLI, GitHub Action, WASM verifier, Python SDK, Node SDK, and multi-platform CI pipeline represents 6–12 months of senior engineering time minimum for a team. As a solo build over ~2.5 months with AI assistance, it demonstrates extraordinary execution velocity. -- **No revenue, no production users.** Pre-launch means no ARR multiple can be applied. -- **Comparable early-stage developer security tools** (Sigstore graduated into CNCF with Google/Purdue/Red Hat backing before it had revenue; Keybase raised at ~$10M with a working product but no clear business model). The comparable without institutional backing and without proven adoption is in the $1.5–4M range. 
-- **The KERI bet is a differentiator and a risk.** KERI is technically superior to X.509 for self-sovereign identity, but has almost no mainstream adoption. An investor will price in the education cost. -- **Upside scenario:** If the Hacker News launch generates measurable GitHub stars (>500), active users (>100 in first month), and PR integrations (even 2–3 notable repos), the valuation conversation shifts to $5–8M seed. - ---- - -### 3.2 Path to $50M Valuation - -$50M requires enterprise SaaS revenue or a clear path to it. Here is what needs to be true: - -**Revenue model ($50M = ~$5M ARR at 10x, or ~$3M ARR at 15x for a growing company):** - -- **Free tier:** Open source CLI, GitHub Action, WASM verifier, Python/Node SDKs. This is already the plan and is correct — developer adoption is the top of funnel. -- **Team tier ($29/user/month):** Managed witness infrastructure, org-level policy enforcement, audit log export (SOC 2 evidence), SAML/OIDC SSO, Slack/Teams alerts for signing anomalies. Target: engineering teams of 5–50. -- **Enterprise tier ($80–150/user/month or $50K–200K/year flat):** SCIM provisioning (already built!), self-hosted witness nodes, HSM integration, GitHub Enterprise + GitLab self-hosted connectors, SLA, priority support, CISO-friendly compliance exports (SLSA, SBOM attestation). Target: >500-engineer orgs with compliance mandates. -- **Infrastructure licensing ($500K+/year):** For financial services or defense contractors who cannot use SaaS — air-gapped deployment of the full Auths stack. - -At $3M ARR from 50 enterprise customers averaging $60K/year, with 15x multiple on growing SaaS, $50M is credible. 
- -**Market positioning:** - -| Competitor | Weakness Auths exploits | -|---|---| -| **Sigstore / Cosign** | Certificate-authority dependent (Fulcio), not self-sovereign, Google-run trust root that enterprises cannot audit-own | -| **GitHub's built-in signing** | Tied to GitHub, no portability to GitLab/self-hosted, no org-level enforcement policy, no revocation story | -| **GPG commit signing** | Horrible UX, key distribution nightmare, no rotation story, no device binding | -| **Keybase** (effectively dead) | Centralized servers, no cryptographic revocation, no enterprise features, abandoned | -| **SpruceID / DIDKit** | Broader W3C DID focus, not git-native, no developer UX story | - -Auths' moat is: **git-native storage + KERI-based self-sovereign rotation + developer UX that matches GPG simplicity without the GPG pain.** - -**Adoption metrics an investor needs to see before $50M:** -- 2,000+ GitHub stars -- 500+ weekly active CLI users (telemetry) -- 10+ enterprise pilots (even unpaid) -- 3+ notable open-source repositories with Auths CI verification in their workflows -- Published CVE or security audit report showing the protocol is sound - -**Team composition needed at $50M pitch:** -- 1 technical co-founder / CEO (you) -- 1 additional senior Rust engineer -- 1 developer advocate / growth engineer -- 1 enterprise sales / solutions engineer - -**Technical moat (what's hard to replicate):** -1. KERI-based KEL with cryptographic rotation — competitors would have to rebuild from protocol foundations. -2. The `auths-verifier` WASM module that verifies anywhere with no server dependency — this is genuinely unusual. -3. Git-native storage means zero infrastructure cost for the user in the free tier — no server to maintain. -4. Multi-platform SDK surface (Rust, Python, Node, WASM) built from a single source of truth. 
- ---- - -### 3.3 v1.0.0 Feature Requirements - -These are the features that separate "impressive developer tool" from "enterprise-mandatable infrastructure." - ---- - -~~#### Epic 1: Structured Error Codes and Actionable CLI Output~~ -**Why it matters:** A CISO cannot mandate a tool their engineers curse at. Error messages must be searchable in docs. - -**Scope:** -- Replace all 10+ opaque `String` error variants identified in Section 1.4 with structured enum variants. Each variant must carry typed fields (not strings) and implement `AuthsErrorInfo` with a stable `error_code()` (e.g., `E1042`) and a `suggestion()` string pointing to a docs URL. -- Every error emitted by the CLI must have a unique, stable, documented error code. Format: `[AUTHS-EXXX]` prefixed in terminal output. -- Add a `auths error ` subcommand that prints the full explanation and resolution steps for a given error code — identical to how the Rust compiler handles `rustc --explain E0XXX`. -- Error codes must be included in JSON output (`--json` flag) so CI systems can programmatically handle specific failure modes. - -**Files to touch:** -- `crates/auths-core/src/error.rs` — replace `String` variants -- `crates/auths-sdk/src/ports/agent.rs`, `crates/auths-sdk/src/result.rs` -- `crates/auths-verifier/src/error.rs` -- `crates/auths-cli/src/commands/executable.rs` — add error code formatting to output -- New: `crates/auths-cli/src/commands/explain.rs` -- Docs: `docs/errors/` directory with one `.md` per error code. Look into automating error docs via a similar approach in `auths/crates/xtask/src/gen_docs.rs`, and should add new `{error}.md` files if we add errors to the code - ---- - -~~#### Epic 2: `Utc::now()` Injection — Complete Clock Discipline~~ -**Why it matters:** Every expiry check, freeze check, and token validity check in the CLI is currently untestable. This is a launch blocker for the freeze/revocation path and a pre-condition for writing meaningful integration tests. 
- -**Scope:** -- Audit all `Utc::now()` call sites in `auths-cli` (~35 identified). For each: - - If the call is in an `ExecutableCommand::execute()` entry point, it is acceptable to call `Utc::now()` once and pass the result down. - - If the call is more than one function call deep from the entry point, it must accept a `DateTime` parameter instead. -- Commands requiring specific attention: `emergency.rs` (freeze/revoke timestamps), `device/pair/common.rs` (paired_at, token expiry), `org.rs` (created_at, attestation expiry), `commands/id/identity.rs` (bundle_timestamp), `status.rs`. -- The workspace lint `{ path = "chrono::offset::Utc::now", reason = "inject ClockProvider..." }` already exists but exempts `auths-cli`. Remove the exemption and fix the resulting compilation errors. -- Update fakes: `auths-sdk/src/testing/fakes/` — add `FakeClock` (likely already partially exists given `ClockProvider` is in `auths-verifier/src/clock.rs`; confirm it is exposed in the testing module). - -**Files to touch:** -- `crates/auths-cli/src/commands/emergency.rs` -- `crates/auths-cli/src/commands/device/pair/common.rs` -- `crates/auths-cli/src/commands/org.rs` -- `crates/auths-cli/src/commands/id/identity.rs` -- `crates/auths-cli/src/commands/status.rs` -- `crates/auths-cli/src/commands/id/migrate.rs` -- `crates/auths-cli/src/commands/device/authorization.rs` -- `Workspace.toml` — remove `auths-cli` exemption from `disallowed-methods` lint - ---- - -~~#### Epic 3: CLI Integration Test Suite~~ -**Why it matters:** With only ~50 lines of `assert_cmd` coverage across the entire CLI, you cannot confidently say the install-to-first-commit journey works on a clean machine. This is a launch confidence blocker. - -**Scope:** -- Write integration tests using `assert_cmd` + `tempfile` for the following scenarios. Each test must use a real temporary git repository and a real temporary `$HOME`-equivalent directory (no global state): - - 1. 
**`init` happy path** — `auths init --non-interactive` (or with scripted prompts) produces valid `~/.auths/` layout, sets `git config gpg.ssh.allowedSignersFile`, sets `git config gpg.format ssh`, sets `git config user.signingkey`. - 2. **`sign` + `verify` round trip** — after `init`, make a commit, run `auths verify HEAD`, assert exit 0 and JSON output contains `"status": "verified"`. - 3. **`doctor` detects misconfiguration** — remove `gpg.format` from git config, run `auths doctor`, assert it identifies the missing config and suggests a fix. - 4. **`key rotate` maintains verify** — rotate the signing key, make a new commit, verify both old and new commits pass (KEL replay). - 5. **`emergency revoke` blocks verify** — after revocation, `auths verify HEAD` on a pre-revocation commit must fail with a specific error code. - 6. **`--json` output schema** — assert that `auths verify HEAD --json` output is valid against the published JSON schema. - -- Each test must be runnable in CI without network access (use `FakeWitness` or disable witness requirement). -- Tests must be in `crates/auths-cli/tests/` using `assert_cmd::Command::cargo_bin("auths")`. -- Add a `Makefile` target or `xtask` subcommand `cargo xtask test-integration` that runs these with appropriate environment isolation. - ---- - -~~#### Epic 4: `expand_tilde` Consolidation and `auths-utils` Crate~~ -**Why it matters:** Three implementations of the same function is a maintenance hazard. The right fix is a micro-crate or a shared module that all layers can depend on without introducing circular dependencies. - -**Scope:** -- Create `crates/auths-utils/` as a new zero-dependency crate (no `auths-*` dependencies, only `std` + `dirs`). -- Move `expand_tilde` into `auths-utils/src/path.rs` with signature `pub fn expand_tilde(path: &Path) -> Result` where `ExpandTildeError` is a `thiserror` enum with a single `HomeDirNotFound` variant. 
-- Replace the three existing implementations with `use auths_utils::path::expand_tilde`. -- Also move `mask_url()` (currently inlined in `commands/scim.rs`) into `auths-utils/src/url.rs`. -- Add `auths-utils` as a `workspace` dependency. -- The crate should be `publish = false` — it is an internal utility, not a public API surface. - -**Files to touch:** -- New: `crates/auths-utils/Cargo.toml` (model after other crates), `crates/auths-utils/src/lib.rs`, `crates/auths-utils/src/path.rs`, `crates/auths-utils/src/url.rs`, `README.md` -- `crates/auths-cli/src/commands/git.rs:8977` — delete `expand_tilde`, add `use auths_utils::path::expand_tilde` -- `crates/auths-cli/src/commands/witness.rs:19704` — same -- `crates/auths-storage/src/git/config.rs:56667` — delete `expand_tilde`, adapt error type -- `Cargo.toml` (workspace) — add `auths-utils` member and workspace dependency -- `auths/scripts/releases/2_crates.py` - add `crates/auths-utils` to the correct release ordering - ---- - -~~#### Epic 5: `DeviceDID` and `IdentityDID` Validation at Construction~~ -**Why it matters:** A DID newtype that accepts arbitrary strings provides false safety. Any malformed DID that reaches the KEL resolver or storage layer can cause confusing errors deep in the stack. - -**Scope:** -- Make the inner fields of `DeviceDID` and `IdentityDID` private: change `DeviceDID(pub String)` to `DeviceDID(String)` in `auths-verifier/src/types.rs`. -- Add `DeviceDID::parse(s: &str) -> Result` that validates the string matches the `did:keri:` pattern using the existing KERI prefix parsing logic. -- Add `DeviceDID::as_str(&self) -> &str` and implement `Display` and `FromStr`. -- Do the same for `IdentityDID`. -- Fix all construction sites in `commands/device/pair/common.rs`, `commands/id/identity.rs`, `commands/id/migrate.rs` that currently use `device_did: String` — replace with `DeviceDID::parse()`. 
-- Add unit tests: valid DID parses, invalid format returns `DidParseError`, `Display` round-trips through `FromStr`. - -**Files to touch:** -- `crates/auths-verifier/src/types.rs` — make inner fields private, add `parse()`, `as_str()`, `Display`, `FromStr` -- `crates/auths-cli/src/commands/device/pair/common.rs` — fix construction sites -- `crates/auths-cli/src/commands/id/identity.rs` — fix construction sites -- `crates/auths-cli/src/commands/id/migrate.rs` — fix construction sites -- `crates/auths-cli/src/adapters/agent.rs` — replace `key_alias: String` with `KeyAlias` - ---- - -~~#### Epic 6: `auths-sign` verify-options Allowlist (Security)~~ -**Why it matters:** The `verify-options` flags are passed directly to `ssh-keygen` with no validation. In a GitHub Actions context, these values can originate from PR metadata or environment variables, making this a potential vector for altering verification semantics. - -**Scope:** -- In `crates/auths-cli/src/bin/sign.rs`, before passing `args.verify_options` to `ssh-keygen`, validate each option against an allowlist. -- Permitted options: `verify-time=` (digits only after `=`), `print-pubkeys`, `hashalg=sha256`, `hashalg=sha512`. -- Reject any option not on the allowlist with a specific error: `[AUTHS-E0031] Unsupported verify option '{opt}'. Allowed options: verify-time=`. -- Add unit tests in `bin/sign.rs` for: valid `verify-time=1700000000` passes, `verify-time=abc` fails, an unknown option fails, an injection attempt like `no-touch-required` fails. - -**Files to touch:** -- `crates/auths-cli/src/bin/sign.rs` — add `validate_verify_option(opt: &str) -> Result<()>` and call it before the `ssh-keygen` spawn loop - ---- - -~~#### Epic 7: Enterprise SAML/OIDC Identity Binding~~ -**Why it matters:** A CISO cannot mandate Auths if device identity cannot be tied to the corporate IdP. This is the single most common enterprise procurement question for developer security tools. 
- -**Scope:** -- Extend `commands/device/authorization.rs` (already has an `OAuthDeviceFlowProvider` port) to support SAML 2.0 assertion binding in addition to OIDC. -- The binding must produce an attestation event that records: IdP issuer, subject (employee email), authentication time, and authentication context class (e.g., `PasswordProtectedTransport`, `MultiFactor`). -- This attestation must be stored in the KEL as an `ixn` (interaction) event so it is part of the verifiable identity chain. -- Add `auths id bind-idp --provider ` subcommand. -- The `auths verify` output must include `"idp_binding": { "issuer": "...", "subject": "...", "bound_at": "..." }` in `--json` mode. -- Supported IdPs for v1.0.0: Okta, Azure AD (Entra ID), Google Workspace. Generic SAML as a fourth option. -- The `auths-sdk` must expose `IdpBinding` as a public type so Python/Node SDKs can surface it. - -**Files to touch:** -- `crates/auths-cli/src/commands/id/` — new `bind_idp.rs` -- `crates/auths-core/src/ports/platform.rs` — add `SamlAssertionProvider` port -- `crates/auths-infra-http/` — add Okta, Azure AD, Google Workspace OAuth/SAML adapters -- `crates/auths-sdk/src/workflows/` — new `idp_binding.rs` workflow -- `crates/auths-sdk/src/types.rs` — add `IdpBinding` public type -- `crates/auths-verifier/src/types.rs` — include `idp_binding` in `VerifiedIdentity` -- `crates/auths-verifier/src/verify.rs` — surface binding in verification output - ---- - -#### Epic 8: SLSA Provenance and SBOM Attestation -**Why it matters:** Post-EO 14028 (US Executive Order on Cybersecurity), enterprises must produce software supply chain attestations. Auths is perfectly positioned to be the signing layer for SLSA Level 2+ provenance and SPDX/CycloneDX SBOMs. This is the "why not just use GPG" answer for a CISO. 
- -**Scope:** -- Extend `commands/artifact/` (already exists with `sign`, `verify`, `publish`) to support structured attestation payloads conforming to: - - SLSA Provenance v1.0 (`https://slsa.dev/provenance/v1`) - - SPDX 2.3 SBOM - - CycloneDX 1.5 SBOM - - in-toto attestation framework (link layer) -- `auths artifact sign --slsa-provenance --builder-id --source-uri ` must produce a signed attestation bundle that can be verified by `slsa-verifier` independently. -- `auths artifact verify --policy slsa-level=2` must check that the provenance attestation was signed by a key in the KEL and that the build parameters meet the specified SLSA level. -- Publish attestation bundles to OCI registries (via `oras` or direct OCI push) in addition to git refs, so container image attestations can be stored alongside the image. -- The `auths-verifier` WASM module must be able to verify SLSA attestations without a git repository present (pure in-memory from attestation bundle JSON). - -**Files to touch:** -- `crates/auths-cli/src/commands/artifact/` — extend `sign.rs`, `verify.rs`, `publish.rs` -- `crates/auths-sdk/src/workflows/artifact.rs` — add SLSA/SBOM payload constructors -- `crates/auths-verifier/src/` — add `slsa.rs` for SLSA-specific verification -- New: `crates/auths-oci/` — OCI registry push/pull adapter -- Docs: `docs/attestation/slsa.md`, `docs/attestation/sbom.md` - ---- - -#### Epic 9: GitLab, Bitbucket, and Forgejo Support -**Why it matters:** GitHub Action coverage is in place. But >40% of enterprise git usage is GitLab self-hosted or Bitbucket. Without parity, Auths is a GitHub-only tool in enterprise evaluation. - -**Scope:** -- GitLab CI: provide a `.gitlab-ci.yml` template and a Docker image `ghcr.io/auths-dev/auths-verify:latest` that can be used as a GitLab CI include. Mirrors the GitHub Action interface exactly (same inputs/outputs, same JSON schema). 
-- Bitbucket Pipelines: provide a Bitbucket Pipe (`auths-dev/auths-verify-pipe`) published to the Atlassian Marketplace. -- Forgejo/Gitea: provide an Actions workflow compatible with Forgejo's GitHub Actions runner. -- The `auths-infra-git` crate should abstract over the specific platform — add a `GitPlatform` enum (`GitHub`, `GitLab`, `Bitbucket`, `Forgejo`, `Generic`) and use it to select the correct commit signing hook format and the correct CI template output from `auths git install-hooks`. -- `auths doctor` must detect which CI platform the current repo is configured for and check the appropriate template is installed. - -**Files to touch:** -- `crates/auths-infra-git/src/` — add platform detection -- `crates/auths-cli/src/commands/git.rs` — extend `install-hooks` for multi-platform -- `crates/auths-cli/src/commands/doctor.rs` — add CI platform checks -- New: `.github/actions/verify/` (already exists), `gitlab/`, `bitbucket-pipe/`, `forgejo/` at repo root -- Docs: `docs/ci/gitlab.md`, `docs/ci/bitbucket.md`, `docs/ci/forgejo.md` - ---- - -#### Epic 10: Managed Witness Infrastructure and SLA (Monetisation Layer) -**Why it matters:** The open-source free tier requires no server. The paid tier requires Auths to operate witness infrastructure. Without this, there is no business. - -**Scope:** -- Operate `witness.auths.dev` as a high-availability witness service. Architecture: 3-node cluster, active-passive with automatic failover, 99.9% uptime SLA for Team tier, 99.99% for Enterprise. -- `auths witness` command gains `--use-managed` flag that registers with `witness.auths.dev` using the OAuth device flow, receives an API token, and stores it in config. -- Managed witness events are timestamped with an RFC 3161 trusted timestamp (e.g., from a public TSA like `timestamp.digicert.com`) so that witness events are independently verifiable even if the Auths service goes offline. 
-- Add `auths witness status` that shows the current witness configuration and health of the configured witness endpoint. -- Billing integration: Team tier allows up to N witness events/month (start with 10,000), Enterprise is unlimited. Over-limit requests receive a `[AUTHS-E4029] Witness quota exceeded` error with a link to upgrade. -- The witness protocol must be documented publicly so customers can self-host — this is the open-core safety valve that prevents vendor lock-in concerns blocking enterprise adoption. - -**Files to touch:** -- `crates/auths-cli/src/commands/witness.rs` — add `--use-managed`, `status` subcommand -- `crates/auths-core/src/ports/` — add `ManagedWitnessPort` with quota error variant -- `crates/auths-infra-http/src/` — add managed witness HTTP adapter with auth header injection -- New: `services/witness-server/` — the server-side component (separate repo or workspace member) -- Docs: `docs/witness/self-hosting.md`, `docs/witness/managed.md` - ---- - -## Appendix: Pre-Launch Checklist - -| Item | Status | -|---|---| -| `auths init` → `sign` → `verify` end-to-end works | ✅ Implemented | -| GitHub Action CI verification | ✅ Confirmed working | -| WASM verifier (NPM widget) | ✅ Confirmed working | -| Python SDK | ✅ Confirmed working | -| Node.js SDK | ✅ Confirmed working | -| Documentation (quickstart → CI) | ✅ Confirmed complete | -| `auths doctor` | ✅ Functional | -| Device pairing (LAN + online + offline) | ✅ Implemented | -| Key rotation with KEL append | ✅ Implemented | -| Revocation (`emergency`) | ✅ Implemented | -| `verify-options` allowlist in `auths-sign` | ❌ Epic 6 — P0 | -| `DeviceDID`/`IdentityDID` private inner fields | ❌ Epic 5 — P0 | -| CLI integration test suite (init→sign→verify) | ❌ Epic 3 — P0 | -| `expand_tilde` triplicate consolidated | ❌ Epic 4 — P1 | -| `Utc::now()` injection in CLI commands | ❌ Epic 2 — P1 | -| Structured error codes + `auths error ` | ❌ Epic 1 — P1 | -| JSON schema versioned and frozen | ⚠️ Needs 
freeze before docs publish | -| SCIM server spawn timeout + error message | ⚠️ Low priority | diff --git a/docs/plans/typing_cleaning.md b/docs/plans/typing_cleaning.md deleted file mode 100644 index 0ec1c059..00000000 --- a/docs/plans/typing_cleaning.md +++ /dev/null @@ -1,353 +0,0 @@ -# Typing Cleaning: Strong Newtypes for Cryptographic String Fields - -## Context - -Many cryptographic and identity fields throughout the codebase are plain `String` where they should be strongly typed newtypes. The fn-62 epic already addresses `IdentityDID` and `DeviceDID` validation. This plan covers **everything else**: commit OIDs, public keys (hex), policy IDs, and consistent adoption of existing newtypes (`ResourceId`, `Prefix`, `Said`). - -## Design Decisions - -### Two newtype tiers (follow existing codebase convention) - -| Tier | Pattern | Serde | Constructor | Example | -|------|---------|-------|-------------|---------| -| **Unvalidated** | `From<String>`, `Deref` | `#[serde(transparent)]` | `new()` | `ResourceId`, `PolicyId` | -| **Validated** | `TryFrom<String>`, `AsRef<str>` | `#[serde(try_from = "String")]` | `parse()` + `new_unchecked()` | `Capability`, `IdentityDID`, `CommitOid`, `PublicKeyHex` | - -Validated types must NOT implement `From<String>` or `Deref` — these defeat type safety by allowing construction/coercion that bypasses validation. - -### SQL boundary (sqlite crate, not rusqlite) - -The codebase uses the `sqlite` crate (v0.32) with `BindableWithIndex`/`ReadableWithIndex` traits — NOT `rusqlite`. No `ToSql`/`FromSql` impls needed. Binding uses `stmt.bind((idx, value.as_str()))`, reading uses `stmt.read::<String>(idx)` then wraps with `new_unchecked()` (trust the DB — data was validated on write). - -### FFI boundary (unchanged) - -Both `packages/auths-python` (PyO3) and `packages/auths-node` (napi-rs) keep `String` fields at the FFI boundary. Conversion via `.to_string()` or `.as_str().to_owned()`. No wrapper impls needed. Python type stubs and Node type definitions remain unchanged. 
- -### GitRef: reuse existing type - -A `GitRef` type already exists at `crates/auths-id/src/storage/layout.rs:22-71` (unvalidated, with `Deref`, `Display`, `From<String>`, `join()`). Rather than create a competing type in auths-verifier, reuse the existing one by importing it where needed. If validation (`refs/` prefix check) is desired later, add it to the existing type. - -### Excluded from scope - -- **`IdentityEvent.previous_hash`**: This is a SHA-256 content hash of a commit OID string, NOT a commit OID itself. It stays as `String` (or gets its own `EventChainHash` type in a future epic). - -**`PairingResponse.device_x25519_pubkey`, `device_signing_pubkey`, `signature`**: These are base64url-encoded, NOT hex-encoded. They cannot be `PublicKeyHex` or `SignatureHex`. They stay as `String` (or get a `Base64UrlKey`/`Base64UrlSignature` type in a future epic). - -**KERI event fields** (`k: Vec<String>`, `n: Vec<String>`, `x: String`): Base64url CESR keys, tightly coupled to wire format. Defer to a future CESR typing epic. - -**`IndexedIdentity.current_keys: Vec<String>`**: Base64url KERI keys, same encoding concern. - -**`ThresholdPolicy.signers: Vec<String>`**: These are DID strings but mixed `IdentityDID`/`DeviceDID` — needs clarification on which DID type. Defer to fn-62 extension. 
- ---- - -## Existing Newtypes (Already Done) - -These live in `crates/auths-verifier/src/` and follow established patterns: - -| Type | Inner | Location | Tier | -|------|-------|----------|------| -| `ResourceId(String)` | `String` | `core.rs:46` | Unvalidated | -| `IdentityDID(String)` | `String` | `types.rs:147` | Validated | -| `DeviceDID(String)` | `String` | `types.rs:303` | Validated | -| `Prefix(String)` | `String` | `keri.rs:66` | Validated | -| `Said(String)` | `String` | `keri.rs:163` | Validated | -| `Ed25519PublicKey([u8; 32])` | `[u8; 32]` | `core.rs:181` | Validated (byte-array) | -| `Ed25519Signature([u8; 64])` | `[u8; 64]` | `core.rs:283` | Validated (byte-array) | - -**Shared conventions:** -- No macros — all hand-written -- All conditionally derive `schemars::JsonSchema` with `#[cfg_attr(feature = "schema", ...)]` -- Error types use `thiserror::Error` -- `#[repr(transparent)]` on validated string newtypes - ---- - -## New Newtypes to Create - -### 1. `CommitOid(String)` — Git commit hash (Validated) - -**Where to define:** `crates/auths-verifier/src/core.rs` - -**Validation:** 40-char lowercase hex (SHA-1) or 64-char (SHA-256). Use `parse()` + `new_unchecked()`. - -**Serde:** `#[serde(try_from = "String")]` — rejects malformed OIDs on deserialization. - -**Traits:** `Debug, Clone, PartialEq, Eq, Hash, Serialize` + `Display`, `AsRef<str>`, `TryFrom<String>`, `TryFrom<&str>`, `FromStr`, `From<CommitOid> for String` - -**No `Default`** — an empty `CommitOid` is semantically wrong. - -**git2 interop:** Cannot implement `From<git2::Oid>` in auths-verifier (no git2 dep). Use `CommitOid::new_unchecked(oid.to_string())` at call sites, following the `oid_to_event_hash` pattern at `crates/auths-id/src/witness.rs:39-62`. 
- -**Sites to update (3):** - -| File | Field/Param | Current Type | -|------|-------------|-------------| -| `crates/auths-index/src/index.rs:20` | `IndexedAttestation.commit_oid` | `String` | -| `crates/auths-index/src/schema.rs:9` | DB column | `TEXT` (keep as TEXT, convert at boundary) | -| `crates/auths-id/src/keri/cache.rs:42,247` | `CacheEntry.last_commit_oid` | `String` | - -### 2. `PublicKeyHex(String)` — Hex-encoded Ed25519 public key (Validated) - -**Where to define:** `crates/auths-verifier/src/core.rs` - -**Validation:** 64-char hex string (32 bytes) — validate with `hex::decode` and length check. - -**Serde:** `#[serde(try_from = "String")]` - -**Conversion:** `pub fn to_ed25519(&self) -> Result<Ed25519PublicKey, PublicKeyHexError>` - -**Sites to update (~12, excluding base64url-encoded fields):** - -| File | Field/Param | Current Type | -|------|-------------|-------------| -| `crates/auths-verifier/src/core.rs:667` | `IdentityBundle.public_key_hex` | `String` | -| `crates/auths-core/src/trust/roots_file.rs:47` | `TrustedRoot.public_key_hex` | `String` | -| `crates/auths-core/src/trust/pinned.rs:28` | `PinnedIdentity.public_key_hex` | `String` | -| `crates/auths-core/src/testing/builder.rs:69` | builder field | `String` | -| `crates/auths-cli/src/commands/device/authorization.rs:31` | `public_key` | `String` | -| `crates/auths-cli/src/commands/trust.rs:99` | `public_key_hex` | `String` | -| `crates/auths-sdk/src/workflows/org.rs:204,240,256,273` | org admin/member keys | `String` | -| `crates/auths-sdk/src/workflows/mcp.rs:16` | `root_public_key` | `String` | - -### 3. `PolicyId(String)` — Policy identifier (Unvalidated) - -**Where to define:** `crates/auths-verifier/src/core.rs` - -**Serde:** `#[serde(transparent)]` — opaque identifier, no validation needed. 
- -**Traits:** Follow `ResourceId` pattern — `From<String>`, `From<&str>`, `Deref`, `Display` - -**Sites to update (2):** - -| File | Field/Param | Current Type | -|------|-------------|-------------| -| `crates/auths-verifier/src/core.rs:987` | `ThresholdPolicy.policy_id` | `String` | -| `crates/auths-verifier/src/core.rs:1000` | constructor param | `String` | - ---- - -## Existing Newtypes: Inconsistent Adoption - -These types already exist but aren't used everywhere they should be. - -### `ResourceId` — exists at `core.rs:46`, used inconsistently - -| File | Field/Param | Current Type | Should Be | -|------|-------------|-------------|-----------| -| `crates/auths-index/src/index.rs:12` | `IndexedAttestation.rid` | `String` | `ResourceId` | -| `crates/auths-index/src/index.rs:51` | `IndexedOrgMember.rid` | `String` | `ResourceId` | -| `crates/auths-id/src/identity/helpers.rs:28` | `IdentityHelper.rid` | `String` | `ResourceId` | -| `crates/auths-sdk/src/workflows/artifact.rs:28` | `ArtifactSigningRequest.attestation_rid` | `String` | `ResourceId` | -| `crates/auths-sdk/src/signing.rs:193` | `SignedAttestation.rid` | `String` | `ResourceId` | - -### `Prefix` — exists at `keri.rs:66`, used inconsistently - -| File | Field/Param | Current Type | Should Be | -|------|-------------|-------------|-----------| -| `crates/auths-index/src/index.rs:35` | `IndexedIdentity.prefix` | `String` | `Prefix` | -| `crates/auths-index/src/index.rs:48` | `IndexedOrgMember.org_prefix` | `String` | `Prefix` | - -### `Said` — exists at `keri.rs:163`, used inconsistently - -| File | Field/Param | Current Type | Should Be | -|------|-------------|-------------|-----------| -| `crates/auths-index/src/index.rs:38` | `IndexedIdentity.tip_said` | `String` | `Said` | - -### `IdentityDID` / `DeviceDID` — partially addressed by fn-62 - -Additional sites beyond fn-62 scope (core/SDK layer, not CLI boundary): - -| File | Field/Param | Current Type | Should Be | 
-|------|-------------|-------------|-----------| -| `crates/auths-index/src/index.rs:14,50` | `issuer_did` | `String` | `IdentityDID` | -| `crates/auths-index/src/index.rs:16,49` | `device_did`, `member_did` | `String` | `DeviceDID` | -| `crates/auths-id/src/keri/cache.rs:36,241` | `CacheEntry.did` | `String` | `IdentityDID` | -| `crates/auths-id/src/keri/resolve.rs:44` | `ResolveResult.did` | `String` | `IdentityDID` | -| `crates/auths-id/src/identity/helpers.rs:27` | `IdentityHelper.did` | `String` | `IdentityDID` | -| `crates/auths-sdk/src/types.rs:589` | `DeviceAttestation.device_did` | `String` | `DeviceDID` | -| `crates/auths-sdk/src/workflows/artifact.rs:32` | `ArtifactSigningResult.signer_did` | `String` | `IdentityDID` | -| `crates/auths-sdk/src/workflows/org.rs:196,236,252` | org workflow DIDs | `String` | `IdentityDID`/`DeviceDID` | -| `crates/auths-core/src/witness/server.rs:60,114` | `WitnessConfig.witness_did` | `String` | `DeviceDID` | -| `crates/auths-pairing-protocol/src/response.rs:20` | `PairingResponse.device_did` | `String` | `DeviceDID` | -| `crates/auths-pairing-protocol/src/types.rs:57,81` | pairing DIDs | `String` | `DeviceDID` | - ---- - -## Cascade to FFI Packages - -### Impact Assessment: **Minimal** - -Both `packages/auths-python` (PyO3) and `packages/auths-node` (napi-rs) use a consistent adapter pattern: internal Rust newtypes are converted to `String` at the FFI boundary via `.to_string()` or `hex::encode()`. The FFI-exposed structs remain `String` fields. - -**No wrapper impls needed.** As long as newtypes implement `Display`, the existing `.to_string()` calls continue to work. - -### auths-python (PyO3) - -Binding structs use `#[pyclass]` with `#[pyo3(get)]` on `String` fields. Python consumers receive `str`. 
- -**Files with conversion points (`.to_string()` calls that may reference changed types):** -- `packages/auths-python/src/identity.rs` — ~6 conversion sites -- `packages/auths-python/src/commit_sign.rs` — signature/DID conversions -- `packages/auths-python/src/attestation_query.rs` — rid, DID conversions -- `packages/auths-python/src/org.rs` — org prefix, DID conversions -- `packages/auths-python/src/artifact_sign.rs` — rid conversions - -**Type stubs (manually maintained, no change needed):** -- `packages/auths-python/python/auths/__init__.pyi` — fields remain `str` - -### auths-node (napi-rs) - -Binding structs use `#[napi(object)]` with `String` fields. JavaScript consumers receive `string`. - -**Files with conversion points:** -- `packages/auths-node/src/identity.rs` — ~8 conversion sites -- `packages/auths-node/src/commit_sign.rs` — signature/DID conversions -- `packages/auths-node/src/artifact.rs` — rid, digest conversions -- `packages/auths-node/src/org.rs` — org prefix, DID conversions -- `packages/auths-node/src/types.rs` — defines all `Napi*` structs - -**Type definitions (auto-generated, no change needed):** -- `packages/auths-node/index.d.ts` — regenerated by napi-rs build - -### What Changes in FFI Code - -For each conversion site, the change is mechanical: - -```rust -// Before (if inner field was public): -did: result.did.0, -// After (Display impl handles it): -did: result.did.to_string(), -// Or for owned values: -did: result.did.into_inner(), -``` - -### auths-mobile-ffi (Swift/Kotlin) - -- `crates/auths-mobile-ffi/src/lib.rs` — ~15 DID and public_key_hex fields as `String` -- Same pattern: convert via `.to_string()` at boundary, FFI types remain `String` - ---- - -## Execution Plan - -### Phase 1: Define New Newtypes (additive, non-breaking) - -**Task: Create `CommitOid`, `PublicKeyHex`, `PolicyId` in auths-verifier** - -File: `crates/auths-verifier/src/core.rs` - -For **validated types** (`CommitOid`, `PublicKeyHex`), follow the `Capability` 
pattern: -1. Define struct with `#[serde(try_from = "String")]` and `#[repr(transparent)]` -2. Derive `Debug, Clone, PartialEq, Eq, Hash, Serialize` -3. Implement `parse()` + `new_unchecked()` + `as_str()` + `into_inner()` -4. Implement `Display`, `AsRef<str>`, `TryFrom<String>`, `TryFrom<&str>`, `FromStr`, `From<T> for String` -5. Define error type (e.g. `CommitOidError`, `PublicKeyHexError`) with `thiserror::Error` - -For **unvalidated types** (`PolicyId`), follow the `ResourceId` pattern: -1. Define struct with `#[serde(transparent)]` -2. Derive `Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize` -3. Implement `Deref`, `Display`, `From<String>`, `From<&str>` -4. Add `new()`, `as_str()` methods - -Re-export all from `crates/auths-verifier/src/lib.rs`. - -Add tests in `crates/auths-verifier/tests/cases/newtypes.rs`. - -### Phase 2: Adopt Existing Newtypes in auths-index (`ResourceId`, `Prefix`, `Said`) - -**Prerequisite:** Add `auths-verifier` to `auths-index/Cargo.toml` dependencies (Layer 4 → Layer 1, architecturally sound). - -1. Replace `rid: String` with `rid: ResourceId` in `IndexedAttestation`, `IndexedOrgMember` -2. Replace `prefix: String` with `prefix: Prefix` in `IndexedIdentity`, `IndexedOrgMember` -3. Replace `tip_said: String` with `tip_said: Said` in `IndexedIdentity` -4. Update SQL write sites: `.as_str()` on newtypes for `stmt.bind()` -5. Update SQL read sites: wrap `stmt.read::<String>()` results with `<Newtype>::new_unchecked()` (trust the DB) -6. Adopt `ResourceId` in `auths-sdk` (`ArtifactSigningRequest.attestation_rid`, `SignedAttestation.rid`) and `auths-id` (`IdentityHelper.rid`) - -### Phase 3: Thread `CommitOid` Through Codebase - -1. Replace `commit_oid: String` with `commit_oid: CommitOid` in `IndexedAttestation` -2. Replace `last_commit_oid: String` with `last_commit_oid: CommitOid` in `CacheEntry` / `CachedKelState` -3. Update SQL boundary code (same pattern as Phase 2) -4. 
Update git2 conversion sites: `CommitOid::new_unchecked(oid.to_string())` at `auths-index/src/rebuild.rs:124`, `auths-id/src/keri/cache.rs:110`, `auths-id/src/storage/indexed.rs:94` - -### Phase 4: Thread `PublicKeyHex` Through Codebase - -1. Replace `public_key_hex: String` with `public_key_hex: PublicKeyHex` in: - - `IdentityBundle` (auths-verifier) - - `TrustedRoot`, `PinnedIdentity` (auths-core) - - Org workflow structs (auths-sdk) - - MCP config (auths-sdk) -2. Update builder patterns in `auths-core/src/testing/builder.rs` -3. Update CLI display code -4. Exclude `PairingResponse` fields (base64url, not hex) and `auths-mobile-ffi` fields (base64url) - -### Phase 5: Thread `PolicyId` + DID types beyond fn-62 - -1. `PolicyId` in `ThresholdPolicy` (2 sites, auths-verifier internal) -2. After fn-62 completes, extend `IdentityDID`/`DeviceDID` adoption to: - - `auths-index` — all DID fields - - `auths-id` — cache, resolve, helpers - - `auths-sdk` — workflows, types - - `auths-pairing-protocol` — response and types - - `auths-core` — witness config - -### Phase 6: FFI Package Updates - -After core types are threaded: -1. Update `packages/auths-python/src/*.rs` — change any `.0` field access to `.to_string()` or `.as_str().to_owned()` -2. Update `packages/auths-node/src/*.rs` — same pattern -3. Update `crates/auths-mobile-ffi/src/lib.rs` — same pattern -4. Verify Python type stubs unchanged -5. 
Verify Node type definitions regenerate correctly - ---- - -## Summary Table - -| Newtype | Tier | Define In | Sites | Phase | -|---------|------|-----------|-------|-------| -| `CommitOid` | Validated | auths-verifier | 3 | 1, 3 | -| `PublicKeyHex` | Validated | auths-verifier | ~12 | 1, 4 | -| `PolicyId` | Unvalidated | auths-verifier | 2 | 1, 5 | -| `ResourceId` (adopt) | — | already exists | 5 | 2 | -| `Prefix` (adopt) | — | already exists | 2 | 2 | -| `Said` (adopt) | — | already exists | 1 | 2 | -| `IdentityDID` (extend) | — | fn-62 | ~15 | 5 | -| `DeviceDID` (extend) | — | fn-62 | ~10 | 5 | - -**Total: ~50 String fields across ~30 files** (reduced from original ~89 after excluding base64url fields, KERI event fields, and properly scoped exclusions) - ---- - -## Verification Commands - -```bash -# After each phase: -cargo build --workspace -cargo nextest run --workspace -cargo clippy --all-targets --all-features -- -D warnings -cargo test --all --doc - -# WASM check (auths-verifier only): -cd crates/auths-verifier && cargo check --target wasm32-unknown-unknown --no-default-features --features wasm - -# FFI package checks: -cd packages/auths-node && npm run build -cd packages/auths-python && maturin develop -``` - -## Risks & Mitigations - -1. **Serde backward compatibility** — Validated types use `#[serde(try_from = "String")]` which enforces format on deserialization. Risk: old cached files (e.g., `CachedKelState`) with malformed values fail to load. Mitigation: audit existing cached data before switching; use `new_unchecked()` in cache deserialization if needed. -2. **SQL boundary** — Uses `sqlite` crate (NOT `rusqlite`). No trait impls needed. Bind via `.as_str()`, read via `String` + `new_unchecked()` wrapper. -3. **git2 interop** — Cannot implement `From` in auths-verifier (no git2 dep, orphan rule). Use `CommitOid::new_unchecked(oid.to_string())` at call sites. -4. **auths-index dependency** — Must add `auths-verifier` to `auths-index/Cargo.toml`. 
Architecturally sound (Layer 4 → Layer 1). -5. **WASM compilation** — All new types in auths-verifier must compile for `wasm32-unknown-unknown`. The `hex` crate is already a dependency, so validation logic is fine. - -## Deferred Items - -- `EventChainHash(String)` for `IdentityEvent.previous_hash` (SHA-256 content hash, not commit OID) -- `Base64UrlKey(String)` for `PairingResponse` X25519/signing keys -- `Base64UrlSignature(String)` for `PairingResponse.signature` -- `CesrKey(String)` for KERI event `k`/`n`/`x` fields -- `SignatureHex(String)` — no confirmed hex-encoded signature String fields after excluding base64url ones -- `GitRef` type promotion from `auths-id` to `auths-verifier` (if needed for cross-crate use) diff --git a/docs/plans/wiring/architecture_output.md b/docs/plans/wiring/architecture_output.md new file mode 100644 index 00000000..281e37b4 --- /dev/null +++ b/docs/plans/wiring/architecture_output.md @@ -0,0 +1,500 @@ +# Architecture Output + +## 1 Architecture Summary + +### Product + +Auths is a **decentralized identity and code signing platform** built on KERI (Key Event Receipt Infrastructure). It enables developers and organizations to cryptographically sign Git commits, manage device identities, delegate scoped authority to AI agents, and verify trust chains — all without a centralized certificate authority. + +### System Overview + +The codebase is a **polyglot monorepo** spanning Rust, TypeScript, Swift, Kotlin, Python, and Ruby across 12+ sub-projects. 
+ +| Layer | Technology | Purpose | +|-------|-----------|---------| +| **Core Backend** | Rust (27-crate workspace, Axum 0.8) | Identity, crypto, signing, verification | +| **Cloud Services** | Rust (Axum, 13 crates) | Registry, auth, chat relay, OIDC bridge, SCIM, witness, monitor | +| **Web Frontend** | Next.js 16 + React 19 + TypeScript | Explorer, network stats, org management, docs | +| **Verify Widget** | TypeScript Web Component (Shadow DOM) | Embeddable verification badge for any webpage | +| **iOS/macOS Chat** | SwiftUI + Rust FFI (UniFFI) | E2E encrypted messaging with KERI identities | +| **Mobile App** | SwiftUI (iOS) + Jetpack Compose (Android) + Rust FFI | Identity management, device pairing, emergency controls | +| **GitHub App** | Rust (Axum) | Webhook receiver for commit verification | +| **GitHub Action** | TypeScript (Node.js 20) | CI verification of commit signatures | +| **Agent Demo** | Python | Delegation/verification simulation | +| **CLI** | Rust (clap) | Developer-facing signing/verification tool | +| **Homebrew Tap** | Ruby | CLI distribution for macOS/Linux | + +### Architecture Principles + +- **Layered Rust crates** (6 levels): Crypto → Verification → Core → Domain → Services → Presentation +- **Port/adapter pattern**: All I/O trait-injected, no reverse dependencies +- **Git-first storage**: Identity data stored as Git refs (`refs/auths/`, `refs/keri/`) +- **PostgreSQL hot path**: Cloud registry uses Postgres for fast reads, Git for audit trail +- **Edge verification**: Clients can verify locally without contacting a server (WASM, CLI) +- **E2E encryption**: Chat uses X25519 ECDH + AES-256-GCM; server sees only ciphertext + +### Key Services (Ports) + +| Service | Default Port | Role | +|---------|-------------|------| +| auths-registry-server | 3000 | Central identity registry API | +| auths-auth-server | 3001 | Challenge/response authentication | +| auths-chat-server | 3002 | Encrypted message relay + WebSocket | +| 
auths-oidc-bridge | 3300 | KERI attestation → cloud JWT exchange | +| auths-pairing-daemon | (LAN) | mDNS device pairing | +| auths-mcp-server | 8080 | MCP tool server (JWT-gated) | +| auths-witness | 8080 | Transparency log witness | +| auths-scim-server | configurable | SCIM 2.0 agent provisioning | +| auths-github-app | 3001 | GitHub webhook handler | + +--- + +## 2 Frontend → API Map + +### auths-site (Next.js) → Registry API + +| Component | Endpoint | Method | Feature | +|-----------|----------|--------|---------| +| `explorer-client.tsx` | `/v1/identities/{did}` | GET | DID/identity lookup | +| `explorer-client.tsx` | `/v1/identities/batch` | POST | Batch identity resolution (N+1 mitigation) | +| `explorer-client.tsx` | `/v1/artifacts` | GET | Artifact/package search (cursor pagination) | +| `explorer-client.tsx` | `/v1/pubkeys` | GET | Public keys + platform claims | +| `explorer-client.tsx` | `/v1/activity/feed` | GET | Unified activity feed (transparency log) | +| `explorer-client.tsx` | `/v1/namespaces` | GET | Namespace browsing | +| `explorer-client.tsx` | `/v1/identities/search` | GET | Identity search | +| Network page | `/v1/stats` | GET | Network statistics | +| Org management (`/try/org/`) | `/v1/orgs/{orgDid}/policy` | GET | Org policy retrieval | +| Org management (`/try/org/`) | `/v1/orgs/{orgDid}/status` | GET | Org status | +| Org management (`/try/org/`) | `/v1/orgs/{orgDid}/invite` | POST | Org invite creation | +| `challenge-auth.tsx` | `/auth/init` | POST | Auth challenge initiation (AUTH_BASE_URL) | +| `challenge-auth.tsx` | `/auth/verify` | POST | Auth challenge verification (AUTH_BASE_URL) | +| `platform-passport.tsx` | (read-only display) | — | Renders platform claims from identity data | +| `provenance-ledger.tsx` | (read-only display) | — | Renders package signature history | +| `trust-graph.tsx` | (read-only display) | — | Identity trust visualization | +| WASM bridge (`wasm-bridge.ts`) | (local WASM) | — | Client-side signature 
verification | + +### auths-verify-widget (Web Component) → Forge APIs + +| Component | Endpoint | Method | Feature | +|-----------|----------|--------|---------| +| `github.ts` resolver | GitHub REST API (`/repos/{owner}/{repo}/git/refs/auths/registry`) | GET | Fetch attestation refs from GitHub | +| `gitlab.ts` resolver | GitLab API (`/projects/{id}/repository/tree`) | GET | Fetch attestation refs from GitLab | +| `gitea.ts` resolver | Gitea API (`/repos/{owner}/{repo}/git/refs`) | GET | Fetch attestation refs from Gitea | +| `detect.ts` | (URL parsing) | — | Auto-detect forge type from repository URL | +| WASM verifier | (local WASM) | — | Ed25519 signature verification in browser | + +### auths-chat (SwiftUI iOS/macOS) → Chat Server API + +| Component | Endpoint | Method | Feature | +|-----------|----------|--------|---------| +| `ConversationListView` | `/conversations` | GET | List conversations | +| `NewConversationView` | `/conversations` | POST | Create conversation | +| `MessageThreadView` | `/conversations/{id}/messages` | GET | Fetch messages | +| `MessageThreadView` | `/conversations/{id}/messages` | POST | Send encrypted message | +| `ContentView` | WebSocket `/ws` | WS | Real-time message updates | +| `PairDeviceView` | `/auth/register` | POST | Register user DID | +| `ShowPairingQRView` | (local QR generation) | — | QR code for pairing | + +### auths-mobile (SwiftUI + Compose) → Registry + Mobile API + +| Component (iOS / Android) | Endpoint | Method | Feature | +|---------------------------|----------|--------|---------| +| `CreateIdentityView` / `OnboardingScreen` | `/v1/identities/{prefix}/kel` | POST | Create identity (inception event) | +| `IdentityView` / `IdentityScreen` | `/mobile/identity` | GET | Fetch identity | +| `DevicesView` / `DevicesScreen` | `/mobile/devices` | GET | List paired devices | +| `DevicesView` / `DevicesScreen` | `/mobile/devices/{id}/revoke` | POST | Revoke device | +| `PairDeviceView` / `PairDeviceScreen` | 
`/mobile/pair/initiate` | POST | Initiate pairing | +| `PairDeviceView` / `PairDeviceScreen` | `/mobile/pair/complete` | POST | Complete pairing | +| `EmergencyView` / `EmergencyScreen` | `/mobile/emergency/freeze` | POST | Emergency freeze | +| `SettingsView` / `SettingsScreen` | `/mobile/notifications/register` | POST | Push notification setup | + +--- + +## 3 API → Backend Map + +### Registry Server (auths-registry-server, port 3000) + +| Endpoint | Handler | Service/Logic | Notes | +|----------|---------|--------------|-------| +| `GET /v1/health` | health handler | — | Returns status | +| `GET /v1/identities/:prefix` | identity handler | `auths-storage` (Git + Postgres read) | Resolve identity by KERI prefix | +| `GET /v1/identities/:prefix/kel` | KEL handler | `auths-id` KERI state machine | Fetch Key Event Log | +| `POST /v1/identities/:prefix/kel` | KEL handler | `auths-id` KERI state machine | Append KEL event (inception/rotation) | +| `GET /v1/devices/:did` | device handler | `auths-storage` | Lookup device by DID | +| `GET /v1/devices/:did/attestation` | attestation handler | `auths-verifier` | Fetch device attestation | +| `GET /v1/orgs/:org_did/members` | org handler | `auths-id` registry | List org members | +| `POST /v1/orgs/:org_did/members` | org handler | `auths-sdk` org workflow | Add org member | +| `DELETE /v1/orgs/:org_did/members/:member_did` | org handler | `auths-sdk` org workflow | Remove member | +| `POST /v1/verify` | verify handler | `auths-verifier` | Verify attestation chain | +| `POST /v1/pairing/sessions` | pairing handler | `auths-pairing-protocol` | Create pairing session | +| `GET /v1/pairing/sessions/:id` | pairing handler | `auths-pairing-protocol` | Get session state | +| `DELETE /v1/pairing/sessions/:id` | pairing handler | `auths-pairing-protocol` | Cancel session | + +### Auth Server (auths-auth-server, port 3001) + +| Endpoint | Handler | Service/Logic | Notes | +|----------|---------|--------------|-------| +| 
`/auth/init` | challenge handler | Challenge generation + store | Generates nonce, stores with TTL (300s default) | +| `/auth/verify` | verify handler | `auths-infra-http::HttpIdentityResolver` → registry | Resolves identity keys via registry, verifies signature | + +### Chat Server (auths-chat-server, port 3002) + +| Endpoint | Handler | Service/Logic | Notes | +|----------|---------|--------------|-------| +| `POST /auth/register` | register handler | Identity resolution via registry | Register DID for chat | +| `GET /conversations` | conversation handler | SQLite store | List user conversations | +| `POST /conversations` | conversation handler | SQLite store | Create conversation | +| `GET /conversations/{id}/messages` | message handler | SQLite store | Fetch encrypted messages | +| `POST /conversations/{id}/messages` | message handler | SQLite store | Store encrypted message | +| WebSocket `/ws` | WS handler | Tokio broadcast | Real-time relay | + +### OIDC Bridge (auths-oidc-bridge, port 3300) + +| Endpoint | Handler | Service/Logic | Notes | +|----------|---------|--------------|-------| +| `POST /token` (implied) | token handler | `auths-verifier` + JWT signing | Verifies attestation chain, issues cloud JWT | +| `GET /.well-known/openid-configuration` | metadata handler | Static config | OIDC discovery | +| `GET /jwks` | JWKS handler | Ed25519 public key export | Key set for JWT verification | + +### Pairing Daemon (LAN, auths-pairing-daemon) + +| Endpoint | Handler | Service/Logic | Notes | +|----------|---------|--------------|-------| +| `GET /health` | `handle_health()` | — | Returns "ok" | +| `GET /v1/pairing/sessions/by-code/{code}` | `handle_lookup_by_code()` | `DaemonState` | Lookup by 6-char code | +| `GET /v1/pairing/sessions/{id}` | `handle_get_session()` | `DaemonState` | Get session details | +| `POST /v1/pairing/sessions/{id}/response` | `handle_submit_response()` | `DaemonState` + ECDH | Submit ECDH + signing keys | +| `POST 
/v1/pairing/sessions/{id}/confirm` | `handle_submit_confirmation()` | `DaemonState` + SAS | Submit SAS confirmation | +| `GET /v1/pairing/sessions/{id}/confirmation` | `handle_get_confirmation()` | `DaemonState` | Poll confirmation state | + +### MCP Server (auths-mcp-server, port 8080) + +| Endpoint | Handler | Service/Logic | Notes | +|----------|---------|--------------|-------| +| `GET /health` | `health()` | — | Status + version | +| `GET /.well-known/oauth-protected-resource` | `protected_resource_metadata()` | Static | OAuth metadata | +| `GET /mcp/tools` | `list_tools()` | Tool registry | Enumerate tools + required capabilities | +| `POST /mcp/tools/{tool_name}` | `handle_tool_call()` | JWT middleware → tool executor | Execute tool (read_file, write_file, deploy) | + +### Witness Server (auths-witness, port 8080) + +| Endpoint | Handler | Service/Logic | Notes | +|----------|---------|--------------|-------| +| `POST /witness/{prefix}/event` | event handler | SQLite (first-seen-always-seen) | Submit KERI event for witnessing | +| `GET /witness/{prefix}/head` | head handler | SQLite | Latest observed sequence | +| `GET /witness/{prefix}/receipt/{said}` | receipt handler | SQLite + Ed25519 signing | Retrieve issued receipt | +| `GET /health` | health handler | — | Status + metrics | + +### SCIM Server (auths-scim-server) + +| Endpoint | Handler | Service/Logic | Notes | +|----------|---------|--------------|-------| +| SCIM 2.0 standard endpoints | SCIM handlers | PostgreSQL | Multi-tenant agent provisioning | + +--- + +## 4 Backend → Database Map + +### Git Storage (Primary — `~/.auths/` repository) + +| Service | Refs/Tables | Operations | Data | +|---------|------------|------------|------| +| `auths-storage::git` | `refs/auths/registry/*` | Read/write Git refs + blobs | Attestation chains | +| `auths-storage::git` | `refs/keri/{prefix}/*` | Read/write Git refs | Key Event Logs (KEL) | +| `auths-infra-git` | `refs/auths/` | Clone, fetch, push | Identity 
bundles | +| `auths-id` KERI | `refs/keri/{prefix}/kel` | Append-only event log | Inception, rotation, interaction events | +| `auths-id` attestations | `refs/auths/attestations/{rid}` | Create/read blobs | Signed attestation JWTs | + +### PostgreSQL (Cloud Hot Path) + +| Service | Tables | Queries | Data Returned | +|---------|--------|---------|---------------| +| `auths-registry-server` | `identities`, `attestations`, `devices` | SELECT by prefix/DID, INSERT events | Identity records, attestation metadata | +| `auths-registry-server` | `org_members` | SELECT by org_did, INSERT/DELETE | Org membership records | +| `auths-auth-server` | `challenges` (optional, else in-memory) | INSERT challenge, SELECT + DELETE on verify | Challenge nonce + TTL | +| `auths-scim-server` | `tenants`, `agents` | SCIM CRUD operations | Agent provisioning records | +| `auths-storage::postgres` | Shard-partitioned tables | Indexed lookups | Attestation metadata, membership | + +### SQLite (Local/Embedded) + +| Service | Database | Tables | Data | +|---------|----------|--------|------| +| Witness Server (`auths-core::witness`) | `witness.db` | `first_seen_events` (prefix, sequence, d, t) | KERI event first-seen records | +| Witness Server | `witness.db` | `receipts` (prefix, said, receipt_json) | Issued witness receipts | +| Witness Server | `witness.db` | `duplicity_log` | Evidence of forked identities | +| `auths-index` | `index.db` | Attestation index by device DID | O(1) attestation lookups | +| `auths-chat-server` | `chat.db` | `messages`, `conversations` | Encrypted message ciphertext | + +### Redis (Cache Tier 0) + +| Service | Keys | Operations | Data | +|---------|------|------------|------| +| `auths-cache` | `identity:{prefix}` | GET/SET with TTL | Cached identity resolution | +| `auths-cache` | `attestation:{rid}` | GET/SET with TTL | Cached attestation data | + +--- + +## 5 Feature Pipelines + +### Pipeline 1: Identity Exploration (Web) + +``` +explorer-client.tsx (Next.js) 
+→ GET /v1/identities/{did} (registryFetch wrapper, 5s timeout) +→ auths-registry-server handler +→ auths-storage (Git refs + Postgres) +→ PostgreSQL SELECT / Git blob read +→ Identity JSON response +→ useIdentityProfile() hook (TanStack Query, 120s stale) +→ Trust tier computation (client-side: claims×20 + keys×15 + artifacts×5) +→ platform-passport.tsx + trust-graph.tsx render +``` + +### Pipeline 2: Commit Signing (CLI) + +``` +Developer runs: auths sign-commit +→ auths-cli sign command +→ auths-sdk signing workflow (signing.rs) +→ auths-core SecureSigner (platform keychain: macOS Keychain / Linux Secret Service) +→ Ed25519 signature generation +→ Git commit signed via SSH signature format +→ refs/auths/ updated in local repo +``` + +### Pipeline 3: Commit Verification (GitHub Action CI) + +``` +Push/PR event on GitHub +→ auths-verify-github-action triggers +→ src/main.ts reads inputs (allowed-signers, commit-range) +→ src/verifier.ts downloads + caches auths CLI binary (SHA256 verified) +→ Detects commit range from GitHub event context +→ Runs: auths verify-commit --json (per commit) +→ Classifies results (verified / unsigned / unknown key / corrupted) +→ Generates GitHub Step Summary (markdown table) +→ Optionally posts PR comment with fix instructions +→ Sets outputs: verified, results JSON, total, passed, failed +``` + +### Pipeline 4: Device Pairing (Mobile → LAN Daemon) + +``` +PairDeviceView (SwiftUI) / PairDeviceScreen (Compose) +→ QR code scan (camera) +→ POST /mobile/pair/initiate (to registry/mobile API) +→ GET /v1/pairing/sessions/by-code/{code} (LAN daemon) +→ DaemonState session lookup +→ POST /v1/pairing/sessions/{id}/response (ECDH key exchange) +→ SAS display on both devices +→ POST /v1/pairing/sessions/{id}/confirm (SAS confirmation) +→ Attestation created linking new device DID to identity +→ DevicesView updated with new paired device +``` + +### Pipeline 5: Challenge-Response Authentication (Web) + +``` +challenge-auth.tsx (React) +→ POST 
/auth/init (auths-auth-server:3001) +→ Challenge nonce generated + stored (TTL 300s) +→ Client signs challenge with device key (local keychain) +→ POST /auth/verify (auths-auth-server:3001) +→ HttpIdentityResolver → GET /v1/identities/{did} (registry:3000) +→ Fetch public keys for DID +→ Ed25519 signature verification +→ Auth session established +→ Auth context updated in React state +``` + +### Pipeline 6: Encrypted Chat (iOS/macOS) + +``` +MessageThreadView (SwiftUI) +→ EncryptionService.encrypt() (Rust FFI: X25519 ECDH + AES-256-GCM) +→ POST /conversations/{id}/messages (auths-chat-server:3002) +→ SQLite INSERT (ciphertext only, server cannot read) +→ WebSocket broadcast to other participants +→ Receiver: EncryptionService.decrypt() (Rust FFI) +→ MessageBubbleView renders plaintext +``` + +### Pipeline 7: Artifact Search + Provenance (Web) + +``` +Search input (explorer-client.tsx) +→ useRegistrySearch() hook (debounced 300ms) +→ GET /v1/artifacts?q=...&cursor=... (cursor pagination) +→ auths-registry-server → PostgreSQL query +→ Artifact list response +→ useArtifactSearch() infinite pagination +→ usePackageDetail() → POST /v1/identities/batch (top 10 signers) +→ provenance-ledger.tsx renders signature history +``` + +### Pipeline 8: Org Management (Web) + +``` +/try/org/ pages (Next.js App Router) +→ GET /v1/orgs/{orgDid}/policy (registry) +→ GET /v1/orgs/{orgDid}/status (registry) +→ POST /v1/orgs/{orgDid}/invite (create invite) +→ /join/[code] page (invite acceptance) +→ POST /v1/orgs/{orgDid}/members (registry → auths-sdk org workflow) +→ auths-id registry → PostgreSQL INSERT +→ Attestation created with Role + Capabilities +``` + +### Pipeline 9: KERI → Cloud JWT Exchange (OIDC Bridge) + +``` +Agent or service with attestation chain +→ POST /token (auths-oidc-bridge:3300) +→ auths-verifier validates attestation chain +→ Extract capabilities from attestations +→ Sign JWT with Ed25519 key +→ Return cloud JWT with scoped claims +→ Consumer verifies JWT via GET 
/jwks +``` + +### Pipeline 10: Emergency Freeze (Mobile) + +``` +EmergencyView (SwiftUI) / EmergencyScreen (Compose) +→ Biometric authentication (Face ID / fingerprint) +→ POST /mobile/emergency/freeze +→ Registry marks identity as frozen +→ All attestations under this identity become invalid +→ UI shows frozen state +``` + +--- + +## 6 Dead Code List + +### Unused / Stub Endpoints + +| Item | Location | Status | +|------|----------|--------| +| `auths-github-app` commit verification | `src/webhook.rs` | Push + PR handlers log events but verification logic is TODO | +| `auths-github-app` check run creation | `src/github.rs` | GitHub API client is a stub — no actual check runs posted | +| MCP `deploy` tool | `auths-mcp-server` tools | Mock implementation (returns "deployment queued") | +| MCP `read_file` / `write_file` | `auths-mcp-server` tools | Sandboxed to `/tmp` only — demonstration tools | + +### Unused / Disconnected Services + +| Item | Notes | +|------|-------| +| `auths-agent-demo` | Pure local simulation; no network calls. Demonstrates SDK but not wired to any running service | +| `auths-examples/` templates | Reference repos, not deployed. 
Some may reference APIs that don't exist yet | + +### Potentially Unused Frontend Components + +| Item | Location | Notes | +|------|----------|-------| +| Various diagram components | `auths-site` | Educational/documentation diagrams — may not be linked from navigation | +| Fixture/demo mode code | `auths-site`, `auths-mobile` | `USE_FIXTURES` flag enables test data — dead in production builds | + +### Deleted / Removed Sub-projects (per git status) + +| Item | Status | +|------|--------| +| `auths-legacy/auths` | Deleted | +| `auths-releases` | Deleted | +| `auths-verify-action` | Deleted (replaced by `auths-verify-github-action`) | + +### Deleted Planning Documents (per git status) + +| File | Status | +|------|--------| +| `crate_org_roadmap.md`, `current_roadmap.md`, `debate_roadmap.md`, `ecosystem_roadmap.md`, `enterprise_roadmap.md`, `financial_success_roadmap.md`, `gamification_roadmap.md`, `http_security.md`, `licensing_roadmap.md`, `milestone_roadmap_2.md`, `new_roadmap.md`, `roadmap_auths.md`, `roadmap_overall.md`, `stripe_roadmap.md`, `unicorn_roadmap.md` | All deleted | + +--- + +## 7 Backend Capabilities Inventory + +| Capability | Description | Exposure | +|-----------|-------------|----------| +| **Identity Resolution** | Resolve KERI identities by prefix/DID | **Exposed** — registry API → web explorer, mobile, auth server | +| **KEL Management** | Create/append Key Event Log (inception, rotation, interaction) | **Exposed** — registry API → CLI, mobile | +| **Attestation Verification** | Verify Ed25519 signature chains | **Exposed** — registry API, WASM widget, CLI, GitHub Action | +| **Device Management** | Register, list, revoke devices | **Exposed** — registry API → mobile app | +| **Device Pairing** | LAN mDNS + ECDH pairing protocol | **Exposed** — pairing daemon → mobile, chat | +| **Org Membership** | Add/remove members with roles + capabilities | **Exposed** — registry API → web org management | +| **Challenge-Response Auth** | DID-based 
un-phishable authentication | **Exposed** — auth server → web challenge-auth | +| **OIDC Bridge** | Exchange attestation chains for cloud JWTs | **Exposed** — OIDC bridge server → MCP server, enterprise consumers | +| **Encrypted Messaging** | E2E encrypted chat relay (X25519 + AES-256-GCM) | **Exposed** — chat server → iOS/macOS chat app | +| **SCIM Provisioning** | SCIM 2.0 multi-tenant agent provisioning | **Exposed** — SCIM server → enterprise IdP systems | +| **Transparency Log** | Append-only log with Merkle tree consistency proofs | **Partially exposed** — activity feed on web; witness + monitor running | +| **Artifact Search** | Search signed artifacts/packages | **Exposed** — registry API → web explorer | +| **Network Statistics** | Aggregate network health/stats | **Exposed** — registry API → web network page | +| **Namespace Browsing** | Browse identity namespaces | **Exposed** — registry API → web explorer | +| **Identity Search** | Full-text identity search | **Exposed** — registry API → web explorer | +| **Trust Tier Computation** | Weighted scoring of identity trustworthiness | **Exposed** — client-side in web (claims×20, keys×15, artifacts×5) | +| **Key Rotation** | KERI pre-rotation with forward security | **Exposed** — CLI workflow, SDK; mobile app supports inception | +| **Emergency Freeze** | Instantly invalidate all identity attestations | **Exposed** — mobile app → registry | +| **Git Allowed Signers** | Generate SSH `allowed_signers` files from attestations | **Partially exposed** — CLI + GitHub Action only; no web UI | +| **Policy Engine** | Capability-based authorization (sign_commit, deploy:staging, etc.) 
| **Partially exposed** — used internally by SDK/MCP; org policy visible on web | +| **Agent Delegation** | Scoped, time-bounded delegation to AI agents | **Not exposed to frontend** — demonstrated in agent-demo only; SDK supports it | +| **IdP Binding** | Bind KERI identities to corporate IdPs (Okta, Google, Entra, SAML) | **Not exposed to frontend** — cloud-cli only; no web UI | +| **Diagnostics** | System health checks (keychains, Git, crypto) | **Not exposed to frontend** — CLI only | +| **Audit Events** | Structured audit event emission | **Not exposed to frontend** — internal to SDK workflows | +| **Cache Tiering** | Redis (Tier 0) + Git (Tier 1) identity cache | **Not exposed** — internal infrastructure | +| **Webhook Processing** | GitHub push/PR event handling | **Not exposed to frontend** — GitHub App (backend-only, partially implemented) | +| **MCP Tool Execution** | JWT-gated tool execution (file I/O, deploy) | **Not exposed to frontend** — MCP server only; mock tools | +| **Witness Receipting** | KERI event witnessing with first-seen enforcement | **Not exposed to frontend** — witness server (backend infrastructure) | +| **Log Monitoring** | Periodic transparency log integrity verification | **Not exposed** — background service | +| **Push Notifications** | Mobile push notification registration | **Partially exposed** — mobile app registers; no visible notification UI yet | + +--- + +## 8 Broken Pipelines + +### Frontend Exists → Backend Missing + +| Frontend | Expected Backend | Status | +|----------|-----------------|--------| +| `auths-mobile` calls `GET /mobile/identity` | No `/mobile/identity` endpoint found in registry-server routes | **Missing** — mobile-specific API routes not defined in registry-server | +| `auths-mobile` calls `GET /mobile/devices` | No `/mobile/devices` endpoint found in registry-server routes | **Missing** — mobile device management API not defined | +| `auths-mobile` calls `POST /mobile/pair/initiate` | No 
`/mobile/pair/initiate` endpoint in registry-server | **Missing** — mobile pairing initiation route missing from cloud | +| `auths-mobile` calls `POST /mobile/pair/complete` | No `/mobile/pair/complete` endpoint in registry-server | **Missing** — mobile pairing completion route missing | +| `auths-mobile` calls `POST /mobile/emergency/freeze` | No `/mobile/emergency/freeze` endpoint in registry-server | **Missing** — emergency freeze route not in registry | +| `auths-mobile` calls `POST /mobile/notifications/register` | No notification registration endpoint in registry-server | **Missing** — push notification backend not implemented | +| `auths-chat` (iOS/macOS) calls chat-server endpoints | `auths-chat-server` exists but may not be deployed | **Uncertain** — chat server crate exists but no fly.toml/deployment config for it | +| `auths-site` Org invite flow (`/join/[code]`) | Invite acceptance API endpoint | **Uncertain** — invite creation exists; acceptance flow may be incomplete | + +### Backend Exists → API Missing + +| Backend Capability | Expected API | Status | +|-------------------|--------------|--------| +| `auths-sdk` agent delegation workflows | No REST API for delegation management | **Missing** — only usable via SDK/CLI, no HTTP API | +| `auths-sdk` audit event workflows | No REST API for audit log retrieval | **Missing** — events emitted internally but not queryable via API | +| `auths-sdk` diagnostics | No REST API for system diagnostics | **Missing** — CLI-only | +| `auths-sdk` policy_diff analysis | No REST API for policy diff | **Missing** — SDK function only | +| `auths-cloud-sdk` IdP binding | No REST API for IdP management | **Missing** — cloud-cli only | +| `auths-sdk` allowed_signers generation | No REST API endpoint | **Missing** — CLI-only | +| `auths-core` keychain operations | No REST API for remote keychain management | **By design** — keychains are local-only | + +### API Exists → Frontend Unused + +| API Endpoint | Available In | 
Frontend Usage | +|-------------|-------------|----------------| +| `POST /v1/verify` (registry-server) | Registry API | **Not called from any frontend** — verification done client-side via WASM or CLI | +| `DELETE /v1/pairing/sessions/:id` (registry-server) | Registry API | **Not called from any frontend** — no cancel-pairing UI | +| `POST /witness/{prefix}/event` (witness server) | Witness API | **Not called from any frontend** — backend infrastructure only | +| `GET /witness/{prefix}/head` (witness server) | Witness API | **Not called from any frontend** — backend infrastructure only | +| `GET /witness/{prefix}/receipt/{said}` (witness server) | Witness API | **Not called from any frontend** — backend infrastructure only | +| MCP tool endpoints (`/mcp/tools/*`) | MCP server | **Not called from any frontend** — designed for machine-to-machine use | +| SCIM endpoints | SCIM server | **Not called from any frontend** — designed for enterprise IdP integration | +| `POST /v1/identities/:prefix/kel` (registry) | Registry API | **Only called from mobile/CLI** — no web UI for identity creation | + +### Cross-Project Wiring Gaps + +| Gap | Description | +|-----|-------------| +| GitHub App → Registry verification | `auths-github-app` receives webhooks but commit verification against registry is TODO | +| GitHub App → Check Runs | GitHub API client in `src/github.rs` is a stub; no check runs are posted | +| Chat server deployment | `auths-chat-server` has no deployment config (Dockerfile/fly.toml), unlike registry and auth servers | +| SCIM → Agent provisioning pipeline | SCIM server provisions agents in PostgreSQL, but no pipeline connects provisioned agents to the OIDC bridge or MCP server | +| Monitor alerts | `auths-monitor` verifies log integrity but has no alerting/notification output (logs only) | +| Push notifications | Mobile app registers for notifications, but no notification dispatch service exists | diff --git a/docs/plans/wiring/architecture_prompt.md 
b/docs/plans/wiring/architecture_prompt.md new file mode 100644 index 00000000..341c98d4 --- /dev/null +++ b/docs/plans/wiring/architecture_prompt.md @@ -0,0 +1,129 @@ +You are a principal software architect analyzing an unfamiliar full-stack codebase. + +Your task is to reverse engineer the architecture and system structure. + +Do not suggest improvements yet. + +Your goal is only to map the system. + +The codebase includes: + • frontend + • backend + • database + • APIs connecting them + +⸻ + +Step 1 — Identify the System Structure + +Determine: + • frontend framework + • backend framework + • API structure + • database models + • major product features + +Summarize the product and architecture. + +⸻ + +Step 2 — Build Dependency Maps + +Construct the following maps. + +Frontend → API + +Component Endpoint Method Feature + + + +⸻ + +API → Backend + +Endpoint Controller Service Logic + + + +⸻ + +Backend → Database + +Service Tables Queries Data Returned + + + +⸻ + +Feature Pipelines + +Trace full product pipelines: + +UI Component +→ API Endpoint +→ Controller +→ Service +→ Database +→ Response +→ UI State + + +⸻ + +Step 3 — Identify Dead Code + +Detect: + • unused endpoints + • unused services + • unused models + • unused frontend components + +⸻ + +Step 4 — Identify Product Capabilities + +List all backend capabilities that exist, such as: + • analytics + • repository metadata + • search + • notifications + • history + +Determine whether each capability is: + +exposed to frontend +partially exposed +not exposed + + +⸻ + +Step 5 — Identify Broken Pipelines + +Find areas where: + +frontend exists → backend missing +backend exists → API missing +API exists → frontend unused + + +⸻ + +Output Format + +Produce: + +1 Architecture Summary +2 Frontend → API Map +3 API → Backend Map +4 Backend → Database Map +5 Feature Pipelines +6 Dead Code List +7 Backend Capabilities Inventory +8 Broken Pipelines + +Do not propose solutions yet. 
+ +Write your output to: +/Users/bordumb/workspace/repositories/auths-base/auths/docs/plans/wiring +filename: architecture_output.md diff --git a/docs/plans/wiring/fix_auth_pattern_prompt.md b/docs/plans/wiring/fix_auth_pattern_prompt.md new file mode 100644 index 00000000..4861e429 --- /dev/null +++ b/docs/plans/wiring/fix_auth_pattern_prompt.md @@ -0,0 +1,288 @@ +# Prompt: Replace Bearer Token Auth with DID Signature Auth + +## Context + +You are working on **Auths**, a decentralized identity system where cryptographic identity IS the credential. The fundamental principle is: **users authenticate by signing with their Ed25519 private key, and servers verify signatures against the user's DID-resolved public key.** There are no API keys, no session tokens, no OAuth tokens. + +The current frontend violates this principle. After a challenge-response flow, the auth server returns a UUID session token, and the frontend passes it as `Authorization: Bearer <token>` on every subsequent request. The registry server then makes a roundtrip to the auth server to validate that session — a centralized session pattern layered on top of a system designed to eliminate it. + +**The registry server already supports the correct pattern.** Its `identity_auth` middleware has a fallback path that accepts a signed `BearerPayload` (`{ did, timestamp, signature }`) and validates the Ed25519 signature directly — no auth server roundtrip. The frontend simply doesn't use this path. + +## The Problem + +### Current flow (wrong): +``` +1. Frontend calls POST /auth/init → gets { id, nonce, domain } +2. User runs CLI: auths auth challenge --nonce ... --domain ... --json +3. User pastes JSON { signature, public_key, did } into frontend +4. Frontend calls POST /auth/verify → gets { token: "<uuid>", did, expires_at } +5. Frontend stores { token, did, expiresAt } in React context (memory only) +6. Every authenticated request sends: Authorization: Bearer <uuid> +7. Registry server receives Bearer <uuid>, parses as UUID +8. 
Registry server calls auth-server GET /auth/status/<uuid> to validate session +9. Auth server looks up session in DB, returns { status: "verified", did } +10. Registry server extracts DID, continues +``` + +Problems: +- Step 5: Token stored in memory only — page reload = logged out +- Steps 7-9: Every authenticated request requires a roundtrip from registry → auth server +- The UUID token IS a session — this is traditional session auth, not decentralized identity +- The token parameter propagates through every frontend API function + +### Correct flow (DID signature auth): +``` +1. User's Ed25519 keypair lives in browser (WebCrypto + IndexedDB) +2. For each authenticated request, frontend signs: "{did}\n{iso8601_timestamp}" with private key +3. Frontend sends: Authorization: Bearer {"did":"...","timestamp":"...","signature":"..."} +4. Registry server parses BearerPayload, verifies Ed25519 signature against DID's public key +5. No roundtrip to auth server. No session state. No expiry dance. +``` + +The registry server's `validate_signed_challenge()` already implements step 4. 
+ +## Repositories + +- **Frontend**: `/Users/bordumb/workspace/repositories/auths-base/auths-site` (Next.js 16, React 19, TypeScript) +- **Backend (cloud)**: `/Users/bordumb/workspace/repositories/auths-base/auths-cloud` (Rust, Axum) +- **Core logic**: `/Users/bordumb/workspace/repositories/auths-base/auths` + +## Current Code + +### Frontend: Auth Context + +**File:** `auths-site/apps/web/src/lib/auth/auth-context.tsx` + +```typescript +interface AuthState { + token: string; // ← UUID session token (wrong) + did: string; + expiresAt: string; +} + +interface AuthContextValue { + auth: AuthState | null; + setAuth: (state: AuthState) => void; + clearAuth: () => void; + isAuthenticated: boolean; +} +``` + +### Frontend: API Client (authenticated requests) + +**File:** `auths-site/apps/web/src/lib/api/registry.ts` + +```typescript +async function registryFetchAuth<T>( + path: string, + options: { + method?: string; + token?: string; // ← token param + body?: Record<string, unknown>; + params?: Record<string, string>; + signal?: AbortSignal; + } = {}, +): Promise<T> { + const url = new URL(path, REGISTRY_BASE_URL); + const headers: Record<string, string> = { Accept: 'application/json' }; + if (options.token) headers.Authorization = `Bearer ${options.token}`; // ← Bearer UUID + if (options.body) headers['Content-Type'] = 'application/json'; + const res = await fetch(url.toString(), { ... }); + return res.json() as Promise<T>; +} + +// Every authenticated function takes token as parameter: +export async function createOrg(name: string, token: string, signal?: AbortSignal) { ... } +export async function fetchOrgStatus(orgDid: string, token: string, signal?: AbortSignal) { ... } +export async function createInvite(orgDid: string, role: string, expiresIn: string, token: string, signal?: AbortSignal) { ... } +export async function setOrgPolicy(orgDid: string, requireSigning: boolean, token: string, signal?: AbortSignal) { ... 
} +``` + +### Frontend: Challenge-Response Component + +**File:** `auths-site/apps/web/src/components/challenge-auth.tsx` + +After the user pastes CLI output and verification succeeds: +```typescript +const res = await verifyChallenge(sessionId, { + signature: data.signature, + public_key: data.public_key, + did: data.did, +}); +setAuth({ token: res.token, did: res.did, expiresAt: res.expires_at }); +``` + +### Backend: Auth Server Verify Endpoint + +**File:** `auths-cloud/crates/auths-auth-server/src/routes/verify.rs` + +```rust +#[derive(Debug, Serialize)] +pub struct VerifyResponse { + pub verified: bool, + pub token: String, // ← Session UUID returned as "token" + pub did: String, + pub expires_at: String, +} +``` + +### Backend: Registry Server Auth Middleware (already supports both paths) + +**File:** `auths-cloud/crates/auths-registry-server/src/middleware/identity_auth.rs` + +```rust +pub async fn auth_middleware(...) -> Result { + let token = request.headers().get("authorization") + .and_then(|v| v.to_str().ok()) + .and_then(|h| h.strip_prefix("Bearer ")); + + let identity = match token { + None => anonymous_identity(), + Some(t) => match Uuid::parse_str(t) { + // Path A: Session UUID → roundtrip to auth server (WRONG) + Ok(uuid) => validate_session_token(auth_url, uuid).await?, + Err(_) => match serde_json::from_str::<BearerPayload>(t) { + // Path B: Signed DID payload → direct verification (CORRECT) + Ok(payload) => validate_signed_challenge(&state, payload).await?, + Err(_) => anonymous_identity(), + }, + }, + }; + request.extensions_mut().insert(identity); + Ok(next.run(request).await) +} +``` + +The `BearerPayload` and `validate_signed_challenge` already exist and work. The frontend simply never sends a payload in this format. 
+ +## Security Model: Where Keys Live + +**CRITICAL: The server NEVER stores or sees private keys.** + +All private key material lives exclusively on the user's device: + +| Data | Where it lives | Why | +|---|---|---| +| **Private key** | User's browser — **IndexedDB only** (via WebCrypto). Never sent to the server. Never stored in memory as a raw string. Use `extractable: false` for HSM-like protection where possible. | The server has no business holding private keys. Decentralized identity means the user controls their own key material. | +| **Public key** | Already on the server (in the registry as part of the DID). Also stored in browser `localStorage` for convenience. | Public keys are not secrets — they're published by design. | +| **DID identifier** | Already on the server (registry). Also in browser `localStorage`. | DIDs are public identifiers, like email addresses. | + +The signing flow is entirely client-side: +``` +User's browser (IndexedDB private key) + → WebCrypto signs "{did}\n{timestamp}" + → sends only the signature + DID + timestamp to server + → server resolves DID → gets public key from registry + → server verifies signature + → server NEVER receives the private key +``` + +This is the same model as SSH key auth, GPG signing, or hardware security keys — the private key never leaves the device. + +## What To Change + +### 1. Replace AuthState with DID identity state + +**File:** `auths-site/apps/web/src/lib/auth/auth-context.tsx` + +Replace the `token`-based state with a DID + public key reference: + +```typescript +interface AuthState { + did: string; + publicKeyHex: string; + // Private key is in IndexedDB on the user's device — NOT here, NOT on the server +} +``` + +The auth context should: +- Store DID + public key reference (not a session token). These are both public information — safe for `localStorage`. 
+- Persist to `localStorage` so users stay logged in across page reloads +- Provide a `signRequest(data: string): Promise<string>` function that retrieves the private key **from the user's local IndexedDB** and signs via WebCrypto +- Load persisted identity on initialization + +### 2. Add WebCrypto signing utility (client-side only) + +**File:** `auths-site/apps/web/src/lib/auth/signing.ts` (new) + +Create a utility module for **client-side-only** key operations. None of these functions communicate with the server: + +- `generateKeypair()`: Ed25519 via `crypto.subtle.generateKey()` — key is generated in the browser +- `storePrivateKey(did, key)`: Save CryptoKey to **the user's local IndexedDB** (not the server) +- `loadPrivateKey(did)`: Retrieve CryptoKey from **the user's local IndexedDB** +- `signData(did, data)`: Load key from local IndexedDB + sign + return hex. The signature (not the key) is what gets sent to the server. +- `exportPublicKeyHex(publicKey)`: Export raw Ed25519 public key as hex + +IndexedDB is a browser-local database — it lives on the user's machine, like cookies or localStorage, but supports storing non-extractable CryptoKey objects that can't even be read by JavaScript. + +### 3. Replace registryFetchAuth token pattern with per-request signing + +**File:** `auths-site/apps/web/src/lib/api/registry.ts` + +Replace the `token` parameter on every function with automatic per-request signing. Each request gets a fresh signature (signed client-side, verified server-side): + +```typescript +async function registryFetchAuth<T>(path: string, options: { ... }): Promise<T> { + const { auth, signRequest } = getAuthContext(); + if (auth) { + const timestamp = new Date().toISOString(); + // signRequest uses WebCrypto locally — private key never leaves the browser + const signature = await signRequest(`${auth.did}\n${timestamp}`); + const payload = JSON.stringify({ did: auth.did, timestamp, signature }); + headers.Authorization = `Bearer ${payload}`; + } + // ... 
rest unchanged +} +``` + +Remove the `token` parameter from every function that currently accepts it: +- `createOrg(name, token, signal)` → `createOrg(name, signal)` +- `fetchOrgStatus(orgDid, token, signal)` → `fetchOrgStatus(orgDid, signal)` +- `createInvite(orgDid, role, expiresIn, token, signal)` → `createInvite(orgDid, role, expiresIn, signal)` +- `setOrgPolicy(orgDid, requireSigning, token, signal)` → `setOrgPolicy(orgDid, requireSigning, signal)` +- All other `*Auth*` functions + +### 4. Update challenge-auth flow to store keypair locally, not server token + +**File:** `auths-site/apps/web/src/components/challenge-auth.tsx` + +The challenge-response flow currently proves the user controls a DID. After verification: + +- **Current**: stores `{ token: uuid, did, expiresAt }` — the proof is a server-side session token +- **New**: stores `{ did, publicKeyHex }` in localStorage and the user's signing key in their local IndexedDB — the proof is the key itself, held on their device + +Two onboarding paths: +1. **CLI users** (existing flow): Complete challenge-response, then the frontend needs the user's key in the browser. Options: (a) generate a browser-specific device key in the browser and have the CLI attest it, or (b) export the key from CLI and import to browser IndexedDB. +2. **Web-native users** (new flow): Generate Ed25519 keypair via WebCrypto in the browser, register the public key with `POST /v1/identities`, store private key in the user's local IndexedDB. The private key never touches the server. + +### 5. Update all consuming components + +Every component that currently reads `auth.token` and passes it to API functions needs updating. 
Search for all occurrences of: +- `auth?.token` +- `auth.token` +- `token: string` in function signatures that come from auth context +- `useAuth()` destructuring that reads `token` + +## What NOT To Change + +- **Backend registry server middleware**: `identity_auth.rs` already supports `BearerPayload` signature validation. No changes needed. The server already knows how to verify signatures against DID-resolved public keys. +- **Backend auth server**: Keep it functional for the CLI challenge-response proof flow. The auth server still serves a purpose — it's how users prove DID ownership during onboarding. But its session token is no longer used for subsequent API requests. +- **Public (unauthenticated) API functions**: `registryFetch()` (without Auth) stays the same. +- **React Query hooks**: The hooks themselves don't change shape — only the underlying fetch functions lose their `token` parameter. + +## Constraints + +- Ed25519 WebCrypto support: Available in all modern browsers (Chrome 113+, Firefox 127+, Safari 17+). Use feature detection with a fallback message. +- IndexedDB for private key storage: This is a standard browser API for client-side storage (like localStorage but supports CryptoKey objects). WebCrypto's `extractable: false` option means the private key can be used for signing but cannot be exported or read — even by the page's own JavaScript. +- Clock skew: The `timestamp` in `BearerPayload` will be validated server-side. Ensure the server allows reasonable skew (e.g., ±5 minutes). +- The `validate_signed_challenge` function in the registry server may need review to confirm it handles the exact `BearerPayload` format. Read it carefully before assuming it works as-is. + +## Definition of Done + +1. No function in `registry.ts` accepts a `token: string` parameter for auth purposes +2. `AuthState` contains `{ did, publicKeyHex }` — no `token` field +3. 
Auth state persists across page reloads (`localStorage` for DID + publicKey, browser-local IndexedDB for private key) +4. Every authenticated request sends `Authorization: Bearer <signed BearerPayload JSON>` — not a UUID +5. The auth server session token is no longer stored or used after the initial challenge-response proof +6. All existing authenticated features (org creation, invites, policy management) continue to work +7. **The server never receives, stores, or has access to any user's private key** diff --git a/docs/plans/wiring/standalone_auth_test_prompt.md b/docs/plans/wiring/standalone_auth_test_prompt.md new file mode 100644 index 00000000..8beb2c98 --- /dev/null +++ b/docs/plans/wiring/standalone_auth_test_prompt.md @@ -0,0 +1,197 @@ +# Prompt: Build Standalone Auth Test Frontend + +## Goal + +Build a minimal standalone web app that proves the DID signature auth flow works end-to-end against the real registry server. This app exists to validate the `BearerPayload` signing pattern before porting it to the main `auths-site` frontend. + +This is a **test harness**, not a product. Keep it as simple as possible. + +## Context + +The Auths registry server (`auths-registry-server`) has an auth middleware that accepts two formats in the `Authorization: Bearer` header: + +1. **Session UUID** (current, being replaced): `Bearer 550e8400-e29b-41d4-a716-446655440000` +2. **Signed DID payload** (target): `Bearer {"did":"did:keri:...","timestamp":"2026-03-16T12:00:00Z","signature":"ab12cd34..."}` + +Path 2 is already implemented in the middleware but has never been tested from a browser. This standalone app tests path 2.
+ +### Key backend file to understand + +**`auths-cloud/crates/auths-registry-server/src/middleware/identity_auth.rs`** + +The middleware does this: +```rust +let token = request.headers().get("authorization") + .and_then(|v| v.to_str().ok()) + .and_then(|h| h.strip_prefix("Bearer ")); + +let identity = match token { + None => anonymous_identity(), + Some(t) => match Uuid::parse_str(t) { + Ok(uuid) => validate_session_token(auth_url, uuid).await?, + Err(_) => match serde_json::from_str::<BearerPayload>(t) { + Ok(payload) => validate_signed_challenge(&state, payload).await?, + Err(_) => anonymous_identity(), + }, + }, +}; +``` + +When the Bearer value is NOT a UUID, it tries to parse as `BearerPayload` and calls `validate_signed_challenge()`. **You must read `identity_auth.rs` to understand the exact `BearerPayload` struct and what `validate_signed_challenge` expects** — field names, signature format (hex? base64?), what message is signed, timestamp validation window, etc. Do not assume — read the code. + +### Key backend file for identity registration + +**`auths-cloud/crates/auths-registry-server/src/routes/identity.rs`** + +The `POST /v1/identities` endpoint is public and self-authenticating (no auth required to register). Read it to understand the exact request body shape for registering a new identity with a public key. + +## Repositories + +- **Backend (cloud)**: `/Users/bordumb/workspace/repositories/auths-base/auths-cloud` +- **Core logic**: `/Users/bordumb/workspace/repositories/auths-base/auths` +- **Standalone app**: Create at `/Users/bordumb/workspace/repositories/auths-base/auths-auth-test` + +## What To Build + +A single-page web app with **4 steps**, displayed vertically on one page. Each step shows its output inline. No routing, no state management library, no build complexity.
+ +### Tech stack + +- **Vite + vanilla TypeScript** (no React, no framework) +- Single `index.html` + `src/main.ts` + `src/crypto.ts` + `src/api.ts` +- Minimal CSS — just enough to be readable + +### Step 1: Generate Keypair + +A button that: +1. Generates an Ed25519 keypair via `crypto.subtle.generateKey('Ed25519', ...)` +2. Stores the private key (CryptoKey object) in IndexedDB — use `extractable: false` +3. Exports the public key as raw bytes → hex string +4. Displays the public key hex on screen +5. Stores `{ publicKeyHex }` in a module-level variable for subsequent steps + +**Display:** +``` +[Generate Keypair] + +Public Key: 3b6a27bceeb6a0... (64 hex chars) +Stored in: IndexedDB ✓ +``` + +### Step 2: Register Identity + +A button that: +1. Calls `POST /v1/identities` on the registry server with the public key from Step 1 +2. Displays the returned DID +3. Stores the DID in the module-level variable + +**Important:** Read `identity.rs` in the registry server to determine the exact request body format. It may require a KERI inception event, not just a raw public key. If so, read the core `auths-id` crate to understand how inception events are constructed, and build the minimum viable request. If building a full KERI inception event is too complex for a test harness, document what you found and use whichever simpler registration path exists. + +**Display:** +``` +[Register Identity] + +DID: did:keri:EBf1... +Status: 201 Created ✓ +``` + +### Step 3: Make Authenticated Request + +A button that: +1. Constructs the message to sign (read `identity_auth.rs` to find the exact format — likely `"{did}\n{timestamp}"` but verify) +2. Retrieves the private key from IndexedDB +3. Signs the message via `crypto.subtle.sign('Ed25519', privateKey, messageBytes)` +4. Constructs the `BearerPayload` JSON (match the exact struct from `identity_auth.rs`) +5. Sends a GET request to an authenticated endpoint with `Authorization: Bearer <BearerPayload JSON>` +6.
Displays the full request headers sent and the response + +Use an endpoint that behaves differently for authenticated vs anonymous users. Good candidates: +- `GET /v1/account/status` (if it exists — returns identity info for authenticated users) +- `GET /v1/orgs` or any endpoint that returns user-specific data +- If no good candidate exists, use `GET /v1/health` and check that the server at least doesn't reject the auth header + +**Display:** +``` +[Make Authenticated Request] + +Request: + GET /v1/account/status + Authorization: Bearer {"did":"did:keri:EBf1...","timestamp":"2026-03-16T...","signature":"a1b2c3..."} + +Response: + Status: 200 OK + Body: { "did": "did:keri:EBf1...", "tier": "session", ... } +``` + +### Step 4: Verify Anonymous vs Authenticated + +Two buttons side by side that call the same endpoint — one without auth, one with. Displays both responses so you can visually confirm the auth is working. + +**Display:** +``` +[Request Without Auth] [Request With Auth] + +Status: 200 Status: 200 +Body: { anonymous: true } Body: { did: "did:keri:..." } +``` + +### Debug Panel + +At the bottom of the page, a persistent log that shows: +- Every IndexedDB operation (store/retrieve) +- Every `crypto.subtle` call and its result +- Every HTTP request/response with full headers +- Any errors with stack traces + +This is the most important part of the UI. The four steps above are just buttons — the debug panel is where you'll actually diagnose issues. 
+ +## File Structure + +``` +auths-auth-test/ +├── index.html +├── package.json (vite + typescript only) +├── tsconfig.json +├── vite.config.ts +└── src/ + ├── main.ts (UI wiring — buttons, display, debug log) + ├── crypto.ts (WebCrypto + IndexedDB operations) + ├── api.ts (fetch calls to registry server) + └── style.css (minimal) +``` + +## Configuration + +The registry server URL should be configurable at the top of `api.ts`: + +```typescript +const REGISTRY_URL = 'http://localhost:3000'; +``` + +The app should handle CORS. The registry server has CORS enabled when `AUTHS_CORS=1` is set. + +## Before You Write Code + +1. **Read `identity_auth.rs`** — find the `BearerPayload` struct definition, the `validate_signed_challenge` function, and understand exactly what it validates (signature format, message format, timestamp window, DID resolution path). +2. **Read the identity registration route** — find the `POST /v1/identities` handler and understand the request body. +3. **Read the `BearerPayload` struct** — find every field. The struct may have fields beyond `did`, `timestamp`, `signature`. Match it exactly. +4. **Check the signature format** — is it hex-encoded? base64? raw bytes? The registry middleware will reject the request if the encoding is wrong. +5. **Check what message is signed** — is it `"{did}\n{timestamp}"` or something else? A single wrong byte means signature verification fails. + +If you find that the `BearerPayload` / `validate_signed_challenge` code doesn't match what's described in this prompt, **trust the code, not this prompt**. This prompt is based on an exploration of the codebase — the code is the source of truth. + +## Definition of Done + +1. `npm run dev` serves the app on localhost +2. Clicking through Steps 1-4 in order produces a successful authenticated request against a locally running registry server +3. 
The debug panel shows the exact `Authorization` header sent and confirms the server accepted it (not falling back to anonymous) +4. If anything fails (wrong payload format, signature mismatch, etc.), the debug panel shows enough detail to diagnose why + +## What NOT To Do + +- No React, no Next.js, no framework — vanilla TypeScript only +- No CSS framework — raw CSS, just enough to not be ugly +- No state management — module-level variables are fine for a test harness +- No tests — this IS the test +- No production concerns (error boundaries, loading states, accessibility) — this is a throwaway diagnostic tool +- Do not modify the backend — the point is to test the backend as-is diff --git a/docs/plans/wiring/wiring_output.md b/docs/plans/wiring/wiring_output.md new file mode 100644 index 00000000..dbcf80ff --- /dev/null +++ b/docs/plans/wiring/wiring_output.md @@ -0,0 +1,1326 @@ +# Wiring Output — Product-Driven Engineering Improvements + +--- + +## Step 1 — Product Experience Analysis + +### Are backend capabilities visible to users? 
+ +**Fully Exposed (9 capabilities):** +- Identity resolution, KEL management, attestation verification, device management +- Org membership, challenge-response auth, encrypted chat, artifact search, network stats + +**Partially Exposed (4 capabilities):** + +| Capability | What's Missing | +|---|---| +| Transparency log | Activity feed exists on web, but no inclusion proofs, no checkpoint viewer, no consistency verification UI | +| Policy engine | Org policy is display-only JSON on web — no lint, compile, explain, test, or diff tools | +| Org analytics | Backend has `/v1/orgs/{orgDid}/analytics` with signing coverage, member adoption, key health — **frontend never calls it** | +| Trust tier | Server computes `server_trust_tier` + `server_trust_score` but frontend falls back to client-side computation | + +**Not Exposed (8 capabilities):** + +| Capability | Backend Location | Impact | +|---|---|---| +| Agent delegation | `auths-sdk::workflows::org` + `auths-agent-demo` | Core differentiator for AI-age identity — invisible to web users | +| Audit/compliance reports | `auths-sdk::workflows::audit::AuditWorkflow` | CLI-only; orgs can't see signing compliance on web | +| System diagnostics | `auths-sdk::workflows::diagnostics::DiagnosticsWorkflow` | No self-service health checks | +| Policy diff + risk scoring | `auths-sdk::workflows::policy_diff` | Policy changes are blind — no risk preview | +| Allowed signers management | `auths-sdk::workflows::allowed_signers` | SSH integration invisible to web | +| IdP binding (Okta/Entra/Google/SAML) | `auths-cloud::auths-idp` | Enterprise onboarding has no web flow | +| Key rotation | `auths-sdk::workflows::rotation` | Critical security operation — CLI-only | +| Billing/subscription (Stripe) | Registry server `/v1/billing/*` | Backend wired but no frontend pages | + +### Are APIs optimized for UI needs? + +**No — several gaps:** + +1. 
**Auth state not persisted.** `auth-context.tsx` stores tokens in React state only — page reload = logged out. No `localStorage`, no refresh token, no session cookie. + +2. **Org members inferred from audit feed.** `org-client.tsx` extracts member DIDs from activity feed `org_add_member` entries. The backend has a proper `GET /v1/orgs/{orgDid}/members` endpoint with role, capabilities, and pagination — but the frontend doesn't use it. + +3. **No batch verification endpoint used.** Frontend verifies artifacts via WASM one-at-a-time. Backend has `POST /v1/attestations/verify` but frontend never calls it. + +4. **No identity creation on web.** `POST /v1/identities` exists and is public/self-authenticating. The `/try/individual` flow tells users to install the CLI. A web-based DID creation flow is feasible. + +5. **Analytics endpoints built but unused.** Backend serves signing coverage, member adoption, and key health at `/v1/orgs/{orgDid}/analytics` — no frontend page consumes this. + +### Are there product features blocked by architecture? + +1. **Web-based identity creation** — Not architecturally blocked (API exists), but frontend doesn't implement it. Requires WebCrypto Ed25519 key generation client-side. +2. **Agent delegation dashboard** — SDK workflows exist but no REST API wraps them. Needs new registry endpoints. +3. **Policy playground** — `auths-policy` crate has `compile()`, `evaluate()`, `enforce()` — needs REST endpoints and a web editor. +4. **Mobile app** — 6 `/mobile/*` endpoints referenced by mobile clients don't exist in the registry server. + +--- + +## Step 2 — Improvement Opportunities + +### A. 
Backend Capabilities Not Surfaced + +| # | Capability | SDK Function | Effort | +|---|---|---|---| +| A1 | Org analytics dashboard | `GET /v1/orgs/{orgDid}/analytics` (exists) | Low — frontend only | +| A2 | Org members direct listing | `GET /v1/orgs/{orgDid}/members` (exists) | Low — frontend only | +| A3 | Agent delegation management | `sdk::workflows::org::add_organization_member()` | Medium — needs REST + UI | +| A4 | Audit compliance reports | `sdk::workflows::audit::AuditWorkflow::generate_report()` | Medium — needs REST + UI | +| A5 | Policy playground | `auths_policy::compile()`, `evaluate3()`, `enforce()` | Medium — needs REST + UI | +| A6 | Billing/subscription pages | `/v1/billing/checkout`, `/billing/portal`, `/billing/info` | Low — frontend only | + +### B. Missing Feature Pipelines + +| # | Pipeline | Current State | Fix | +|---|---|---|---| +| B1 | Mobile API | Frontend references `/mobile/*` — endpoints don't exist | Add mobile routes to registry server | +| B2 | GitHub App verification | Webhook handlers log events but verification is TODO | Wire `auths-verifier` into webhook handler | +| B3 | Web identity creation | `/try/individual` points to CLI install | Add WebCrypto key gen + `POST /v1/identities` | +| B4 | SCIM → OIDC bridge | SCIM provisions agents but doesn't connect to OIDC | Wire provisioned agent capabilities into token claims | +| B5 | Invite acceptance flow | `POST /v1/invites/{code}/accept` exists but `/join/[code]` page may not call it | Verify and wire frontend | + +### C. 
Inefficient APIs / Frontend Patterns + +| # | Problem | Fix | +|---|---|---| +| C1 | Auth tokens not persisted across page reloads | Add `localStorage` persistence with expiry checks | +| C2 | Org members scraped from audit feed | Use `GET /v1/orgs/{orgDid}/members` directly | +| C3 | Client-side trust tier when server computes it | Ensure backend always returns `server_trust_tier` | +| C4 | No error retry/recovery | Add React Query `retry` configuration | + +### D. Dead Code + +| # | Item | Action | +|---|---|---| +| D1 | `auths-github-app` stub handlers | Either implement or remove | +| D2 | MCP `deploy`/`read_file`/`write_file` mock tools | Either implement or remove | +| D3 | `auths-agent-demo` disconnected simulation | Convert to integration test or remove | +| D4 | `transparency-placeholder.tsx` | Remove | +| D5 | Deleted subprojects (auths-legacy, auths-releases, auths-verify-action) | Clean git state | +| D6 | 15 deleted roadmap markdown files | Clean git state | + +--- + +## Step 3 — Engineering Epics + +### Epic 1: Surface Org Analytics & Members on Web + +Expose the existing backend org analytics and member listing endpoints to the frontend. Zero backend work — purely frontend wiring. + +### Epic 2: Persist Auth State + +Fix the auth context so users stay logged in across page reloads. Add token refresh or at minimum `localStorage` persistence. + +### Epic 3: Agent Delegation Dashboard + +Build the REST API and web UI for managing AI agent delegations — the product's core differentiator for the AI era. + +### Epic 4: Policy Playground + +Expose the policy engine (lint, compile, evaluate, diff) via REST and build an interactive web editor. + +### Epic 5: Audit & Compliance Dashboard + +Surface commit signing audit reports on the web for organizations. + +### Epic 6: Billing & Subscription Pages + +Wire the existing Stripe billing endpoints to frontend pages for checkout, portal, and usage tracking. 
+ +### Epic 7: Web-Based Identity Creation + +Allow users to create an identity directly from the browser using WebCrypto, removing the CLI installation requirement for first-time users. + +### Epic 8: Remove Dead Code & Stubs + +Clean up stub handlers, mock tools, disconnected demos, and deleted-but-tracked files. + +### Epic 9: Mobile API Endpoints + +Implement the `/mobile/*` API routes that the mobile clients reference but don't exist. + +--- + +## Step 4 — Implementation Tasks + +--- + +### Epic 1: Surface Org Analytics & Members on Web + +#### Task 1.1: Add org analytics API calls to registry client + +**Repository:** `auths-site` + +**Files:** `apps/web/src/lib/api/registry.ts` + +**Current code:** +```typescript +// No analytics fetch functions exist +export async function fetchOrgStatus( + orgDid: string, + token: string, + signal?: AbortSignal, +): Promise { + return registryFetchAuth( + `/v1/orgs/${encodeURIComponent(orgDid)}/status`, + token, + signal, + ); +} +``` + +**Improved code:** +```typescript +export interface AnalyticsSummary { + signing_coverage: { + total_commits_verified: number; + auths_signed: number; + gpg_signed: number; + ssh_signed: number; + unsigned: number; + coverage_percent: number; + }; + member_adoption: { + total_members: number; + auths_active: number; + active_signers: number; + adoption_percent: number; + }; + key_health: { + total_keys: number; + keys_due_for_rotation: number; + keys_expiring_soon: number; + keys_revoked: number; + }; + period: { start: string; end: string; days: number }; +} + +export async function fetchOrgAnalytics( + orgDid: string, + token: string, + signal?: AbortSignal, +): Promise { + return registryFetchAuth( + `/v1/orgs/${encodeURIComponent(orgDid)}/analytics`, + token, + signal, + ); +} + +export interface MemberResponse { + member_did: string; + role: string | null; + capabilities: string[]; + issuer: string; + revoked_at: string | null; + expires_at: string | null; + added_at: string | null; 
+} + +export async function fetchOrgMembers( + orgDid: string, + token: string, + params?: { role?: string; include_revoked?: boolean; limit?: number; after?: string }, + signal?: AbortSignal, +): Promise<MemberResponse[]> { + return registryFetchAuth( + `/v1/orgs/${encodeURIComponent(orgDid)}/members`, + token, + signal, + params as Record<string, unknown>, + ); +} +``` + +**Explanation:** The backend already serves org analytics at `GET /v1/orgs/{orgDid}/analytics` with signing coverage, member adoption, and key health. The backend also has `GET /v1/orgs/{orgDid}/members` with role/capability/pagination support. Adding these fetch functions unlocks the data for the frontend. + +--- + +#### Task 1.2: Add React Query hooks for org analytics and members + +**Repository:** `auths-site` + +**Files:** `apps/web/src/lib/queries/registry.ts` + +**Current code:** +```typescript +// Only orgPolicy and orgStatus hooks exist +orgPolicy: (orgDid: string) => [...registryKeys.all, 'orgPolicy', orgDid] as const, +orgStatus: (orgDid: string) => [...registryKeys.all, 'orgStatus', orgDid] as const, +``` + +**Improved code:** +```typescript +orgPolicy: (orgDid: string) => [...registryKeys.all, 'orgPolicy', orgDid] as const, +orgStatus: (orgDid: string) => [...registryKeys.all, 'orgStatus', orgDid] as const, +orgAnalytics: (orgDid: string) => [...registryKeys.all, 'orgAnalytics', orgDid] as const, +orgMembers: (orgDid: string) => [...registryKeys.all, 'orgMembers', orgDid] as const, +``` + +And add hooks: + +```typescript +export function useOrgAnalytics(orgDid: string, token: string) { + return useQuery({ + queryKey: registryKeys.orgAnalytics(orgDid), + queryFn: ({ signal }) => fetchOrgAnalytics(orgDid, token, signal), + enabled: orgDid.length > 0 && token.length > 0, + staleTime: 300_000, + }); +} + +export function useOrgMembers(orgDid: string, token: string) { + return useQuery({ + queryKey: registryKeys.orgMembers(orgDid), + queryFn: ({ signal }) => fetchOrgMembers(orgDid, token, signal), + enabled: orgDid.length
> 0 && token.length > 0, + staleTime: 120_000, + }); +} +``` + +**Explanation:** These hooks follow existing patterns (TanStack Query, staleTime, signal forwarding) and unlock org analytics and member data for components. + +--- + +#### Task 1.3: Replace audit-feed-inferred members with direct member listing + +**Repository:** `auths-site` + +**Files:** `apps/web/src/app/registry/org/[did]/org-client.tsx` + +**Current code:** +```typescript +function OrgMembers({ members }: { members: FeedEntry[] }) { + const [showAll, setShowAll] = useState(false); + const visible = showAll ? members : members.slice(0, INITIAL_CAP); + const hasMore = members.length > INITIAL_CAP; + + // Extracts member_did from activity feed entries + const memberDid = entry.metadata.member_did as string | undefined; +} +``` + +**Improved code:** +```typescript +function OrgMembers({ orgDid, token }: { orgDid: string; token: string }) { + const { data: members, isLoading } = useOrgMembers(orgDid, token); + const [showAll, setShowAll] = useState(false); + + if (isLoading) return ; + if (!members?.length) return ; + + const active = members.filter((m) => !m.revoked_at); + const visible = showAll ? active : active.slice(0, INITIAL_CAP); + + return ( +
+

Members ({active.length})

+ {visible.map((member) => ( + + ))} + {active.length > INITIAL_CAP && !showAll && ( + + )} +
+ ); +} +``` + +**Explanation:** Replaces the fragile pattern of inferring members from the audit activity feed with a direct call to `GET /v1/orgs/{orgDid}/members`. This gives us proper roles, capabilities, and revocation status — data that was lost when scraping from audit entries. + +--- + +#### Task 1.4: Add org analytics dashboard section to org page + +**Repository:** `auths-site` + +**Files:** `apps/web/src/app/registry/org/[did]/org-client.tsx` (new section) + +**Current code:** +```typescript +// No analytics section exists on org page +``` + +**Improved code:** +```typescript +function OrgAnalyticsDashboard({ orgDid, token }: { orgDid: string; token: string }) { + const { data, isLoading } = useOrgAnalytics(orgDid, token); + + if (isLoading) return ; + if (!data) return null; + + const { signing_coverage, member_adoption, key_health } = data; + + return ( +
+ + + 0} + /> +
+ ); +} +``` + +**Explanation:** The backend already computes signing coverage (what % of commits are signed), member adoption (what % of members use auths), and key health (rotation/expiry status). Surfacing this on the org page turns the product into a compliance dashboard — the primary value proposition for enterprise buyers. + +--- + +### Epic 2: Persist Auth State + +#### Task 2.1: Add localStorage persistence to auth context + +**Repository:** `auths-site` + +**Files:** `apps/web/src/lib/auth/auth-context.tsx` + +**Current code:** +```typescript +interface AuthState { + token: string; + did: string; + expiresAt: string; +} + +// In provider: +const [auth, setAuthState] = useState(null); +``` + +**Improved code:** +```typescript +const AUTH_STORAGE_KEY = 'auths_auth_state'; + +function loadPersistedAuth(): AuthState | null { + if (typeof window === 'undefined') return null; + const raw = localStorage.getItem(AUTH_STORAGE_KEY); + if (!raw) return null; + const parsed = JSON.parse(raw) as AuthState; + if (Date.now() >= Date.parse(parsed.expiresAt)) { + localStorage.removeItem(AUTH_STORAGE_KEY); + return null; + } + return parsed; +} + +// In provider: +const [auth, setAuthState] = useState(loadPersistedAuth); + +const setAuth = useCallback((state: AuthState) => { + setAuthState(state); + localStorage.setItem(AUTH_STORAGE_KEY, JSON.stringify(state)); +}, []); + +const clearAuth = useCallback(() => { + setAuthState(null); + localStorage.removeItem(AUTH_STORAGE_KEY); +}, []); +``` + +**Explanation:** Currently, every page reload logs the user out because auth state is held only in React state. This adds `localStorage` persistence with automatic expiry checking. The DID challenge-response flow requires the user to run a CLI command — losing that session on reload is a terrible UX. 
+ +--- + +### Epic 3: Agent Delegation Dashboard + +#### Task 3.1: Add delegation REST endpoints to registry server + +**Repository:** `auths-cloud` + +**Files:** `crates/auths-registry-server/src/routes/delegation.rs` (new file) + +**Current code:** +``` +// No delegation endpoints exist +``` + +**Improved code:** +```rust +use axum::{extract::State, routing::{get, post, delete}, Json, Router}; +use crate::{middleware::identity_auth::AuthenticatedIdentity, state::ServerState, error::ApiError}; + +pub fn routes() -> Router<ServerState> { + Router::new() + .route("/", post(create_delegation)) + .route("/", get(list_delegations)) + .route("/{delegation_rid}", get(get_delegation)) + .route("/{delegation_rid}/revoke", post(revoke_delegation)) +} + +#[derive(Deserialize)] +pub struct CreateDelegationRequest { + pub delegate_did: String, + pub capabilities: Vec<String>, + pub expires_in_seconds: Option<u64>, + pub delegate_type: DelegateType, // Agent | Human | Workload +} + +#[derive(Serialize)] +pub struct DelegationResponse { + pub rid: String, + pub issuer: String, + pub delegate_did: String, + pub delegate_type: String, + pub capabilities: Vec<String>, + pub created_at: String, + pub expires_at: Option<String>, + pub revoked_at: Option<String>, + pub status: String, // active | expired | revoked +} + +async fn create_delegation( + State(state): State<ServerState>, + auth: AuthenticatedIdentity, + Json(req): Json<CreateDelegationRequest>, +) -> Result<Json<DelegationResponse>, ApiError> { + // Uses auths-sdk::workflows::org::add_organization_member() + // with delegate_type to distinguish agent/human/workload + todo!() +} + +async fn list_delegations( + State(state): State<ServerState>, + auth: AuthenticatedIdentity, +) -> Result<Json<Vec<DelegationResponse>>, ApiError> { + // Query delegations issued by auth.did + todo!() +} + +async fn revoke_delegation( + State(state): State<ServerState>, + auth: AuthenticatedIdentity, + Path(rid): Path<String>, +) -> Result<Json<DelegationResponse>, ApiError> { + // Uses auths-sdk revocation workflow + todo!() +} +``` + +**Explanation:** Agent delegation is the product's core differentiator.
The SDK has full support for scoped, time-bounded delegation to AI agents with capability-based authorization. But there's no REST API — it's only usable from the CLI. This endpoint enables the web dashboard. + +--- + +#### Task 3.2: Add delegation management page to frontend + +**Repository:** `auths-site` + +**Files:** `apps/web/src/app/registry/org/[did]/delegations/page.tsx` (new), `apps/web/src/app/registry/org/[did]/delegations/delegations-client.tsx` (new) + +**Current code:** +``` +// No delegation UI exists +``` + +**Improved code:** +```typescript +// delegations-client.tsx +'use client'; + +export function DelegationsClient({ orgDid }: { orgDid: string }) { + const { auth } = useAuth(); + const { data: delegations, isLoading } = useDelegations(orgDid, auth?.token ?? ''); + const [showCreate, setShowCreate] = useState(false); + + return ( +
+
+

Agent Delegations

+ +
+ +
+ {delegations?.map((d) => ( + revokeDelegation(d.rid)} + /> + ))} +
+ + {showCreate && ( + setShowCreate(false)} + /> + )} +
+ ); +} + +function DelegationCard({ delegation, onRevoke }: { + delegation: DelegationResponse; + onRevoke: () => void; +}) { + return ( +
+
+
+ {delegation.delegate_type} +

{delegation.delegate_did}

+
+ +
+
+ {delegation.capabilities.map((cap) => ( + {cap} + ))} +
+
+ Expires: {delegation.expires_at ?? 'never'} + {delegation.status === 'active' && ( + + )} +
+
+ ); +} + +function CreateDelegationModal({ orgDid, onClose }: { + orgDid: string; + onClose: () => void; +}) { + const [delegateDid, setDelegateDid] = useState(''); + const [capabilities, setCapabilities] = useState([]); + const [delegateType, setDelegateType] = useState<'agent' | 'human' | 'workload'>('agent'); + const [expiresIn, setExpiresIn] = useState('3600'); + + return ( + +

Create Delegation

+ + + setDelegateDid(e.target.value)} /> + + + + + + + + + setExpiresIn(e.target.value)} /> + +
+ + +
+
+ ); +} +``` + +**Explanation:** This page gives users a visual interface for managing scoped, time-bounded delegations to AI agents — the core product differentiator. Users can create delegations with specific capabilities (`deploy:staging`, `sign_commit`, etc.), set expiry, and revoke instantly. This is the feature that makes Auths unique in the AI era. + +--- + +### Epic 4: Policy Playground + +#### Task 4.1: Add policy evaluation REST endpoints + +**Repository:** `auths-cloud` + +**Files:** `crates/auths-registry-server/src/routes/policy.rs` (new file) + +**Current code:** +``` +// No policy evaluation endpoints exist (only org policy GET/SET) +``` + +**Improved code:** +```rust +pub fn routes() -> Router { + Router::new() + .route("/lint", post(lint_policy)) + .route("/compile", post(compile_policy)) + .route("/evaluate", post(evaluate_policy)) + .route("/diff", post(diff_policies)) +} + +#[derive(Deserialize)] +pub struct PolicyInput { + pub expression: serde_json::Value, // Policy Expr as JSON +} + +#[derive(Serialize)] +pub struct LintResult { + pub valid: bool, + pub errors: Vec, + pub warnings: Vec, +} + +#[derive(Serialize)] +pub struct EvalResult { + pub decision: String, // Allow | Deny | Abstain + pub matched_predicates: Vec, + pub unmatched_predicates: Vec, +} + +#[derive(Deserialize)] +pub struct DiffRequest { + pub old_policy: serde_json::Value, + pub new_policy: serde_json::Value, +} + +#[derive(Serialize)] +pub struct DiffResult { + pub changes: Vec, + pub overall_risk: String, // HIGH | MEDIUM | LOW +} + +#[derive(Serialize)] +pub struct PolicyChange { + pub kind: String, // added | removed | changed + pub description: String, + pub risk: String, +} + +async fn lint_policy( + Json(input): Json, +) -> Result, ApiError> { + // Uses auths_policy::compile() to validate syntax + // Returns structured errors if invalid + todo!() +} + +async fn evaluate_policy( + Json(input): Json, +) -> Result, ApiError> { + // Uses auths_policy::evaluate3() with 
provided context + todo!() +} + +async fn diff_policies( + Json(input): Json, +) -> Result, ApiError> { + // Uses auths-sdk::workflows::policy_diff::compute_policy_diff() + // + overall_risk_score() + todo!() +} +``` + +**Explanation:** The `auths-policy` crate is a full expression-based policy engine with compile, evaluate, and diff capabilities. Exposing these via REST enables a web-based policy playground where org admins can write policies, test them against sample attestations, and preview the risk of policy changes before deploying. + +--- + +#### Task 4.2: Add policy playground page to frontend + +**Repository:** `auths-site` + +**Files:** `apps/web/src/app/registry/org/[did]/policy/page.tsx` (new), `apps/web/src/app/registry/org/[did]/policy/policy-playground.tsx` (new) + +**Current code:** +```typescript +// OrgSigningPolicy in org-client.tsx is read-only JSON display: +function OrgSigningPolicy({ orgDid }: { orgDid: string }) { + const { data: policy } = useOrgPolicy(orgDid); + return ( +
{JSON.stringify(policy, null, 2)}
+ ); +} +``` + +**Improved code:** +```typescript +// policy-playground.tsx +'use client'; + +export function PolicyPlayground({ orgDid }: { orgDid: string }) { + const [expr, setExpr] = useState(''); + const [lintResult, setLintResult] = useState(null); + const [evalResult, setEvalResult] = useState(null); + const [diffResult, setDiffResult] = useState(null); + const { data: currentPolicy } = useOrgPolicy(orgDid); + + const handleLint = async () => { + const result = await lintPolicy(JSON.parse(expr)); + setLintResult(result); + }; + + const handleDiff = async () => { + if (!currentPolicy) return; + const result = await diffPolicies(currentPolicy, JSON.parse(expr)); + setDiffResult(result); + }; + + return ( +
+
+

Policy Editor

+