diff --git a/Cargo.lock b/Cargo.lock index ad4d9921..d7ac4fd3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -321,6 +321,7 @@ dependencies = [ "auths-sdk", "auths-storage", "auths-telemetry", + "auths-transparency", "auths-utils", "auths-verifier", "axum", @@ -344,7 +345,7 @@ dependencies = [ "log", "nix", "open", - "pkcs8", + "pkcs8 0.10.2", "predicates 2.1.5", "reqwest 0.13.2", "ring", @@ -394,7 +395,7 @@ dependencies = [ "multibase", "once_cell", "parking_lot", - "pkcs8", + "pkcs8 0.10.2", "qrcode", "rand 0.8.5", "ring", @@ -468,7 +469,7 @@ dependencies = [ "log", "mockall", "multibase", - "pkcs8", + "pkcs8 0.10.2", "proptest", "rand 0.10.0", "ring", @@ -618,6 +619,7 @@ dependencies = [ "rand 0.8.5", "regex-lite", "ring", + "schemars 0.8.22", "serde", "serde_json", "sha2", @@ -681,6 +683,7 @@ dependencies = [ "auths-sdk", "auths-storage", "auths-telemetry", + "auths-transparency", "auths-verifier", "base64", "chrono", @@ -754,6 +757,30 @@ dependencies = [ "tempfile", ] +[[package]] +name = "auths-transparency" +version = "0.0.1-rc.8" +dependencies = [ + "async-trait", + "auths-crypto", + "auths-verifier", + "aws-config", + "aws-sdk-s3", + "base64", + "chrono", + "futures", + "hex", + "json-canon", + "proptest", + "ring", + "serde", + "serde_json", + "sha2", + "tempfile", + "thiserror 2.0.18", + "tokio", +] + [[package]] name = "auths-utils" version = "0.0.1-rc.8" @@ -778,7 +805,7 @@ dependencies = [ "json-canon", "libc", "log", - "pkcs8", + "pkcs8 0.10.2", "proptest", "ring", "schemars 0.8.22", @@ -799,6 +826,48 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-config" +version = "1.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11493b0bad143270fb8ad284a096dd529ba91924c5409adeac856cc1bf047dbc" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sdk-sso", + "aws-sdk-ssooidc", 
+ "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http 0.63.6", + "aws-smithy-json 0.62.5", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "hex", + "http 1.4.0", + "sha1", + "time", + "tokio", + "tracing", + "url", + "zeroize", +] + +[[package]] +name = "aws-credential-types" +version = "1.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f20799b373a1be121fe3005fba0c2090af9411573878f224df44b42727fcaf7" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + [[package]] name = "aws-lc-rs" version = "1.16.1" @@ -821,6 +890,412 @@ dependencies = [ "fs_extra", ] +[[package]] +name = "aws-runtime" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc0651c57e384202e47153c1260b84a9936e19803d747615edf199dc3b98d17" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-eventstream", + "aws-smithy-http 0.63.6", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "bytes-utils", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-s3" +version = "1.119.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d65fddc3844f902dfe1864acb8494db5f9342015ee3ab7890270d36fbd2e01c" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-eventstream", + "aws-smithy-http 0.62.6", + "aws-smithy-json 0.61.9", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "hex", + "hmac", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "lru", + "percent-encoding", + 
"regex-lite", + "sha2", + "tracing", + "url", +] + +[[package]] +name = "aws-sdk-sso" +version = "1.96.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f64a6eded248c6b453966e915d32aeddb48ea63ad17932682774eb026fbef5b1" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http 0.63.6", + "aws-smithy-json 0.62.5", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ssooidc" +version = "1.98.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db96d720d3c622fcbe08bae1c4b04a72ce6257d8b0584cb5418da00ae20a344f" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http 0.63.6", + "aws-smithy-json 0.62.5", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.100.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fafbdda43b93f57f699c5dfe8328db590b967b8a820a13ccdd6687355dfcc7ca" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http 0.63.6", + "aws-smithy-json 0.62.5", + "aws-smithy-observability", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0b660013a6683ab23797778e21f1f854744fdf05f68204b4cca4c8c04b5d1f4" +dependencies = [ + "aws-credential-types", + 
"aws-smithy-eventstream", + "aws-smithy-http 0.63.6", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "crypto-bigint 0.5.5", + "form_urlencoded", + "hex", + "hmac", + "http 0.2.12", + "http 1.4.0", + "p256 0.11.1", + "percent-encoding", + "ring", + "sha2", + "subtle", + "time", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffcaf626bdda484571968400c326a244598634dc75fd451325a54ad1a59acfc" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-checksums" +version = "0.63.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87294a084b43d649d967efe58aa1f9e0adc260e13a6938eb904c0ae9b45824ae" +dependencies = [ + "aws-smithy-http 0.62.6", + "aws-smithy-types", + "bytes", + "crc-fast", + "hex", + "http 0.2.12", + "http-body 0.4.6", + "md-5", + "pin-project-lite", + "sha1", + "sha2", + "tracing", +] + +[[package]] +name = "aws-smithy-eventstream" +version = "0.60.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf09d74e5e32f76b8762da505a3cd59303e367a664ca67295387baa8c1d7548" +dependencies = [ + "aws-smithy-types", + "bytes", + "crc32fast", +] + +[[package]] +name = "aws-smithy-http" +version = "0.62.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "826141069295752372f8203c17f28e30c464d22899a43a0c9fd9c458d469c88b" +dependencies = [ + "aws-smithy-eventstream", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "futures-util", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http" +version = "0.63.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ba1ab2dc1c2c3749ead27180d333c42f11be8b0e934058fb4b2258ee8dbe5231" +dependencies = [ + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http-client" +version = "1.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a2f165a7feee6f263028b899d0a181987f4fa7179a6411a32a439fba7c5f769" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "h2 0.3.27", + "h2 0.4.13", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper 1.8.1", + "hyper-rustls 0.24.2", + "hyper-rustls 0.27.7", + "hyper-util", + "pin-project-lite", + "rustls 0.21.12", + "rustls 0.23.37", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.4", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.61.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49fa1213db31ac95288d981476f78d05d9cbb0353d22cdf3472cc05bb02f6551" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-json" +version = "0.62.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9648b0bb82a2eedd844052c6ad2a1a822d1f8e3adee5fbf668366717e428856a" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-observability" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06c2315d173edbf1920da8ba3a7189695827002e4c0fc961973ab1c54abca9c" +dependencies = [ + "aws-smithy-runtime-api", +] + +[[package]] +name = "aws-smithy-query" +version = "0.60.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a56d79744fb3edb5d722ef79d86081e121d3b9422cb209eb03aea6aa4f21ebd" +dependencies = [ + 
"aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "028999056d2d2fd58a697232f9eec4a643cf73a71cf327690a7edad1d2af2110" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http 0.63.6", + "aws-smithy-http-client", + "aws-smithy-observability", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "pin-utils", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876ab3c9c29791ba4ba02b780a3049e21ec63dabda09268b175272c3733a79e6" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.4.0", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2b1117b3b2bbe166d11199b540ceed0d0f7676e36e7b962b5a437a9971eac75" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce02add1aa3677d022f8adf81dcbe3046a95f17a1b1e8979c145cd21d3d22b3" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47c8323699dd9b3c8d5b3c13051ae9cdef58fd179957c882f8374dd8725962d9" +dependencies = [ + "aws-credential-types", + 
"aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version", + "tracing", +] + [[package]] name = "axum" version = "0.8.8" @@ -831,10 +1306,10 @@ dependencies = [ "bytes", "form_urlencoded", "futures-util", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-util", "itoa", "matchit", @@ -862,8 +1337,8 @@ checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -882,16 +1357,16 @@ dependencies = [ "arc-swap", "bytes", "fs-err", - "http", - "http-body", - "hyper", + "http 1.4.0", + "http-body 1.0.1", + "hyper 1.8.1", "hyper-util", "pin-project-lite", - "rustls", + "rustls 0.23.37", "rustls-pemfile", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tower-service", ] @@ -901,6 +1376,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base16ct" version = "0.2.0" @@ -929,6 +1410,16 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + [[package]] name = "base64ct" version = "1.8.3" @@ -1062,6 +1553,16 @@ version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" +[[package]] +name = "bytes-utils" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "cast" version = "0.3.0" @@ -1106,7 +1607,7 @@ dependencies = [ "k256", "lazy_static", "num-rational", - "p256", + "p256 0.13.2", "rand_core 0.6.4", "regex", "serde_json", @@ -1464,6 +1965,19 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crc-fast" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ddc2d09feefeee8bd78101665bd8645637828fa9317f9f292496dbbd8c65ff3" +dependencies = [ + "crc", + "digest", + "rand 0.9.2", + "regex", + "rustversion", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -1584,6 +2098,18 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -1715,6 +2241,16 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der" 
version = "0.7.10" @@ -1846,6 +2382,18 @@ dependencies = [ "getrandom 0.2.17", ] +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] + [[package]] name = "ecdsa" version = "0.16.9" @@ -1854,10 +2402,10 @@ checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der 0.7.10", "digest", - "elliptic-curve", - "rfc6979", - "signature", - "spki", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.2.0", + "spki 0.7.3", ] [[package]] @@ -1866,8 +2414,8 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8", - "signature", + "pkcs8 0.10.2", + "signature 2.2.0", ] [[package]] @@ -1894,22 +2442,42 @@ dependencies = [ "serde", ] +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1 0.3.0", + "subtle", + "zeroize", +] + [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.2.0", + "crypto-bigint 0.5.5", "digest", - "ff", + "ff 0.13.1", "generic-array", - "group", + "group 0.13.0", "pem-rfc7468", - "pkcs8", + "pkcs8 0.10.2", "rand_core 0.6.4", - "sec1", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -2070,6 +2638,16 @@ version 
= "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff" version = "0.13.1" @@ -2417,17 +2995,47 @@ dependencies = [ "url", ] +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", + "ff 0.13.1", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "h2" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "h2" version = "0.4.13" @@ -2439,7 +3047,7 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http", + "http 1.4.0", "indexmap", "slab", "tokio", @@ -2552,6 +3160,17 @@ dependencies = [ "utf8-width", ] +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http" version = "1.4.0" @@ -2562,6 +3181,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" 
+version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + [[package]] name = "http-body" version = "1.0.1" @@ -2569,7 +3199,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http", + "http 1.4.0", ] [[package]] @@ -2580,8 +3210,8 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -2597,6 +3227,30 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.27", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.8.1" @@ -2607,9 +3261,9 @@ dependencies = [ "bytes", "futures-channel", "futures-core", - "h2", - "http", - "http-body", + "h2 0.4.13", + "http 1.4.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -2620,20 +3274,35 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + 
"tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "http", - "hyper", + "http 1.4.0", + "hyper 1.8.1", "hyper-util", - "rustls", + "rustls 0.23.37", "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tower-service", "webpki-roots", ] @@ -2648,14 +3317,14 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http", - "http-body", - "hyper", + "http 1.4.0", + "http-body 1.0.1", + "hyper 1.8.1", "ipnet", "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.3", "system-configuration", "tokio", "tower-service", @@ -3061,7 +3730,7 @@ dependencies = [ "regex", "regex-syntax", "reqwest 0.13.2", - "rustls", + "rustls 0.23.37", "serde", "serde_json", "unicode-general-category", @@ -3090,11 +3759,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "once_cell", "sha2", - "signature", + "signature 2.2.0", ] [[package]] @@ -3206,6 +3875,15 @@ version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + [[package]] name = "lru-slab" version = "0.1.2" @@ -3260,7 +3938,7 @@ dependencies = [ "log", "mio", "socket-pktinfo", - "socket2", + "socket2 0.6.3", ] [[package]] @@ -3296,15 +3974,15 @@ checksum = "3589659543c04c7dc5526ec858591015b87cd8746583b51b48ef4353f99dbcda" 
dependencies = [ "base64", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "indexmap", "ipnet", "metrics", "metrics-util", "quanta", - "rustls", + "rustls 0.23.37", "thiserror 2.0.18", "tokio", "tracing", @@ -3688,14 +4366,25 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2", +] + [[package]] name = "p256" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "sha2", ] @@ -3706,8 +4395,8 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "sha2", ] @@ -3718,9 +4407,9 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" dependencies = [ - "base16ct", - "ecdsa", - "elliptic-curve", + "base16ct 0.2.0", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "rand_core 0.6.4", "sha2", @@ -3869,8 +4558,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ "der 0.7.10", - "pkcs8", - "spki", + "pkcs8 0.10.2", + "spki 0.7.3", +] + +[[package]] +name = "pkcs8" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", ] [[package]] @@ -3880,7 +4579,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der 0.7.10", - "spki", + "spki 0.7.3", ] [[package]] @@ -4042,7 +4741,7 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "elliptic-curve", + "elliptic-curve 0.13.8", ] [[package]] @@ -4130,8 +4829,8 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls", - "socket2", + "rustls 0.23.37", + "socket2 0.6.3", "thiserror 2.0.18", "tokio", "tracing", @@ -4151,7 +4850,7 @@ dependencies = [ "rand 0.9.2", "ring", "rustc-hash", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "slab", "thiserror 2.0.18", @@ -4169,7 +4868,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.6.3", "tracing", "windows-sys 0.60.2", ] @@ -4215,7 +4914,7 @@ dependencies = [ "ec25519", "multibase", "serde", - "signature", + "signature 2.2.0", "thiserror 2.0.18", "zeroize", ] @@ -4464,25 +5163,25 @@ dependencies = [ "base64", "bytes", "futures-core", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "js-sys", "log", "percent-encoding", "pin-project-lite", "quinn", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tower", "tower-http", "tower-service", @@ -4505,12 +5204,12 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.4.13", + "http 1.4.0", + "http-body 
1.0.1", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "js-sys", "log", @@ -4518,7 +5217,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "rustls-platform-verifier", "serde", @@ -4526,7 +5225,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tower", "tower-http", "tower-service", @@ -4536,6 +5235,17 @@ dependencies = [ "web-sys", ] +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac", + "zeroize", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -4597,11 +5307,11 @@ dependencies = [ "num-integer", "num-traits", "pkcs1", - "pkcs8", + "pkcs8 0.10.2", "rand_core 0.6.4", "sha2", - "signature", - "spki", + "signature 2.2.0", + "spki 0.7.3", "subtle", "zeroize", ] @@ -4654,6 +5364,18 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.37" @@ -4664,7 +5386,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.103.9", "subtle", "zeroize", ] @@ -4711,10 +5433,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls", + "rustls 0.23.37", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki", + "rustls-webpki 0.103.9", "security-framework 3.7.0", "security-framework-sys", "webpki-root-certs", @@ -4727,6 +5449,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.103.9" @@ -4852,22 +5584,46 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "sdd" version = "3.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct 0.1.1", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct", + "base16ct 0.2.0", "der 0.7.10", "generic-array", - "pkcs8", + "pkcs8 0.10.2", "subtle", "zeroize", ] @@ -5160,6 +5916,16 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest", + "rand_core 0.6.4", +] + [[package]] name = "signature" version = "2.2.0" @@ -5222,10 +5988,20 @@ source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "927136cc2ae6a1b0e66ac6b1210902b75c3f726db004a73bc18686dcd0dcd22f" dependencies = [ "libc", - "socket2", + "socket2 0.6.3", "windows-sys 0.60.2", ] +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "socket2" version = "0.6.3" @@ -5245,6 +6021,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "spki" version = "0.7.3" @@ -5415,7 +6201,7 @@ dependencies = [ "log", "secrecy 0.8.0", "service-binding", - "signature", + "signature 2.2.0", "ssh-encoding", "ssh-key", "subtle", @@ -5453,14 +6239,14 @@ checksum = "3b86f5297f0f04d08cabaa0f6bff7cb6aec4d9c3b49d87990d63da9d9156a8c3" dependencies = [ "ed25519-dalek", "num-bigint-dig", - "p256", + "p256 0.13.2", "p384", "p521", "rand_core 0.6.4", "rsa", - "sec1", + "sec1 0.7.3", "sha2", - "signature", + "signature 2.2.0", "ssh-cipher", "ssh-encoding", "subtle", @@ -5714,7 +6500,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.3", "tokio-macros", "tracing", "windows-sys 0.61.2", @@ -5741,13 +6527,23 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls", + "rustls 0.23.37", "tokio", ] @@ -5887,8 +6683,8 @@ dependencies = [ "bitflags", "bytes", "futures-util", - "http", - "http-body", + "http 1.4.0", + "http-body 1.0.1", "iri-string", "pin-project-lite", "tower", @@ -5998,7 +6794,7 @@ checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442" dependencies = [ "bytes", "data-encoding", - "http", + "http 1.4.0", "httparse", "log", "native-tls", @@ -6129,6 +6925,12 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf-8" version = "0.7.6" @@ -6935,9 +7737,9 @@ dependencies = [ "base64", "deadpool", "futures", - "http", + "http 1.4.0", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-util", "log", "once_cell", @@ -7064,6 +7866,12 @@ dependencies = [ "rustix", ] +[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + [[package]] name = "xtask" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 11e25a0e..5017f330 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,7 @@ members = [ "crates/auths-infra-git", "crates/auths-infra-http", "crates/auths-storage", + "crates/auths-transparency", "crates/auths-keri", "crates/auths-jwt", "crates/auths-mcp-server", @@ -61,6 +62,7 @@ auths-jwt = { path = "crates/auths-jwt", version = "0.0.1-rc.7" } auths-pairing-daemon = { path = "crates/auths-pairing-daemon", version = "0.0.1-rc.8" } auths-pairing-protocol = { path = "crates/auths-pairing-protocol", version = "0.0.1-rc.7" } auths-storage = { path = "crates/auths-storage", version = "0.0.1-rc.4" } +auths-transparency = { path = "crates/auths-transparency", 
version = "0.0.1-rc.8", default-features = false } auths-utils = { path = "crates/auths-utils" } insta = { version = "1", features = ["json"] } diff --git a/crates/auths-cli/Cargo.toml b/crates/auths-cli/Cargo.toml index 19b1e459..585ae73b 100644 --- a/crates/auths-cli/Cargo.toml +++ b/crates/auths-cli/Cargo.toml @@ -38,6 +38,7 @@ auths-policy.workspace = true auths-index.workspace = true auths-crypto.workspace = true auths-sdk.workspace = true +auths-transparency = { workspace = true, features = ["native"] } auths-pairing-protocol.workspace = true auths-telemetry = { workspace = true, features = ["sink-http"] } auths-verifier = { workspace = true, features = ["native"] } diff --git a/crates/auths-cli/src/cli.rs b/crates/auths-cli/src/cli.rs index dfccaecf..2df9da2a 100644 --- a/crates/auths-cli/src/cli.rs +++ b/crates/auths-cli/src/cli.rs @@ -3,6 +3,7 @@ use std::path::PathBuf; use clap::builder::styling::{AnsiColor, Effects, Styles}; use clap::{Parser, Subcommand}; +use crate::commands::account::AccountCommand; use crate::commands::agent::AgentCommand; use crate::commands::approval::ApprovalCommand; use crate::commands::artifact::ArtifactCommand; @@ -21,6 +22,8 @@ use crate::commands::id::IdCommand; use crate::commands::init::InitCommand; use crate::commands::key::KeyCommand; use crate::commands::learn::LearnCommand; +use crate::commands::log::LogCommand; +use crate::commands::namespace::NamespaceCommand; use crate::commands::org::OrgCommand; use crate::commands::policy::PolicyCommand; use crate::commands::scim::ScimCommand; @@ -119,6 +122,8 @@ pub enum RootCommand { #[command(hide = true)] Trust(TrustCommand), #[command(hide = true)] + Namespace(NamespaceCommand), + #[command(hide = true)] Org(OrgCommand), #[command(hide = true)] Audit(AuditCommand), @@ -135,4 +140,8 @@ pub enum RootCommand { Commit(CommitCmd), #[command(hide = true)] Debug(DebugCmd), + #[command(hide = true)] + Log(LogCommand), + #[command(hide = true)] + Account(AccountCommand), } diff --git 
a/crates/auths-cli/src/commands/account.rs b/crates/auths-cli/src/commands/account.rs new file mode 100644 index 00000000..017b6927 --- /dev/null +++ b/crates/auths-cli/src/commands/account.rs @@ -0,0 +1,119 @@ +use anyhow::Result; +use clap::{Parser, Subcommand}; +use serde::Deserialize; + +use super::executable::ExecutableCommand; +use crate::config::CliConfig; + +/// Manage your registry account and view usage. +#[derive(Parser, Debug, Clone)] +pub struct AccountCommand { + #[clap(subcommand)] + pub subcommand: AccountSubcommand, +} + +#[derive(Subcommand, Debug, Clone)] +pub enum AccountSubcommand { + /// Show account status and rate limits + Status { + /// Registry URL to query + #[arg(long, default_value = "https://registry.auths.dev")] + registry_url: String, + }, + /// Show API usage history + Usage { + /// Registry URL to query + #[arg(long, default_value = "https://registry.auths.dev")] + registry_url: String, + /// Number of days to show + #[arg(long, default_value = "7")] + days: u32, + }, +} + +#[derive(Debug, Deserialize)] +struct AccountStatusResponse { + did: String, + tier: String, + daily_limit: i32, + daily_used: i32, + expires_at: Option, +} + +#[derive(Debug, Deserialize)] +struct UsageEntry { + date: String, + request_count: i32, +} + +fn handle_status(registry_url: &str) -> Result<()> { + let url = registry_url.trim_end_matches('/'); + + println!("Fetching account status..."); + + let client = reqwest::blocking::Client::new(); + let resp = client + .get(format!("{url}/v1/account/status")) + .send() + .map_err(|e| anyhow::anyhow!("Failed to fetch account status: {e}"))?; + + if !resp.status().is_success() { + return Err(anyhow::anyhow!("Registry returned {}", resp.status())); + } + + let status: AccountStatusResponse = resp + .json() + .map_err(|e| anyhow::anyhow!("Failed to parse response: {e}"))?; + + println!("\nAccount Status:"); + println!(" DID: {}", status.did); + println!(" Tier: {}", status.tier); + println!(" Daily Limit: {}", 
status.daily_limit); + println!(" Daily Used: {}", status.daily_used); + if let Some(expires) = status.expires_at { + println!(" Expires: {expires}"); + } + + Ok(()) +} + +fn handle_usage(registry_url: &str, days: u32) -> Result<()> { + let url = registry_url.trim_end_matches('/'); + + println!("Fetching usage history ({days} days)..."); + + let client = reqwest::blocking::Client::new(); + let resp = client + .get(format!("{url}/v1/account/usage?days={days}")) + .send() + .map_err(|e| anyhow::anyhow!("Failed to fetch usage: {e}"))?; + + if !resp.status().is_success() { + return Err(anyhow::anyhow!("Registry returned {}", resp.status())); + } + + let entries: Vec = resp + .json() + .map_err(|e| anyhow::anyhow!("Failed to parse response: {e}"))?; + + if entries.is_empty() { + println!("\nNo usage data found."); + return Ok(()); + } + + println!("\nUsage History:"); + for entry in &entries { + println!(" {} -- {} requests", entry.date, entry.request_count); + } + + Ok(()) +} + +impl ExecutableCommand for AccountCommand { + fn execute(&self, _ctx: &CliConfig) -> Result<()> { + match &self.subcommand { + AccountSubcommand::Status { registry_url } => handle_status(registry_url), + AccountSubcommand::Usage { registry_url, days } => handle_usage(registry_url, *days), + } + } +} diff --git a/crates/auths-cli/src/commands/artifact/publish.rs b/crates/auths-cli/src/commands/artifact/publish.rs index 4dc571f4..98e97f87 100644 --- a/crates/auths-cli/src/commands/artifact/publish.rs +++ b/crates/auths-cli/src/commands/artifact/publish.rs @@ -4,8 +4,9 @@ use std::time::Duration; use anyhow::{Context, Result, bail}; use auths_infra_http::HttpRegistryClient; use auths_sdk::workflows::artifact::{ - ArtifactPublishConfig, ArtifactPublishError, publish_artifact, + ArtifactPublishConfig, ArtifactPublishError, ArtifactPublishResult, publish_artifact, }; +use auths_transparency::OfflineBundle; use auths_verifier::core::ResourceId; use serde::Serialize; @@ -121,6 +122,9 @@ async fn 
handle_publish_async( other => anyhow::anyhow!("{}", other), })?; + // Cache checkpoint from bundle if present in the signature file + cache_checkpoint_from_sig(&sig_contents); + if is_json_mode() { let json_resp = JsonResponse::success( "artifact publish", @@ -150,7 +154,63 @@ async fn handle_publish_async( registry_url, pkg ); } + display_rate_limit(&out, &body); } Ok(()) } + +fn display_rate_limit(out: &Output, result: &ArtifactPublishResult) { + let Some(ref rl) = result.rate_limit else { + return; + }; + println!(); + if let Some(tier) = &rl.tier { + println!(" Tier: {}", out.info(tier)); + } + if let (Some(remaining), Some(limit)) = (rl.remaining, rl.limit) { + println!( + " Quota: {}/{} requests remaining today", + out.bold(&remaining.to_string()), + limit + ); + } + if let Some(reset) = rl.reset + && let Some(dt) = chrono::DateTime::from_timestamp(reset, 0) + { + let human = dt.format("%Y-%m-%d %H:%M UTC"); + println!(" Resets at: {human}"); + } +} + +/// Best-effort checkpoint caching after publish, using the bundle in the sig file. 
+#[allow(clippy::disallowed_methods)] // CLI is the presentation boundary +fn cache_checkpoint_from_sig(sig_contents: &str) { + let sig_value: serde_json::Value = match serde_json::from_str(sig_contents) { + Ok(v) => v, + Err(_) => return, + }; + + if sig_value.get("offline_bundle").is_none() { + return; + } + + let bundle: OfflineBundle = match serde_json::from_value(sig_value["offline_bundle"].clone()) { + Ok(b) => b, + Err(_) => return, + }; + + let cache_path = match dirs::home_dir() { + Some(home) => home.join(".auths").join("log_checkpoint.json"), + None => return, + }; + + if let Err(e) = auths_sdk::workflows::transparency::try_cache_checkpoint( + &cache_path, + &bundle.signed_checkpoint, + None, + ) && !is_json_mode() + { + eprintln!("Warning: checkpoint cache update failed: {e}"); + } +} diff --git a/crates/auths-cli/src/commands/artifact/verify.rs b/crates/auths-cli/src/commands/artifact/verify.rs index ffa04743..29e568ad 100644 --- a/crates/auths-cli/src/commands/artifact/verify.rs +++ b/crates/auths-cli/src/commands/artifact/verify.rs @@ -3,6 +3,10 @@ use serde::Serialize; use std::fs; use std::path::{Path, PathBuf}; +use auths_transparency::{ + BundleVerificationReport, CheckpointStatus, DelegationStatus, InclusionStatus, NamespaceStatus, + OfflineBundle, SignatureStatus, TrustRoot, WitnessStatus, +}; use auths_verifier::core::Attestation; use auths_verifier::witness::{WitnessQuorum, WitnessReceipt, WitnessVerifyConfig}; use auths_verifier::{ @@ -71,6 +75,16 @@ pub async fn handle_verify( } }; + let sig_value: serde_json::Value = match serde_json::from_str(&sig_content) { + Ok(v) => v, + Err(e) => { + return output_error(&file_str, 2, &format!("Failed to parse .auths.json: {}", e)); + } + }; + if sig_value.get("offline_bundle").is_some() { + return handle_bundle_verify(file, &sig_content); + } + // 2. 
Parse attestation let attestation: Attestation = match serde_json::from_str(&sig_content) { Ok(a) => a, @@ -337,3 +351,146 @@ fn output_result(exit_code: i32, result: VerifyArtifactResult) -> Result<()> { } Ok(()) } + +fn handle_bundle_verify(file: &Path, sig_content: &str) -> Result<()> { + let file_str = file.to_string_lossy().to_string(); + + let sig_value: serde_json::Value = + serde_json::from_str(sig_content).with_context(|| "Failed to parse .auths.json")?; + let bundle: OfflineBundle = serde_json::from_value(sig_value["offline_bundle"].clone()) + .with_context(|| "Failed to parse offline_bundle from .auths.json")?; + + let trust_root: TrustRoot = serde_json::from_str(&default_trust_root_json()) + .with_context(|| "Failed to parse default trust root")?; + + #[allow(clippy::disallowed_methods)] // CLI is the presentation boundary + let now = chrono::Utc::now(); + + let report = auths_transparency::verify_bundle(&bundle, &trust_root, now); + + if is_json_mode() { + println!( + "{}", + serde_json::to_string(&report).with_context(|| "Failed to serialize bundle report")? + ); + } else { + render_bundle_report(&report); + } + + if report.is_valid() { + cache_checkpoint_from_bundle(&bundle); + Ok(()) + } else { + output_error(&file_str, 1, "Bundle verification failed") + } +} + +/// Best-effort checkpoint caching after bundle verification. 
+#[allow(clippy::disallowed_methods)] // CLI is the presentation boundary +fn cache_checkpoint_from_bundle(bundle: &OfflineBundle) { + let cache_path = match dirs::home_dir() { + Some(home) => home.join(".auths").join("log_checkpoint.json"), + None => return, + }; + + match auths_sdk::workflows::transparency::try_cache_checkpoint( + &cache_path, + &bundle.signed_checkpoint, + None, + ) { + Ok(report) => { + if report.old_size == 0 && !is_json_mode() { + eprintln!( + "Cached transparency checkpoint (tree size: {})", + report.new_size + ); + } + } + Err(e) => { + if !is_json_mode() { + eprintln!("Warning: checkpoint cache update failed: {e}"); + } + } + } +} + +fn render_bundle_report(report: &BundleVerificationReport) { + println!("Bundle Verification:"); + + match &report.signature { + SignatureStatus::Verified => println!(" Signature: \u{2713} Verified"), + SignatureStatus::Failed { reason } => { + println!(" Signature: \u{2717} Failed: {reason}") + } + SignatureStatus::NotProvided => println!(" Signature: - Not provided"), + _ => println!(" Signature: ? Unknown status"), + } + + match &report.inclusion { + InclusionStatus::Verified => println!(" Inclusion: \u{2713} Verified"), + InclusionStatus::Failed { reason } => { + println!(" Inclusion: \u{2717} Failed: {reason}") + } + InclusionStatus::NotProvided => println!(" Inclusion: - Not provided"), + _ => println!(" Inclusion: ? Unknown status"), + } + + match &report.checkpoint { + CheckpointStatus::Verified => println!(" Checkpoint: \u{2713} Verified"), + CheckpointStatus::InvalidSignature => { + println!(" Checkpoint: \u{2717} Invalid signature") + } + CheckpointStatus::NotProvided => println!(" Checkpoint: - Not provided"), + _ => println!(" Checkpoint: ? 
Unknown status"), + } + + match &report.witnesses { + WitnessStatus::Quorum { verified, required } => { + println!(" Witnesses: \u{2713} Quorum ({verified}/{required} verified)"); + } + WitnessStatus::Insufficient { verified, required } => { + println!(" Witnesses: \u{2717} Insufficient ({verified}/{required} verified)"); + } + WitnessStatus::NotProvided => println!(" Witnesses: - Not provided"), + _ => println!(" Witnesses: ? Unknown status"), + } + + match &report.namespace { + NamespaceStatus::Authorized => println!(" Namespace: \u{2713} Authorized"), + NamespaceStatus::Owned => println!(" Namespace: \u{2713} Owned"), + NamespaceStatus::Unowned => println!(" Namespace: - Unowned"), + NamespaceStatus::Unauthorized => println!(" Namespace: \u{2717} Unauthorized"), + _ => println!(" Namespace: ? Unknown status"), + } + + match &report.delegation { + DelegationStatus::Direct => println!(" Delegation: \u{2713} Direct"), + DelegationStatus::ChainVerified { + org_did, + member_did, + .. + } => { + println!(" Delegation: \u{2713} Chain verified ({org_did} \u{2192} {member_did})"); + } + DelegationStatus::ChainBroken { reason } => { + println!(" Delegation: \u{2717} Chain broken: {reason}"); + } + DelegationStatus::NoDelegationData => println!(" Delegation: - No delegation data"), + _ => println!(" Delegation: ? Unknown status"), + } + + for warning in &report.warnings { + println!(" Warning: \u{26a0} {warning}"); + } +} + +fn default_trust_root_json() -> String { + // Epic 1 hardcoded trust root: no witnesses, placeholder log key. + // Will be replaced by TUF-distributed trust root in fn-76. 
+ serde_json::json!({ + "log_public_key": "0000000000000000000000000000000000000000000000000000000000000000", + "log_origin": "auths.dev/log", + "witnesses": [] + }) + .to_string() +} diff --git a/crates/auths-cli/src/commands/id/bind_idp.rs b/crates/auths-cli/src/commands/id/bind_idp.rs new file mode 100644 index 00000000..b9aad75d --- /dev/null +++ b/crates/auths-cli/src/commands/id/bind_idp.rs @@ -0,0 +1,49 @@ +use anyhow::{Result, anyhow}; +use clap::Parser; + +const CLOUD_BINARY: &str = "auths-cloud"; + +/// Stub command that delegates to the `auths-cloud` binary. +/// +/// If `auths-cloud` is on `$PATH`, forwards all arguments. +/// Otherwise, prints an informational message about Auths Cloud. +#[derive(Parser, Debug, Clone)] +#[command(about = "Bind this identity to an enterprise IdP (requires Auths Cloud)")] +pub struct BindIdpStubCommand { + /// All arguments are forwarded to auths-cloud. + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, +} + +pub fn handle_bind_idp(cmd: BindIdpStubCommand) -> Result<()> { + match which::which(CLOUD_BINARY) { + Ok(path) => { + let status = std::process::Command::new(path) + .args(["id", "bind-idp"]) + .args(&cmd.args) + .status() + .map_err(|e| anyhow!("failed to execute {CLOUD_BINARY}: {e}"))?; + + if status.success() { + Ok(()) + } else { + Err(anyhow!( + "{CLOUD_BINARY} exited with status {}", + status.code().unwrap_or(-1) + )) + } + } + Err(_) => { + let out = crate::ux::format::Output::new(); + out.newline(); + out.print_info("IdP binding requires Auths Cloud."); + out.newline(); + out.println(" Bind your Auths identity to enterprise identity providers"); + out.println(" like Okta, Microsoft Entra ID, Google Workspace, or SAML 2.0."); + out.newline(); + out.println(" Learn more: https://auths.dev/cloud"); + out.newline(); + Ok(()) + } + } +} diff --git a/crates/auths-cli/src/commands/id/identity.rs b/crates/auths-cli/src/commands/id/identity.rs index 0f9ca2ca..aa90abd7 100644 --- 
a/crates/auths-cli/src/commands/id/identity.rs +++ b/crates/auths-cli/src/commands/id/identity.rs @@ -188,6 +188,12 @@ pub enum IdSubcommand { /// Import existing GPG or SSH keys into Auths. Migrate(super::migrate::MigrateCommand), + + /// Bind this identity to an enterprise IdP (Okta, Entra ID, Google Workspace, SAML). + /// + /// Requires the `auths-cloud` binary on $PATH. If not installed, + /// prints information about Auths Cloud. + BindIdp(super::bind_idp::BindIdpStubCommand), } fn display_dry_run_rotate( @@ -641,5 +647,7 @@ pub fn handle_id( } IdSubcommand::Migrate(migrate_cmd) => super::migrate::handle_migrate(migrate_cmd, now), + + IdSubcommand::BindIdp(bind_cmd) => super::bind_idp::handle_bind_idp(bind_cmd), } } diff --git a/crates/auths-cli/src/commands/id/mod.rs b/crates/auths-cli/src/commands/id/mod.rs index 6e5e12a6..44d248cd 100644 --- a/crates/auths-cli/src/commands/id/mod.rs +++ b/crates/auths-cli/src/commands/id/mod.rs @@ -1,3 +1,4 @@ +pub mod bind_idp; pub mod claim; pub mod identity; pub mod migrate; diff --git a/crates/auths-cli/src/commands/log.rs b/crates/auths-cli/src/commands/log.rs new file mode 100644 index 00000000..affa7d24 --- /dev/null +++ b/crates/auths-cli/src/commands/log.rs @@ -0,0 +1,222 @@ +use std::time::Duration; + +use anyhow::{Context, Result, bail}; +use auths_core::ports::network::RegistryClient; +use auths_infra_http::HttpRegistryClient; +use auths_transparency::SignedCheckpoint; +use clap::{Args, Subcommand}; +use serde::Serialize; + +use super::executable::ExecutableCommand; +use crate::config::CliConfig; +use crate::ux::format::{JsonResponse, is_json_mode}; + +#[derive(Args, Debug, Clone)] +#[command(about = "Inspect and verify the transparency log")] +pub struct LogCommand { + #[command(subcommand)] + pub command: LogSubcommand, +} + +#[derive(Subcommand, Debug, Clone)] +pub enum LogSubcommand { + /// Fetch and display a log entry by sequence number + Inspect(InspectArgs), + /// Verify log consistency from the cached 
checkpoint + Verify(VerifyArgs), +} + +#[derive(Args, Debug, Clone)] +pub struct InspectArgs { + /// Sequence number of the entry to inspect + pub sequence: u64, + + /// Registry URL to fetch from + #[clap(long, default_value = "https://public.auths.dev")] + pub registry: String, +} + +#[derive(Args, Debug, Clone)] +pub struct VerifyArgs { + /// Registry URL to verify against + #[clap(long, default_value = "https://public.auths.dev")] + pub registry: String, +} + +#[derive(Serialize)] +struct VerifyResult { + consistent: bool, + cached_size: u64, + latest_size: u64, + cached_root: String, + latest_root: String, +} + +impl ExecutableCommand for LogCommand { + fn execute(&self, _ctx: &CliConfig) -> Result<()> { + let rt = tokio::runtime::Runtime::new().context("Failed to create async runtime")?; + rt.block_on(async { + match &self.command { + LogSubcommand::Inspect(args) => handle_inspect(args).await, + LogSubcommand::Verify(args) => handle_verify(args).await, + } + }) + } +} + +async fn handle_inspect(args: &InspectArgs) -> Result<()> { + let registry_url = args.registry.trim_end_matches('/'); + let client = + HttpRegistryClient::new_with_timeouts(Duration::from_secs(30), Duration::from_secs(60)); + + let path = format!("v1/log/entries/{}", args.sequence); + let response_bytes = client + .fetch_registry_data(registry_url, &path) + .await + .context("Failed to fetch log entry")?; + + let entry: serde_json::Value = + serde_json::from_slice(&response_bytes).context("Failed to parse log entry response")?; + + if is_json_mode() { + println!( + "{}", + serde_json::to_string_pretty(&entry).context("Failed to serialize entry")? 
+ ); + } else { + let entry_type = entry + .get("content") + .and_then(|c| c.get("entry_type")) + .and_then(|t| t.as_str()) + .unwrap_or("unknown"); + let actor = entry + .get("content") + .and_then(|c| c.get("actor_did")) + .and_then(|a| a.as_str()) + .unwrap_or("unknown"); + let timestamp = entry + .get("timestamp") + .and_then(|t| t.as_str()) + .unwrap_or("unknown"); + let sequence = entry + .get("sequence") + .and_then(|s| s.as_u64()) + .unwrap_or(args.sequence); + + println!("Log Entry #{sequence}"); + println!(" Type: {entry_type}"); + println!(" Actor: {actor}"); + println!(" Timestamp: {timestamp}"); + + if let Some(body) = entry.get("content").and_then(|c| c.get("body")) { + println!( + " Body: {}", + serde_json::to_string_pretty(body).unwrap_or_default() + ); + } + } + + Ok(()) +} + +#[allow(clippy::disallowed_methods)] // CLI is the presentation boundary +async fn handle_verify(args: &VerifyArgs) -> Result<()> { + let cache_path = dirs::home_dir() + .map(|h| h.join(".auths").join("log_checkpoint.json")) + .ok_or_else(|| anyhow::anyhow!("Could not determine home directory"))?; + + let cached_checkpoint: SignedCheckpoint = match std::fs::read_to_string(&cache_path) { + Ok(json) => serde_json::from_str(&json).context("Failed to parse cached checkpoint")?, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => { + if is_json_mode() { + let result = VerifyResult { + consistent: false, + cached_size: 0, + latest_size: 0, + cached_root: String::new(), + latest_root: String::new(), + }; + JsonResponse::success("log verify", result).print()?; + } else { + println!("No cached checkpoint found at {}", cache_path.display()); + println!("Run 'auths artifact verify' with a bundle to establish initial trust."); + } + return Ok(()); + } + Err(e) => return Err(e).context("Failed to read cached checkpoint"), + }; + + let registry_url = args.registry.trim_end_matches('/'); + let client = + HttpRegistryClient::new_with_timeouts(Duration::from_secs(30), 
Duration::from_secs(60)); + + let response_bytes = client + .fetch_registry_data(registry_url, "v1/log/checkpoint") + .await + .context("Failed to fetch latest checkpoint from registry")?; + + let latest_checkpoint: SignedCheckpoint = + serde_json::from_slice(&response_bytes).context("Failed to parse latest checkpoint")?; + + let report = auths_sdk::workflows::transparency::try_cache_checkpoint( + &cache_path, + &latest_checkpoint, + None, + ); + + match report { + Ok(consistency) => { + if is_json_mode() { + let result = VerifyResult { + consistent: consistency.consistent, + cached_size: consistency.old_size, + latest_size: consistency.new_size, + cached_root: hex::encode(cached_checkpoint.checkpoint.root.as_bytes()), + latest_root: hex::encode(latest_checkpoint.checkpoint.root.as_bytes()), + }; + JsonResponse::success("log verify", result).print()?; + } else { + println!("Log Consistency: verified"); + println!( + " Cached: size={}, root={}", + consistency.old_size, + hex::encode(cached_checkpoint.checkpoint.root.as_bytes()) + ); + println!( + " Latest: size={}, root={}", + consistency.new_size, + hex::encode(latest_checkpoint.checkpoint.root.as_bytes()) + ); + println!(" Checkpoint updated."); + } + } + Err(e) => { + if is_json_mode() { + let result = VerifyResult { + consistent: false, + cached_size: cached_checkpoint.checkpoint.size, + latest_size: latest_checkpoint.checkpoint.size, + cached_root: hex::encode(cached_checkpoint.checkpoint.root.as_bytes()), + latest_root: hex::encode(latest_checkpoint.checkpoint.root.as_bytes()), + }; + let resp: JsonResponse = JsonResponse { + success: false, + command: "log verify".into(), + data: Some(result), + error: Some(e.to_string()), + }; + println!( + "{}", + serde_json::to_string(&resp).context("Failed to serialize error response")? 
+ ); + } else { + eprintln!("Log Consistency: FAILED"); + eprintln!(" Error: {e}"); + eprintln!(" This may indicate a split-view attack."); + } + bail!("Log consistency verification failed: {e}"); + } + } + + Ok(()) +} diff --git a/crates/auths-cli/src/commands/mod.rs b/crates/auths-cli/src/commands/mod.rs index 23a09371..8a0d10c5 100644 --- a/crates/auths-cli/src/commands/mod.rs +++ b/crates/auths-cli/src/commands/mod.rs @@ -1,6 +1,7 @@ pub mod executable; pub mod registry_overrides; +pub mod account; pub mod agent; pub mod approval; pub mod artifact; @@ -20,6 +21,8 @@ pub mod index; pub mod init; pub mod key; pub mod learn; +pub mod log; +pub mod namespace; pub mod org; pub mod policy; pub mod provision; diff --git a/crates/auths-cli/src/commands/namespace.rs b/crates/auths-cli/src/commands/namespace.rs new file mode 100644 index 00000000..a7b5a9b6 --- /dev/null +++ b/crates/auths-cli/src/commands/namespace.rs @@ -0,0 +1,367 @@ +use anyhow::{Context, Result, anyhow}; +use clap::{Parser, Subcommand}; + +use auths_core::signing::StorageSigner; +use auths_core::storage::keychain::{KeyAlias, get_platform_keychain}; +use auths_id::storage::identity::IdentityStorage; +use auths_id::storage::layout; +use auths_sdk::registration::DEFAULT_REGISTRY_URL; +use auths_sdk::workflows::namespace::{ + ClaimNamespaceCommand, DelegateNamespaceCommand, TransferNamespaceCommand, + parse_claim_response, parse_lookup_response, sign_namespace_claim, sign_namespace_delegate, + sign_namespace_transfer, +}; +use auths_storage::git::RegistryIdentityStorage; + +use crate::commands::executable::ExecutableCommand; +use crate::config::CliConfig; + +/// Manage namespace claims in package ecosystems. +#[derive(Parser, Debug, Clone)] +pub struct NamespaceCommand { + #[clap(subcommand)] + pub subcommand: NamespaceSubcommand, +} + +/// Subcommands for managing namespace claims and delegations. 
+#[derive(Subcommand, Debug, Clone)] +pub enum NamespaceSubcommand { + /// Claim a namespace in a package ecosystem + Claim { + /// Package ecosystem (e.g. npm, crates.io, pypi) + #[arg(long)] + ecosystem: String, + + /// Package name to claim + #[arg(long)] + package_name: String, + + /// Registry URL (defaults to the public registry) + #[arg(long)] + registry_url: Option, + + /// Alias of the signing key in keychain + #[arg(long)] + signer_alias: Option, + }, + + /// Delegate namespace authority to another identity + Delegate { + /// Package ecosystem (e.g. npm, crates.io, pypi) + #[arg(long)] + ecosystem: String, + + /// Package name + #[arg(long)] + package_name: String, + + /// DID of the identity to delegate to + #[arg(long)] + delegate_did: String, + + /// Registry URL (defaults to the public registry) + #[arg(long)] + registry_url: Option, + + /// Alias of the signing key in keychain + #[arg(long)] + signer_alias: Option, + }, + + /// Transfer namespace ownership to another identity + Transfer { + /// Package ecosystem (e.g. npm, crates.io, pypi) + #[arg(long)] + ecosystem: String, + + /// Package name + #[arg(long)] + package_name: String, + + /// DID of the new owner + #[arg(long)] + new_owner_did: String, + + /// Registry URL (defaults to the public registry) + #[arg(long)] + registry_url: Option, + + /// Alias of the signing key in keychain + #[arg(long)] + signer_alias: Option, + }, + + /// Look up namespace information + Lookup { + /// Package ecosystem (e.g. 
npm, crates.io, pypi) + #[arg(long)] + ecosystem: String, + + /// Package name + #[arg(long)] + package_name: String, + + /// Registry URL (defaults to the public registry) + #[arg(long)] + registry_url: Option, + }, +} + +impl ExecutableCommand for NamespaceCommand { + #[allow(clippy::disallowed_methods)] + fn execute(&self, ctx: &CliConfig) -> Result<()> { + handle_namespace(self.clone(), ctx) + } +} + +fn resolve_registry_url(registry_url: Option) -> String { + registry_url.unwrap_or_else(|| DEFAULT_REGISTRY_URL.to_string()) +} + +fn load_identity_and_alias( + ctx: &CliConfig, + signer_alias: Option, +) -> Result<(auths_verifier::types::IdentityDID, KeyAlias)> { + let repo_path = layout::resolve_repo_path(ctx.repo_path.clone())?; + let identity_storage = RegistryIdentityStorage::new(repo_path); + let managed_identity = identity_storage + .load_identity() + .context("Failed to load identity. Run `auths init` first.")?; + + let controller_did = managed_identity.controller_did; + + let alias_str = signer_alias.unwrap_or_else(|| { + let prefix = controller_did + .as_str() + .strip_prefix("did:keri:") + .unwrap_or(controller_did.as_str()); + format!( + "ns-{}", + prefix + .chars() + .filter(|c| c.is_alphanumeric()) + .take(20) + .collect::() + .to_lowercase() + ) + }); + + let key_alias = KeyAlias::new_unchecked(alias_str); + Ok((controller_did, key_alias)) +} + +fn post_signed_entry(registry_url: &str, body: serde_json::Value) -> Result { + let url = format!("{}/v1/log/entries", registry_url.trim_end_matches('/')); + + let client = reqwest::blocking::Client::new(); + let response = client + .post(&url) + .json(&body) + .send() + .with_context(|| format!("Failed to POST to {url}"))?; + + let status = response.status(); + let response_text = response + .text() + .context("Failed to read registry response")?; + + if !status.is_success() { + return Err(anyhow!( + "Registry returned HTTP {}: {}", + status, + response_text + )); + } + + 
serde_json::from_str(&response_text).context("Failed to parse registry response") +} + +/// Handles `namespace` commands for managing package namespace claims. +pub fn handle_namespace(cmd: NamespaceCommand, ctx: &CliConfig) -> Result<()> { + match cmd.subcommand { + NamespaceSubcommand::Claim { + ecosystem, + package_name, + registry_url, + signer_alias, + } => { + let registry_url = resolve_registry_url(registry_url); + let (controller_did, key_alias) = load_identity_and_alias(ctx, signer_alias)?; + let signer = StorageSigner::new(get_platform_keychain()?); + let passphrase_provider = ctx.passphrase_provider.clone(); + + println!("Claiming namespace {}/{}...", ecosystem, package_name); + + let sdk_cmd = ClaimNamespaceCommand { + ecosystem: ecosystem.clone(), + package_name: package_name.clone(), + registry_url: registry_url.clone(), + }; + + let signed = sign_namespace_claim( + &sdk_cmd, + &controller_did, + &signer, + passphrase_provider.as_ref(), + &key_alias, + ) + .context("Failed to sign namespace claim")?; + + let response = post_signed_entry(®istry_url, signed.to_request_body())?; + + let result = parse_claim_response( + &ecosystem, + &package_name, + controller_did.as_str(), + &response, + ); + + println!("\nNamespace claimed successfully!"); + println!(" Ecosystem: {}", result.ecosystem); + println!(" Package: {}", result.package_name); + println!(" Owner: {}", result.owner_did); + println!(" Log Sequence: {}", result.log_sequence); + + Ok(()) + } + + NamespaceSubcommand::Delegate { + ecosystem, + package_name, + delegate_did, + registry_url, + signer_alias, + } => { + let registry_url = resolve_registry_url(registry_url); + let (controller_did, key_alias) = load_identity_and_alias(ctx, signer_alias)?; + let signer = StorageSigner::new(get_platform_keychain()?); + let passphrase_provider = ctx.passphrase_provider.clone(); + + println!( + "Delegating namespace {}/{} to {}...", + ecosystem, package_name, delegate_did + ); + + let sdk_cmd = 
DelegateNamespaceCommand { + ecosystem: ecosystem.clone(), + package_name: package_name.clone(), + delegate_did: delegate_did.clone(), + registry_url: registry_url.clone(), + }; + + let signed = sign_namespace_delegate( + &sdk_cmd, + &controller_did, + &signer, + passphrase_provider.as_ref(), + &key_alias, + ) + .context("Failed to sign namespace delegation")?; + + post_signed_entry(®istry_url, signed.to_request_body())?; + + println!("\nNamespace delegation successful!"); + println!(" Ecosystem: {}", ecosystem); + println!(" Package: {}", package_name); + println!(" Delegate: {}", delegate_did); + + Ok(()) + } + + NamespaceSubcommand::Transfer { + ecosystem, + package_name, + new_owner_did, + registry_url, + signer_alias, + } => { + let registry_url = resolve_registry_url(registry_url); + let (controller_did, key_alias) = load_identity_and_alias(ctx, signer_alias)?; + let signer = StorageSigner::new(get_platform_keychain()?); + let passphrase_provider = ctx.passphrase_provider.clone(); + + println!( + "Transferring namespace {}/{} to {}...", + ecosystem, package_name, new_owner_did + ); + + let sdk_cmd = TransferNamespaceCommand { + ecosystem: ecosystem.clone(), + package_name: package_name.clone(), + new_owner_did: new_owner_did.clone(), + registry_url: registry_url.clone(), + }; + + let signed = sign_namespace_transfer( + &sdk_cmd, + &controller_did, + &signer, + passphrase_provider.as_ref(), + &key_alias, + ) + .context("Failed to sign namespace transfer")?; + + post_signed_entry(®istry_url, signed.to_request_body())?; + + println!("\nNamespace transfer successful!"); + println!(" Ecosystem: {}", ecosystem); + println!(" Package: {}", package_name); + println!(" New Owner: {}", new_owner_did); + + Ok(()) + } + + NamespaceSubcommand::Lookup { + ecosystem, + package_name, + registry_url, + } => { + let registry_url = resolve_registry_url(registry_url); + + println!("Looking up namespace {}/{}...", ecosystem, package_name); + + let url = format!( + 
"{}/v1/namespaces/{}/{}", + registry_url.trim_end_matches('/'), + ecosystem, + package_name + ); + + let client = reqwest::blocking::Client::new(); + let response = client + .get(&url) + .send() + .with_context(|| format!("Failed to GET {url}"))?; + + if response.status() == reqwest::StatusCode::NOT_FOUND { + println!("\nNamespace {}/{} is not claimed.", ecosystem, package_name); + return Ok(()); + } + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().unwrap_or_default(); + return Err(anyhow!("Registry returned HTTP {}: {}", status, body)); + } + + let body: serde_json::Value = response + .json() + .context("Failed to parse registry response")?; + + let info = parse_lookup_response(&ecosystem, &package_name, &body); + + println!("\nNamespace: {}/{}", info.ecosystem, info.package_name); + println!(" Owner: {}", info.owner_did); + if info.delegates.is_empty() { + println!(" Delegates: (none)"); + } else { + println!(" Delegates:"); + for d in &info.delegates { + println!(" - {}", d); + } + } + + Ok(()) + } + } +} diff --git a/crates/auths-cli/src/commands/snapshots/auths_cli__commands__status__tests__status_json_snapshot.snap.new b/crates/auths-cli/src/commands/snapshots/auths_cli__commands__status__tests__status_json_snapshot.snap.new deleted file mode 100644 index ce7c8d31..00000000 --- a/crates/auths-cli/src/commands/snapshots/auths_cli__commands__status__tests__status_json_snapshot.snap.new +++ /dev/null @@ -1,49 +0,0 @@ ---- -source: crates/auths-cli/src/commands/status.rs -assertion_line: 453 -expression: report ---- -{ - "identity": { - "controller_did": "did:keri:ETestController123", - "alias": "dev-machine" - }, - "agent": { - "running": true, - "pid": 12345, - "socket_path": "/tmp/agent.sock" - }, - "devices": { - "linked": 2, - "revoked": 1, - "expiring_soon": [ - { - "device_did": "did:key:zExpiringSoon", - "expires_in_days": 3 - } - ], - "devices_detail": [ - { - "device_did": "did:key:zActiveDevice", - 
"status": "active", - "revoked_at": null, - "expires_at": "2025-09-13T12:00:00Z", - "expires_in_days": 90 - }, - { - "device_did": "did:key:zExpiringSoon", - "status": "expiring_soon", - "revoked_at": null, - "expires_at": "2025-06-18T12:00:00Z", - "expires_in_days": 3 - }, - { - "device_did": "did:key:zRevokedDevice", - "status": "revoked", - "revoked_at": "2025-06-05T12:00:00Z", - "expires_at": "2025-08-04T12:00:00Z", - "expires_in_days": null - } - ] - } -} diff --git a/crates/auths-cli/src/main.rs b/crates/auths-cli/src/main.rs index 1e08704b..7ada302f 100644 --- a/crates/auths-cli/src/main.rs +++ b/crates/auths-cli/src/main.rs @@ -91,6 +91,7 @@ fn run() -> Result<()> { RootCommand::Policy(cmd) => cmd.execute(&ctx), RootCommand::Git(cmd) => cmd.execute(&ctx), RootCommand::Trust(cmd) => cmd.execute(&ctx), + RootCommand::Namespace(cmd) => cmd.execute(&ctx), RootCommand::Org(cmd) => cmd.execute(&ctx), RootCommand::Audit(cmd) => cmd.execute(&ctx), RootCommand::Agent(cmd) => cmd.execute(&ctx), @@ -99,6 +100,8 @@ fn run() -> Result<()> { RootCommand::Config(cmd) => cmd.execute(&ctx), RootCommand::Commit(cmd) => cmd.execute(&ctx), RootCommand::Debug(cmd) => cmd.execute(&ctx), + RootCommand::Log(cmd) => cmd.execute(&ctx), + RootCommand::Account(cmd) => cmd.execute(&ctx), }; if let Some(action) = action { diff --git a/crates/auths-core/src/ports/network.rs b/crates/auths-core/src/ports/network.rs index 8c73f7ca..bf96c4d2 100644 --- a/crates/auths-core/src/ports/network.rs +++ b/crates/auths-core/src/ports/network.rs @@ -306,6 +306,22 @@ pub trait WitnessClient: Send + Sync { ) -> impl Future>, NetworkError>> + Send; } +/// Rate limit information extracted from HTTP response headers. +/// +/// Populated from standard `X-RateLimit-*` headers when present in the +/// registry response. +#[derive(Debug, Clone, Default)] +pub struct RateLimitInfo { + /// Maximum requests allowed in the current window. + pub limit: Option, + /// Remaining requests in the current window. 
+ pub remaining: Option, + /// Unix timestamp when the rate limit window resets. + pub reset: Option, + /// The access tier for this identity (e.g., "free", "team"). + pub tier: Option, +} + /// Response from a registry POST operation. /// /// Carries the HTTP status code and body so callers can dispatch on @@ -316,6 +332,8 @@ pub struct RegistryResponse { pub status: u16, /// Response body bytes. pub body: Vec, + /// Rate limit information extracted from response headers, if present. + pub rate_limit: Option, } /// Fetches and pushes data to a remote registry service. diff --git a/crates/auths-infra-http/src/registry_client.rs b/crates/auths-infra-http/src/registry_client.rs index 601df7d8..9f7f23d0 100644 --- a/crates/auths-infra-http/src/registry_client.rs +++ b/crates/auths-infra-http/src/registry_client.rs @@ -1,4 +1,4 @@ -use auths_core::ports::network::{NetworkError, RegistryClient, RegistryResponse}; +use auths_core::ports::network::{NetworkError, RateLimitInfo, RegistryClient, RegistryResponse}; use std::future::Future; use std::time::Duration; @@ -113,12 +113,48 @@ impl RegistryClient for HttpRegistryClient { .await .map_err(|e| map_reqwest_error(e, &endpoint))?; let status = response.status().as_u16(); + let rate_limit = extract_rate_limit_headers(&response); let body = response.bytes().await.map(|b| b.to_vec()).map_err(|e| { NetworkError::InvalidResponse { detail: e.to_string(), } })?; - Ok(RegistryResponse { status, body }) + Ok(RegistryResponse { + status, + body, + rate_limit, + }) } } } + +fn extract_rate_limit_headers(response: &reqwest::Response) -> Option { + let headers = response.headers(); + let limit = headers + .get("x-ratelimit-limit") + .and_then(|v| v.to_str().ok()) + .and_then(|s| s.parse::().ok()); + let remaining = headers + .get("x-ratelimit-remaining") + .and_then(|v| v.to_str().ok()) + .and_then(|s| s.parse::().ok()); + let reset = headers + .get("x-ratelimit-reset") + .and_then(|v| v.to_str().ok()) + .and_then(|s| 
s.parse::().ok()); + let tier = headers + .get("x-ratelimit-tier") + .and_then(|v| v.to_str().ok()) + .map(String::from); + + if limit.is_some() || remaining.is_some() || reset.is_some() || tier.is_some() { + Some(RateLimitInfo { + limit, + remaining, + reset, + tier, + }) + } else { + None + } +} diff --git a/crates/auths-pairing-daemon/src/security_audit.md b/crates/auths-pairing-daemon/src/security_audit.md new file mode 100644 index 00000000..31170e2d --- /dev/null +++ b/crates/auths-pairing-daemon/src/security_audit.md @@ -0,0 +1,113 @@ +# Security Audit — Public Verification Network + +**Date:** 2026-03-12 +**Scope:** `.flow/tasks/fn-72.*` through `fn-76.*`, `public_network_spec.md` +**Severity scale:** Critical > High > Medium > Low + +--- + +## Launch Blockers (fixed in task files) + +These have been encoded directly into the task files with `**SECURITY:**` annotations. + +| # | Severity | Finding | Task(s) | Fix Applied | +|---|----------|---------|---------|-------------| +| 1 | Critical | Log signing key as plain env var | fn-72.6 | HSM/KMS required for prod. Trait abstraction for key loading so HSM is drop-in. | +| 2 | High | Witness amnesia on restart enables fork acceptance | fn-73.2 | Persist `last_checkpoint.json` to disk with fsync. Load on startup. | +| 3 | High | Replay attack — no nonce on EntryContent | fn-72.6 | Content-hash dedup cache in sequencer (bounded, per-actor, 1hr TTL). | +| 4 | High | Namespace squatting — first-claim with no proof | fn-74.1 | Require Free tier (GitHub-verified) for claims. Rate-limit 5/day/identity, 10/hour/IP. | +| 5 | Medium | No entry body size limits | fn-72.6, fn-72.7 | 64 KiB max body. Field-level limits (ecosystem 128, package_name 256, display_name 256). | +| 6 | High | TOFU trust root poisoning via MITM | fn-76.2 | Hardcoded root key pinned in binary. Fetched trust root must be signed by pinned key. 
| +| 7 | Medium | Postgres desync allows stale-permission exploitation | fn-72.6 | Degraded mode: refuse appends if Postgres write fails, until reconciliation. | +| 8 | Medium | Unbounded identity creation (Sybil flooding) | fn-72.7 | Register entries rate-limited per IP (10/min). Uses existing `RateLimiter` pattern. | +| 9 | Medium | Self-referential delegation chains | fn-72.3 | Strict chain ordering, DID connectivity checks, no duplicate sequences. | +| 10 | Medium | Stale offline bundles verify indefinitely | fn-72.3 | Warning for bundles >90 days old. | + +--- + +## Ship-Aware (monitor, fix soon — NOT in task files yet) + +### SA-1: Access Grant Escalation via Gist Swap +**Severity:** Medium +**Tasks:** fn-75.3 (Auto-provisioning) + +**Attack:** Create identity B, point its platform claim to identity A's public Gist. If the server only checks "does a valid Gist exist" rather than "does the Gist reference THIS identity's DID", identity B gets Free tier. + +**Mitigation:** The `determine_tier()` function MUST verify that the Gist's signed platform claim references the requesting identity's `did:keri:E...`, not just any valid DID. Also re-verify the Gist exists periodically (not just at init time) — if the user deletes the Gist, the trust signal is gone. + +### SA-2: Witness Collusion — All Same Operator +**Severity:** Medium +**Tasks:** fn-73.2, fn-73.3, fn-76.4 + +**Attack:** All 3 initial witnesses run on Fly.io under auths.dev control. Single entity compromise = quorum. + +**Mitigation:** Before public launch, onboard at least one external witness operated by a different organization on different infrastructure. Document a witness diversity policy. The `TrustRoot.witnesses` list should eventually include organizational metadata so clients can audit diversity. 
+ +### SA-3: NamespaceTransfer Enables Instant Hostile Takeover +**Severity:** Medium +**Tasks:** fn-74.1 + +**Attack:** Compromised org admin key → immediate namespace transfer → sign malicious artifacts under stolen namespace. No cooling-off period, no multi-admin approval. + +**Mitigation:** Consider a time-locked transfer (24-48hr cancellation window). Require multi-admin approval if the org has >1 admin. At minimum, the monitor (fn-76.3) should alert on all NamespaceTransfer events so the team can respond. + +### SA-4: Sequencer DoS via Entry Flood +**Severity:** Medium +**Tasks:** fn-72.6 + +**Attack:** Per-entry checkpoint signing (Ed25519) + per-entry tile writes + per-entry Postgres writes = CPU and I/O bottleneck. Single-writer actor serializes everything through one channel. + +**Mitigation:** Use a bounded mpsc channel (backpressure). The GovernorLayer IP rate limit handles most of this, but consider batching checkpoint signing (every N entries or every T seconds) if throughput becomes an issue. Per-entry signing is the correct default for now — it gives clients immediate proofs. + +### SA-5: Tile API Read DoS (Pre-CDN) +**Severity:** Low +**Tasks:** fn-72.8 + +**Attack:** Tile API routes are exempt from rate limiting (public data). Before S3/CDN (fn-76.1), all reads hit the Fly Volume directly. Repeated partial-tile and checkpoint requests (10s TTL) are not cached. + +**Mitigation:** Apply a generous IP rate limit on tile API reads (1000 req/min/IP) for Epic 1. Once S3TileStore (fn-76.1) is deployed with CDN cache headers, this becomes a CDN problem. + +### SA-6: Revocation Check Misses During Postgres Desync +**Severity:** Medium +**Tasks:** fn-72.7 + +**Attack:** Specific instance of the Postgres desync problem. `DeviceRevoke` writes to tiles but Postgres write fails. Subsequent `Attest` from the revoked device passes validation (Postgres still shows active binding). 
+ +**Mitigation:** The degraded-mode fix (fn-72.6 launch blocker) addresses this — sequencer refuses appends when Postgres is behind tiles. For defense-in-depth, security-critical checks (device revocation, membership revocation) could double-check against the tile log. + +### SA-7: Missing Expiry on Offline Bundles (Deep) +**Severity:** Medium +**Tasks:** fn-72.3 + +**Attack:** A revoked org member's old attestations remain valid in their offline bundles because the bundle was created before revocation. The bundle verifier cannot know about the revocation without contacting the live log. + +**Mitigation:** The 90-day stale warning (launch blocker fix) helps. For stronger guarantees, consumers should re-verify bundles against the live log. Add an `auths artifact refresh` CLI command that re-fetches the latest checkpoint and delegation chain status for an existing bundle. Document that offline verification is a point-in-time check, not a live revocation check. + +### SA-8: C2SP Key ID Collision (32-bit) +**Severity:** Low +**Tasks:** fn-72.1 + +**Attack:** 4-byte key ID = birthday collision at ~65K keys. Only relevant if the witness network grows large. + +**Mitigation:** Verifier should try all keys matching a given key ID, not just the first match. This is a C2SP spec limitation, not something we can change. + +--- + +## Existing Pattern to Reuse + +The `RateLimiter` at `crates/auths-pairing-daemon/src/rate_limiter.rs` provides a clean per-IP sliding-window rate limiter with Axum middleware integration. This should be reused (or extracted to a shared crate) for: + +- **Register entry rate limiting** (fn-72.7): 10/min/IP +- **NamespaceClaim rate limiting** (fn-74.1): 10/hour/IP +- **Tile API read rate limiting** (fn-72.8): 1000/min/IP (pre-CDN) + +The pattern: `RateLimiter::new(limit)` + `limiter.check(ip)` + `rate_limit_middleware` as Axum layer. Thread-safe via `Mutex>`. 
Consider extracting to a workspace-level `auths-rate-limit` crate or a shared module so the pairing daemon, registry server, and witness binary can all use it. + +--- + +## Summary + +- **6 launch blockers** fixed directly in task files (HSM signing key, witness persistence, replay dedup, namespace anti-squat, body size limits, trust root pinning) +- **4 additional launch blockers** fixed (Postgres degraded mode, Sybil rate limiting, chain validation, stale bundles) +- **8 ship-aware items** documented above for near-term follow-up +- **2 low-severity items** acceptable for v1 diff --git a/crates/auths-pairing-protocol/Cargo.toml b/crates/auths-pairing-protocol/Cargo.toml index b7e0c8cd..6298d1bf 100644 --- a/crates/auths-pairing-protocol/Cargo.toml +++ b/crates/auths-pairing-protocol/Cargo.toml @@ -24,6 +24,11 @@ thiserror.workspace = true hkdf = "0.12" sha2 = "0.10" chacha20poly1305 = { version = "0.10", features = ["std"] } +schemars = { version = "0.8", optional = true } + +[features] +default = [] +schema = ["dep:schemars"] [dev-dependencies] regex-lite = "0.1" diff --git a/crates/auths-pairing-protocol/src/types.rs b/crates/auths-pairing-protocol/src/types.rs index bf6a80a6..8b558a34 100644 --- a/crates/auths-pairing-protocol/src/types.rs +++ b/crates/auths-pairing-protocol/src/types.rs @@ -3,6 +3,7 @@ use serde::{Deserialize, Serialize}; /// A base64url-encoded (no padding) byte string. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] #[serde(transparent)] pub struct Base64UrlEncoded(String); @@ -39,6 +40,7 @@ impl std::fmt::Display for Base64UrlEncoded { /// Session status. #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] #[serde(rename_all = "lowercase")] pub enum SessionStatus { Pending, @@ -52,6 +54,7 @@ pub enum SessionStatus { /// Request to create a new pairing session. 
#[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct CreateSessionRequest { pub session_id: String, pub controller_did: String, @@ -64,6 +67,7 @@ pub struct CreateSessionRequest { /// Response to session creation. #[derive(Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct CreateSessionResponse { pub session_id: String, pub status: SessionStatus, @@ -74,6 +78,7 @@ pub struct CreateSessionResponse { /// Request to submit a pairing response. #[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct SubmitResponseRequest { pub device_x25519_pubkey: Base64UrlEncoded, pub device_signing_pubkey: Base64UrlEncoded, @@ -86,6 +91,7 @@ pub struct SubmitResponseRequest { /// Response when getting session status. #[derive(Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct GetSessionResponse { pub session_id: String, pub status: SessionStatus, @@ -98,6 +104,7 @@ pub struct GetSessionResponse { /// Response for successful operations. #[derive(Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct SuccessResponse { pub success: bool, pub message: String, @@ -105,6 +112,7 @@ pub struct SuccessResponse { /// Request to submit a SAS confirmation (or abort). #[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct SubmitConfirmationRequest { #[serde(skip_serializing_if = "Option::is_none")] pub encrypted_attestation: Option, @@ -114,6 +122,7 @@ pub struct SubmitConfirmationRequest { /// Response when polling for confirmation. 
#[derive(Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct GetConfirmationResponse { #[serde(skip_serializing_if = "Option::is_none")] pub encrypted_attestation: Option, diff --git a/crates/auths-sdk/Cargo.toml b/crates/auths-sdk/Cargo.toml index c630927d..c9733b9e 100644 --- a/crates/auths-sdk/Cargo.toml +++ b/crates/auths-sdk/Cargo.toml @@ -17,6 +17,7 @@ auths-telemetry.workspace = true auths-policy.workspace = true auths-crypto.workspace = true auths-verifier = { workspace = true, features = ["native"] } +auths-transparency = { workspace = true, features = ["native"] } ring.workspace = true thiserror.workspace = true serde = { version = "1", features = ["derive"] } diff --git a/crates/auths-sdk/src/workflows/artifact.rs b/crates/auths-sdk/src/workflows/artifact.rs index d76293c1..e66e3e5e 100644 --- a/crates/auths-sdk/src/workflows/artifact.rs +++ b/crates/auths-sdk/src/workflows/artifact.rs @@ -1,6 +1,6 @@ //! Artifact digest computation and publishing workflow. -use auths_core::ports::network::{NetworkError, RegistryClient}; +use auths_core::ports::network::{NetworkError, RateLimitInfo, RegistryClient}; use auths_verifier::core::ResourceId; use serde::Deserialize; use thiserror::Error; @@ -31,6 +31,9 @@ pub struct ArtifactPublishResult { pub package_name: Option, /// DID of the identity that signed the attestation. pub signer_did: String, + /// Rate limit information from response headers, if the registry provides it. + #[serde(skip)] + pub rate_limit: Option, } /// Errors that can occur when publishing an artifact attestation. 
@@ -89,8 +92,9 @@ pub async fn publish_artifact( match response.status { 201 => { - let result: ArtifactPublishResult = serde_json::from_slice(&response.body) + let mut result: ArtifactPublishResult = serde_json::from_slice(&response.body) .map_err(|e| ArtifactPublishError::Deserialize(e.to_string()))?; + result.rate_limit = response.rate_limit; Ok(result) } 409 => Err(ArtifactPublishError::DuplicateAttestation), diff --git a/crates/auths-sdk/src/workflows/mod.rs b/crates/auths-sdk/src/workflows/mod.rs index 5c2f5eb5..b947a9cd 100644 --- a/crates/auths-sdk/src/workflows/mod.rs +++ b/crates/auths-sdk/src/workflows/mod.rs @@ -6,9 +6,11 @@ pub mod diagnostics; pub mod git_integration; #[cfg(feature = "mcp")] pub mod mcp; +pub mod namespace; pub mod org; pub mod platform; pub mod policy_diff; pub mod provision; pub mod rotation; pub mod signing; +pub mod transparency; diff --git a/crates/auths-sdk/src/workflows/namespace.rs b/crates/auths-sdk/src/workflows/namespace.rs new file mode 100644 index 00000000..bdaaa9d5 --- /dev/null +++ b/crates/auths-sdk/src/workflows/namespace.rs @@ -0,0 +1,463 @@ +//! Namespace management workflows: claim, delegate, transfer, and lookup. +//! +//! These workflows build transparency log entries for namespace operations, +//! canonicalize and sign them, and return the signed payload ready for +//! submission to a registry server at `/v1/log/entries`. + +use auths_core::signing::{PassphraseProvider, SecureSigner}; +use auths_core::storage::keychain::KeyAlias; +use auths_transparency::entry::{EntryBody, EntryContent, EntryType}; +use auths_verifier::CanonicalDid; +use auths_verifier::types::IdentityDID; +use base64::Engine; +use base64::engine::general_purpose::STANDARD as BASE64; +use thiserror::Error; + +/// Errors from namespace management workflows. +#[derive(Debug, Error)] +#[non_exhaustive] +pub enum NamespaceError { + /// The namespace is already claimed by another identity. 
+ #[error("namespace '{ecosystem}/{package_name}' is already claimed")] + AlreadyClaimed { + /// The package ecosystem (e.g. "npm", "crates.io"). + ecosystem: String, + /// The package name within the ecosystem. + package_name: String, + }, + + /// The namespace was not found in the registry. + #[error("namespace '{ecosystem}/{package_name}' not found")] + NotFound { + /// The package ecosystem. + ecosystem: String, + /// The package name. + package_name: String, + }, + + /// The caller is not authorized to manage this namespace. + #[error("not authorized to manage namespace '{ecosystem}/{package_name}'")] + Unauthorized { + /// The package ecosystem. + ecosystem: String, + /// The package name. + package_name: String, + }, + + /// The ecosystem string is invalid. + #[error("invalid ecosystem: {0}")] + InvalidEcosystem(String), + + /// The package name string is invalid. + #[error("invalid package name: {0}")] + InvalidPackageName(String), + + /// A network operation failed. + #[error("network error: {0}")] + NetworkError(String), + + /// A signing operation failed. + #[error("signing error: {0}")] + SigningError(String), + + /// Serialization or canonicalization failed. + #[error("serialization error: {0}")] + SerializationError(String), +} + +/// Command to claim a namespace in a package ecosystem. +/// +/// Args: +/// * `ecosystem`: Package ecosystem identifier (e.g. "npm", "crates.io"). +/// * `package_name`: Package name to claim within the ecosystem. +/// * `registry_url`: Base URL of the registry server. +/// +/// Usage: +/// ```ignore +/// let cmd = ClaimNamespaceCommand { +/// ecosystem: "npm".into(), +/// package_name: "my-package".into(), +/// registry_url: "https://registry.example.com".into(), +/// }; +/// ``` +pub struct ClaimNamespaceCommand { + /// Package ecosystem identifier (e.g. "npm", "crates.io"). + pub ecosystem: String, + /// Package name to claim within the ecosystem. + pub package_name: String, + /// Base URL of the registry server. 
+ pub registry_url: String, +} + +/// Command to delegate namespace authority to another identity. +/// +/// Args: +/// * `ecosystem`: Package ecosystem identifier. +/// * `package_name`: Package name within the ecosystem. +/// * `delegate_did`: DID of the identity receiving delegation. +/// * `registry_url`: Base URL of the registry server. +/// +/// Usage: +/// ```ignore +/// let cmd = DelegateNamespaceCommand { +/// ecosystem: "npm".into(), +/// package_name: "my-package".into(), +/// delegate_did: "did:keri:Edelegate...".into(), +/// registry_url: "https://registry.example.com".into(), +/// }; +/// ``` +pub struct DelegateNamespaceCommand { + /// Package ecosystem identifier. + pub ecosystem: String, + /// Package name within the ecosystem. + pub package_name: String, + /// DID of the identity receiving delegation. + pub delegate_did: String, + /// Base URL of the registry server. + pub registry_url: String, +} + +/// Command to transfer namespace ownership to a new identity. +/// +/// Args: +/// * `ecosystem`: Package ecosystem identifier. +/// * `package_name`: Package name within the ecosystem. +/// * `new_owner_did`: DID of the identity receiving ownership. +/// * `registry_url`: Base URL of the registry server. +/// +/// Usage: +/// ```ignore +/// let cmd = TransferNamespaceCommand { +/// ecosystem: "npm".into(), +/// package_name: "my-package".into(), +/// new_owner_did: "did:keri:Enewowner...".into(), +/// registry_url: "https://registry.example.com".into(), +/// }; +/// ``` +pub struct TransferNamespaceCommand { + /// Package ecosystem identifier. + pub ecosystem: String, + /// Package name within the ecosystem. + pub package_name: String, + /// DID of the identity receiving ownership. + pub new_owner_did: String, + /// Base URL of the registry server. + pub registry_url: String, +} + +/// Result of a successful namespace claim. +/// +/// Args: +/// * `ecosystem`: The claimed ecosystem. +/// * `package_name`: The claimed package name. 
+/// * `owner_did`: DID of the new owner. +/// * `log_sequence`: Sequence number assigned by the transparency log. +pub struct NamespaceClaimResult { + /// The claimed ecosystem. + pub ecosystem: String, + /// The claimed package name. + pub package_name: String, + /// DID of the new owner. + pub owner_did: String, + /// Sequence number assigned by the transparency log. + pub log_sequence: u64, +} + +/// Namespace information returned by a lookup. +/// +/// Args: +/// * `ecosystem`: The namespace's ecosystem. +/// * `package_name`: The namespace's package name. +/// * `owner_did`: DID of the current owner. +/// * `delegates`: DIDs of identities with delegated authority. +pub struct NamespaceInfo { + /// The namespace's ecosystem. + pub ecosystem: String, + /// The namespace's package name. + pub package_name: String, + /// DID of the current owner. + pub owner_did: String, + /// DIDs of identities with delegated authority. + pub delegates: Vec, +} + +/// A signed entry ready for submission to the registry. +/// +/// Contains the serialized entry content and the base64-encoded actor signature. +/// Submit this to `POST /v1/log/entries` as `{ "content": ..., "actor_sig": "..." }`. +/// +/// Args: +/// * `content`: The serialized entry content as a JSON value. +/// * `actor_sig`: Base64-encoded Ed25519 signature over the canonical content. +pub struct SignedEntry { + /// The serialized entry content as a JSON value. + pub content: serde_json::Value, + /// Base64-encoded Ed25519 signature over the canonical content. + pub actor_sig: String, +} + +impl SignedEntry { + /// Serialize to the JSON body expected by `POST /v1/log/entries`. 
+ /// + /// Usage: + /// ```ignore + /// let body = signed_entry.to_request_body(); + /// // POST body to registry + /// ``` + pub fn to_request_body(&self) -> serde_json::Value { + serde_json::json!({ + "content": self.content, + "actor_sig": self.actor_sig, + }) + } +} + +fn validate_ecosystem(ecosystem: &str) -> Result<(), NamespaceError> { + if ecosystem.is_empty() { + return Err(NamespaceError::InvalidEcosystem( + "ecosystem must not be empty".into(), + )); + } + Ok(()) +} + +fn validate_package_name(package_name: &str) -> Result<(), NamespaceError> { + if package_name.is_empty() { + return Err(NamespaceError::InvalidPackageName( + "package name must not be empty".into(), + )); + } + Ok(()) +} + +fn build_and_sign_entry( + content: &EntryContent, + signer: &dyn SecureSigner, + passphrase_provider: &dyn PassphraseProvider, + signer_alias: &KeyAlias, +) -> Result { + let canonical_bytes = content + .canonicalize() + .map_err(|e| NamespaceError::SerializationError(e.to_string()))?; + + let sig_bytes = signer + .sign_with_alias(signer_alias, passphrase_provider, &canonical_bytes) + .map_err(|e| NamespaceError::SigningError(e.to_string()))?; + + let content_value = serde_json::to_value(content) + .map_err(|e| NamespaceError::SerializationError(e.to_string()))?; + + Ok(SignedEntry { + content: content_value, + actor_sig: BASE64.encode(&sig_bytes), + }) +} + +/// Build and sign a `NamespaceClaim` entry. +/// +/// Creates an `EntryContent` with a `NamespaceClaim` body, canonicalizes +/// it, and signs it with the caller's key. Returns a [`SignedEntry`] +/// ready for submission to `POST /v1/log/entries`. +/// +/// Args: +/// * `cmd`: The claim command with ecosystem, package name, and registry URL. +/// * `actor_did`: The DID of the claiming identity. +/// * `signer`: Signing backend for creating the cryptographic signature. +/// * `passphrase_provider`: Provider for obtaining key decryption passphrases. +/// * `signer_alias`: Keychain alias of the signing key. 
+/// +/// Usage: +/// ```ignore +/// let signed = sign_namespace_claim(cmd, &actor_did, &signer, provider, &alias)?; +/// // POST signed.to_request_body() to registry +/// ``` +pub fn sign_namespace_claim( + cmd: &ClaimNamespaceCommand, + actor_did: &IdentityDID, + signer: &dyn SecureSigner, + passphrase_provider: &dyn PassphraseProvider, + signer_alias: &KeyAlias, +) -> Result { + validate_ecosystem(&cmd.ecosystem)?; + validate_package_name(&cmd.package_name)?; + + #[allow(clippy::disallowed_methods)] + // INVARIANT: actor_did is an IdentityDID from storage, always valid + let canonical_actor = CanonicalDid::new_unchecked(actor_did.as_str()); + + let content = EntryContent { + entry_type: EntryType::NamespaceClaim, + body: EntryBody::NamespaceClaim { + ecosystem: cmd.ecosystem.clone(), + package_name: cmd.package_name.clone(), + }, + actor_did: canonical_actor, + }; + + build_and_sign_entry(&content, signer, passphrase_provider, signer_alias) +} + +/// Build and sign a `NamespaceDelegate` entry. +/// +/// Creates an `EntryContent` with a `NamespaceDelegate` body, canonicalizes +/// it, and signs it with the caller's key. +/// +/// Args: +/// * `cmd`: The delegate command with ecosystem, package name, delegate DID, and registry URL. +/// * `actor_did`: The DID of the current namespace owner. +/// * `signer`: Signing backend for creating the cryptographic signature. +/// * `passphrase_provider`: Provider for obtaining key decryption passphrases. +/// * `signer_alias`: Keychain alias of the signing key. 
+/// +/// Usage: +/// ```ignore +/// let signed = sign_namespace_delegate(cmd, &actor_did, &signer, provider, &alias)?; +/// ``` +pub fn sign_namespace_delegate( + cmd: &DelegateNamespaceCommand, + actor_did: &IdentityDID, + signer: &dyn SecureSigner, + passphrase_provider: &dyn PassphraseProvider, + signer_alias: &KeyAlias, +) -> Result { + validate_ecosystem(&cmd.ecosystem)?; + validate_package_name(&cmd.package_name)?; + + #[allow(clippy::disallowed_methods)] + // INVARIANT: delegate_did is from CLI input, validated at presentation boundary + let delegate_identity = IdentityDID::new_unchecked(&cmd.delegate_did); + + #[allow(clippy::disallowed_methods)] + // INVARIANT: actor_did is an IdentityDID from storage, always valid + let canonical_actor = CanonicalDid::new_unchecked(actor_did.as_str()); + + let content = EntryContent { + entry_type: EntryType::NamespaceDelegate, + body: EntryBody::NamespaceDelegate { + ecosystem: cmd.ecosystem.clone(), + package_name: cmd.package_name.clone(), + delegate_did: delegate_identity, + }, + actor_did: canonical_actor, + }; + + build_and_sign_entry(&content, signer, passphrase_provider, signer_alias) +} + +/// Build and sign a `NamespaceTransfer` entry. +/// +/// Creates an `EntryContent` with a `NamespaceTransfer` body, canonicalizes +/// it, and signs it with the caller's key. +/// +/// Args: +/// * `cmd`: The transfer command with ecosystem, package name, new owner DID, and registry URL. +/// * `actor_did`: The DID of the current namespace owner. +/// * `signer`: Signing backend for creating the cryptographic signature. +/// * `passphrase_provider`: Provider for obtaining key decryption passphrases. +/// * `signer_alias`: Keychain alias of the signing key. 
+/// +/// Usage: +/// ```ignore +/// let signed = sign_namespace_transfer(cmd, &actor_did, &signer, provider, &alias)?; +/// ``` +pub fn sign_namespace_transfer( + cmd: &TransferNamespaceCommand, + actor_did: &IdentityDID, + signer: &dyn SecureSigner, + passphrase_provider: &dyn PassphraseProvider, + signer_alias: &KeyAlias, +) -> Result { + validate_ecosystem(&cmd.ecosystem)?; + validate_package_name(&cmd.package_name)?; + + #[allow(clippy::disallowed_methods)] + // INVARIANT: new_owner_did is from CLI input, validated at presentation boundary + let new_owner_identity = IdentityDID::new_unchecked(&cmd.new_owner_did); + + #[allow(clippy::disallowed_methods)] + // INVARIANT: actor_did is an IdentityDID from storage, always valid + let canonical_actor = CanonicalDid::new_unchecked(actor_did.as_str()); + + let content = EntryContent { + entry_type: EntryType::NamespaceTransfer, + body: EntryBody::NamespaceTransfer { + ecosystem: cmd.ecosystem.clone(), + package_name: cmd.package_name.clone(), + new_owner_did: new_owner_identity, + }, + actor_did: canonical_actor, + }; + + build_and_sign_entry(&content, signer, passphrase_provider, signer_alias) +} + +/// Parse a registry response JSON into a [`NamespaceClaimResult`]. +/// +/// Args: +/// * `ecosystem`: The claimed ecosystem. +/// * `package_name`: The claimed package name. +/// * `owner_did`: DID of the claiming identity. +/// * `response`: The JSON response body from the registry. 
+/// +/// Usage: +/// ```ignore +/// let result = parse_claim_response("npm", "pkg", "did:keri:E...", &response_json)?; +/// ``` +pub fn parse_claim_response( + ecosystem: &str, + package_name: &str, + owner_did: &str, + response: &serde_json::Value, +) -> NamespaceClaimResult { + let log_sequence = response + .get("sequence") + .and_then(|v| v.as_u64()) + .unwrap_or(0); + + NamespaceClaimResult { + ecosystem: ecosystem.to_string(), + package_name: package_name.to_string(), + owner_did: owner_did.to_string(), + log_sequence, + } +} + +/// Parse a registry lookup response JSON into a [`NamespaceInfo`]. +/// +/// Args: +/// * `ecosystem`: The queried ecosystem. +/// * `package_name`: The queried package name. +/// * `body`: The JSON response body from the registry. +/// +/// Usage: +/// ```ignore +/// let info = parse_lookup_response("npm", "pkg", &response_json); +/// ``` +pub fn parse_lookup_response( + ecosystem: &str, + package_name: &str, + body: &serde_json::Value, +) -> NamespaceInfo { + let owner_did = body + .get("owner_did") + .and_then(|v| v.as_str()) + .unwrap_or_default() + .to_string(); + + let delegates = body + .get("delegates") + .and_then(|v| v.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default(); + + NamespaceInfo { + ecosystem: ecosystem.to_string(), + package_name: package_name.to_string(), + owner_did, + delegates, + } +} diff --git a/crates/auths-sdk/src/workflows/transparency.rs b/crates/auths-sdk/src/workflows/transparency.rs new file mode 100644 index 00000000..f99fe3cb --- /dev/null +++ b/crates/auths-sdk/src/workflows/transparency.rs @@ -0,0 +1,569 @@ +//! SDK transparency verification workflows. 
+ +use std::path::Path; + +use auths_core::ports::network::{NetworkError, RegistryClient}; +use auths_transparency::{ + BundleVerificationReport, ConsistencyProof, LogOrigin, OfflineBundle, SignedCheckpoint, + TrustRoot, TrustRootWitness, +}; +use auths_verifier::Ed25519PublicKey; +use chrono::{DateTime, Utc}; +use thiserror::Error; + +/// Errors from transparency verification workflows. +#[derive(Debug, Error)] +pub enum TransparencyWorkflowError { + /// Bundle verification found issues. + #[error("bundle verification found issues")] + VerificationFailed(Box), + + /// Checkpoint consistency check failed. + #[error("checkpoint inconsistent: {0}")] + CheckpointInconsistent(String), + + /// Cache I/O error. + #[error("cache I/O error: {0}")] + CacheError(#[source] std::io::Error), + + /// JSON deserialization error. + #[error("deserialization error: {0}")] + DeserializationError(String), + + /// Network error fetching trust root or other remote data. + #[error("network error: {0}")] + NetworkError(#[source] NetworkError), +} + +/// Wire-format response from the registry trust-root endpoint. +#[derive(Debug, serde::Deserialize)] +struct TrustRootResponse { + log_origin: String, + log_public_key: String, + witnesses: Vec, + #[allow(dead_code)] + version: u32, +} + +/// Wire-format witness entry from the trust-root response. +#[derive(Debug, serde::Deserialize)] +struct TrustRootWitnessResponse { + name: String, + public_key: String, + #[allow(dead_code)] + url: String, +} + +/// Fetch the trust root from a registry URL. +/// +/// Issues a GET to `{registry_url}/v1/trust-root`, parses the JSON +/// response, and converts it into a domain [`TrustRoot`]. +/// +/// Args: +/// * `registry_url` — Base URL of the auths registry. +/// * `client` — Network client for HTTP communication. 
+/// +/// Usage: +/// ```ignore +/// let trust_root = fetch_trust_root("https://registry.auths.dev", &http_client).await?; +/// ``` +pub async fn fetch_trust_root( + registry_url: &str, + client: &impl RegistryClient, +) -> Result { + let bytes = client + .fetch_registry_data(registry_url, "v1/trust-root") + .await + .map_err(TransparencyWorkflowError::NetworkError)?; + + let resp: TrustRootResponse = serde_json::from_slice(&bytes) + .map_err(|e| TransparencyWorkflowError::DeserializationError(e.to_string()))?; + + let log_public_key_bytes: [u8; 32] = hex::decode(&resp.log_public_key) + .map_err(|e| { + TransparencyWorkflowError::DeserializationError(format!( + "invalid hex in log_public_key: {e}" + )) + })? + .try_into() + .map_err(|_| { + TransparencyWorkflowError::DeserializationError( + "log_public_key must be exactly 32 bytes".into(), + ) + })?; + + let log_origin = LogOrigin::new(&resp.log_origin).map_err(|e| { + TransparencyWorkflowError::DeserializationError(format!("invalid log origin: {e}")) + })?; + + let witnesses = resp + .witnesses + .into_iter() + .filter(|w| !w.public_key.is_empty()) + .filter_map(|w| { + let pk_bytes: [u8; 32] = hex::decode(&w.public_key).ok()?.try_into().ok()?; + let public_key = Ed25519PublicKey::from_bytes(pk_bytes); + let witness_did = auths_verifier::DeviceDID::from_ed25519(public_key.as_bytes()); + Some(TrustRootWitness { + witness_did, + name: w.name, + public_key, + }) + }) + .collect(); + + Ok(TrustRoot { + log_public_key: Ed25519PublicKey::from_bytes(log_public_key_bytes), + log_origin, + witnesses, + }) +} + +/// Configuration for bundle verification. +pub struct BundleVerifyConfig { + /// The offline bundle serialized as JSON. + pub bundle_json: String, + /// The trust root serialized as JSON. + pub trust_root_json: String, +} + +/// Result of a consistency check between cached and new checkpoints. +#[derive(Debug)] +pub struct ConsistencyReport { + /// Tree size of the previously cached checkpoint (0 if none). 
+ pub old_size: u64, + /// Tree size of the newly cached checkpoint. + pub new_size: u64, + /// Whether consistency was verified. + pub consistent: bool, +} + +/// Verify an offline transparency bundle. +/// +/// Deserializes the bundle and trust root from JSON, delegates to +/// `auths_transparency::verify_bundle`, and returns the report. +/// Returns `Err(VerificationFailed)` when the bundle does not pass +/// all verification checks. +/// +/// Args: +/// * `config` — Bundle and trust root JSON strings. +/// * `now` — Injected wall-clock time. +/// +/// Usage: +/// ```ignore +/// let report = verify_artifact_bundle(&config, now)?; +/// ``` +pub fn verify_artifact_bundle( + config: &BundleVerifyConfig, + now: DateTime, +) -> Result { + let bundle: OfflineBundle = serde_json::from_str(&config.bundle_json) + .map_err(|e| TransparencyWorkflowError::DeserializationError(e.to_string()))?; + let trust_root: TrustRoot = serde_json::from_str(&config.trust_root_json) + .map_err(|e| TransparencyWorkflowError::DeserializationError(e.to_string()))?; + + let report = auths_transparency::verify_bundle(&bundle, &trust_root, now); + + if !report.is_valid() { + return Err(TransparencyWorkflowError::VerificationFailed(Box::new( + report, + ))); + } + + Ok(report) +} + +/// Update the local checkpoint cache after verifying consistency. +/// +/// Loads the cached checkpoint from disk, verifies that the new checkpoint +/// is a consistent append-only extension of the cached one, and writes the +/// new checkpoint to disk. +/// +/// **Note:** Uses blocking `std::fs` I/O (not `tokio::fs`). This is acceptable +/// for the current use case — a single small JSON file read/write from CLI context. +/// If called from a multi-threaded async server, wrap in `tokio::task::spawn_blocking`. +/// +/// Args: +/// * `cache_path` — Path to the cached checkpoint JSON file. +/// * `new_checkpoint` — The newly received signed checkpoint. 
+/// * `consistency_proof` — Proof that old tree is a prefix of the new tree. +/// * `_trust_root` — Trust root for checkpoint signature verification (reserved for future use). +/// * `_now` — Injected wall-clock time (reserved for future use). +/// +/// Usage: +/// ```ignore +/// let report = update_checkpoint_cache( +/// &cache_path, +/// &new_checkpoint, +/// &consistency_proof, +/// &trust_root, +/// now, +/// )?; +/// ``` +#[allow(clippy::disallowed_methods)] // Filesystem I/O is intentional here — this is a top-level SDK workflow +pub fn update_checkpoint_cache( + cache_path: &Path, + new_checkpoint: &SignedCheckpoint, + consistency_proof: &ConsistencyProof, + _trust_root: &TrustRoot, + _now: DateTime, +) -> Result { + let old_checkpoint = match std::fs::read_to_string(cache_path) { + Ok(json) => { + let cp: SignedCheckpoint = serde_json::from_str(&json) + .map_err(|e| TransparencyWorkflowError::DeserializationError(e.to_string()))?; + Some(cp) + } + Err(e) if e.kind() == std::io::ErrorKind::NotFound => None, + Err(e) => return Err(TransparencyWorkflowError::CacheError(e)), + }; + + if let Some(ref old) = old_checkpoint { + auths_transparency::verify_consistency( + old.checkpoint.size, + new_checkpoint.checkpoint.size, + &consistency_proof.hashes, + &old.checkpoint.root, + &new_checkpoint.checkpoint.root, + ) + .map_err(|e| TransparencyWorkflowError::CheckpointInconsistent(e.to_string()))?; + } + + let json = serde_json::to_string_pretty(new_checkpoint) + .map_err(|e| TransparencyWorkflowError::DeserializationError(e.to_string()))?; + + if let Some(parent) = cache_path.parent() { + std::fs::create_dir_all(parent).map_err(TransparencyWorkflowError::CacheError)?; + } + std::fs::write(cache_path, json.as_bytes()).map_err(TransparencyWorkflowError::CacheError)?; + + let old_size = old_checkpoint.map(|c| c.checkpoint.size).unwrap_or(0); + + Ok(ConsistencyReport { + old_size, + new_size: new_checkpoint.checkpoint.size, + consistent: true, + }) +} + +/// Cache a 
checkpoint using trust-on-first-use (TOFU) semantics. +/// +/// If no cached checkpoint exists, the new checkpoint is accepted and written. +/// If a cached checkpoint exists with the same or smaller tree size and matching +/// root, the cache is left unchanged. If the cached checkpoint has the same size +/// but a different root, this is equivocation — returns a hard error. +/// If a consistency proof is provided, full Merkle consistency is verified. +/// +/// Args: +/// * `cache_path` — Path to the cached checkpoint JSON file (`~/.auths/log_checkpoint.json`). +/// * `new_checkpoint` — The checkpoint to cache. +/// * `consistency_proof` — Optional consistency proof for cache-hit cases. +/// +/// Usage: +/// ```ignore +/// try_cache_checkpoint( +/// &Path::new("~/.auths/log_checkpoint.json"), +/// &bundle.signed_checkpoint, +/// None, +/// )?; +/// ``` +#[allow(clippy::disallowed_methods)] // Filesystem I/O is intentional — top-level SDK workflow +pub fn try_cache_checkpoint( + cache_path: &Path, + new_checkpoint: &SignedCheckpoint, + consistency_proof: Option<&ConsistencyProof>, +) -> Result { + let old_checkpoint = match std::fs::read_to_string(cache_path) { + Ok(json) => { + let cp: SignedCheckpoint = serde_json::from_str(&json) + .map_err(|e| TransparencyWorkflowError::DeserializationError(e.to_string()))?; + Some(cp) + } + Err(e) if e.kind() == std::io::ErrorKind::NotFound => None, + Err(e) => return Err(TransparencyWorkflowError::CacheError(e)), + }; + + if let Some(ref old) = old_checkpoint { + // Equivocation: same size, different root + if old.checkpoint.size == new_checkpoint.checkpoint.size + && old.checkpoint.root != new_checkpoint.checkpoint.root + { + return Err(TransparencyWorkflowError::CheckpointInconsistent(format!( + "equivocation detected: same tree size {} but different roots", + old.checkpoint.size + ))); + } + + // New checkpoint must not be smaller + if new_checkpoint.checkpoint.size < old.checkpoint.size { + return 
Err(TransparencyWorkflowError::CheckpointInconsistent(format!( + "new checkpoint size {} is smaller than cached size {}", + new_checkpoint.checkpoint.size, old.checkpoint.size + ))); + } + + // Same checkpoint — no update needed + if old.checkpoint.size == new_checkpoint.checkpoint.size { + return Ok(ConsistencyReport { + old_size: old.checkpoint.size, + new_size: new_checkpoint.checkpoint.size, + consistent: true, + }); + } + + // If we have a consistency proof, verify it + if let Some(proof) = consistency_proof { + auths_transparency::verify_consistency( + old.checkpoint.size, + new_checkpoint.checkpoint.size, + &proof.hashes, + &old.checkpoint.root, + &new_checkpoint.checkpoint.root, + ) + .map_err(|e| TransparencyWorkflowError::CheckpointInconsistent(e.to_string()))?; + } + } + + let json = serde_json::to_string_pretty(new_checkpoint) + .map_err(|e| TransparencyWorkflowError::DeserializationError(e.to_string()))?; + + if let Some(parent) = cache_path.parent() { + std::fs::create_dir_all(parent).map_err(TransparencyWorkflowError::CacheError)?; + } + std::fs::write(cache_path, json.as_bytes()).map_err(TransparencyWorkflowError::CacheError)?; + + let old_size = old_checkpoint.map(|c| c.checkpoint.size).unwrap_or(0); + + Ok(ConsistencyReport { + old_size, + new_size: new_checkpoint.checkpoint.size, + consistent: true, + }) +} + +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used, clippy::disallowed_methods)] +mod tests { + use super::*; + use auths_transparency::checkpoint::{Checkpoint, SignedCheckpoint}; + use auths_transparency::entry::{Entry, EntryBody, EntryContent, EntryType}; + use auths_transparency::proof::InclusionProof; + use auths_transparency::types::{LogOrigin, MerkleHash}; + use auths_verifier::{CanonicalDid, DeviceDID, Ed25519PublicKey, Ed25519Signature}; + + fn dummy_signed_checkpoint(size: u64, root: MerkleHash) -> SignedCheckpoint { + SignedCheckpoint { + checkpoint: Checkpoint { + origin: LogOrigin::new("test.dev/log").unwrap(), + 
size, + root, + timestamp: chrono::DateTime::parse_from_rfc3339("2025-06-15T00:00:00Z") + .unwrap() + .with_timezone(&Utc), + }, + log_signature: Ed25519Signature::from_bytes([0u8; 64]), + log_public_key: Ed25519PublicKey::from_bytes([0u8; 32]), + witnesses: vec![], + } + } + + fn dummy_trust_root() -> TrustRoot { + TrustRoot { + log_public_key: Ed25519PublicKey::from_bytes([0u8; 32]), + log_origin: LogOrigin::new("test.dev/log").unwrap(), + witnesses: vec![], + } + } + + #[test] + fn verify_artifact_bundle_invalid_bundle_json() { + let config = BundleVerifyConfig { + bundle_json: "not valid json".into(), + trust_root_json: "{}".into(), + }; + let now = chrono::DateTime::parse_from_rfc3339("2025-07-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + let err = verify_artifact_bundle(&config, now).unwrap_err(); + assert!(matches!( + err, + TransparencyWorkflowError::DeserializationError(_) + )); + } + + fn dummy_bundle() -> OfflineBundle { + let ts = chrono::DateTime::parse_from_rfc3339("2025-06-15T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + let entry = Entry { + sequence: 0, + timestamp: ts, + content: EntryContent { + entry_type: EntryType::DeviceBind, + body: EntryBody::DeviceBind { + device_did: DeviceDID::new_unchecked("did:key:z6MkTest"), + public_key: Ed25519PublicKey::from_bytes([0u8; 32]), + }, + actor_did: CanonicalDid::new_unchecked("did:key:z6MkTest"), + }, + actor_sig: Ed25519Signature::empty(), + }; + let root = MerkleHash::from_bytes([0u8; 32]); + OfflineBundle { + entry, + inclusion_proof: InclusionProof { + index: 0, + size: 1, + root, + hashes: vec![], + }, + signed_checkpoint: dummy_signed_checkpoint(1, root), + delegation_chain: vec![], + } + } + + #[test] + fn verify_artifact_bundle_invalid_trust_root_json() { + let bundle = dummy_bundle(); + let bundle_json = serde_json::to_string(&bundle).unwrap(); + + let config = BundleVerifyConfig { + bundle_json, + trust_root_json: "not valid json".into(), + }; + let now = 
chrono::DateTime::parse_from_rfc3339("2025-07-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + let err = verify_artifact_bundle(&config, now).unwrap_err(); + assert!(matches!( + err, + TransparencyWorkflowError::DeserializationError(_) + )); + } + + #[test] + fn update_checkpoint_cache_writes_new_file() { + let dir = tempfile::tempdir().unwrap(); + let cache_path = dir.path().join("checkpoint.json"); + + let root = MerkleHash::from_bytes([0xaa; 32]); + let new_cp = dummy_signed_checkpoint(10, root); + let proof = ConsistencyProof { + old_size: 0, + new_size: 10, + old_root: MerkleHash::from_bytes([0u8; 32]), + new_root: root, + hashes: vec![], + }; + let trust_root = dummy_trust_root(); + let now = chrono::DateTime::parse_from_rfc3339("2025-07-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + + let report = + update_checkpoint_cache(&cache_path, &new_cp, &proof, &trust_root, now).unwrap(); + + assert_eq!(report.old_size, 0); + assert_eq!(report.new_size, 10); + assert!(report.consistent); + assert!(cache_path.exists()); + + let written: SignedCheckpoint = + serde_json::from_str(&std::fs::read_to_string(&cache_path).unwrap()).unwrap(); + assert_eq!(written.checkpoint.size, 10); + } + + #[test] + fn update_checkpoint_cache_creates_parent_dirs() { + let dir = tempfile::tempdir().unwrap(); + let cache_path = dir + .path() + .join("nested") + .join("dir") + .join("checkpoint.json"); + + let root = MerkleHash::from_bytes([0xbb; 32]); + let new_cp = dummy_signed_checkpoint(5, root); + let proof = ConsistencyProof { + old_size: 0, + new_size: 5, + old_root: MerkleHash::from_bytes([0u8; 32]), + new_root: root, + hashes: vec![], + }; + let trust_root = dummy_trust_root(); + let now = chrono::DateTime::parse_from_rfc3339("2025-07-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + + let report = + update_checkpoint_cache(&cache_path, &new_cp, &proof, &trust_root, now).unwrap(); + + assert!(report.consistent); + assert!(cache_path.exists()); + } + + #[test] + fn 
try_cache_checkpoint_tofu_writes_new_file() { + let dir = tempfile::tempdir().unwrap(); + let cache_path = dir.path().join("log_checkpoint.json"); + + let root = MerkleHash::from_bytes([0xaa; 32]); + let cp = dummy_signed_checkpoint(10, root); + + let report = try_cache_checkpoint(&cache_path, &cp, None).unwrap(); + assert_eq!(report.old_size, 0); + assert_eq!(report.new_size, 10); + assert!(report.consistent); + assert!(cache_path.exists()); + } + + #[test] + fn try_cache_checkpoint_same_checkpoint_is_noop() { + let dir = tempfile::tempdir().unwrap(); + let cache_path = dir.path().join("log_checkpoint.json"); + + let root = MerkleHash::from_bytes([0xaa; 32]); + let cp = dummy_signed_checkpoint(10, root); + + try_cache_checkpoint(&cache_path, &cp, None).unwrap(); + let report = try_cache_checkpoint(&cache_path, &cp, None).unwrap(); + assert_eq!(report.old_size, 10); + assert_eq!(report.new_size, 10); + assert!(report.consistent); + } + + #[test] + fn try_cache_checkpoint_detects_equivocation() { + let dir = tempfile::tempdir().unwrap(); + let cache_path = dir.path().join("log_checkpoint.json"); + + let root1 = MerkleHash::from_bytes([0xaa; 32]); + let cp1 = dummy_signed_checkpoint(10, root1); + try_cache_checkpoint(&cache_path, &cp1, None).unwrap(); + + let root2 = MerkleHash::from_bytes([0xbb; 32]); + let cp2 = dummy_signed_checkpoint(10, root2); + let err = try_cache_checkpoint(&cache_path, &cp2, None).unwrap_err(); + assert!(matches!( + err, + TransparencyWorkflowError::CheckpointInconsistent(_) + )); + } + + #[test] + fn try_cache_checkpoint_rejects_smaller_size() { + let dir = tempfile::tempdir().unwrap(); + let cache_path = dir.path().join("log_checkpoint.json"); + + let cp1 = dummy_signed_checkpoint(10, MerkleHash::from_bytes([0xaa; 32])); + try_cache_checkpoint(&cache_path, &cp1, None).unwrap(); + + let cp2 = dummy_signed_checkpoint(5, MerkleHash::from_bytes([0xbb; 32])); + let err = try_cache_checkpoint(&cache_path, &cp2, None).unwrap_err(); + 
assert!(matches!( + err, + TransparencyWorkflowError::CheckpointInconsistent(_) + )); + } +} diff --git a/crates/auths-transparency/Cargo.toml b/crates/auths-transparency/Cargo.toml new file mode 100644 index 00000000..16f8c494 --- /dev/null +++ b/crates/auths-transparency/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "auths-transparency" +version.workspace = true +edition = "2024" +description = "Append-only transparency log types, Merkle math, and tile storage for Auths" +publish = true +license.workspace = true +repository.workspace = true +homepage.workspace = true +keywords = ["transparency", "merkle", "tlog", "verification"] +categories = ["cryptography", "data-structures"] + +[dependencies] +auths-crypto = { workspace = true, default-features = false } +auths-verifier = { workspace = true, default-features = false } +base64.workspace = true +chrono = { version = "0.4", features = ["serde"] } +hex = { version = "0.4.3", features = ["serde"] } +json-canon.workspace = true +ring = { workspace = true, optional = true } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0.149" +sha2 = "0.10" +thiserror.workspace = true + +# native-only deps +async-trait = { version = "0.1", optional = true } +futures = { version = "0.3", optional = true } +tokio = { workspace = true, optional = true } + +# s3-only deps +aws-sdk-s3 = { version = "1", optional = true } +aws-config = { version = "1", optional = true } + +[dev-dependencies] +auths-crypto = { workspace = true, features = ["test-utils"] } +proptest = "1.4" +ring.workspace = true +tempfile = "3" +tokio = { workspace = true, features = ["macros", "rt"] } + +[features] +default = ["native"] +native = ["dep:async-trait", "dep:futures", "dep:tokio", "dep:ring", "auths-verifier/native", "auths-crypto/native"] +s3 = ["native", "dep:aws-sdk-s3", "dep:aws-config"] + +[lints] +workspace = true diff --git a/crates/auths-transparency/clippy.toml b/crates/auths-transparency/clippy.toml new file mode 100644 index 
00000000..2e11bd88 --- /dev/null +++ b/crates/auths-transparency/clippy.toml @@ -0,0 +1,21 @@ +# Duplicated from workspace clippy.toml — keep in sync +# Clippy does NOT merge per-crate configs with workspace config. +# Any changes to the workspace clippy.toml must be replicated here. + +allow-unwrap-in-tests = true +allow-expect-in-tests = true + +disallowed-methods = [ + # === Workspace rules (duplicated from root clippy.toml) === + { path = "chrono::offset::Utc::now", reason = "inject ClockProvider instead of calling Utc::now() directly", allow-invalid = true }, + { path = "std::time::SystemTime::now", reason = "inject ClockProvider instead of calling SystemTime::now() directly", allow-invalid = true }, + { path = "std::env::var", reason = "use EnvironmentConfig abstraction instead of reading env vars directly", allow-invalid = true }, + { path = "uuid::Uuid::new_v4", reason = "Use UuidProvider::new_id() instead. Inject SystemUuidProvider in production and DeterministicUuidProvider in tests." }, + + # === DID/newtype construction: prefer parse() for external input === + { path = "auths_verifier::types::IdentityDID::new_unchecked", reason = "Use IdentityDID::parse() for external input. Use #[allow(clippy::disallowed_methods)] with INVARIANT comment for proven-safe paths.", allow-invalid = true }, + { path = "auths_verifier::types::DeviceDID::new_unchecked", reason = "Use DeviceDID::parse() for external input. Use #[allow(clippy::disallowed_methods)] with INVARIANT comment for proven-safe paths.", allow-invalid = true }, + { path = "auths_verifier::types::CanonicalDid::new_unchecked", reason = "Use CanonicalDid::parse() for external input. Use #[allow(clippy::disallowed_methods)] with INVARIANT comment for proven-safe paths.", allow-invalid = true }, + { path = "auths_verifier::core::CommitOid::new_unchecked", reason = "Use CommitOid::parse() for external input. 
Use #[allow(clippy::disallowed_methods)] with INVARIANT comment for proven-safe paths.", allow-invalid = true }, + { path = "auths_verifier::core::PublicKeyHex::new_unchecked", reason = "Use PublicKeyHex::parse() for external input. Use #[allow(clippy::disallowed_methods)] with INVARIANT comment for proven-safe paths.", allow-invalid = true }, +] diff --git a/crates/auths-transparency/src/bundle.rs b/crates/auths-transparency/src/bundle.rs new file mode 100644 index 00000000..1678b94f --- /dev/null +++ b/crates/auths-transparency/src/bundle.rs @@ -0,0 +1,363 @@ +use auths_verifier::{DeviceDID, IdentityDID, Role}; +use serde::{Deserialize, Serialize}; + +use crate::checkpoint::SignedCheckpoint; +use crate::entry::{Entry, EntryType}; +use crate::proof::InclusionProof; + +/// An offline verification bundle containing an entry, its inclusion proof, +/// and a signed checkpoint. +/// +/// Allows clients to verify that an entry was logged without contacting the +/// transparency log server. +/// +/// Args: +/// * `entry` — The log entry being proven. +/// * `inclusion_proof` — Merkle proof that the entry is included in the log. +/// * `signed_checkpoint` — Signed checkpoint attesting to the log state. +/// * `delegation_chain` — Optional chain of delegation links. +/// +/// Usage: +/// ```ignore +/// let bundle = OfflineBundle { +/// entry, +/// inclusion_proof: proof, +/// signed_checkpoint: checkpoint, +/// delegation_chain: vec![], +/// }; +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct OfflineBundle { + pub entry: Entry, + pub inclusion_proof: InclusionProof, + pub signed_checkpoint: SignedCheckpoint, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub delegation_chain: Vec, +} + +/// A single link in a delegation chain, containing the logged entry +/// and its Merkle inclusion proof. 
+/// +/// Each link proves that a delegation event (e.g., org member add) was +/// recorded in the transparency log. +/// +/// Args: +/// * `link_type` — The type of delegation event this link represents. +/// * `entry` — The full log entry for the delegation event. +/// * `inclusion_proof` — Merkle proof that the entry is included in the log. +/// +/// Usage: +/// ```ignore +/// let link = DelegationChainLink { +/// link_type: EntryType::OrgMemberAdd, +/// entry, +/// inclusion_proof: proof, +/// }; +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct DelegationChainLink { + pub link_type: EntryType, + pub entry: Entry, + pub inclusion_proof: InclusionProof, +} + +/// Result of verifying an [`OfflineBundle`]. +/// +/// Reports the outcome of each verification dimension independently, +/// allowing consumers to make nuanced trust decisions. +/// +/// Args: +/// * `signature` — Whether the entry's actor signature verified. +/// * `inclusion` — Whether the Merkle inclusion proof verified. +/// * `checkpoint` — Whether the signed checkpoint verified. +/// * `witnesses` — Witness cosignature quorum status. +/// * `namespace` — Whether the actor is authorized for the namespace. +/// * `delegation` — Delegation chain verification status. +/// * `warnings` — Non-fatal issues encountered during verification. 
+/// +/// Usage: +/// ```ignore +/// let report = BundleVerificationReport { +/// signature: SignatureStatus::Verified, +/// inclusion: InclusionStatus::Verified, +/// checkpoint: CheckpointStatus::Verified, +/// witnesses: WitnessStatus::NotProvided, +/// namespace: NamespaceStatus::Authorized, +/// delegation: DelegationStatus::NoDelegationData, +/// warnings: vec![], +/// }; +/// assert!(report.is_valid()); +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct BundleVerificationReport { + pub signature: SignatureStatus, + pub inclusion: InclusionStatus, + pub checkpoint: CheckpointStatus, + pub witnesses: WitnessStatus, + pub namespace: NamespaceStatus, + pub delegation: DelegationStatus, + pub warnings: Vec, +} + +impl BundleVerificationReport { + /// Whether the bundle passed all verification checks. + /// + /// Returns `true` when signature, inclusion, and checkpoint are all verified, + /// witnesses meet quorum (or are not provided), namespace is authorized or owned, + /// and delegation is not broken. + /// + /// **Trust note:** `NoDelegationData` is accepted as valid because many bundles + /// (e.g., direct-signing, early Epic 1 bundles) lack delegation chains. This is + /// a weaker trust signal than `ChainVerified` — callers needing full provenance + /// should check `delegation` explicitly rather than relying solely on `is_valid()`. + pub fn is_valid(&self) -> bool { + let sig_ok = matches!(self.signature, SignatureStatus::Verified); + let inc_ok = matches!(self.inclusion, InclusionStatus::Verified); + let chk_ok = matches!( + self.checkpoint, + CheckpointStatus::Verified | CheckpointStatus::NotProvided + ); + let wit_ok = matches!( + self.witnesses, + WitnessStatus::Quorum { .. } | WitnessStatus::NotProvided + ); + let ns_ok = matches!( + self.namespace, + NamespaceStatus::Authorized | NamespaceStatus::Owned + ); + let del_ok = !matches!(self.delegation, DelegationStatus::ChainBroken { .. 
}); + + sig_ok && inc_ok && chk_ok && wit_ok && ns_ok && del_ok + } +} + +/// Outcome of verifying the entry's actor signature. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "status", rename_all = "snake_case")] +#[non_exhaustive] +pub enum SignatureStatus { + /// The signature verified against the actor's public key. + Verified, + /// The signature did not verify. + Failed { + /// Description of the failure. + reason: String, + }, + /// No signature data was available for verification. + NotProvided, +} + +/// Outcome of verifying the Merkle inclusion proof. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "status", rename_all = "snake_case")] +#[non_exhaustive] +pub enum InclusionStatus { + /// The inclusion proof verified against the checkpoint root. + Verified, + /// The inclusion proof did not verify. + Failed { + /// Description of the failure. + reason: String, + }, + /// No inclusion proof was available for verification. + NotProvided, +} + +/// Outcome of verifying the signed checkpoint. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "status", rename_all = "snake_case")] +#[non_exhaustive] +pub enum CheckpointStatus { + /// The checkpoint signature verified against the log's public key. + Verified, + /// The checkpoint signature did not verify. + InvalidSignature, + /// No checkpoint was available for verification. + NotProvided, +} + +/// Outcome of verifying witness cosignatures. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "status", rename_all = "snake_case")] +#[non_exhaustive] +pub enum WitnessStatus { + /// Witness quorum was met. + Quorum { + /// Number of witnesses that verified. + verified: usize, + /// Number of witnesses required for quorum. + required: usize, + }, + /// Witness quorum was not met. + Insufficient { + /// Number of witnesses that verified. 
+ verified: usize, + /// Number of witnesses required for quorum. + required: usize, + }, + /// No witness data was available for verification. + NotProvided, +} + +/// Outcome of verifying the actor's namespace authorization. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "status", rename_all = "snake_case")] +#[non_exhaustive] +pub enum NamespaceStatus { + /// The actor is authorized for the namespace via delegation. + Authorized, + /// The actor owns the namespace directly. + Owned, + /// The namespace has no owner on record. + Unowned, + /// The actor is not authorized for the namespace. + Unauthorized, +} + +/// Outcome of verifying the delegation chain. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(tag = "status", rename_all = "snake_case")] +#[non_exhaustive] +pub enum DelegationStatus { + /// The actor signed directly (no delegation needed). + Direct, + /// The delegation chain verified successfully. + ChainVerified { + /// The organization identity that issued the delegation. + org_did: IdentityDID, + /// The member identity that received the delegation. + member_did: IdentityDID, + /// The role granted to the member. + member_role: Role, + /// The device that performed the action on behalf of the member. + device_did: DeviceDID, + }, + /// The delegation chain could not be verified. + ChainBroken { + /// Description of why the chain is broken. + reason: String, + }, + /// No delegation data was present in the bundle. 
+ NoDelegationData, +} + +#[cfg(test)] +#[allow(clippy::disallowed_methods)] +mod tests { + use super::*; + + #[test] + fn signature_status_serializes() { + let status = SignatureStatus::Verified; + let json = serde_json::to_string(&status).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed["status"], "verified"); + } + + #[test] + fn witness_status_quorum_serializes() { + let status = WitnessStatus::Quorum { + verified: 3, + required: 2, + }; + let json = serde_json::to_string(&status).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed["status"], "quorum"); + assert_eq!(parsed["verified"], 3); + assert_eq!(parsed["required"], 2); + } + + #[test] + fn witness_status_insufficient_serializes() { + let status = WitnessStatus::Insufficient { + verified: 1, + required: 3, + }; + let json = serde_json::to_string(&status).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed["status"], "insufficient"); + assert_eq!(parsed["verified"], 1); + assert_eq!(parsed["required"], 3); + } + + #[test] + fn report_is_valid_all_verified() { + let report = BundleVerificationReport { + signature: SignatureStatus::Verified, + inclusion: InclusionStatus::Verified, + checkpoint: CheckpointStatus::Verified, + witnesses: WitnessStatus::Quorum { + verified: 2, + required: 2, + }, + namespace: NamespaceStatus::Authorized, + delegation: DelegationStatus::Direct, + warnings: vec![], + }; + assert!(report.is_valid()); + } + + #[test] + fn report_is_valid_with_not_provided_optionals() { + let report = BundleVerificationReport { + signature: SignatureStatus::Verified, + inclusion: InclusionStatus::Verified, + checkpoint: CheckpointStatus::NotProvided, + witnesses: WitnessStatus::NotProvided, + namespace: NamespaceStatus::Owned, + delegation: DelegationStatus::NoDelegationData, + warnings: vec![], + }; + assert!(report.is_valid()); + } + + #[test] + fn 
report_invalid_on_failed_signature() { + let report = BundleVerificationReport { + signature: SignatureStatus::Failed { + reason: "bad sig".into(), + }, + inclusion: InclusionStatus::Verified, + checkpoint: CheckpointStatus::Verified, + witnesses: WitnessStatus::NotProvided, + namespace: NamespaceStatus::Authorized, + delegation: DelegationStatus::Direct, + warnings: vec![], + }; + assert!(!report.is_valid()); + } + + #[test] + fn report_invalid_on_broken_delegation() { + let report = BundleVerificationReport { + signature: SignatureStatus::Verified, + inclusion: InclusionStatus::Verified, + checkpoint: CheckpointStatus::Verified, + witnesses: WitnessStatus::NotProvided, + namespace: NamespaceStatus::Authorized, + delegation: DelegationStatus::ChainBroken { + reason: "missing link".into(), + }, + warnings: vec![], + }; + assert!(!report.is_valid()); + } + + #[test] + fn delegation_status_chain_verified_serializes() { + let status = DelegationStatus::ChainVerified { + org_did: IdentityDID::new_unchecked("did:keri:EOrg123"), + member_did: IdentityDID::new_unchecked("did:keri:EMember456"), + member_role: Role::Admin, + device_did: DeviceDID::new_unchecked("did:key:z6MkDevice789"), + }; + let json = serde_json::to_string(&status).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed["status"], "chain_verified"); + assert_eq!(parsed["org_did"], "did:keri:EOrg123"); + assert_eq!(parsed["member_did"], "did:keri:EMember456"); + } +} diff --git a/crates/auths-transparency/src/checkpoint.rs b/crates/auths-transparency/src/checkpoint.rs new file mode 100644 index 00000000..fe365fff --- /dev/null +++ b/crates/auths-transparency/src/checkpoint.rs @@ -0,0 +1,146 @@ +use auths_verifier::{Ed25519PublicKey, Ed25519Signature}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; + +use crate::types::{LogOrigin, MerkleHash}; + +/// An unsigned transparency log checkpoint. 
+/// +/// Args: +/// * `origin` — Log origin string (e.g., "auths.dev/log"). +/// * `size` — Number of entries in the log at this checkpoint. +/// * `root` — Merkle root hash of the log at this size. +/// * `timestamp` — When the checkpoint was created. +/// +/// Usage: +/// ```ignore +/// let cp = Checkpoint { +/// origin: LogOrigin::new("auths.dev/log")?, +/// size: 42, +/// root: merkle_root, +/// timestamp: Utc::now(), +/// }; +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct Checkpoint { + pub origin: LogOrigin, + pub size: u64, + pub root: MerkleHash, + pub timestamp: DateTime, +} + +impl Checkpoint { + /// Serialize to the C2SP checkpoint body format (three lines: origin, size, base64 hash). + pub fn to_note_body(&self) -> String { + format!( + "{}\n{}\n{}\n", + self.origin, + self.size, + self.root.to_base64() + ) + } + + /// Parse from C2SP checkpoint body lines. + pub fn from_note_body( + body: &str, + timestamp: DateTime, + ) -> Result { + let lines: Vec<&str> = body.lines().collect(); + if lines.len() < 3 { + return Err(crate::error::TransparencyError::InvalidNote( + "checkpoint body must have at least 3 lines".into(), + )); + } + let origin = LogOrigin::new(lines[0])?; + let size: u64 = lines[1].parse().map_err(|e: std::num::ParseIntError| { + crate::error::TransparencyError::InvalidNote(e.to_string()) + })?; + let root = MerkleHash::from_base64(lines[2])?; + Ok(Self { + origin, + size, + root, + timestamp, + }) + } +} + +/// A checkpoint signed by the log operator (and optionally witnesses). +/// +/// Args: +/// * `checkpoint` — The unsigned checkpoint data. +/// * `log_signature` — Ed25519 signature from the log's signing key. +/// * `log_public_key` — The log operator's public key. +/// * `witnesses` — Optional witness cosignatures. 
+/// +/// Usage: +/// ```ignore +/// let signed = SignedCheckpoint { +/// checkpoint, +/// log_signature: sig, +/// log_public_key: log_pk, +/// witnesses: vec![], +/// }; +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct SignedCheckpoint { + pub checkpoint: Checkpoint, + pub log_signature: Ed25519Signature, + pub log_public_key: Ed25519PublicKey, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub witnesses: Vec, +} + +/// A witness cosignature on a checkpoint. +/// +/// Witnesses independently verify the checkpoint and add their signature +/// to increase trust in the log's consistency claims. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct WitnessCosignature { + pub witness_name: String, + pub witness_public_key: Ed25519PublicKey, + pub signature: Ed25519Signature, + pub timestamp: DateTime, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn checkpoint_note_body_roundtrip() { + let ts = chrono::DateTime::parse_from_rfc3339("2025-06-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + let cp = Checkpoint { + origin: LogOrigin::new("auths.dev/log").unwrap(), + size: 42, + root: MerkleHash::from_bytes([0xab; 32]), + timestamp: ts, + }; + let body = cp.to_note_body(); + let parsed = Checkpoint::from_note_body(&body, ts).unwrap(); + assert_eq!(cp.origin, parsed.origin); + assert_eq!(cp.size, parsed.size); + assert_eq!(cp.root, parsed.root); + } + + #[test] + fn checkpoint_json_roundtrip() { + let ts = chrono::DateTime::parse_from_rfc3339("2025-06-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + let cp = Checkpoint { + origin: LogOrigin::new("auths.dev/log").unwrap(), + size: 100, + root: MerkleHash::from_bytes([0x01; 32]), + timestamp: ts, + }; + let json = serde_json::to_string(&cp).unwrap(); + let back: Checkpoint = serde_json::from_str(&json).unwrap(); + assert_eq!(cp, back); + } +} diff --git 
a/crates/auths-transparency/src/entry.rs b/crates/auths-transparency/src/entry.rs new file mode 100644 index 00000000..740374cf --- /dev/null +++ b/crates/auths-transparency/src/entry.rs @@ -0,0 +1,239 @@ +use auths_verifier::{ + CanonicalDid, Capability, DeviceDID, Ed25519PublicKey, Ed25519Signature, IdentityDID, Role, +}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +/// The type of mutation recorded in a transparency log entry. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum EntryType { + Register, + Rotate, + Abandon, + OrgCreate, + OrgAddMember, + OrgRevokeMember, + DeviceBind, + DeviceRevoke, + Attest, + NamespaceClaim, + NamespaceDelegate, + NamespaceTransfer, + AccessGrant, + AccessRevoke, +} + +/// Access tier controlling rate limits and feature gates. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum AccessTier { + Anonymous, + Free, + Team, + Enterprise, +} + +impl AccessTier { + /// Returns the tier as a lowercase string matching the serde serialization. + pub fn as_str(&self) -> &'static str { + match self { + Self::Anonymous => "anonymous", + Self::Free => "free", + Self::Team => "team", + Self::Enterprise => "enterprise", + } + } +} + +/// The body of a log entry, specific to each [`EntryType`]. +/// +/// Designed so adding new entry types in future epics is a mechanical +/// addition of a new variant. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum EntryBody { + Register { + inception_event: Value, + }, + Rotate { + rotation_event: Value, + }, + Abandon { + reason: Option, + }, + OrgCreate { + display_name: String, + }, + OrgAddMember { + member_did: IdentityDID, + role: Role, + capabilities: Vec, + delegated_by: IdentityDID, + }, + OrgRevokeMember { + member_did: IdentityDID, + }, + DeviceBind { + device_did: DeviceDID, + public_key: Ed25519PublicKey, + }, + DeviceRevoke { + device_did: DeviceDID, + }, + Attest(Value), + NamespaceClaim { + ecosystem: String, + package_name: String, + }, + NamespaceDelegate { + ecosystem: String, + package_name: String, + delegate_did: IdentityDID, + }, + NamespaceTransfer { + ecosystem: String, + package_name: String, + new_owner_did: IdentityDID, + }, + AccessGrant { + subject_did: IdentityDID, + tier: AccessTier, + daily_limit: u32, + expires_at: DateTime, + }, + AccessRevoke { + subject_did: IdentityDID, + reason: Option, + }, +} + +/// The subset of an [`Entry`] that the actor signs. +/// +/// The sequencer assigns `sequence` and `timestamp` after — those fields +/// are authenticated by the Merkle tree, not the actor's signature. +/// +/// Usage: +/// ```ignore +/// let content = EntryContent { +/// entry_type: EntryType::Register, +/// body: EntryBody::Register { inception_event: serde_json::json!({}) }, +/// actor_did: CanonicalDid::parse("did:keri:E...")?, +/// }; +/// let canonical = content.canonicalize()?; +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct EntryContent { + pub entry_type: EntryType, + pub body: EntryBody, + pub actor_did: CanonicalDid, +} + +impl EntryContent { + /// Canonical JSON bytes for signing (via `json-canon`). 
+ pub fn canonicalize(&self) -> Result, crate::error::TransparencyError> { + let value = serde_json::to_value(self) + .map_err(|e| crate::error::TransparencyError::EntryError(e.to_string()))?; + json_canon::to_vec(&value) + .map_err(|e| crate::error::TransparencyError::EntryError(e.to_string())) + } +} + +/// A complete log entry with sequencer-assigned fields. +/// +/// Args: +/// * `sequence` — Monotonically increasing index assigned by the sequencer. +/// * `timestamp` — Wall-clock time assigned by the sequencer. +/// * `content` — The actor-signed payload. +/// * `actor_sig` — Ed25519 signature over the canonical `EntryContent`. +/// +/// Usage: +/// ```ignore +/// let entry = Entry { +/// sequence: 0, +/// timestamp: Utc::now(), +/// content: entry_content, +/// actor_sig: sig, +/// }; +/// let leaf_data = entry.leaf_data()?; +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct Entry { + pub sequence: u64, + pub timestamp: DateTime, + pub content: EntryContent, + pub actor_sig: Ed25519Signature, +} + +impl Entry { + /// Canonical JSON bytes of the full entry for Merkle leaf hashing. 
+ pub fn leaf_data(&self) -> Result, crate::error::TransparencyError> { + let value = serde_json::to_value(self) + .map_err(|e| crate::error::TransparencyError::EntryError(e.to_string()))?; + json_canon::to_vec(&value) + .map_err(|e| crate::error::TransparencyError::EntryError(e.to_string())) + } +} + +#[cfg(test)] +#[allow(clippy::disallowed_methods)] +mod tests { + use super::*; + use auths_verifier::Ed25519Signature; + + #[test] + fn entry_type_serializes_snake_case() { + let json = serde_json::to_string(&EntryType::Register).unwrap(); + assert_eq!(json, r#""register""#); + } + + #[test] + fn entry_content_canonicalize_deterministic() { + let content = EntryContent { + entry_type: EntryType::DeviceBind, + body: EntryBody::DeviceBind { + device_did: DeviceDID::new_unchecked( + "did:key:z6MkhaXgBZDvotDkL5257faiztiGiC2QtKLGpbnnEGta2doK", + ), + public_key: Ed25519PublicKey::from_bytes([0u8; 32]), + }, + actor_did: CanonicalDid::new_unchecked("did:keri:Eabc"), + }; + let a = content.canonicalize().unwrap(); + let b = content.canonicalize().unwrap(); + assert_eq!(a, b); + } + + #[test] + fn entry_json_roundtrip() { + let entry = Entry { + sequence: 42, + timestamp: chrono::DateTime::parse_from_rfc3339("2025-06-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc), + content: EntryContent { + entry_type: EntryType::OrgAddMember, + body: EntryBody::OrgAddMember { + member_did: IdentityDID::new_unchecked("did:keri:Emember"), + role: Role::Admin, + capabilities: vec![Capability::sign_commit()], + delegated_by: IdentityDID::new_unchecked("did:keri:Eadmin"), + }, + actor_did: CanonicalDid::new_unchecked("did:keri:Eadmin"), + }, + actor_sig: Ed25519Signature::empty(), + }; + let json = serde_json::to_string(&entry).unwrap(); + let back: Entry = serde_json::from_str(&json).unwrap(); + assert_eq!(entry.sequence, back.sequence); + } +} diff --git a/crates/auths-transparency/src/error.rs b/crates/auths-transparency/src/error.rs new file mode 100644 index 00000000..d614f54a --- 
/dev/null +++ b/crates/auths-transparency/src/error.rs @@ -0,0 +1,43 @@ +/// Errors from transparency log operations. +#[derive(Debug, Clone, thiserror::Error)] +#[allow(missing_docs)] +pub enum TransparencyError { + /// Invalid Merkle proof structure. + #[error("invalid proof: {0}")] + InvalidProof(String), + + /// Merkle root mismatch during verification. + #[error("root mismatch: expected {expected}, got {actual}")] + RootMismatch { expected: String, actual: String }, + + /// Invalid signed note format. + #[error("invalid note: {0}")] + InvalidNote(String), + + /// Signature verification failed on a checkpoint note. + #[error("invalid checkpoint signature")] + InvalidCheckpointSignature, + + /// Tile path encoding error. + #[error("invalid tile path: {0}")] + InvalidTilePath(String), + + /// Invalid log origin string. + #[error("invalid log origin: {0}")] + InvalidOrigin(String), + + /// Entry serialization or deserialization failure. + #[error("entry error: {0}")] + EntryError(String), + + /// Consistency proof verification failed. + #[error("consistency check failed: {0}")] + ConsistencyError(String), + + /// Storage backend error. + #[error("store error: {0}")] + StoreError(String), +} + +/// Convenience alias for transparency operations. +pub type Result = std::result::Result; diff --git a/crates/auths-transparency/src/fs_store.rs b/crates/auths-transparency/src/fs_store.rs new file mode 100644 index 00000000..229c9c10 --- /dev/null +++ b/crates/auths-transparency/src/fs_store.rs @@ -0,0 +1,178 @@ +use std::path::PathBuf; + +use crate::error::TransparencyError; +use crate::store::TileStore; + +/// Filesystem-backed tile store. +/// +/// Stores tiles and checkpoints as plain files under a base directory. +/// Full tiles (paths without `.p/`) are write-once: subsequent writes +/// are silently skipped. Partial tiles (paths containing `.p/`) and +/// the checkpoint file are always overwritable. 
+/// +/// Args: +/// * `base_path` — Root directory for all tile and checkpoint files. +/// +/// Usage: +/// ```ignore +/// let store = FsTileStore::new("/home/user/.auths/tlog".into()); +/// store.write_tile("tile/0/000", &data).await?; +/// ``` +pub struct FsTileStore { + base_path: PathBuf, +} + +impl FsTileStore { + /// Creates a new filesystem tile store rooted at the given path. + /// + /// Args: + /// * `base_path` — Directory where tiles and checkpoints are stored. + /// + /// Usage: + /// ```ignore + /// let store = FsTileStore::new(PathBuf::from("/tmp/tlog")); + /// ``` + pub fn new(base_path: PathBuf) -> Self { + Self { base_path } + } +} + +fn is_partial_tile(path: &str) -> bool { + path.contains(".p/") +} + +#[async_trait::async_trait] +impl TileStore for FsTileStore { + async fn read_tile(&self, path: &str) -> Result, TransparencyError> { + let full_path = self.base_path.join(path); + tokio::fs::read(&full_path) + .await + .map_err(|e| TransparencyError::StoreError(e.to_string())) + } + + async fn write_tile(&self, path: &str, data: &[u8]) -> Result<(), TransparencyError> { + let full_path = self.base_path.join(path); + + if !is_partial_tile(path) && full_path.exists() { + return Ok(()); + } + + if let Some(parent) = full_path.parent() { + tokio::fs::create_dir_all(parent) + .await + .map_err(|e| TransparencyError::StoreError(e.to_string()))?; + } + + tokio::fs::write(&full_path, data) + .await + .map_err(|e| TransparencyError::StoreError(e.to_string())) + } + + async fn read_checkpoint(&self) -> Result>, TransparencyError> { + let path = self.base_path.join("checkpoint"); + match tokio::fs::read(&path).await { + Ok(data) => Ok(Some(data)), + Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None), + Err(e) => Err(TransparencyError::StoreError(e.to_string())), + } + } + + async fn write_checkpoint(&self, data: &[u8]) -> Result<(), TransparencyError> { + let path = self.base_path.join("checkpoint"); + + if let Some(parent) = path.parent() { + 
tokio::fs::create_dir_all(parent) + .await + .map_err(|e| TransparencyError::StoreError(e.to_string()))?; + } + + tokio::fs::write(&path, data) + .await + .map_err(|e| TransparencyError::StoreError(e.to_string())) + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used, clippy::expect_used)] +mod tests { + use super::*; + + #[tokio::test] + async fn tile_write_read_roundtrip() { + let dir = tempfile::tempdir().unwrap(); + let store = FsTileStore::new(dir.path().to_path_buf()); + + let data = b"leaf data"; + store.write_tile("tile/0/000", data).await.unwrap(); + + let read_back = store.read_tile("tile/0/000").await.unwrap(); + assert_eq!(read_back, data); + } + + #[tokio::test] + async fn tile_creates_nested_directories() { + let dir = tempfile::tempdir().unwrap(); + let store = FsTileStore::new(dir.path().to_path_buf()); + + store.write_tile("tile/2/001/002", b"deep").await.unwrap(); + + let on_disk = dir.path().join("tile/2/001/002"); + assert!(on_disk.exists()); + } + + #[tokio::test] + async fn full_tile_is_immutable() { + let dir = tempfile::tempdir().unwrap(); + let store = FsTileStore::new(dir.path().to_path_buf()); + + store.write_tile("tile/0/000", b"first").await.unwrap(); + store.write_tile("tile/0/000", b"second").await.unwrap(); + + let data = store.read_tile("tile/0/000").await.unwrap(); + assert_eq!(data, b"first"); + } + + #[tokio::test] + async fn partial_tile_is_overwritable() { + let dir = tempfile::tempdir().unwrap(); + let store = FsTileStore::new(dir.path().to_path_buf()); + + store + .write_tile("tile/0/000.p/5", b"partial-v1") + .await + .unwrap(); + store + .write_tile("tile/0/000.p/5", b"partial-v2") + .await + .unwrap(); + + let data = store.read_tile("tile/0/000.p/5").await.unwrap(); + assert_eq!(data, b"partial-v2"); + } + + #[tokio::test] + async fn checkpoint_roundtrip() { + let dir = tempfile::tempdir().unwrap(); + let store = FsTileStore::new(dir.path().to_path_buf()); + + let result = store.read_checkpoint().await.unwrap(); + 
assert!(result.is_none()); + + store.write_checkpoint(b"cp-v1").await.unwrap(); + let data = store.read_checkpoint().await.unwrap(); + assert_eq!(data, Some(b"cp-v1".to_vec())); + + store.write_checkpoint(b"cp-v2").await.unwrap(); + let data = store.read_checkpoint().await.unwrap(); + assert_eq!(data, Some(b"cp-v2".to_vec())); + } + + #[tokio::test] + async fn read_nonexistent_tile_returns_error() { + let dir = tempfile::tempdir().unwrap(); + let store = FsTileStore::new(dir.path().to_path_buf()); + + let result = store.read_tile("tile/0/999").await; + assert!(result.is_err()); + } +} diff --git a/crates/auths-transparency/src/lib.rs b/crates/auths-transparency/src/lib.rs new file mode 100644 index 00000000..8f2cf804 --- /dev/null +++ b/crates/auths-transparency/src/lib.rs @@ -0,0 +1,132 @@ +#![warn(missing_docs)] +//! Append-only transparency log for Auths. +//! +//! Implements C2SP tlog-tiles Merkle tree types, proof verification, +//! signed note format, and tile storage abstractions. +//! +//! ## Feature Flags +//! +//! - `native` (default) — enables `TileStore` trait and async tile I/O +//! - Without features — WASM-safe core: types, Merkle math, proofs, notes + +/// Offline verification bundles. +pub mod bundle; +/// Log checkpoint types. +pub mod checkpoint; +/// Transparency log entry types. +pub mod entry; +/// Error types for transparency operations. +pub mod error; +/// RFC 6962 Merkle tree operations. +pub mod merkle; +/// C2SP signed note format. +pub mod note; +/// Inclusion and consistency proof types. +pub mod proof; +/// Tile storage trait (behind `native` feature). +pub mod store; +/// C2SP tlog-tiles path encoding. +pub mod tile; +/// Core newtypes: `MerkleHash`, `LogOrigin`. +pub mod types; +/// Offline bundle verification (requires `native` feature for Ed25519). +#[cfg(feature = "native")] +pub mod verify; +/// Witness protocol for split-view protection (requires `native` feature). 
+#[cfg(feature = "native")] +pub mod witness; + +// Re-export core types +pub use bundle::{ + BundleVerificationReport, CheckpointStatus, DelegationChainLink, DelegationStatus, + InclusionStatus, NamespaceStatus, OfflineBundle, SignatureStatus, WitnessStatus, +}; +pub use checkpoint::{Checkpoint, SignedCheckpoint, WitnessCosignature}; +pub use entry::{AccessTier, Entry, EntryBody, EntryContent, EntryType}; +pub use error::TransparencyError; +pub use merkle::{compute_root, hash_children, hash_leaf, verify_consistency, verify_inclusion}; +pub use note::{ + NoteSignature, build_signature_line, compute_key_id, parse_signed_note, serialize_signed_note, +}; +pub use proof::{ConsistencyProof, InclusionProof}; +pub use tile::{TILE_HEIGHT, TILE_WIDTH, leaf_tile, tile_count, tile_path}; +pub use types::{LogOrigin, MerkleHash}; + +#[cfg(feature = "native")] +mod fs_store; +#[cfg(feature = "native")] +pub use fs_store::FsTileStore; + +#[cfg(feature = "s3")] +/// S3-compatible tile store (Tigris, AWS S3, MinIO). +pub mod s3_store; +#[cfg(feature = "s3")] +pub use s3_store::S3TileStore; + +#[cfg(feature = "native")] +pub use store::TileStore; +#[cfg(feature = "native")] +pub use verify::verify_bundle; + +#[cfg(feature = "native")] +pub use witness::{ + ALG_COSIGNATURE_V1, CosignRequest, CosignResponse, DEFAULT_WITNESS_TIMEOUT, WitnessClient, + WitnessResult, build_cosignature_line, collect_witness_cosignatures, compute_witness_key_id, + cosignature_signed_message, extract_cosignatures, parse_cosignature, serialize_cosignature, +}; + +use auths_verifier::DeviceDID; + +/// Trust root for verifying transparency log checkpoints. +/// +/// Contains the log's signing public key and an optional witness list. +/// For Epic 1 (fn-72), this is hardcoded in the verifier binary. +/// TUF-based distribution comes in fn-76. +/// +/// Args: +/// * `log_public_key` — The Ed25519 public key of the log operator. +/// * `log_origin` — The log origin string for checkpoint verification. 
+/// * `witnesses` — List of trusted witness public keys and names. +/// +/// Usage: +/// ```ignore +/// let trust_root = TrustRoot { +/// log_public_key: Ed25519PublicKey::from_bytes(key_bytes), +/// log_origin: LogOrigin::new("auths.dev/log")?, +/// witnesses: vec![], +/// }; +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct TrustRoot { + /// The log operator's Ed25519 public key. + pub log_public_key: auths_verifier::Ed25519PublicKey, + /// The log origin string (e.g., "auths.dev/log"). + pub log_origin: LogOrigin, + /// Trusted witness keys. Empty for Epic 1. + pub witnesses: Vec, +} + +/// A trusted witness in the [`TrustRoot`]. +/// +/// Args: +/// * `witness_did` — The witness's device DID. +/// * `name` — Human-readable witness name. +/// * `public_key` — Witness Ed25519 public key. +/// +/// Usage: +/// ```ignore +/// let witness = TrustRootWitness { +/// witness_did: DeviceDID::parse("did:key:z6Mk...")?, +/// name: "witness-1".into(), +/// public_key: Ed25519PublicKey::from_bytes(key_bytes), +/// }; +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct TrustRootWitness { + /// The witness's device DID. + pub witness_did: DeviceDID, + /// Human-readable witness name. + pub name: String, + /// Witness Ed25519 public key. + pub public_key: auths_verifier::Ed25519PublicKey, +} diff --git a/crates/auths-transparency/src/merkle.rs b/crates/auths-transparency/src/merkle.rs new file mode 100644 index 00000000..26943eae --- /dev/null +++ b/crates/auths-transparency/src/merkle.rs @@ -0,0 +1,603 @@ +use sha2::{Digest, Sha256}; + +use crate::error::TransparencyError; +use crate::types::MerkleHash; + +/// RFC 6962 leaf domain separator. +const LEAF_PREFIX: u8 = 0x00; +/// RFC 6962 interior node domain separator. +const NODE_PREFIX: u8 = 0x01; + +/// Hash a leaf value with RFC 6962 domain separation: `SHA-256(0x00 || data)`. 
+/// +/// Args: +/// * `data` — Raw leaf bytes (typically canonical JSON of an entry). +/// +/// Usage: +/// ```ignore +/// let leaf = hash_leaf(b"entry data"); +/// ``` +pub fn hash_leaf(data: &[u8]) -> MerkleHash { + let mut hasher = Sha256::new(); + hasher.update([LEAF_PREFIX]); + hasher.update(data); + let digest = hasher.finalize(); + let mut out = [0u8; 32]; + out.copy_from_slice(&digest); + MerkleHash::from_bytes(out) +} + +/// Hash two child nodes with RFC 6962 domain separation: `SHA-256(0x01 || left || right)`. +/// +/// Args: +/// * `left` — Left child hash. +/// * `right` — Right child hash. +/// +/// Usage: +/// ```ignore +/// let parent = hash_children(&left_hash, &right_hash); +/// ``` +pub fn hash_children(left: &MerkleHash, right: &MerkleHash) -> MerkleHash { + let mut hasher = Sha256::new(); + hasher.update([NODE_PREFIX]); + hasher.update(left.as_bytes()); + hasher.update(right.as_bytes()); + let digest = hasher.finalize(); + let mut out = [0u8; 32]; + out.copy_from_slice(&digest); + MerkleHash::from_bytes(out) +} + +/// Verify a Merkle inclusion proof for a leaf at a given index in a tree of `size` leaves. +/// +/// Uses RFC 6962 proof verification: walk from the leaf hash up to the root, +/// combining with proof hashes left or right depending on the index bits. +/// +/// Args: +/// * `leaf_hash` — The hash of the leaf being proven. +/// * `index` — Zero-based index of the leaf. +/// * `size` — Total number of leaves in the tree. +/// * `proof` — Ordered list of sibling hashes from leaf to root. +/// * `root` — Expected Merkle root. 
+/// +/// Usage: +/// ```ignore +/// verify_inclusion(&leaf_hash, 5, 16, &proof_hashes, &expected_root)?; +/// ``` +pub fn verify_inclusion( + leaf_hash: &MerkleHash, + index: u64, + size: u64, + proof: &[MerkleHash], + root: &MerkleHash, +) -> Result<(), TransparencyError> { + if size == 0 { + return Err(TransparencyError::InvalidProof("tree size is 0".into())); + } + if index >= size { + return Err(TransparencyError::InvalidProof(format!( + "index {index} >= size {size}" + ))); + } + + let (computed, _) = root_from_inclusion_proof(leaf_hash, index, size, proof)?; + + if computed != *root { + return Err(TransparencyError::RootMismatch { + expected: root.to_string(), + actual: computed.to_string(), + }); + } + Ok(()) +} + +/// Compute the root hash from an inclusion proof. +/// +/// Returns `(root, proof_elements_consumed)`. +fn root_from_inclusion_proof( + leaf_hash: &MerkleHash, + index: u64, + size: u64, + proof: &[MerkleHash], +) -> Result<(MerkleHash, usize), TransparencyError> { + let expected_len = inclusion_proof_length(index, size); + if proof.len() != expected_len { + return Err(TransparencyError::InvalidProof(format!( + "expected {expected_len} proof elements, got {}", + proof.len() + ))); + } + + let mut hash = *leaf_hash; + let mut idx = index; + let mut level_size = size; + let mut pos = 0; + + while level_size > 1 { + if pos >= proof.len() { + return Err(TransparencyError::InvalidProof("proof too short".into())); + } + if idx & 1 == 1 || idx + 1 == level_size { + if idx & 1 == 1 { + hash = hash_children(&proof[pos], &hash); + pos += 1; + } + } else { + hash = hash_children(&hash, &proof[pos]); + pos += 1; + } + idx >>= 1; + level_size = (level_size + 1) >> 1; + } + + Ok((hash, pos)) +} + +/// Compute the expected number of proof elements for an inclusion proof. 
+fn inclusion_proof_length(index: u64, size: u64) -> usize { + if size <= 1 { + return 0; + } + let mut length = 0; + let mut idx = index; + let mut level_size = size; + while level_size > 1 { + if idx & 1 == 1 || idx + 1 < level_size { + length += 1; + } + idx >>= 1; + level_size = (level_size + 1) >> 1; + } + length +} + +/// Verify a consistency proof between an old tree of `old_size` and a new tree of `new_size`. +/// +/// Ensures the new tree is an append-only extension of the old tree. +/// +/// Args: +/// * `old_size` — Number of leaves in the older tree. +/// * `new_size` — Number of leaves in the newer tree. +/// * `proof` — Ordered consistency proof hashes. +/// * `old_root` — Root of the older tree. +/// * `new_root` — Root of the newer tree. +/// +/// Usage: +/// ```ignore +/// verify_consistency(8, 16, &proof, &old_root, &new_root)?; +/// ``` +pub fn verify_consistency( + old_size: u64, + new_size: u64, + proof: &[MerkleHash], + old_root: &MerkleHash, + new_root: &MerkleHash, +) -> Result<(), TransparencyError> { + if old_size == 0 { + if proof.is_empty() { + return Ok(()); + } + return Err(TransparencyError::ConsistencyError( + "non-empty proof for empty old tree".into(), + )); + } + if old_size > new_size { + return Err(TransparencyError::ConsistencyError(format!( + "old size {old_size} > new size {new_size}" + ))); + } + if old_size == new_size { + if !proof.is_empty() { + return Err(TransparencyError::ConsistencyError( + "non-empty proof for equal sizes".into(), + )); + } + if old_root != new_root { + return Err(TransparencyError::RootMismatch { + expected: old_root.to_string(), + actual: new_root.to_string(), + }); + } + return Ok(()); + } + + // Reconstruct new root from the consistency proof while implicitly verifying old root. + // For power-of-2 old_size, old_root is used directly as the starting hash. + // For non-power-of-2, proof elements reconstruct old_root via the bit-walking algorithm. 
+ let new_computed = new_root_from_consistency_proof(old_size, new_size, proof, old_root)?; + + if new_computed != *new_root { + return Err(TransparencyError::RootMismatch { + expected: new_root.to_string(), + actual: new_computed.to_string(), + }); + } + Ok(()) +} + +/// Reconstruct new root from an RFC 6962 SUBPROOF-format consistency proof. +/// +/// The proof is produced by the SUBPROOF(m, D[0:n], b=true) algorithm from +/// RFC 6962 Section 2.1.2. Verification walks the bit pattern of (old_size - 1) +/// to reconstruct both old_root (for validation) and new_root. +/// +/// Phase 1 (decomposition): each bit of (old_size - 1) determines whether a +/// proof element is a left sibling (set bit → combines into both roots) or +/// a right sibling (unset bit → combines into new root only). +/// +/// Phase 2 (extension): remaining proof elements extend the accumulator to +/// the new root. +fn new_root_from_consistency_proof( + old_size: u64, + new_size: u64, + proof: &[MerkleHash], + old_root: &MerkleHash, +) -> Result { + let _ = new_size; // used only in debug assertions via caller + + let (mut fn_hash, mut fr_hash, start) = if old_size.is_power_of_two() { + // Old tree is a single complete subtree — no decomposition needed + (*old_root, *old_root, 0) + } else { + if proof.is_empty() { + return Err(TransparencyError::ConsistencyError( + "proof too short".into(), + )); + } + (proof[0], proof[0], 1) + }; + + let mut pos = start; + + // Phase 1: walk bits of (old_size - 1) to decompose/reconstruct old root + if !old_size.is_power_of_two() { + let mut bit = old_size - 1; + while bit > 0 { + if pos >= proof.len() { + return Err(TransparencyError::ConsistencyError( + "proof too short during decomposition".into(), + )); + } + if bit & 1 != 0 { + fn_hash = hash_children(&proof[pos], &fn_hash); + fr_hash = hash_children(&proof[pos], &fr_hash); + } else { + fr_hash = hash_children(&fr_hash, &proof[pos]); + } + pos += 1; + bit >>= 1; + } + + if fn_hash != *old_root { + return 
Err(TransparencyError::RootMismatch { + expected: old_root.to_string(), + actual: fn_hash.to_string(), + }); + } + } + + // Phase 2: extension elements build up to the new root + while pos < proof.len() { + fr_hash = hash_children(&fr_hash, &proof[pos]); + pos += 1; + } + + Ok(fr_hash) +} + +/// Compute the Merkle root of a list of leaf hashes per RFC 6962 Section 2.1. +/// +/// Recursively splits at the largest power of 2 less than `n`: +/// `MTH(D[0:n]) = SHA-256(0x01 || MTH(D[0:k]) || MTH(D[k:n]))` where `k = 2^(floor(log2(n-1)))`. +/// +/// Args: +/// * `leaves` — Slice of leaf hashes. Empty input returns `MerkleHash::EMPTY`. +/// +/// Usage: +/// ```ignore +/// let root = compute_root(&leaf_hashes); +/// ``` +pub fn compute_root(leaves: &[MerkleHash]) -> MerkleHash { + match leaves.len() { + 0 => MerkleHash::EMPTY, + 1 => leaves[0], + n => { + let k = largest_power_of_2_lt(n as u64) as usize; + let left = compute_root(&leaves[..k]); + let right = compute_root(&leaves[k..]); + hash_children(&left, &right) + } + } +} + +/// Largest power of 2 strictly less than `n` (for n > 1). 
+fn largest_power_of_2_lt(n: u64) -> u64 { + debug_assert!(n > 1); + if n.is_power_of_two() { + n / 2 + } else { + 1u64 << (63 - n.leading_zeros()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn hash_leaf_domain_separation() { + let data = b"test data"; + let h = hash_leaf(data); + + // Manually compute SHA-256(0x00 || "test data") + let mut hasher = Sha256::new(); + hasher.update([0x00]); + hasher.update(data); + let expected = hasher.finalize(); + + assert_eq!(h.as_bytes(), expected.as_slice()); + } + + #[test] + fn hash_children_domain_separation() { + let left = MerkleHash::from_bytes([0x11; 32]); + let right = MerkleHash::from_bytes([0x22; 32]); + let h = hash_children(&left, &right); + + let mut hasher = Sha256::new(); + hasher.update([0x01]); + hasher.update([0x11; 32]); + hasher.update([0x22; 32]); + let expected = hasher.finalize(); + + assert_eq!(h.as_bytes(), expected.as_slice()); + } + + #[test] + fn leaf_and_children_produce_different_hashes() { + let data = [0xab; 64]; + let leaf = hash_leaf(&data); + + let left = MerkleHash::from_bytes(data[..32].try_into().unwrap()); + let right = MerkleHash::from_bytes(data[32..].try_into().unwrap()); + let node = hash_children(&left, &right); + + assert_ne!(leaf, node); + } + + #[test] + fn compute_root_single_leaf() { + let h = MerkleHash::from_bytes([0x42; 32]); + assert_eq!(compute_root(&[h]), h); + } + + #[test] + fn compute_root_empty() { + assert_eq!(compute_root(&[]), MerkleHash::EMPTY); + } + + #[test] + fn compute_root_two_leaves() { + let a = hash_leaf(b"a"); + let b = hash_leaf(b"b"); + let root = compute_root(&[a, b]); + assert_eq!(root, hash_children(&a, &b)); + } + + #[test] + fn inclusion_proof_single_leaf() { + let leaf = hash_leaf(b"only leaf"); + let root = leaf; + verify_inclusion(&leaf, 0, 1, &[], &root).unwrap(); + } + + #[test] + fn inclusion_proof_two_leaves() { + let a = hash_leaf(b"a"); + let b = hash_leaf(b"b"); + let root = hash_children(&a, &b); + + // Prove leaf 0 
(a) with sibling b + verify_inclusion(&a, 0, 2, &[b], &root).unwrap(); + // Prove leaf 1 (b) with sibling a + verify_inclusion(&b, 1, 2, &[a], &root).unwrap(); + } + + #[test] + fn inclusion_proof_four_leaves() { + let leaves: Vec = (0..4u8).map(|i| hash_leaf(&[i])).collect(); + let root = compute_root(&leaves); + + // Prove leaf 0: needs leaf 1 as sibling, then hash(leaf2, leaf3) as uncle + let ab = hash_children(&leaves[0], &leaves[1]); + let cd = hash_children(&leaves[2], &leaves[3]); + let _ = hash_children(&ab, &cd); + + verify_inclusion(&leaves[0], 0, 4, &[leaves[1], cd], &root).unwrap(); + verify_inclusion(&leaves[1], 1, 4, &[leaves[0], cd], &root).unwrap(); + verify_inclusion(&leaves[2], 2, 4, &[leaves[3], ab], &root).unwrap(); + verify_inclusion(&leaves[3], 3, 4, &[leaves[2], ab], &root).unwrap(); + } + + #[test] + fn inclusion_proof_rejects_wrong_root() { + let a = hash_leaf(b"a"); + let b = hash_leaf(b"b"); + let _root = hash_children(&a, &b); + let wrong = MerkleHash::from_bytes([0xff; 32]); + + let err = verify_inclusion(&a, 0, 2, &[b], &wrong); + assert!(err.is_err()); + } + + #[test] + fn inclusion_proof_three_leaves() { + let leaves: Vec = (0..3u8).map(|i| hash_leaf(&[i])).collect(); + let root = compute_root(&leaves); + + let ab = hash_children(&leaves[0], &leaves[1]); + + // Leaf 0: sibling = leaf[1], then uncle = leaf[2] + verify_inclusion(&leaves[0], 0, 3, &[leaves[1], leaves[2]], &root).unwrap(); + // Leaf 1: sibling = leaf[0], then uncle = leaf[2] + verify_inclusion(&leaves[1], 1, 3, &[leaves[0], leaves[2]], &root).unwrap(); + // Leaf 2: sibling = ab (promoted, no right sibling at level 0) + verify_inclusion(&leaves[2], 2, 3, &[ab], &root).unwrap(); + } + + #[test] + fn inclusion_proof_five_leaves() { + let leaves: Vec = (0..5u8).map(|i| hash_leaf(&[i])).collect(); + let root = compute_root(&leaves); + + let h01 = hash_children(&leaves[0], &leaves[1]); + let h23 = hash_children(&leaves[2], &leaves[3]); + let h0123 = hash_children(&h01, &h23); 
+ + // Leaf 4: it's the last leaf (unpaired), needs h0123 as sibling + verify_inclusion(&leaves[4], 4, 5, &[h0123], &root).unwrap(); + // Leaf 0: sibling leaf[1], uncle h23, then uncle leaf[4] + verify_inclusion(&leaves[0], 0, 5, &[leaves[1], h23, leaves[4]], &root).unwrap(); + } + + #[test] + fn inclusion_proof_seven_leaves() { + let leaves: Vec = (0..7u8).map(|i| hash_leaf(&[i])).collect(); + let root = compute_root(&leaves); + + let h01 = hash_children(&leaves[0], &leaves[1]); + let h23 = hash_children(&leaves[2], &leaves[3]); + let h45 = hash_children(&leaves[4], &leaves[5]); + let h0123 = hash_children(&h01, &h23); + let h456 = hash_children(&h45, &leaves[6]); + + // Leaf 6: unpaired at level 0, sibling is h45, then uncle is h0123 + verify_inclusion(&leaves[6], 6, 7, &[h45, h0123], &root).unwrap(); + // Leaf 0: sibling leaf[1], uncle h23, then uncle h456 + verify_inclusion(&leaves[0], 0, 7, &[leaves[1], h23, h456], &root).unwrap(); + } + + #[test] + fn inclusion_proof_rejects_index_out_of_range() { + let a = hash_leaf(b"a"); + let root = a; + let err = verify_inclusion(&a, 1, 1, &[], &root); + assert!(err.is_err()); + } + + #[test] + fn consistency_proof_same_size() { + let root = MerkleHash::from_bytes([0x42; 32]); + verify_consistency(5, 5, &[], &root, &root).unwrap(); + } + + #[test] + fn consistency_proof_empty_old() { + let new_root = MerkleHash::from_bytes([0x42; 32]); + let old_root = MerkleHash::EMPTY; + verify_consistency(0, 5, &[], &old_root, &new_root).unwrap(); + } + + #[test] + fn consistency_proof_2_to_4() { + let leaves: Vec = (0..4u8).map(|i| hash_leaf(&[i])).collect(); + let old_root = compute_root(&leaves[..2]); + let new_root = compute_root(&leaves); + let proof = build_consistency_proof(&leaves[..2], &leaves); + verify_consistency(2, 4, &proof, &old_root, &new_root).unwrap(); + } + + #[test] + fn consistency_proof_3_to_5() { + let leaves: Vec = (0..5u8).map(|i| hash_leaf(&[i])).collect(); + let old_root = compute_root(&leaves[..3]); + let 
new_root = compute_root(&leaves); + let proof = build_consistency_proof(&leaves[..3], &leaves); + verify_consistency(3, 5, &proof, &old_root, &new_root).unwrap(); + } + + #[test] + fn consistency_proof_4_to_8() { + let leaves: Vec = (0..8u8).map(|i| hash_leaf(&[i])).collect(); + let old_root = compute_root(&leaves[..4]); + let new_root = compute_root(&leaves); + let proof = build_consistency_proof(&leaves[..4], &leaves); + verify_consistency(4, 8, &proof, &old_root, &new_root).unwrap(); + } + + #[test] + fn consistency_proof_7_to_15() { + let leaves: Vec = (0..15u8).map(|i| hash_leaf(&[i])).collect(); + let old_root = compute_root(&leaves[..7]); + let new_root = compute_root(&leaves); + let proof = build_consistency_proof(&leaves[..7], &leaves); + verify_consistency(7, 15, &proof, &old_root, &new_root).unwrap(); + } + + #[test] + fn consistency_proof_1_to_4() { + let leaves: Vec = (0..4u8).map(|i| hash_leaf(&[i])).collect(); + let old_root = compute_root(&leaves[..1]); + let new_root = compute_root(&leaves); + let proof = build_consistency_proof(&leaves[..1], &leaves); + verify_consistency(1, 4, &proof, &old_root, &new_root).unwrap(); + } + + #[test] + fn consistency_proof_rejects_wrong_old_root() { + let leaves: Vec = (0..4u8).map(|i| hash_leaf(&[i])).collect(); + let wrong_old = MerkleHash::from_bytes([0xff; 32]); + let new_root = compute_root(&leaves); + let proof = build_consistency_proof(&leaves[..3], &leaves); + assert!(verify_consistency(3, 4, &proof, &wrong_old, &new_root).is_err()); + } + + #[test] + fn consistency_proof_rejects_wrong_new_root() { + let leaves: Vec = (0..4u8).map(|i| hash_leaf(&[i])).collect(); + let old_root = compute_root(&leaves[..3]); + let wrong_new = MerkleHash::from_bytes([0xff; 32]); + let proof = build_consistency_proof(&leaves[..3], &leaves); + assert!(verify_consistency(3, 4, &proof, &old_root, &wrong_new).is_err()); + } + + /// Build a consistency proof using RFC 6962 SUBPROOF decomposition. Test-only. 
+ fn build_consistency_proof( + old_leaves: &[MerkleHash], + new_leaves: &[MerkleHash], + ) -> Vec { + assert!(old_leaves.len() <= new_leaves.len()); + subproof(old_leaves.len() as u64, new_leaves, true) + } + + /// RFC 6962 Section 2.1.2 SUBPROOF(m, D[0:n], b). + fn subproof(m: u64, leaves: &[MerkleHash], b: bool) -> Vec { + let n = leaves.len() as u64; + if m == n { + if b { + return vec![]; + } + return vec![compute_root(leaves)]; + } + let k = largest_power_of_2_lt(n) as usize; + if m <= k as u64 { + let mut proof = subproof(m, &leaves[..k], b); + proof.push(compute_root(&leaves[k..])); + proof + } else { + let mut proof = subproof(m - k as u64, &leaves[k..], false); + proof.push(compute_root(&leaves[..k])); + proof + } + } + + #[test] + fn largest_pow2_lt() { + assert_eq!(largest_power_of_2_lt(2), 1); + assert_eq!(largest_power_of_2_lt(3), 2); + assert_eq!(largest_power_of_2_lt(4), 2); + assert_eq!(largest_power_of_2_lt(5), 4); + assert_eq!(largest_power_of_2_lt(8), 4); + assert_eq!(largest_power_of_2_lt(9), 8); + } +} diff --git a/crates/auths-transparency/src/note.rs b/crates/auths-transparency/src/note.rs new file mode 100644 index 00000000..b8763a32 --- /dev/null +++ b/crates/auths-transparency/src/note.rs @@ -0,0 +1,232 @@ +use base64::{Engine, engine::general_purpose::STANDARD}; + +use crate::error::TransparencyError; +use crate::types::MerkleHash; + +/// Ed25519 algorithm byte per C2SP signed-note spec. +const ALG_ED25519: u8 = 0x01; + +/// Compute a C2SP key ID from a key name and Ed25519 public key. +/// +/// `key_id = SHA-256(key_name + "\n" + 0x01 + pubkey)[0..4]` +/// +/// Args: +/// * `key_name` — The log's key name (e.g., "auths.dev/log"). +/// * `pubkey` — 32-byte Ed25519 public key. 
+/// +/// Usage: +/// ```ignore +/// let key_id = compute_key_id("auths.dev/log", &pubkey_bytes); +/// ``` +pub fn compute_key_id(key_name: &str, pubkey: &[u8; 32]) -> [u8; 4] { + let mut data = Vec::with_capacity(key_name.len() + 1 + 1 + 32); + data.extend_from_slice(key_name.as_bytes()); + data.push(b'\n'); + data.push(ALG_ED25519); + data.extend_from_slice(pubkey); + + let hash = MerkleHash::sha256(&data); + let mut id = [0u8; 4]; + id.copy_from_slice(&hash.as_bytes()[..4]); + id +} + +/// Build a C2SP signature line: `— `. +/// +/// Args: +/// * `key_name` — The signer's key name. +/// * `key_id` — 4-byte key ID from [`compute_key_id`]. +/// * `signature` — 64-byte Ed25519 signature. +/// +/// Usage: +/// ```ignore +/// let line = build_signature_line("auths.dev/log", &key_id, &sig_bytes); +/// ``` +pub fn build_signature_line(key_name: &str, key_id: &[u8; 4], signature: &[u8; 64]) -> String { + let mut sig_data = Vec::with_capacity(1 + 4 + 64); + sig_data.push(ALG_ED25519); + sig_data.extend_from_slice(key_id); + sig_data.extend_from_slice(signature); + let encoded = STANDARD.encode(&sig_data); + format!("\u{2014} {key_name} {encoded}\n") +} + +/// Parse a C2SP signed note into its body and signature components. +/// +/// A signed note has the format: +/// ```text +/// \n +/// \n +/// — \n +/// ``` +/// +/// Args: +/// * `note` — The full signed note text. 
+/// +/// Usage: +/// ```ignore +/// let (body, sigs) = parse_signed_note(note_text)?; +/// ``` +pub fn parse_signed_note(note: &str) -> Result<(String, Vec), TransparencyError> { + // Signature lines start with em-dash (U+2014) + let sig_marker = "\u{2014} "; + + let mut body_end = None; + let mut sig_start = None; + + for (i, line) in note.lines().enumerate() { + if line.starts_with(sig_marker) && sig_start.is_none() { + sig_start = Some(i); + if body_end.is_none() { + body_end = Some(i); + } + } + } + + let lines: Vec<&str> = note.lines().collect(); + + let body_end_idx = body_end + .ok_or_else(|| TransparencyError::InvalidNote("no signature lines found".into()))?; + + // Body is everything before the first signature line, trimming trailing empty line + let mut body_lines = &lines[..body_end_idx]; + if body_lines.last() == Some(&"") { + body_lines = &body_lines[..body_lines.len() - 1]; + } + let body = body_lines.join("\n") + "\n"; + + let mut signatures = Vec::new(); + for line in &lines[body_end_idx..] { + if let Some(rest) = line.strip_prefix(sig_marker) { + let sig = parse_signature_line(rest)?; + signatures.push(sig); + } + } + + Ok((body, signatures)) +} + +/// A parsed signature from a signed note. +#[derive(Debug, Clone)] +pub struct NoteSignature { + /// The signer's key name. + pub key_name: String, + /// Algorithm byte (0x01 for Ed25519). + pub algorithm: u8, + /// 4-byte key ID. + pub key_id: [u8; 4], + /// Raw signature bytes. 
+ pub signature: Vec, +} + +fn parse_signature_line(line: &str) -> Result { + let space_idx = line + .find(' ') + .ok_or_else(|| TransparencyError::InvalidNote("malformed signature line".into()))?; + + let key_name = &line[..space_idx]; + let b64 = &line[space_idx + 1..]; + + let raw = STANDARD + .decode(b64.trim()) + .map_err(|e| TransparencyError::InvalidNote(format!("base64 decode: {e}")))?; + + if raw.len() < 5 { + return Err(TransparencyError::InvalidNote( + "signature data too short".into(), + )); + } + + let algorithm = raw[0]; + let mut key_id = [0u8; 4]; + key_id.copy_from_slice(&raw[1..5]); + let signature = raw[5..].to_vec(); + + Ok(NoteSignature { + key_name: key_name.to_string(), + algorithm, + key_id, + signature, + }) +} + +/// Serialize a signed note from body text and signatures. +/// +/// Args: +/// * `body` — The note body (must end with `\n`). +/// * `signatures` — Formatted signature lines from [`build_signature_line`]. +/// +/// Usage: +/// ```ignore +/// let note = serialize_signed_note(&body, &[sig_line]); +/// ``` +pub fn serialize_signed_note(body: &str, signatures: &[String]) -> String { + let mut out = + String::with_capacity(body.len() + signatures.iter().map(|s| s.len()).sum::() + 1); + out.push_str(body); + out.push('\n'); + for sig in signatures { + out.push_str(sig); + } + out +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn key_id_computation() { + let pubkey = [0xab; 32]; + let key_id = compute_key_id("auths.dev/log", &pubkey); + assert_eq!(key_id.len(), 4); + + // Deterministic + let key_id2 = compute_key_id("auths.dev/log", &pubkey); + assert_eq!(key_id, key_id2); + + // Different key name → different ID + let key_id3 = compute_key_id("other.dev/log", &pubkey); + assert_ne!(key_id, key_id3); + } + + #[test] + fn signature_line_format() { + let key_id = [0x01, 0x02, 0x03, 0x04]; + let sig = [0xaa; 64]; + let line = build_signature_line("auths.dev/log", &key_id, &sig); + + assert!(line.starts_with("\u{2014} 
auths.dev/log ")); + assert!(line.ends_with('\n')); + + // Verify the base64 decodes to alg + key_id + sig + let parts: Vec<&str> = line.trim().splitn(3, ' ').collect(); + let decoded = STANDARD.decode(parts[2]).unwrap(); + assert_eq!(decoded[0], ALG_ED25519); + assert_eq!(&decoded[1..5], &key_id); + assert_eq!(&decoded[5..], &sig); + } + + #[test] + fn signed_note_roundtrip() { + let body = "auths.dev/log\n42\nq6urq6urq6urq6urq6urq6urq6urq6urq6urq6urq6s=\n"; + let key_id = [0x01, 0x02, 0x03, 0x04]; + let sig = [0xcc; 64]; + let sig_line = build_signature_line("auths.dev/log", &key_id, &sig); + let note = serialize_signed_note(body, &[sig_line]); + + let (parsed_body, parsed_sigs) = parse_signed_note(¬e).unwrap(); + assert_eq!(parsed_body, body); + assert_eq!(parsed_sigs.len(), 1); + assert_eq!(parsed_sigs[0].key_name, "auths.dev/log"); + assert_eq!(parsed_sigs[0].algorithm, ALG_ED25519); + assert_eq!(parsed_sigs[0].key_id, key_id); + assert_eq!(parsed_sigs[0].signature, sig.to_vec()); + } + + #[test] + fn parse_note_rejects_no_signatures() { + let note = "just body\nno sigs\n"; + assert!(parse_signed_note(note).is_err()); + } +} diff --git a/crates/auths-transparency/src/proof.rs b/crates/auths-transparency/src/proof.rs new file mode 100644 index 00000000..325f3fe8 --- /dev/null +++ b/crates/auths-transparency/src/proof.rs @@ -0,0 +1,125 @@ +use serde::{Deserialize, Serialize}; + +use crate::types::MerkleHash; + +/// Merkle inclusion proof for a single entry in the log. +/// +/// Proves that a leaf at `index` is included in the tree of `size` leaves +/// with the given `root`. +/// +/// Args: +/// * `index` — Zero-based leaf index. +/// * `size` — Tree size (number of leaves) when the proof was generated. +/// * `root` — Merkle root at tree size `size`. +/// * `hashes` — Sibling hashes from leaf to root. +/// +/// Usage: +/// ```ignore +/// let proof = InclusionProof { index: 5, size: 16, root, hashes: vec![...] 
}; +/// proof.verify(&leaf_hash)?; +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct InclusionProof { + pub index: u64, + pub size: u64, + pub root: MerkleHash, + pub hashes: Vec, +} + +impl InclusionProof { + /// Verify that `leaf_hash` is included in the tree. + pub fn verify(&self, leaf_hash: &MerkleHash) -> Result<(), crate::error::TransparencyError> { + crate::merkle::verify_inclusion(leaf_hash, self.index, self.size, &self.hashes, &self.root) + } +} + +/// Merkle consistency proof between two tree sizes. +/// +/// Proves that the tree at `old_size` is a prefix of the tree at `new_size`. +/// +/// Args: +/// * `old_size` — Earlier tree size. +/// * `new_size` — Later tree size. +/// * `old_root` — Root at `old_size`. +/// * `new_root` — Root at `new_size`. +/// * `hashes` — Consistency proof hashes. +/// +/// Usage: +/// ```ignore +/// let proof = ConsistencyProof { old_size: 8, new_size: 16, .. }; +/// proof.verify()?; +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct ConsistencyProof { + pub old_size: u64, + pub new_size: u64, + pub old_root: MerkleHash, + pub new_root: MerkleHash, + pub hashes: Vec, +} + +impl ConsistencyProof { + /// Verify that the old tree is a prefix of the new tree. 
+ pub fn verify(&self) -> Result<(), crate::error::TransparencyError> { + crate::merkle::verify_consistency( + self.old_size, + self.new_size, + &self.hashes, + &self.old_root, + &self.new_root, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::merkle::{hash_children, hash_leaf}; + + #[test] + fn inclusion_proof_verify() { + let a = hash_leaf(b"a"); + let b = hash_leaf(b"b"); + let root = hash_children(&a, &b); + + let proof = InclusionProof { + index: 0, + size: 2, + root, + hashes: vec![b], + }; + proof.verify(&a).unwrap(); + } + + #[test] + fn inclusion_proof_json_roundtrip() { + let proof = InclusionProof { + index: 3, + size: 8, + root: MerkleHash::from_bytes([0xaa; 32]), + hashes: vec![ + MerkleHash::from_bytes([0xbb; 32]), + MerkleHash::from_bytes([0xcc; 32]), + ], + }; + let json = serde_json::to_string(&proof).unwrap(); + let back: InclusionProof = serde_json::from_str(&json).unwrap(); + assert_eq!(proof, back); + } + + #[test] + fn consistency_proof_json_roundtrip() { + let proof = ConsistencyProof { + old_size: 4, + new_size: 8, + old_root: MerkleHash::from_bytes([0x11; 32]), + new_root: MerkleHash::from_bytes([0x22; 32]), + hashes: vec![MerkleHash::from_bytes([0x33; 32])], + }; + let json = serde_json::to_string(&proof).unwrap(); + let back: ConsistencyProof = serde_json::from_str(&json).unwrap(); + assert_eq!(proof, back); + } +} diff --git a/crates/auths-transparency/src/s3_store.rs b/crates/auths-transparency/src/s3_store.rs new file mode 100644 index 00000000..d2eec7f5 --- /dev/null +++ b/crates/auths-transparency/src/s3_store.rs @@ -0,0 +1,176 @@ +use aws_sdk_s3::Client; +use aws_sdk_s3::primitives::ByteStream; + +use crate::error::TransparencyError; +use crate::store::TileStore; + +const CACHE_IMMUTABLE: &str = "immutable, max-age=31536000"; +const CACHE_SHORT: &str = "public, max-age=10"; + +/// S3-compatible tile store for transparency log persistence. 
+/// +/// Stores tiles and checkpoints as objects in an S3 bucket (including +/// Tigris-compatible endpoints). Cache-Control headers are applied per +/// the C2SP tlog-tiles spec: +/// +/// - Full tiles and entry bundles: `immutable, max-age=31536000` +/// - Partial tiles: `public, max-age=10` +/// - Checkpoint: `public, max-age=10` +/// +/// Full tiles (paths without `.p/`) are write-once: if the object +/// already exists, subsequent writes are silently skipped. Partial +/// tiles and the checkpoint are always overwritten. +/// +/// Args: +/// * `client` — An `aws_sdk_s3::Client` configured for the target endpoint. +/// * `bucket` — The S3 bucket name. +/// * `prefix` — An optional key prefix prepended to all object keys. +/// +/// Usage: +/// ```ignore +/// let config = aws_config::load_defaults(BehaviorVersion::latest()).await; +/// let client = aws_sdk_s3::Client::new(&config); +/// let store = S3TileStore::new(client, "my-tlog-bucket".into(), Some("v1/".into())); +/// store.write_tile("tile/0/000", &data).await?; +/// ``` +pub struct S3TileStore { + client: Client, + bucket: String, + prefix: String, +} + +impl S3TileStore { + /// Creates a new S3 tile store. + /// + /// Args: + /// * `client` — Pre-configured S3 client (handles region, endpoint, credentials). + /// * `bucket` — Target bucket name. + /// * `prefix` — Optional key prefix (e.g., `"v1/"`). Pass `None` for no prefix. 
+ /// + /// Usage: + /// ```ignore + /// let store = S3TileStore::new(client, "tlog-bucket".into(), None); + /// ``` + pub fn new(client: Client, bucket: String, prefix: Option) -> Self { + Self { + client, + bucket, + prefix: prefix.unwrap_or_default(), + } + } + + fn object_key(&self, path: &str) -> String { + format!("{}{}", self.prefix, path) + } + + async fn object_exists(&self, key: &str) -> Result { + match self + .client + .head_object() + .bucket(&self.bucket) + .key(key) + .send() + .await + { + Ok(_) => Ok(true), + Err(err) => { + let service_err = err.into_service_error(); + if service_err.is_not_found() { + Ok(false) + } else { + Err(TransparencyError::StoreError(service_err.to_string())) + } + } + } + } + + async fn put_object( + &self, + key: &str, + data: &[u8], + cache_control: &str, + ) -> Result<(), TransparencyError> { + self.client + .put_object() + .bucket(&self.bucket) + .key(key) + .body(ByteStream::from(data.to_vec())) + .cache_control(cache_control) + .content_type("application/octet-stream") + .send() + .await + .map_err(|e| TransparencyError::StoreError(e.into_service_error().to_string()))?; + Ok(()) + } + + async fn get_object(&self, key: &str) -> Result>, TransparencyError> { + match self + .client + .get_object() + .bucket(&self.bucket) + .key(key) + .send() + .await + { + Ok(output) => { + let bytes = output + .body + .collect() + .await + .map_err(|e| TransparencyError::StoreError(e.to_string()))?; + Ok(Some(bytes.to_vec())) + } + Err(err) => { + let service_err = err.into_service_error(); + if service_err.is_no_such_key() { + Ok(None) + } else { + Err(TransparencyError::StoreError(service_err.to_string())) + } + } + } + } +} + +fn is_partial_tile(path: &str) -> bool { + path.contains(".p/") +} + +fn cache_control_for_tile(path: &str) -> &'static str { + if is_partial_tile(path) { + CACHE_SHORT + } else { + CACHE_IMMUTABLE + } +} + +#[async_trait::async_trait] +impl TileStore for S3TileStore { + async fn read_tile(&self, path: &str) 
-> Result, TransparencyError> { + let key = self.object_key(path); + self.get_object(&key) + .await? + .ok_or_else(|| TransparencyError::StoreError(format!("tile not found: {path}"))) + } + + async fn write_tile(&self, path: &str, data: &[u8]) -> Result<(), TransparencyError> { + let key = self.object_key(path); + + if !is_partial_tile(path) && self.object_exists(&key).await? { + return Ok(()); + } + + self.put_object(&key, data, cache_control_for_tile(path)) + .await + } + + async fn read_checkpoint(&self) -> Result>, TransparencyError> { + let key = self.object_key("checkpoint"); + self.get_object(&key).await + } + + async fn write_checkpoint(&self, data: &[u8]) -> Result<(), TransparencyError> { + let key = self.object_key("checkpoint"); + self.put_object(&key, data, CACHE_SHORT).await + } +} diff --git a/crates/auths-transparency/src/store.rs b/crates/auths-transparency/src/store.rs new file mode 100644 index 00000000..2e8cc0e5 --- /dev/null +++ b/crates/auths-transparency/src/store.rs @@ -0,0 +1,30 @@ +#[cfg(feature = "native")] +use crate::error::TransparencyError; + +/// Async tile storage backend. +/// +/// Implementations provide reading and writing of tile data and +/// checkpoint blobs. The filesystem implementation is in [`crate::FsTileStore`] +/// (available with the `native` feature). +/// +/// Usage: +/// ```ignore +/// async fn read_tile(store: &dyn TileStore) { +/// let data = store.read_tile("tile/0/000").await?; +/// } +/// ``` +#[cfg(feature = "native")] +#[async_trait::async_trait] +pub trait TileStore: Send + Sync { + /// Read a tile by its C2SP path (e.g., "tile/0/000"). + async fn read_tile(&self, path: &str) -> Result, TransparencyError>; + + /// Write a tile at the given C2SP path. + async fn write_tile(&self, path: &str, data: &[u8]) -> Result<(), TransparencyError>; + + /// Read the latest signed checkpoint. + async fn read_checkpoint(&self) -> Result>, TransparencyError>; + + /// Write a signed checkpoint. 
+ async fn write_checkpoint(&self, data: &[u8]) -> Result<(), TransparencyError>; +} diff --git a/crates/auths-transparency/src/tile.rs b/crates/auths-transparency/src/tile.rs new file mode 100644 index 00000000..34d01523 --- /dev/null +++ b/crates/auths-transparency/src/tile.rs @@ -0,0 +1,149 @@ +use crate::error::TransparencyError; + +/// Tile height (number of hash levels per tile). C2SP default = 8 → 256 hashes. +pub const TILE_HEIGHT: u32 = 8; + +/// Number of leaf hashes per tile (2^TILE_HEIGHT). +pub const TILE_WIDTH: u64 = 1 << TILE_HEIGHT; + +/// Encode a tile path per C2SP tlog-tiles spec. +/// +/// Segments are zero-padded to 3 digits. Non-final segments get an `x` prefix. +/// E.g., index 1234067 → `x001/x234/067`. +/// +/// Args: +/// * `level` — Tile level (0 for data tiles, 1+ for hash tiles). +/// * `index` — Tile index at the given level. +/// * `width` — Partial tile width (0 means full tile, i.e., 256). +/// +/// Usage: +/// ```ignore +/// let path = tile_path(0, 1234067, 0)?; +/// assert_eq!(path, "tile/0/x001/x234/067"); +/// ``` +pub fn tile_path(level: u32, index: u64, width: u64) -> Result { + let index_path = encode_index(index)?; + let mut path = format!("tile/{level}/{index_path}"); + if width > 0 && width < TILE_WIDTH { + path.push_str(&format!(".p/{width}")); + } + Ok(path) +} + +/// Encode a tile index into C2SP path segments. +/// +/// Zero-padded 3-digit segments, non-final segments prefixed with `x`. 
+fn encode_index(index: u64) -> Result { + if index == 0 { + return Ok("000".into()); + } + + let mut segments = Vec::new(); + let mut remaining = index; + while remaining > 0 { + #[allow(clippy::cast_possible_truncation)] + let segment = (remaining % 1000) as u16; + segments.push(segment); + remaining /= 1000; + } + segments.reverse(); + + let mut parts = Vec::with_capacity(segments.len()); + for (i, &seg) in segments.iter().enumerate() { + if i < segments.len() - 1 { + parts.push(format!("x{seg:03}")); + } else { + parts.push(format!("{seg:03}")); + } + } + Ok(parts.join("/")) +} + +/// Compute which tile contains a given leaf index. +/// +/// Args: +/// * `leaf_index` — Zero-based leaf index. +/// +/// Usage: +/// ```ignore +/// let (tile_index, offset) = leaf_tile(42); +/// ``` +pub fn leaf_tile(leaf_index: u64) -> (u64, u64) { + (leaf_index / TILE_WIDTH, leaf_index % TILE_WIDTH) +} + +/// Compute the number of full tiles and the partial tile width for a tree of `size` leaves. +/// +/// Args: +/// * `size` — Total number of leaves. 
+/// +/// Usage: +/// ```ignore +/// let (full_tiles, partial_width) = tile_count(300); +/// assert_eq!(full_tiles, 1); +/// assert_eq!(partial_width, 44); +/// ``` +pub fn tile_count(size: u64) -> (u64, u64) { + (size / TILE_WIDTH, size % TILE_WIDTH) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn encode_index_zero() { + assert_eq!(encode_index(0).unwrap(), "000"); + } + + #[test] + fn encode_index_small() { + assert_eq!(encode_index(5).unwrap(), "005"); + assert_eq!(encode_index(42).unwrap(), "042"); + assert_eq!(encode_index(999).unwrap(), "999"); + } + + #[test] + fn encode_index_multi_segment() { + assert_eq!(encode_index(1000).unwrap(), "x001/000"); + assert_eq!(encode_index(1234).unwrap(), "x001/234"); + assert_eq!(encode_index(1234067).unwrap(), "x001/x234/067"); + } + + #[test] + fn tile_path_data_tile() { + assert_eq!(tile_path(0, 0, 0).unwrap(), "tile/0/000"); + assert_eq!(tile_path(0, 5, 0).unwrap(), "tile/0/005"); + } + + #[test] + fn tile_path_partial() { + assert_eq!(tile_path(0, 0, 42).unwrap(), "tile/0/000.p/42"); + } + + #[test] + fn tile_path_hash_tile() { + assert_eq!(tile_path(1, 3, 0).unwrap(), "tile/1/003"); + } + + #[test] + fn tile_path_large_index() { + assert_eq!(tile_path(0, 1234067, 0).unwrap(), "tile/0/x001/x234/067"); + } + + #[test] + fn leaf_tile_computation() { + assert_eq!(leaf_tile(0), (0, 0)); + assert_eq!(leaf_tile(255), (0, 255)); + assert_eq!(leaf_tile(256), (1, 0)); + assert_eq!(leaf_tile(300), (1, 44)); + } + + #[test] + fn tile_count_computation() { + assert_eq!(tile_count(0), (0, 0)); + assert_eq!(tile_count(256), (1, 0)); + assert_eq!(tile_count(300), (1, 44)); + assert_eq!(tile_count(512), (2, 0)); + } +} diff --git a/crates/auths-transparency/src/types.rs b/crates/auths-transparency/src/types.rs new file mode 100644 index 00000000..e6ea6799 --- /dev/null +++ b/crates/auths-transparency/src/types.rs @@ -0,0 +1,202 @@ +use std::fmt; + +use base64::{Engine, engine::general_purpose::STANDARD}; +use 
serde::{Deserialize, Deserializer, Serialize, Serializer}; +use sha2::{Digest, Sha256}; + +use crate::error::TransparencyError; + +/// SHA-256 Merkle hash (32 bytes). +/// +/// Args: +/// * Inner `[u8; 32]` — raw SHA-256 digest. +/// +/// Usage: +/// ```ignore +/// let hash = MerkleHash::from_bytes([0u8; 32]); +/// let hex_str = hash.to_string(); // lowercase hex +/// ``` +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub struct MerkleHash([u8; 32]); + +impl MerkleHash { + /// The all-zero hash, used as a sentinel for empty trees. + pub const EMPTY: Self = Self([0u8; 32]); + + /// Wrap raw bytes. + pub fn from_bytes(bytes: [u8; 32]) -> Self { + Self(bytes) + } + + /// Construct from a hex string. + pub fn from_hex(s: &str) -> Result { + let bytes = hex::decode(s).map_err(|e| TransparencyError::InvalidProof(e.to_string()))?; + let arr: [u8; 32] = bytes + .try_into() + .map_err(|_| TransparencyError::InvalidProof("hash must be 32 bytes".into()))?; + Ok(Self(arr)) + } + + /// Raw bytes. + pub fn as_bytes(&self) -> &[u8; 32] { + &self.0 + } + + /// Encode as standard base64 (with padding). Used in C2SP checkpoint note body. + pub fn to_base64(&self) -> String { + STANDARD.encode(self.0) + } + + /// Decode from standard base64 (with padding). + pub fn from_base64(s: &str) -> Result { + let bytes = STANDARD + .decode(s) + .map_err(|e| TransparencyError::InvalidProof(format!("base64 decode: {e}")))?; + let arr: [u8; 32] = bytes + .try_into() + .map_err(|_| TransparencyError::InvalidProof("hash must be 32 bytes".into()))?; + Ok(Self(arr)) + } + + /// Plain SHA-256 (no domain separation). Used for key-ID computation. 
+ pub fn sha256(data: &[u8]) -> Self { + let digest = Sha256::digest(data); + let mut out = [0u8; 32]; + out.copy_from_slice(&digest); + Self(out) + } +} + +impl fmt::Debug for MerkleHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "MerkleHash({})", self) + } +} + +impl fmt::Display for MerkleHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for b in &self.0 { + write!(f, "{b:02x}")?; + } + Ok(()) + } +} + +impl Serialize for MerkleHash { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(&self.to_string()) + } +} + +impl<'de> Deserialize<'de> for MerkleHash { + fn deserialize>(deserializer: D) -> Result { + let s = String::deserialize(deserializer)?; + Self::from_hex(&s).map_err(serde::de::Error::custom) + } +} + +impl AsRef<[u8]> for MerkleHash { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// Validated log origin string (e.g., `"auths.dev/log"`). +/// +/// Must be non-empty ASCII with no control characters. +/// +/// Args: +/// * Inner `String` — validated ASCII origin. +/// +/// Usage: +/// ```ignore +/// let origin = LogOrigin::new("auths.dev/log")?; +/// ``` +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(try_from = "String", into = "String")] +pub struct LogOrigin(String); + +impl LogOrigin { + /// Create a new log origin, validating that it is non-empty ASCII. + pub fn new(s: &str) -> Result { + if s.is_empty() { + return Err(TransparencyError::InvalidOrigin("must not be empty".into())); + } + if !s.is_ascii() { + return Err(TransparencyError::InvalidOrigin("must be ASCII".into())); + } + if s.bytes().any(|b| b < 0x20) { + return Err(TransparencyError::InvalidOrigin( + "must not contain control characters".into(), + )); + } + Ok(Self(s.to_string())) + } + + /// The inner string. 
+ pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl fmt::Display for LogOrigin { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.0) + } +} + +impl TryFrom for LogOrigin { + type Error = TransparencyError; + fn try_from(s: String) -> Result { + Self::new(&s) + } +} + +impl From for String { + fn from(o: LogOrigin) -> Self { + o.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn merkle_hash_hex_roundtrip() { + let bytes = [0xabu8; 32]; + let h = MerkleHash::from_bytes(bytes); + let hex_str = h.to_string(); + let h2 = MerkleHash::from_hex(&hex_str).unwrap(); + assert_eq!(h, h2); + } + + #[test] + fn merkle_hash_json_roundtrip() { + let h = MerkleHash::from_bytes([0x42u8; 32]); + let json = serde_json::to_string(&h).unwrap(); + let h2: MerkleHash = serde_json::from_str(&json).unwrap(); + assert_eq!(h, h2); + } + + #[test] + fn log_origin_rejects_empty() { + assert!(LogOrigin::new("").is_err()); + } + + #[test] + fn log_origin_rejects_non_ascii() { + assert!(LogOrigin::new("日本語").is_err()); + } + + #[test] + fn log_origin_rejects_control_chars() { + assert!(LogOrigin::new("auths\x00log").is_err()); + } + + #[test] + fn log_origin_valid() { + let o = LogOrigin::new("auths.dev/log").unwrap(); + assert_eq!(o.as_str(), "auths.dev/log"); + } +} diff --git a/crates/auths-transparency/src/verify.rs b/crates/auths-transparency/src/verify.rs new file mode 100644 index 00000000..3970fec6 --- /dev/null +++ b/crates/auths-transparency/src/verify.rs @@ -0,0 +1,795 @@ +//! Offline bundle verification logic. +//! +//! Provides [`verify_bundle`] — a synchronous, I/O-free function that verifies +//! an [`OfflineBundle`] against a [`TrustRoot`]. 
+ +use chrono::{DateTime, Duration, Utc}; +use ring::signature::{ED25519, UnparsedPublicKey}; + +use crate::bundle::{ + BundleVerificationReport, CheckpointStatus, DelegationStatus, InclusionStatus, NamespaceStatus, + SignatureStatus, WitnessStatus, +}; +use crate::checkpoint::SignedCheckpoint; +use crate::entry::{EntryBody, EntryType}; +use crate::merkle::hash_leaf; +use crate::{OfflineBundle, TrustRoot}; +use auths_verifier::{Capability, IdentityDID}; + +const STALE_BUNDLE_DAYS: i64 = 90; + +/// Verifies an offline transparency bundle against a trust root. +/// +/// Each verification dimension (signature, inclusion, checkpoint, witnesses, +/// namespace, delegation) is evaluated independently so callers can make +/// nuanced trust decisions. +/// +/// Args: +/// * `bundle` — The offline bundle to verify. +/// * `trust_root` — Trusted log public key and witness set. +/// * `now` — Current wall-clock time (injected, never read from system clock). +/// +/// Usage: +/// ```ignore +/// let report = verify_bundle(&bundle, &trust_root, now); +/// if report.is_valid() { +/// // bundle is trustworthy +/// } +/// ``` +pub fn verify_bundle( + bundle: &OfflineBundle, + trust_root: &TrustRoot, + now: DateTime<Utc>, +) -> BundleVerificationReport { + let signature = verify_signature(bundle); + let inclusion = verify_inclusion_proof(bundle); + let checkpoint = verify_checkpoint(&bundle.signed_checkpoint, trust_root); + let witnesses = verify_witnesses(&bundle.signed_checkpoint, trust_root); + let delegation = verify_delegation_chain(bundle); + let namespace = derive_namespace_status(&delegation, bundle); + + let mut warnings = Vec::new(); + check_staleness(&bundle.signed_checkpoint, now, &mut warnings); + + BundleVerificationReport { + signature, + inclusion, + checkpoint, + witnesses, + namespace, + delegation, + warnings, + } +} + +fn resolve_actor_public_key(bundle: &OfflineBundle) -> Option<[u8; 32]> { + let actor_did = bundle.entry.content.actor_did.as_str(); + + if 
actor_did.starts_with("did:key:z") { + return auths_crypto::did_key_to_ed25519(actor_did).ok(); + } + + if actor_did.starts_with("did:keri:") { + for link in &bundle.delegation_chain { + if link.link_type == EntryType::DeviceBind + && let EntryBody::DeviceBind { + ref device_did, + ref public_key, + } = link.entry.content.body + && device_did.as_str() == actor_did + { + return Some(*public_key.as_bytes()); + } + } + } + + None +} + +fn verify_signature(bundle: &OfflineBundle) -> SignatureStatus { + let public_key_bytes = match resolve_actor_public_key(bundle) { + Some(pk) => pk, + None => { + return SignatureStatus::Failed { + reason: format!( + "could not resolve public key for actor DID: {}", + bundle.entry.content.actor_did + ), + }; + } + }; + + let canonical = match bundle.entry.content.canonicalize() { + Ok(c) => c, + Err(e) => { + return SignatureStatus::Failed { + reason: format!("canonicalization failed: {e}"), + }; + } + }; + + let peer_key = UnparsedPublicKey::new(&ED25519, &public_key_bytes); + match peer_key.verify(&canonical, bundle.entry.actor_sig.as_bytes()) { + Ok(()) => SignatureStatus::Verified, + Err(_) => SignatureStatus::Failed { + reason: "Ed25519 signature verification failed".into(), + }, + } +} + +fn verify_inclusion_proof(bundle: &OfflineBundle) -> InclusionStatus { + let leaf_data = match bundle.entry.leaf_data() { + Ok(d) => d, + Err(e) => { + return InclusionStatus::Failed { + reason: format!("leaf data serialization failed: {e}"), + }; + } + }; + let leaf_hash = hash_leaf(&leaf_data); + + let proof = &bundle.inclusion_proof; + if let Err(e) = crate::merkle::verify_inclusion( + &leaf_hash, + proof.index, + proof.size, + &proof.hashes, + &proof.root, + ) { + return InclusionStatus::Failed { + reason: format!("Merkle inclusion failed: {e}"), + }; + } + + if proof.root != bundle.signed_checkpoint.checkpoint.root { + return InclusionStatus::Failed { + reason: "inclusion proof root does not match checkpoint root".into(), + }; + } + + 
InclusionStatus::Verified +} + +fn verify_checkpoint(signed: &SignedCheckpoint, trust_root: &TrustRoot) -> CheckpointStatus { + if signed.checkpoint.origin != trust_root.log_origin { + return CheckpointStatus::InvalidSignature; + } + + let note_body = signed.checkpoint.to_note_body(); + + let peer_key = UnparsedPublicKey::new(&ED25519, trust_root.log_public_key.as_bytes()); + match peer_key.verify(note_body.as_bytes(), signed.log_signature.as_bytes()) { + Ok(()) => CheckpointStatus::Verified, + Err(_) => CheckpointStatus::InvalidSignature, + } +} + +fn verify_witnesses(signed: &SignedCheckpoint, trust_root: &TrustRoot) -> WitnessStatus { + if trust_root.witnesses.is_empty() { + return WitnessStatus::NotProvided; + } + + let note_body = signed.checkpoint.to_note_body(); + let required = trust_root.witnesses.len() / 2 + 1; + let mut verified = 0usize; + + for cosig in &signed.witnesses { + let trusted = trust_root + .witnesses + .iter() + .find(|w| w.public_key.as_bytes() == cosig.witness_public_key.as_bytes()); + + if let Some(_witness) = trusted { + let peer_key = UnparsedPublicKey::new(&ED25519, cosig.witness_public_key.as_bytes()); + if peer_key + .verify(note_body.as_bytes(), cosig.signature.as_bytes()) + .is_ok() + { + verified += 1; + } + } + } + + if verified >= required { + WitnessStatus::Quorum { verified, required } + } else { + WitnessStatus::Insufficient { verified, required } + } +} + +fn check_staleness(signed: &SignedCheckpoint, now: DateTime, warnings: &mut Vec) { + #[allow(clippy::expect_used)] // INVARIANT: 90 days always fits in Duration + let stale_threshold = + Duration::try_days(STALE_BUNDLE_DAYS).expect("STALE_BUNDLE_DAYS is a small constant"); + if now - signed.checkpoint.timestamp > stale_threshold { + warnings.push(format!( + "bundle checkpoint is older than {} days", + STALE_BUNDLE_DAYS + )); + } +} + +fn validate_chain_link_order( + chain: &[crate::bundle::DelegationChainLink], +) -> Option { + if chain[0].link_type != 
EntryType::DeviceBind { + return Some(DelegationStatus::ChainBroken { + reason: format!( + "link[0] expected type {:?}, got {:?}", + EntryType::DeviceBind, + chain[0].link_type + ), + }); + } + + let allowed_order = [ + EntryType::OrgAddMember, + EntryType::NamespaceClaim, + EntryType::NamespaceDelegate, + ]; + + let mut order_idx = 0; + for (i, link) in chain.iter().enumerate().skip(1) { + while order_idx < allowed_order.len() && link.link_type != allowed_order[order_idx] { + order_idx += 1; + } + if order_idx >= allowed_order.len() { + return Some(DelegationStatus::ChainBroken { + reason: format!( + "link[{i}] unexpected type {:?} at this position", + link.link_type + ), + }); + } + if link.link_type == EntryType::NamespaceDelegate { + // NamespaceDelegate can repeat (multi-hop delegation) + } else { + order_idx += 1; + } + } + + let has_namespace_claim = chain + .iter() + .any(|l| l.link_type == EntryType::NamespaceClaim); + let has_namespace_delegate = chain + .iter() + .any(|l| l.link_type == EntryType::NamespaceDelegate); + if has_namespace_delegate && !has_namespace_claim { + return Some(DelegationStatus::ChainBroken { + reason: "NamespaceDelegate requires a preceding NamespaceClaim".into(), + }); + } + + if !has_namespace_claim + && !chain + .iter() + .skip(1) + .any(|l| l.link_type == EntryType::OrgAddMember) + { + return Some(DelegationStatus::ChainBroken { + reason: "chain must contain at least OrgAddMember or NamespaceClaim after DeviceBind" + .into(), + }); + } + + None +} + +fn verify_link_inclusion_proofs( + chain: &[crate::bundle::DelegationChainLink], + checkpoint_root: &crate::types::MerkleHash, +) -> Option { + let mut sequences: Vec = chain.iter().map(|l| l.entry.sequence).collect(); + sequences.sort_unstable(); + sequences.dedup(); + if sequences.len() != chain.len() { + return Some(DelegationStatus::ChainBroken { + reason: "duplicate sequence numbers in delegation chain".into(), + }); + } + + for (i, link) in chain.iter().enumerate() { + let 
leaf_data = match link.entry.leaf_data() { + Ok(d) => d, + Err(e) => { + return Some(DelegationStatus::ChainBroken { + reason: format!("link[{i}] leaf data failed: {e}"), + }); + } + }; + let leaf_hash = hash_leaf(&leaf_data); + let proof = &link.inclusion_proof; + if let Err(e) = crate::merkle::verify_inclusion( + &leaf_hash, + proof.index, + proof.size, + &proof.hashes, + &proof.root, + ) { + return Some(DelegationStatus::ChainBroken { + reason: format!("link[{i}] inclusion proof failed: {e}"), + }); + } + if &proof.root != checkpoint_root { + return Some(DelegationStatus::ChainBroken { + reason: format!("link[{i}] proof root does not match checkpoint"), + }); + } + } + + None +} + +fn extract_namespace_from_entry(body: &EntryBody) -> Option<(&str, &str)> { + match body { + EntryBody::NamespaceClaim { + ecosystem, + package_name, + } + | EntryBody::NamespaceDelegate { + ecosystem, + package_name, + .. + } + | EntryBody::NamespaceTransfer { + ecosystem, + package_name, + .. + } => Some((ecosystem.as_str(), package_name.as_str())), + _ => None, + } +} + +fn verify_delegation_chain(bundle: &OfflineBundle) -> DelegationStatus { + if bundle.delegation_chain.is_empty() { + return DelegationStatus::NoDelegationData; + } + + let chain = &bundle.delegation_chain; + + if chain.len() < 2 { + return DelegationStatus::ChainBroken { + reason: format!("chain must have at least 2 links, got {}", chain.len()), + }; + } + + if let Some(broken) = validate_chain_link_order(chain) { + return broken; + } + + let checkpoint_root = &bundle.signed_checkpoint.checkpoint.root; + if let Some(broken) = verify_link_inclusion_proofs(chain, checkpoint_root) { + return broken; + } + + let device_did = match &chain[0].entry.content.body { + EntryBody::DeviceBind { device_did, .. 
} => device_did.clone(), + _ => { + return DelegationStatus::ChainBroken { + reason: "link[0] body is not DeviceBind".into(), + }; + } + }; + + #[allow(clippy::disallowed_methods)] + // INVARIANT: actor_did from a parsed Entry is already valid + let identity_did = IdentityDID::new_unchecked(chain[0].entry.content.actor_did.as_str()); + + let org_add_member = chain + .iter() + .enumerate() + .find(|(_, l)| l.link_type == EntryType::OrgAddMember); + + let namespace_claim = chain + .iter() + .enumerate() + .find(|(_, l)| l.link_type == EntryType::NamespaceClaim); + + let (member_did, member_role, org_did) = if let Some((idx, link)) = org_add_member { + match &link.entry.content.body { + EntryBody::OrgAddMember { + member_did, + role, + capabilities, + .. + } => { + let bundle_is_namespace_op = + extract_namespace_from_entry(&bundle.entry.content.body).is_some(); + if bundle_is_namespace_op && !capabilities.contains(&Capability::sign_release()) { + return DelegationStatus::ChainBroken { + reason: format!( + "link[{idx}] OrgAddMember lacks sign_release capability required for namespace operations" + ), + }; + } + + #[allow(clippy::disallowed_methods)] + // INVARIANT: actor_did from a parsed Entry is already valid + let org = IdentityDID::new_unchecked(link.entry.content.actor_did.as_str()); + (member_did.clone(), *role, org) + } + _ => { + return DelegationStatus::ChainBroken { + reason: format!("link[{idx}] body is not OrgAddMember"), + }; + } + } + } else { + // 2-link chain: [DeviceBind, NamespaceClaim] — direct ownership, no org + #[allow(clippy::disallowed_methods)] + // INVARIANT: actor_did from a parsed Entry is already valid + let owner_did = IdentityDID::new_unchecked(chain[0].entry.content.actor_did.as_str()); + (owner_did.clone(), auths_verifier::Role::Admin, owner_did) + }; + + if member_did.as_str() != identity_did.as_str() { + return DelegationStatus::ChainBroken { + reason: format!( + "DID connectivity broken: OrgAddMember member_did ({}) != DeviceBind 
actor_did ({})", + member_did, identity_did + ), + }; + } + + if let Some((ns_idx, ns_link)) = namespace_claim { + if let EntryBody::NamespaceClaim { + ecosystem: claim_ecosystem, + package_name: claim_package, + } = &ns_link.entry.content.body + { + if org_add_member.is_some() { + #[allow(clippy::disallowed_methods)] + // INVARIANT: actor_did from a parsed Entry is already valid + let claim_actor = + IdentityDID::new_unchecked(ns_link.entry.content.actor_did.as_str()); + if claim_actor.as_str() != org_did.as_str() { + return DelegationStatus::ChainBroken { + reason: format!( + "link[{ns_idx}] NamespaceClaim actor_did ({}) != org_did ({})", + claim_actor, org_did + ), + }; + } + } + + if let Some((bundle_ecosystem, bundle_package)) = + extract_namespace_from_entry(&bundle.entry.content.body) + && (claim_ecosystem != bundle_ecosystem || claim_package != bundle_package) + { + return DelegationStatus::ChainBroken { + reason: format!( + "link[{ns_idx}] NamespaceClaim namespace ({}/{}) does not match bundle entry ({}/{})", + claim_ecosystem, claim_package, bundle_ecosystem, bundle_package + ), + }; + } + } else { + return DelegationStatus::ChainBroken { + reason: format!("link[{ns_idx}] body is not NamespaceClaim"), + }; + } + } + + DelegationStatus::ChainVerified { + org_did, + member_did, + member_role, + device_did, + } +} + +fn derive_namespace_status( + delegation: &DelegationStatus, + bundle: &OfflineBundle, +) -> NamespaceStatus { + match delegation { + DelegationStatus::ChainVerified { .. 
} => { + let has_namespace_delegate = bundle + .delegation_chain + .iter() + .any(|link| link.link_type == EntryType::NamespaceDelegate); + let has_namespace_claim = bundle + .delegation_chain + .iter() + .any(|link| link.link_type == EntryType::NamespaceClaim); + if has_namespace_delegate || has_namespace_claim { + NamespaceStatus::Authorized + } else { + NamespaceStatus::Owned + } + } + DelegationStatus::Direct => NamespaceStatus::Owned, + DelegationStatus::NoDelegationData => NamespaceStatus::Owned, + DelegationStatus::ChainBroken { .. } => NamespaceStatus::Unauthorized, + } +} + +#[cfg(test)] +#[allow(clippy::disallowed_methods)] +mod tests { + use super::*; + use crate::TrustRootWitness; + use crate::bundle::DelegationChainLink; + use crate::checkpoint::{Checkpoint, WitnessCosignature}; + use crate::entry::{Entry, EntryContent}; + use crate::merkle::compute_root; + use crate::proof::InclusionProof; + use crate::types::LogOrigin; + use auths_verifier::{CanonicalDid, DeviceDID, Ed25519PublicKey, Ed25519Signature}; + use ring::signature::{Ed25519KeyPair, KeyPair}; + + fn fixed_now() -> DateTime { + chrono::DateTime::parse_from_rfc3339("2025-07-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc) + } + + fn fixed_ts() -> DateTime { + chrono::DateTime::parse_from_rfc3339("2025-06-15T00:00:00Z") + .unwrap() + .with_timezone(&Utc) + } + + struct TestFixture { + log_keypair: Ed25519KeyPair, + log_public_key: [u8; 32], + actor_keypair: Ed25519KeyPair, + actor_public_key: [u8; 32], + actor_did: String, + trust_root: TrustRoot, + } + + fn setup() -> TestFixture { + let log_keypair = Ed25519KeyPair::from_seed_unchecked(&[1u8; 32]).unwrap(); + let log_public_key: [u8; 32] = log_keypair.public_key().as_ref().try_into().unwrap(); + + let actor_keypair = Ed25519KeyPair::from_seed_unchecked(&[2u8; 32]).unwrap(); + let actor_public_key: [u8; 32] = actor_keypair.public_key().as_ref().try_into().unwrap(); + let actor_did = auths_crypto::ed25519_pubkey_to_did_key(&actor_public_key); 
+ + let trust_root = TrustRoot { + log_public_key: Ed25519PublicKey::from_bytes(log_public_key), + log_origin: LogOrigin::new("test.dev/log").unwrap(), + witnesses: vec![], + }; + + TestFixture { + log_keypair, + log_public_key, + actor_keypair, + actor_public_key, + actor_did, + trust_root, + } + } + + fn make_entry(fixture: &TestFixture) -> Entry { + let content = EntryContent { + entry_type: EntryType::DeviceBind, + body: EntryBody::DeviceBind { + device_did: DeviceDID::new_unchecked(&fixture.actor_did), + public_key: Ed25519PublicKey::from_bytes(fixture.actor_public_key), + }, + actor_did: CanonicalDid::new_unchecked(&fixture.actor_did), + }; + let canonical = content.canonicalize().unwrap(); + let sig_bytes = fixture.actor_keypair.sign(&canonical); + let actor_sig = Ed25519Signature::try_from_slice(sig_bytes.as_ref()).unwrap(); + + Entry { + sequence: 0, + timestamp: fixed_ts(), + content, + actor_sig, + } + } + + fn make_signed_checkpoint( + entry: &Entry, + fixture: &TestFixture, + ) -> (SignedCheckpoint, InclusionProof) { + let leaf_data = entry.leaf_data().unwrap(); + let leaf_hash = hash_leaf(&leaf_data); + let root = compute_root(&[leaf_hash]); + + let checkpoint = Checkpoint { + origin: LogOrigin::new("test.dev/log").unwrap(), + size: 1, + root, + timestamp: fixed_ts(), + }; + + let note_body = checkpoint.to_note_body(); + let log_sig_bytes = fixture.log_keypair.sign(note_body.as_bytes()); + let log_signature = Ed25519Signature::try_from_slice(log_sig_bytes.as_ref()).unwrap(); + + let signed = SignedCheckpoint { + checkpoint, + log_signature, + log_public_key: Ed25519PublicKey::from_bytes(fixture.log_public_key), + witnesses: vec![], + }; + + let proof = InclusionProof { + index: 0, + size: 1, + root, + hashes: vec![], + }; + + (signed, proof) + } + + fn make_valid_bundle(fixture: &TestFixture) -> OfflineBundle { + let entry = make_entry(fixture); + let (signed_checkpoint, inclusion_proof) = make_signed_checkpoint(&entry, fixture); + + OfflineBundle { + 
entry, + inclusion_proof, + signed_checkpoint, + delegation_chain: vec![], + } + } + + #[test] + fn valid_bundle_all_verified() { + let fixture = setup(); + let bundle = make_valid_bundle(&fixture); + let report = verify_bundle(&bundle, &fixture.trust_root, fixed_now()); + + assert_eq!(report.signature, SignatureStatus::Verified); + assert_eq!(report.inclusion, InclusionStatus::Verified); + assert_eq!(report.checkpoint, CheckpointStatus::Verified); + assert_eq!(report.witnesses, WitnessStatus::NotProvided); + assert!(report.is_valid()); + assert!(report.warnings.is_empty()); + } + + #[test] + fn bad_signature_fails() { + let fixture = setup(); + let mut bundle = make_valid_bundle(&fixture); + bundle.entry.actor_sig = Ed25519Signature::from_bytes([0xaa; 64]); + + let report = verify_bundle(&bundle, &fixture.trust_root, fixed_now()); + + assert!(matches!(report.signature, SignatureStatus::Failed { .. })); + assert!(!report.is_valid()); + } + + #[test] + fn bad_inclusion_proof_fails() { + let fixture = setup(); + let mut bundle = make_valid_bundle(&fixture); + bundle + .inclusion_proof + .hashes + .push(crate::types::MerkleHash::from_bytes([0xff; 32])); + + let report = verify_bundle(&bundle, &fixture.trust_root, fixed_now()); + + assert!(matches!(report.inclusion, InclusionStatus::Failed { .. 
})); + } + + #[test] + fn stale_checkpoint_produces_warning() { + let fixture = setup(); + let mut bundle = make_valid_bundle(&fixture); + + let old_ts = chrono::DateTime::parse_from_rfc3339("2025-01-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + bundle.signed_checkpoint.checkpoint.timestamp = old_ts; + + let report = verify_bundle(&bundle, &fixture.trust_root, fixed_now()); + + assert!(!report.warnings.is_empty()); + assert!(report.warnings[0].contains("older than 90 days")); + } + + #[test] + fn witness_quorum_met() { + let w1_keypair = Ed25519KeyPair::from_seed_unchecked(&[10u8; 32]).unwrap(); + let w1_pk: [u8; 32] = w1_keypair.public_key().as_ref().try_into().unwrap(); + let w2_keypair = Ed25519KeyPair::from_seed_unchecked(&[11u8; 32]).unwrap(); + let w2_pk: [u8; 32] = w2_keypair.public_key().as_ref().try_into().unwrap(); + + let fixture = setup(); + let bundle = make_valid_bundle(&fixture); + + let note_body = bundle.signed_checkpoint.checkpoint.to_note_body(); + let w1_sig = w1_keypair.sign(note_body.as_bytes()); + let w2_sig = w2_keypair.sign(note_body.as_bytes()); + + let mut bundle = bundle; + bundle.signed_checkpoint.witnesses = vec![ + WitnessCosignature { + witness_name: "w1".into(), + witness_public_key: Ed25519PublicKey::from_bytes(w1_pk), + signature: Ed25519Signature::try_from_slice(w1_sig.as_ref()).unwrap(), + timestamp: fixed_ts(), + }, + WitnessCosignature { + witness_name: "w2".into(), + witness_public_key: Ed25519PublicKey::from_bytes(w2_pk), + signature: Ed25519Signature::try_from_slice(w2_sig.as_ref()).unwrap(), + timestamp: fixed_ts(), + }, + ]; + + let trust_root = TrustRoot { + log_public_key: Ed25519PublicKey::from_bytes(fixture.log_public_key), + log_origin: LogOrigin::new("test.dev/log").unwrap(), + witnesses: vec![ + TrustRootWitness { + witness_did: DeviceDID::new_unchecked(auths_crypto::ed25519_pubkey_to_did_key( + &w1_pk, + )), + name: "w1".into(), + public_key: Ed25519PublicKey::from_bytes(w1_pk), + }, + TrustRootWitness { + 
witness_did: DeviceDID::new_unchecked(auths_crypto::ed25519_pubkey_to_did_key( + &w2_pk, + )), + name: "w2".into(), + public_key: Ed25519PublicKey::from_bytes(w2_pk), + }, + ], + }; + + let report = verify_bundle(&bundle, &trust_root, fixed_now()); + assert!(matches!( + report.witnesses, + WitnessStatus::Quorum { + verified: 2, + required: 2, + } + )); + } + + #[test] + fn empty_delegation_yields_no_delegation_data() { + let fixture = setup(); + let bundle = make_valid_bundle(&fixture); + let report = verify_bundle(&bundle, &fixture.trust_root, fixed_now()); + assert_eq!(report.delegation, DelegationStatus::NoDelegationData); + } + + #[test] + fn delegation_chain_wrong_length_is_broken() { + let fixture = setup(); + let mut bundle = make_valid_bundle(&fixture); + + let entry = make_entry(&fixture); + let root = bundle.signed_checkpoint.checkpoint.root; + + bundle.delegation_chain = vec![DelegationChainLink { + link_type: EntryType::DeviceBind, + entry, + inclusion_proof: InclusionProof { + index: 0, + size: 1, + root, + hashes: vec![], + }, + }]; + + let report = verify_bundle(&bundle, &fixture.trust_root, fixed_now()); + assert!(matches!( + report.delegation, + DelegationStatus::ChainBroken { .. } + )); + } + + #[test] + fn checkpoint_origin_mismatch_fails() { + let fixture = setup(); + let mut bundle = make_valid_bundle(&fixture); + bundle.signed_checkpoint.checkpoint.origin = LogOrigin::new("other.dev/log").unwrap(); + + let report = verify_bundle(&bundle, &fixture.trust_root, fixed_now()); + assert_eq!(report.checkpoint, CheckpointStatus::InvalidSignature); + } +} diff --git a/crates/auths-transparency/src/witness.rs b/crates/auths-transparency/src/witness.rs new file mode 100644 index 00000000..26afe7a5 --- /dev/null +++ b/crates/auths-transparency/src/witness.rs @@ -0,0 +1,554 @@ +//! Witness protocol for transparency log split-view protection. +//! +//! Implements the C2SP tlog-witness cosignature protocol. Witnesses +//! 
independently verify checkpoint consistency and produce timestamped +//! Ed25519 cosignatures (algorithm byte `0x04`). + +use std::time::Duration; + +use async_trait::async_trait; +use chrono::DateTime; +use tokio::time::timeout; + +use crate::checkpoint::{SignedCheckpoint, WitnessCosignature}; +use crate::error::TransparencyError; +use crate::proof::ConsistencyProof; +use crate::types::MerkleHash; +use auths_verifier::Ed25519PublicKey; + +/// C2SP timestamped Ed25519 algorithm byte for witness cosignatures. +pub const ALG_COSIGNATURE_V1: u8 = 0x04; + +/// Default timeout for each witness cosigning request. +pub const DEFAULT_WITNESS_TIMEOUT: Duration = Duration::from_secs(5); + +/// Request sent to a witness to cosign a checkpoint. +/// +/// Args: +/// * `old_size` — The size of the checkpoint the witness last cosigned (0 if first). +/// * `consistency_proof` — Proof that the old tree is a prefix of the new tree. +/// * `signed_checkpoint` — The new checkpoint to cosign. +/// +/// Usage: +/// ```ignore +/// let req = CosignRequest { +/// old_size: 0, +/// consistency_proof: None, +/// signed_checkpoint: checkpoint.clone(), +/// }; +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct CosignRequest { + /// The tree size the witness last saw (0 for fresh witnesses). + pub old_size: u64, + /// Consistency proof from `old_size` to the new checkpoint size. + /// `None` when `old_size == 0` (first checkpoint). + pub consistency_proof: Option, + /// The signed checkpoint to cosign. + pub signed_checkpoint: SignedCheckpoint, +} + +/// Response from a witness after cosigning. +/// +/// Args: +/// * `cosignature` — The witness's cosignature on the checkpoint. +/// +/// Usage: +/// ```ignore +/// let resp = client.submit_checkpoint(req).await?; +/// let cosig = resp.cosignature; +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct CosignResponse { + /// The witness's cosignature. 
+ pub cosignature: WitnessCosignature, +} + +/// Async trait for submitting checkpoints to a witness for cosigning. +/// +/// Implementors handle the HTTP transport to a specific witness endpoint. +/// +/// Args: +/// * `submit_checkpoint` — Sends a cosign request and returns the cosignature. +/// +/// Usage: +/// ```ignore +/// let response = witness_client.submit_checkpoint(request).await?; +/// ``` +#[async_trait] +pub trait WitnessClient: Send + Sync { + /// Submit a checkpoint to the witness for cosigning. + /// + /// Args: + /// * `request` — The cosign request containing the checkpoint and consistency proof. + /// + /// Usage: + /// ```ignore + /// let resp = client.submit_checkpoint(req).await?; + /// ``` + async fn submit_checkpoint( + &self, + request: CosignRequest, + ) -> Result; +} + +/// Result of a witness cosigning attempt. +#[derive(Debug)] +pub enum WitnessResult { + /// Witness returned a valid cosignature. + Success(WitnessCosignature), + /// Witness timed out or returned an error. + Failed { + /// Name of the witness that failed. + witness_name: String, + /// Reason for the failure. + reason: String, + }, +} + +/// Fan out cosign requests to multiple witnesses and collect cosignatures. +/// +/// Returns once `quorum` cosignatures are collected, or all witnesses +/// have responded/timed out. Each witness gets `timeout_per_witness` +/// to respond. +/// +/// Args: +/// * `witnesses` — List of witness clients to contact. +/// * `request` — The cosign request to send to each witness. +/// * `quorum` — Number of cosignatures required. +/// * `timeout_per_witness` — Maximum time to wait for each witness. 
+/// +/// Usage: +/// ```ignore +/// let cosigs = collect_witness_cosignatures( +/// &witnesses, +/// request, +/// 2, +/// Duration::from_secs(5), +/// ).await; +/// ``` +pub async fn collect_witness_cosignatures( + witnesses: &[Box], + request: CosignRequest, + quorum: usize, + timeout_per_witness: Duration, +) -> Vec { + use futures::stream::{FuturesUnordered, StreamExt}; + + let mut futures = FuturesUnordered::new(); + for (i, witness) in witnesses.iter().enumerate() { + let req = request.clone(); + futures.push(async move { + let result = timeout(timeout_per_witness, witness.submit_checkpoint(req)).await; + (i, result) + }); + } + + let mut results = Vec::with_capacity(witnesses.len()); + let mut success_count = 0usize; + + while let Some((i, result)) = futures.next().await { + let witness_result = match result { + Ok(Ok(resp)) => { + success_count += 1; + WitnessResult::Success(resp.cosignature) + } + Ok(Err(e)) => WitnessResult::Failed { + witness_name: format!("witness-{i}"), + reason: e.to_string(), + }, + Err(_elapsed) => WitnessResult::Failed { + witness_name: format!("witness-{i}"), + reason: "timeout".into(), + }, + }; + results.push(witness_result); + + if success_count >= quorum { + break; + } + } + + results +} + +/// Extract successful cosignatures from witness results, checking if quorum is met. +/// +/// Args: +/// * `results` — Results from [`collect_witness_cosignatures`]. +/// * `quorum` — Number of cosignatures required for quorum. +/// +/// Usage: +/// ```ignore +/// let (cosigs, met) = extract_cosignatures(&results, 2); +/// ``` +pub fn extract_cosignatures( + results: &[WitnessResult], + quorum: usize, +) -> (Vec, bool) { + let cosigs: Vec = results + .iter() + .filter_map(|r| match r { + WitnessResult::Success(c) => Some(c.clone()), + WitnessResult::Failed { .. } => None, + }) + .collect(); + let met = cosigs.len() >= quorum; + (cosigs, met) +} + +/// Build the C2SP cosignature signed message. 
+/// +/// Per C2SP tlog-cosignature spec, the signed message is: +/// `"cosignature/v1\ntime <timestamp>\n" + checkpoint_body` +/// +/// Args: +/// * `checkpoint_body` — The C2SP checkpoint body (from `Checkpoint::to_note_body()`). +/// * `timestamp` — Seconds since epoch for the cosignature timestamp. +/// +/// Usage: +/// ```ignore +/// let msg = cosignature_signed_message(&checkpoint.to_note_body(), timestamp_secs); +/// ``` +pub fn cosignature_signed_message(checkpoint_body: &str, timestamp: u64) -> Vec<u8> { + let header = format!("cosignature/v1\ntime {timestamp}\n"); + let mut msg = Vec::with_capacity(header.len() + checkpoint_body.len()); + msg.extend_from_slice(header.as_bytes()); + msg.extend_from_slice(checkpoint_body.as_bytes()); + msg +} + +/// Compute a C2SP witness key ID using algorithm byte 0x04 (timestamped Ed25519). +/// +/// `key_id = SHA-256(witness_name + "\n" + 0x04 + pubkey)[0..4]` +/// +/// Args: +/// * `witness_name` — The witness's key name. +/// * `pubkey` — 32-byte Ed25519 public key. +/// +/// Usage: +/// ```ignore +/// let key_id = compute_witness_key_id("witness-1", &pubkey_bytes); +/// ``` +pub fn compute_witness_key_id(witness_name: &str, pubkey: &[u8; 32]) -> [u8; 4] { + let mut data = Vec::with_capacity(witness_name.len() + 1 + 1 + 32); + data.extend_from_slice(witness_name.as_bytes()); + data.push(b'\n'); + data.push(ALG_COSIGNATURE_V1); + data.extend_from_slice(pubkey); + + let hash = MerkleHash::sha256(&data); + let mut id = [0u8; 4]; + id.copy_from_slice(&hash.as_bytes()[..4]); + id +} + +/// Build a C2SP cosignature note signature line. +/// +/// Uses algorithm byte 0x04 (timestamped Ed25519). The encoded payload is: +/// `base64(0x04 + key_id + timestamp_bytes + signature)` +/// +/// Args: +/// * `witness_name` — The witness's key name. +/// * `key_id` — 4-byte key ID from [`compute_witness_key_id`]. +/// * `timestamp` — Seconds since epoch. +/// * `signature` — 64-byte Ed25519 signature. 
+/// +/// Usage: +/// ```ignore +/// let line = build_cosignature_line("witness-1", &key_id, timestamp, &sig); +/// ``` +pub fn build_cosignature_line( + witness_name: &str, + key_id: &[u8; 4], + timestamp: u64, + signature: &[u8; 64], +) -> String { + use base64::{Engine, engine::general_purpose::STANDARD}; + // C2SP format: alg_byte + key_id + 8-byte timestamp + 64-byte signature + let mut sig_data = Vec::with_capacity(1 + 4 + 8 + 64); + sig_data.push(ALG_COSIGNATURE_V1); + sig_data.extend_from_slice(key_id); + sig_data.extend_from_slice(&timestamp.to_be_bytes()); + sig_data.extend_from_slice(signature); + let encoded = STANDARD.encode(&sig_data); + format!("\u{2014} {witness_name} {encoded}\n") +} + +/// Parse a C2SP cosignature from raw bytes. +/// +/// Expected format: 8-byte big-endian timestamp + 64-byte Ed25519 signature. +/// +/// Args: +/// * `witness_name` — Name of the witness. +/// * `witness_public_key` — The witness's Ed25519 public key. +/// * `raw` — 72 bytes: 8-byte timestamp + 64-byte signature. 
+/// +/// Usage: +/// ```ignore +/// let cosig = parse_cosignature("w1", &pk, &raw_bytes)?; +/// ``` +pub fn parse_cosignature( + witness_name: &str, + witness_public_key: Ed25519PublicKey, + raw: &[u8], +) -> Result { + if raw.len() != 72 { + return Err(TransparencyError::InvalidNote(format!( + "cosignature must be 72 bytes (8 timestamp + 64 signature), got {}", + raw.len() + ))); + } + + let timestamp_secs = u64::from_be_bytes( + raw[..8] + .try_into() + .map_err(|_| TransparencyError::InvalidNote("invalid timestamp bytes".into()))?, + ); + + let sig_bytes: [u8; 64] = raw[8..72] + .try_into() + .map_err(|_| TransparencyError::InvalidNote("invalid signature bytes".into()))?; + + let timestamp = DateTime::from_timestamp(timestamp_secs as i64, 0).ok_or_else(|| { + TransparencyError::InvalidNote(format!("invalid timestamp: {timestamp_secs}")) + })?; + + Ok(WitnessCosignature { + witness_name: witness_name.to_string(), + witness_public_key, + signature: auths_verifier::Ed25519Signature::from_bytes(sig_bytes), + timestamp, + }) +} + +/// Serialize a cosignature to its raw wire format. +/// +/// Output: 8-byte big-endian timestamp + 64-byte Ed25519 signature. +/// +/// Args: +/// * `cosig` — The witness cosignature to serialize. 
+/// +/// Usage: +/// ```ignore +/// let raw = serialize_cosignature(&cosig); +/// assert_eq!(raw.len(), 72); +/// ``` +pub fn serialize_cosignature(cosig: &WitnessCosignature) -> [u8; 72] { + let mut out = [0u8; 72]; + let timestamp_secs = cosig.timestamp.timestamp() as u64; + out[..8].copy_from_slice(&timestamp_secs.to_be_bytes()); + out[8..72].copy_from_slice(cosig.signature.as_bytes()); + out +} + +#[cfg(test)] +mod tests { + use super::*; + use auths_verifier::Ed25519Signature; + use chrono::Utc; + + fn fixed_ts() -> DateTime { + DateTime::from_timestamp(1_700_000_000, 0).unwrap() + } + + #[test] + fn cosignature_signed_message_format() { + let body = "auths.dev/log\n42\nq6urq6urq6urq6urq6urq6urq6urq6urq6urq6urq6s=\n"; + let msg = cosignature_signed_message(body, 1_700_000_000); + let msg_str = String::from_utf8(msg).unwrap(); + assert!(msg_str.starts_with("cosignature/v1\ntime 1700000000\n")); + assert!(msg_str.ends_with(body)); + } + + #[test] + fn witness_key_id_uses_alg_0x04() { + let pubkey = [0xab; 32]; + let key_id = compute_witness_key_id("witness-1", &pubkey); + assert_eq!(key_id.len(), 4); + + // Different from regular note key ID (which uses 0x01) + let regular_key_id = crate::note::compute_key_id("witness-1", &pubkey); + assert_ne!(key_id, regular_key_id); + } + + #[test] + fn witness_key_id_deterministic() { + let pubkey = [0xab; 32]; + let id1 = compute_witness_key_id("w1", &pubkey); + let id2 = compute_witness_key_id("w1", &pubkey); + assert_eq!(id1, id2); + } + + #[test] + fn witness_key_id_differs_by_name() { + let pubkey = [0xab; 32]; + let id1 = compute_witness_key_id("w1", &pubkey); + let id2 = compute_witness_key_id("w2", &pubkey); + assert_ne!(id1, id2); + } + + #[test] + fn cosignature_roundtrip() { + let pk = Ed25519PublicKey::from_bytes([0xaa; 32]); + let sig = Ed25519Signature::from_bytes([0xbb; 64]); + let cosig = WitnessCosignature { + witness_name: "w1".into(), + witness_public_key: pk, + signature: sig, + timestamp: fixed_ts(), + }; + 
+ let raw = serialize_cosignature(&cosig); + assert_eq!(raw.len(), 72); + + let parsed = + parse_cosignature("w1", Ed25519PublicKey::from_bytes([0xaa; 32]), &raw).unwrap(); + assert_eq!(parsed.witness_name, "w1"); + assert_eq!(parsed.timestamp, fixed_ts()); + assert_eq!(parsed.signature.as_bytes(), cosig.signature.as_bytes()); + } + + #[test] + fn parse_cosignature_rejects_short_input() { + let pk = Ed25519PublicKey::from_bytes([0xaa; 32]); + let result = parse_cosignature("w1", pk, &[0u8; 10]); + assert!(result.is_err()); + } + + #[test] + fn cosignature_line_format() { + let key_id = [0x01, 0x02, 0x03, 0x04]; + let sig = [0xcc; 64]; + let line = build_cosignature_line("witness-1", &key_id, 1_700_000_000, &sig); + + assert!(line.starts_with("\u{2014} witness-1 ")); + assert!(line.ends_with('\n')); + + // Decode and verify structure + use base64::{Engine, engine::general_purpose::STANDARD}; + let parts: Vec<&str> = line.trim().splitn(3, ' ').collect(); + let decoded = STANDARD.decode(parts[2]).unwrap(); + assert_eq!(decoded[0], ALG_COSIGNATURE_V1); + assert_eq!(&decoded[1..5], &key_id); + let ts_bytes: [u8; 8] = decoded[5..13].try_into().unwrap(); + assert_eq!(u64::from_be_bytes(ts_bytes), 1_700_000_000); + assert_eq!(&decoded[13..], &sig); + } + + #[test] + fn extract_cosignatures_quorum_met() { + let pk = Ed25519PublicKey::from_bytes([0xaa; 32]); + let results = vec![ + WitnessResult::Success(WitnessCosignature { + witness_name: "w1".into(), + witness_public_key: pk, + signature: Ed25519Signature::from_bytes([0xbb; 64]), + timestamp: fixed_ts(), + }), + WitnessResult::Failed { + witness_name: "w2".into(), + reason: "timeout".into(), + }, + WitnessResult::Success(WitnessCosignature { + witness_name: "w3".into(), + witness_public_key: Ed25519PublicKey::from_bytes([0xcc; 32]), + signature: Ed25519Signature::from_bytes([0xdd; 64]), + timestamp: fixed_ts(), + }), + ]; + + let (cosigs, met) = extract_cosignatures(&results, 2); + assert!(met); + assert_eq!(cosigs.len(), 
2); + } + + #[test] + fn extract_cosignatures_quorum_not_met() { + let results = vec![ + WitnessResult::Failed { + witness_name: "w1".into(), + reason: "timeout".into(), + }, + WitnessResult::Failed { + witness_name: "w2".into(), + reason: "error".into(), + }, + ]; + + let (cosigs, met) = extract_cosignatures(&results, 2); + assert!(!met); + assert_eq!(cosigs.len(), 0); + } + + #[tokio::test] + async fn collect_cosignatures_with_mock_witnesses() { + use crate::checkpoint::Checkpoint; + use crate::types::LogOrigin; + + struct MockWitness { + name: String, + should_fail: bool, + } + + #[async_trait] + impl WitnessClient for MockWitness { + async fn submit_checkpoint( + &self, + _request: CosignRequest, + ) -> Result { + if self.should_fail { + return Err(TransparencyError::ConsistencyError("mock failure".into())); + } + Ok(CosignResponse { + cosignature: WitnessCosignature { + witness_name: self.name.clone(), + witness_public_key: Ed25519PublicKey::from_bytes([0xaa; 32]), + signature: Ed25519Signature::from_bytes([0xbb; 64]), + timestamp: fixed_ts(), + }, + }) + } + } + + let witnesses: Vec> = vec![ + Box::new(MockWitness { + name: "w1".into(), + should_fail: false, + }), + Box::new(MockWitness { + name: "w2".into(), + should_fail: true, + }), + Box::new(MockWitness { + name: "w3".into(), + should_fail: false, + }), + ]; + + let checkpoint = Checkpoint { + origin: LogOrigin::new("test.dev/log").unwrap(), + size: 10, + root: MerkleHash::from_bytes([0x01; 32]), + timestamp: fixed_ts(), + }; + + let request = CosignRequest { + old_size: 0, + consistency_proof: None, + signed_checkpoint: SignedCheckpoint { + checkpoint, + log_signature: Ed25519Signature::from_bytes([0xcc; 64]), + log_public_key: Ed25519PublicKey::from_bytes([0xdd; 32]), + witnesses: vec![], + }, + }; + + let results = + collect_witness_cosignatures(&witnesses, request, 2, DEFAULT_WITNESS_TIMEOUT).await; + + let (cosigs, met) = extract_cosignatures(&results, 2); + assert!(met); + assert!(cosigs.len() >= 
2); + } +} diff --git a/crates/auths-transparency/tests/cases/merkle.rs b/crates/auths-transparency/tests/cases/merkle.rs new file mode 100644 index 00000000..4392ee22 --- /dev/null +++ b/crates/auths-transparency/tests/cases/merkle.rs @@ -0,0 +1,113 @@ +use auths_transparency::merkle::{compute_root, hash_children, hash_leaf, verify_inclusion}; +use auths_transparency::types::MerkleHash; +use sha2::{Digest, Sha256}; + +#[test] +fn hash_leaf_rfc6962_test_vector() { + // SHA-256(0x00 || "") — empty leaf + let h = hash_leaf(b""); + let mut hasher = Sha256::new(); + hasher.update([0x00]); + let expected = hasher.finalize(); + assert_eq!(h.as_bytes(), expected.as_slice()); +} + +#[test] +fn hash_children_rfc6962_test_vector() { + let left = MerkleHash::from_bytes([0x00; 32]); + let right = MerkleHash::from_bytes([0x01; 32]); + let h = hash_children(&left, &right); + + let mut hasher = Sha256::new(); + hasher.update([0x01]); + hasher.update([0x00; 32]); + hasher.update([0x01; 32]); + let expected = hasher.finalize(); + assert_eq!(h.as_bytes(), expected.as_slice()); +} + +#[test] +fn merkle_tree_known_structure_four_leaves() { + let leaves: Vec = (0..4u8).map(|i| hash_leaf(&[i])).collect(); + let root = compute_root(&leaves); + + let left = hash_children(&leaves[0], &leaves[1]); + let right = hash_children(&leaves[2], &leaves[3]); + let expected = hash_children(&left, &right); + assert_eq!(root, expected); +} + +#[test] +fn merkle_tree_three_leaves_odd() { + let leaves: Vec = (0..3u8).map(|i| hash_leaf(&[i])).collect(); + let root = compute_root(&leaves); + + let left = hash_children(&leaves[0], &leaves[1]); + let expected = hash_children(&left, &leaves[2]); + assert_eq!(root, expected); +} + +#[test] +fn inclusion_proof_eight_leaves() { + let leaves: Vec = (0..8u8).map(|i| hash_leaf(&[i])).collect(); + let root = compute_root(&leaves); + + // Manually compute proof for leaf 5 + let h01 = hash_children(&leaves[0], &leaves[1]); + let h23 = hash_children(&leaves[2], 
&leaves[3]); + let h45 = hash_children(&leaves[4], &leaves[5]); + let h67 = hash_children(&leaves[6], &leaves[7]); + let h0123 = hash_children(&h01, &h23); + let _h4567 = hash_children(&h45, &h67); + + // Leaf 5 → sibling leaf[4], uncle h67, great-uncle h0123 + verify_inclusion(&leaves[5], 5, 8, &[leaves[4], h67, h0123], &root).unwrap(); +} + +#[test] +fn inclusion_proof_rejects_tampered_proof() { + let leaves: Vec = (0..4u8).map(|i| hash_leaf(&[i])).collect(); + let root = compute_root(&leaves); + + let cd = hash_children(&leaves[2], &leaves[3]); + let tampered = MerkleHash::from_bytes([0xff; 32]); + let result = verify_inclusion(&leaves[0], 0, 4, &[tampered, cd], &root); + assert!(result.is_err()); +} + +#[test] +fn inclusion_proof_rejects_wrong_proof_length() { + let a = hash_leaf(b"a"); + let b = hash_leaf(b"b"); + let root = hash_children(&a, &b); + + let result = verify_inclusion(&a, 0, 2, &[], &root); + assert!(result.is_err()); +} + +mod proptest_merkle { + use super::*; + use proptest::prelude::*; + + proptest! 
{ + #[test] + fn compute_root_is_deterministic(data in proptest::collection::vec(any::(), 1..32)) { + let leaves: Vec = data.iter().map(|b| hash_leaf(&[*b])).collect(); + let r1 = compute_root(&leaves); + let r2 = compute_root(&leaves); + prop_assert_eq!(r1, r2); + } + + #[test] + fn hash_leaf_is_different_from_hash_children(left_byte in any::(), right_byte in any::()) { + let combined = [left_byte, right_byte]; + let leaf = hash_leaf(&combined); + + let left = hash_leaf(&[left_byte]); + let right = hash_leaf(&[right_byte]); + let node = hash_children(&left, &right); + + prop_assert_ne!(leaf, node); + } + } +} diff --git a/crates/auths-transparency/tests/cases/mod.rs b/crates/auths-transparency/tests/cases/mod.rs new file mode 100644 index 00000000..95b4dc00 --- /dev/null +++ b/crates/auths-transparency/tests/cases/mod.rs @@ -0,0 +1,6 @@ +mod merkle; +mod note; +mod tile; +mod verify; +#[allow(clippy::unwrap_used)] +mod witness; diff --git a/crates/auths-transparency/tests/cases/note.rs b/crates/auths-transparency/tests/cases/note.rs new file mode 100644 index 00000000..7bd7d07d --- /dev/null +++ b/crates/auths-transparency/tests/cases/note.rs @@ -0,0 +1,85 @@ +use auths_transparency::note::{ + build_signature_line, compute_key_id, parse_signed_note, serialize_signed_note, +}; + +#[test] +fn key_id_matches_c2sp_spec_construction() { + // key_id = SHA-256("auths.dev/log\n" + 0x01 + pubkey)[0..4] + let pubkey = [0x42; 32]; + let key_id = compute_key_id("auths.dev/log", &pubkey); + + let mut data = Vec::new(); + data.extend_from_slice(b"auths.dev/log\n"); + data.push(0x01); + data.extend_from_slice(&pubkey); + + use sha2::{Digest, Sha256}; + let hash = Sha256::digest(&data); + assert_eq!(key_id, hash[..4]); +} + +#[test] +fn key_id_deterministic() { + let pubkey = [0xaa; 32]; + let id1 = compute_key_id("test/log", &pubkey); + let id2 = compute_key_id("test/log", &pubkey); + assert_eq!(id1, id2); +} + +#[test] +fn key_id_varies_with_key_name() { + let pubkey = [0xaa; 
32]; + let id1 = compute_key_id("auths.dev/log", &pubkey); + let id2 = compute_key_id("other.dev/log", &pubkey); + assert_ne!(id1, id2); +} + +#[test] +fn key_id_varies_with_pubkey() { + let id1 = compute_key_id("auths.dev/log", &[0xaa; 32]); + let id2 = compute_key_id("auths.dev/log", &[0xbb; 32]); + assert_ne!(id1, id2); +} + +#[test] +fn signed_note_full_roundtrip() { + let body = "auths.dev/log\n100\nabababababababababababababababababababababababababababababababababab\n"; + let key_id = compute_key_id("auths.dev/log", &[0x42; 32]); + let sig = [0xdd; 64]; + + let sig_line = build_signature_line("auths.dev/log", &key_id, &sig); + let note = serialize_signed_note(body, &[sig_line]); + + let (parsed_body, parsed_sigs) = parse_signed_note(&note).unwrap(); + assert_eq!(parsed_body, body); + assert_eq!(parsed_sigs.len(), 1); + assert_eq!(parsed_sigs[0].key_name, "auths.dev/log"); + assert_eq!(parsed_sigs[0].algorithm, 0x01); + assert_eq!(parsed_sigs[0].key_id, key_id); + assert_eq!(parsed_sigs[0].signature.len(), 64); + assert_eq!(&parsed_sigs[0].signature[..], &sig[..]); +} + +#[test] +fn signed_note_multiple_signatures() { + let body = "log\n1\n0000000000000000000000000000000000000000000000000000000000000000\n"; + let key_id1 = [0x01, 0x02, 0x03, 0x04]; + let key_id2 = [0x05, 0x06, 0x07, 0x08]; + let sig1 = [0xaa; 64]; + let sig2 = [0xbb; 64]; + + let line1 = build_signature_line("log-operator", &key_id1, &sig1); + let line2 = build_signature_line("witness-1", &key_id2, &sig2); + let note = serialize_signed_note(body, &[line1, line2]); + + let (_, parsed_sigs) = parse_signed_note(&note).unwrap(); + assert_eq!(parsed_sigs.len(), 2); + assert_eq!(parsed_sigs[0].key_name, "log-operator"); + assert_eq!(parsed_sigs[1].key_name, "witness-1"); +} + +#[test] +fn parse_note_rejects_missing_signatures() { + let note = "just a body\nno signature\n"; + assert!(parse_signed_note(note).is_err()); +} diff --git a/crates/auths-transparency/tests/cases/tile.rs 
b/crates/auths-transparency/tests/cases/tile.rs new file mode 100644 index 00000000..f1ffa88c --- /dev/null +++ b/crates/auths-transparency/tests/cases/tile.rs @@ -0,0 +1,70 @@ +use auths_transparency::tile::{TILE_WIDTH, leaf_tile, tile_count, tile_path}; + +#[test] +fn tile_path_zero_index() { + assert_eq!(tile_path(0, 0, 0).unwrap(), "tile/0/000"); +} + +#[test] +fn tile_path_small_indices() { + assert_eq!(tile_path(0, 1, 0).unwrap(), "tile/0/001"); + assert_eq!(tile_path(0, 42, 0).unwrap(), "tile/0/042"); + assert_eq!(tile_path(0, 999, 0).unwrap(), "tile/0/999"); +} + +#[test] +fn tile_path_multi_segment_c2sp_spec() { + // C2SP spec example: 1234067 → x001/x234/067 + assert_eq!(tile_path(0, 1234067, 0).unwrap(), "tile/0/x001/x234/067"); +} + +#[test] +fn tile_path_two_segment() { + assert_eq!(tile_path(0, 1000, 0).unwrap(), "tile/0/x001/000"); + assert_eq!(tile_path(0, 1234, 0).unwrap(), "tile/0/x001/234"); +} + +#[test] +fn tile_path_hash_level() { + assert_eq!(tile_path(1, 5, 0).unwrap(), "tile/1/005"); + assert_eq!(tile_path(2, 0, 0).unwrap(), "tile/2/000"); +} + +#[test] +fn tile_path_partial_tile() { + assert_eq!(tile_path(0, 0, 42).unwrap(), "tile/0/000.p/42"); + assert_eq!(tile_path(0, 5, 128).unwrap(), "tile/0/005.p/128"); +} + +#[test] +fn tile_path_full_width_no_suffix() { + // width == TILE_WIDTH (256) is treated same as 0 → full tile, no .p suffix + assert_eq!(tile_path(0, 0, 0).unwrap(), "tile/0/000"); +} + +#[test] +fn leaf_tile_within_first_tile() { + assert_eq!(leaf_tile(0), (0, 0)); + assert_eq!(leaf_tile(1), (0, 1)); + assert_eq!(leaf_tile(255), (0, 255)); +} + +#[test] +fn leaf_tile_boundary() { + assert_eq!(leaf_tile(256), (1, 0)); + assert_eq!(leaf_tile(257), (1, 1)); +} + +#[test] +fn tile_count_exact_multiples() { + assert_eq!(tile_count(0), (0, 0)); + assert_eq!(tile_count(TILE_WIDTH), (1, 0)); + assert_eq!(tile_count(TILE_WIDTH * 3), (3, 0)); +} + +#[test] +fn tile_count_with_remainder() { + assert_eq!(tile_count(1), (0, 1)); + 
assert_eq!(tile_count(300), (1, 44)); + assert_eq!(tile_count(TILE_WIDTH + 1), (1, 1)); +} diff --git a/crates/auths-transparency/tests/cases/verify.rs b/crates/auths-transparency/tests/cases/verify.rs new file mode 100644 index 00000000..44eac063 --- /dev/null +++ b/crates/auths-transparency/tests/cases/verify.rs @@ -0,0 +1,276 @@ +#![allow(clippy::unwrap_used, clippy::expect_used, clippy::disallowed_methods)] + +use auths_transparency::bundle::{ + CheckpointStatus, InclusionStatus, OfflineBundle, SignatureStatus, WitnessStatus, +}; +use auths_transparency::checkpoint::{Checkpoint, SignedCheckpoint, WitnessCosignature}; +use auths_transparency::entry::{Entry, EntryBody, EntryContent, EntryType}; +use auths_transparency::merkle::{compute_root, hash_leaf}; +use auths_transparency::proof::InclusionProof; +use auths_transparency::types::{LogOrigin, MerkleHash}; +use auths_transparency::{TrustRoot, TrustRootWitness, verify_bundle}; +use auths_verifier::{CanonicalDid, DeviceDID, Ed25519PublicKey, Ed25519Signature}; +use chrono::{DateTime, Utc}; +use ring::signature::{Ed25519KeyPair, KeyPair}; + +fn fixed_ts() -> DateTime { + chrono::DateTime::parse_from_rfc3339("2025-06-15T00:00:00Z") + .unwrap() + .with_timezone(&Utc) +} + +fn fixed_now() -> DateTime { + chrono::DateTime::parse_from_rfc3339("2025-07-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc) +} + +/// End-to-end: generate keys, sign entry, build tree, sign checkpoint, verify bundle. 
+#[test] +fn verify_bundle_end_to_end_single_entry() { + let log_kp = Ed25519KeyPair::from_seed_unchecked(&[1u8; 32]).unwrap(); + let log_pk: [u8; 32] = log_kp.public_key().as_ref().try_into().unwrap(); + + let actor_kp = Ed25519KeyPair::from_seed_unchecked(&[2u8; 32]).unwrap(); + let actor_pk: [u8; 32] = actor_kp.public_key().as_ref().try_into().unwrap(); + let actor_did = auths_crypto::ed25519_pubkey_to_did_key(&actor_pk); + + // Build entry + let content = EntryContent { + entry_type: EntryType::DeviceBind, + body: EntryBody::DeviceBind { + device_did: DeviceDID::new_unchecked(&actor_did), + public_key: Ed25519PublicKey::from_bytes(actor_pk), + }, + actor_did: CanonicalDid::new_unchecked(&actor_did), + }; + let canonical = content.canonicalize().unwrap(); + let sig_bytes = actor_kp.sign(&canonical); + let actor_sig = Ed25519Signature::try_from_slice(sig_bytes.as_ref()).unwrap(); + + let entry = Entry { + sequence: 0, + timestamp: fixed_ts(), + content, + actor_sig, + }; + + // Build Merkle tree (single leaf) + let leaf_data = entry.leaf_data().unwrap(); + let leaf_hash = hash_leaf(&leaf_data); + let root = compute_root(&[leaf_hash]); + + // Sign checkpoint + let checkpoint = Checkpoint { + origin: LogOrigin::new("test.dev/log").unwrap(), + size: 1, + root, + timestamp: fixed_ts(), + }; + let note_body = checkpoint.to_note_body(); + let log_sig = + Ed25519Signature::try_from_slice(log_kp.sign(note_body.as_bytes()).as_ref()).unwrap(); + + let bundle = OfflineBundle { + entry, + inclusion_proof: InclusionProof { + index: 0, + size: 1, + root, + hashes: vec![], + }, + signed_checkpoint: SignedCheckpoint { + checkpoint, + log_signature: log_sig, + log_public_key: Ed25519PublicKey::from_bytes(log_pk), + witnesses: vec![], + }, + delegation_chain: vec![], + }; + + let trust_root = TrustRoot { + log_public_key: Ed25519PublicKey::from_bytes(log_pk), + log_origin: LogOrigin::new("test.dev/log").unwrap(), + witnesses: vec![], + }; + + let report = verify_bundle(&bundle, 
&trust_root, fixed_now()); + assert_eq!(report.signature, SignatureStatus::Verified); + assert_eq!(report.inclusion, InclusionStatus::Verified); + assert_eq!(report.checkpoint, CheckpointStatus::Verified); + assert!(report.is_valid()); +} + +/// End-to-end with multiple entries and inclusion proof. +#[test] +fn verify_bundle_multi_leaf_tree() { + let log_kp = Ed25519KeyPair::from_seed_unchecked(&[1u8; 32]).unwrap(); + let log_pk: [u8; 32] = log_kp.public_key().as_ref().try_into().unwrap(); + + let actor_kp = Ed25519KeyPair::from_seed_unchecked(&[2u8; 32]).unwrap(); + let actor_pk: [u8; 32] = actor_kp.public_key().as_ref().try_into().unwrap(); + let actor_did = auths_crypto::ed25519_pubkey_to_did_key(&actor_pk); + + // Build 4 entries for a proper inclusion proof + let mut entries = Vec::new(); + for seq in 0..4u64 { + let content = EntryContent { + entry_type: EntryType::DeviceBind, + body: EntryBody::DeviceBind { + device_did: DeviceDID::new_unchecked(&actor_did), + public_key: Ed25519PublicKey::from_bytes(actor_pk), + }, + actor_did: CanonicalDid::new_unchecked(&actor_did), + }; + let canonical = content.canonicalize().unwrap(); + let sig_bytes = actor_kp.sign(&canonical); + let actor_sig = Ed25519Signature::try_from_slice(sig_bytes.as_ref()).unwrap(); + + entries.push(Entry { + sequence: seq, + timestamp: fixed_ts(), + content, + actor_sig, + }); + } + + let leaf_hashes: Vec = entries + .iter() + .map(|e| hash_leaf(&e.leaf_data().unwrap())) + .collect(); + let root = compute_root(&leaf_hashes); + + // Build inclusion proof for entry 2 (index 2 in 4-leaf tree) + // Siblings: leaf[3], then hash(leaf[0], leaf[1]) + let h01 = auths_transparency::merkle::hash_children(&leaf_hashes[0], &leaf_hashes[1]); + let proof_hashes = vec![leaf_hashes[3], h01]; + + let checkpoint = Checkpoint { + origin: LogOrigin::new("test.dev/log").unwrap(), + size: 4, + root, + timestamp: fixed_ts(), + }; + let note_body = checkpoint.to_note_body(); + let log_sig = + 
Ed25519Signature::try_from_slice(log_kp.sign(note_body.as_bytes()).as_ref()).unwrap(); + + let bundle = OfflineBundle { + entry: entries[2].clone(), + inclusion_proof: InclusionProof { + index: 2, + size: 4, + root, + hashes: proof_hashes, + }, + signed_checkpoint: SignedCheckpoint { + checkpoint, + log_signature: log_sig, + log_public_key: Ed25519PublicKey::from_bytes(log_pk), + witnesses: vec![], + }, + delegation_chain: vec![], + }; + + let trust_root = TrustRoot { + log_public_key: Ed25519PublicKey::from_bytes(log_pk), + log_origin: LogOrigin::new("test.dev/log").unwrap(), + witnesses: vec![], + }; + + let report = verify_bundle(&bundle, &trust_root, fixed_now()); + assert_eq!(report.signature, SignatureStatus::Verified); + assert_eq!(report.inclusion, InclusionStatus::Verified); + assert_eq!(report.checkpoint, CheckpointStatus::Verified); + assert!(report.is_valid()); +} + +/// Witness quorum verification end-to-end. +#[test] +fn verify_bundle_with_witnesses() { + let log_kp = Ed25519KeyPair::from_seed_unchecked(&[1u8; 32]).unwrap(); + let log_pk: [u8; 32] = log_kp.public_key().as_ref().try_into().unwrap(); + + let actor_kp = Ed25519KeyPair::from_seed_unchecked(&[2u8; 32]).unwrap(); + let actor_pk: [u8; 32] = actor_kp.public_key().as_ref().try_into().unwrap(); + let actor_did = auths_crypto::ed25519_pubkey_to_did_key(&actor_pk); + + let w1_kp = Ed25519KeyPair::from_seed_unchecked(&[10u8; 32]).unwrap(); + let w1_pk: [u8; 32] = w1_kp.public_key().as_ref().try_into().unwrap(); + + let content = EntryContent { + entry_type: EntryType::DeviceBind, + body: EntryBody::DeviceBind { + device_did: DeviceDID::new_unchecked(&actor_did), + public_key: Ed25519PublicKey::from_bytes(actor_pk), + }, + actor_did: CanonicalDid::new_unchecked(&actor_did), + }; + let canonical = content.canonicalize().unwrap(); + let sig_bytes = actor_kp.sign(&canonical); + let actor_sig = Ed25519Signature::try_from_slice(sig_bytes.as_ref()).unwrap(); + + let entry = Entry { + sequence: 0, + 
timestamp: fixed_ts(), + content, + actor_sig, + }; + + let leaf_hash = hash_leaf(&entry.leaf_data().unwrap()); + let root = compute_root(&[leaf_hash]); + + let checkpoint = Checkpoint { + origin: LogOrigin::new("test.dev/log").unwrap(), + size: 1, + root, + timestamp: fixed_ts(), + }; + let note_body = checkpoint.to_note_body(); + let log_sig = + Ed25519Signature::try_from_slice(log_kp.sign(note_body.as_bytes()).as_ref()).unwrap(); + let w1_sig = + Ed25519Signature::try_from_slice(w1_kp.sign(note_body.as_bytes()).as_ref()).unwrap(); + + let bundle = OfflineBundle { + entry, + inclusion_proof: InclusionProof { + index: 0, + size: 1, + root, + hashes: vec![], + }, + signed_checkpoint: SignedCheckpoint { + checkpoint, + log_signature: log_sig, + log_public_key: Ed25519PublicKey::from_bytes(log_pk), + witnesses: vec![WitnessCosignature { + witness_name: "w1".into(), + witness_public_key: Ed25519PublicKey::from_bytes(w1_pk), + signature: w1_sig, + timestamp: fixed_ts(), + }], + }, + delegation_chain: vec![], + }; + + let trust_root = TrustRoot { + log_public_key: Ed25519PublicKey::from_bytes(log_pk), + log_origin: LogOrigin::new("test.dev/log").unwrap(), + witnesses: vec![TrustRootWitness { + witness_did: DeviceDID::new_unchecked(auths_crypto::ed25519_pubkey_to_did_key(&w1_pk)), + name: "w1".into(), + public_key: Ed25519PublicKey::from_bytes(w1_pk), + }], + }; + + let report = verify_bundle(&bundle, &trust_root, fixed_now()); + assert!(matches!( + report.witnesses, + WitnessStatus::Quorum { + verified: 1, + required: 1 + } + )); + assert!(report.is_valid()); +} diff --git a/crates/auths-transparency/tests/cases/witness.rs b/crates/auths-transparency/tests/cases/witness.rs new file mode 100644 index 00000000..f17f2977 --- /dev/null +++ b/crates/auths-transparency/tests/cases/witness.rs @@ -0,0 +1,239 @@ +use async_trait::async_trait; +use auths_transparency::TransparencyError; +use auths_transparency::checkpoint::{Checkpoint, SignedCheckpoint, WitnessCosignature}; +use 
auths_transparency::types::{LogOrigin, MerkleHash}; +use auths_transparency::witness::{ + ALG_COSIGNATURE_V1, CosignRequest, CosignResponse, DEFAULT_WITNESS_TIMEOUT, WitnessClient, + WitnessResult, build_cosignature_line, collect_witness_cosignatures, compute_witness_key_id, + cosignature_signed_message, extract_cosignatures, parse_cosignature, serialize_cosignature, +}; +use auths_verifier::{Ed25519PublicKey, Ed25519Signature}; +use chrono::{DateTime, Utc}; +use std::time::Duration; + +fn fixed_ts() -> DateTime { + DateTime::from_timestamp(1_700_000_000, 0).unwrap() +} + +fn make_test_checkpoint() -> SignedCheckpoint { + SignedCheckpoint { + checkpoint: Checkpoint { + origin: LogOrigin::new("test.dev/log").unwrap(), + size: 10, + root: MerkleHash::from_bytes([0x01; 32]), + timestamp: fixed_ts(), + }, + log_signature: Ed25519Signature::from_bytes([0xcc; 64]), + log_public_key: Ed25519PublicKey::from_bytes([0xdd; 32]), + witnesses: vec![], + } +} + +struct MockWitness { + name: String, + should_fail: bool, + delay: Option, +} + +#[async_trait] +impl WitnessClient for MockWitness { + async fn submit_checkpoint( + &self, + _request: CosignRequest, + ) -> Result { + if let Some(d) = self.delay { + tokio::time::sleep(d).await; + } + if self.should_fail { + return Err(TransparencyError::ConsistencyError("mock failure".into())); + } + Ok(CosignResponse { + cosignature: WitnessCosignature { + witness_name: self.name.clone(), + witness_public_key: Ed25519PublicKey::from_bytes([0xaa; 32]), + signature: Ed25519Signature::from_bytes([0xbb; 64]), + timestamp: fixed_ts(), + }, + }) + } +} + +#[test] +fn cosignature_serialization_roundtrip() { + let cosig = WitnessCosignature { + witness_name: "witness-alpha".into(), + witness_public_key: Ed25519PublicKey::from_bytes([0x11; 32]), + signature: Ed25519Signature::from_bytes([0x22; 64]), + timestamp: fixed_ts(), + }; + + let raw = serialize_cosignature(&cosig); + assert_eq!(raw.len(), 72); + + let parsed = parse_cosignature( + 
"witness-alpha", + Ed25519PublicKey::from_bytes([0x11; 32]), + &raw, + ) + .unwrap(); + assert_eq!(parsed.witness_name, "witness-alpha"); + assert_eq!(parsed.timestamp, fixed_ts()); + assert_eq!(parsed.signature.as_bytes(), cosig.signature.as_bytes()); + assert_eq!( + parsed.witness_public_key.as_bytes(), + cosig.witness_public_key.as_bytes() + ); +} + +#[test] +fn witness_key_id_uses_algorithm_byte_0x04() { + let pubkey = [0xab; 32]; + let witness_id = compute_witness_key_id("w1", &pubkey); + let note_id = auths_transparency::note::compute_key_id("w1", &pubkey); + assert_ne!( + witness_id, note_id, + "witness key ID (alg 0x04) must differ from note key ID (alg 0x01)" + ); +} + +#[test] +fn cosignature_signed_message_follows_spec() { + let body = "auths.dev/log\n100\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n"; + let ts = 1_700_000_000u64; + let msg = cosignature_signed_message(body, ts); + let msg_str = String::from_utf8(msg).unwrap(); + + let expected_prefix = "cosignature/v1\ntime 1700000000\n"; + assert!(msg_str.starts_with(expected_prefix)); + assert!(msg_str.ends_with(body)); + assert_eq!(msg_str.len(), expected_prefix.len() + body.len()); +} + +#[test] +fn cosignature_line_encodes_timestamp_in_payload() { + let key_id = [0x01, 0x02, 0x03, 0x04]; + let sig = [0xff; 64]; + let ts = 1_700_000_000u64; + let line = build_cosignature_line("w1", &key_id, ts, &sig); + + use base64::{Engine, engine::general_purpose::STANDARD}; + let parts: Vec<&str> = line.trim().splitn(3, ' ').collect(); + let decoded = STANDARD.decode(parts[2]).unwrap(); + + assert_eq!(decoded[0], ALG_COSIGNATURE_V1); + assert_eq!(&decoded[1..5], &key_id); + // 8-byte timestamp + let ts_bytes: [u8; 8] = decoded[5..13].try_into().unwrap(); + assert_eq!(u64::from_be_bytes(ts_bytes), ts); + // 64-byte signature + assert_eq!(&decoded[13..], &sig); + // Total: 1 + 4 + 8 + 64 = 77 bytes + assert_eq!(decoded.len(), 77); +} + +#[tokio::test] +async fn collect_cosignatures_quorum_met() { + let 
witnesses: Vec> = vec![ + Box::new(MockWitness { + name: "w1".into(), + should_fail: false, + delay: None, + }), + Box::new(MockWitness { + name: "w2".into(), + should_fail: false, + delay: None, + }), + Box::new(MockWitness { + name: "w3".into(), + should_fail: true, + delay: None, + }), + ]; + + let request = CosignRequest { + old_size: 0, + consistency_proof: None, + signed_checkpoint: make_test_checkpoint(), + }; + + let results = + collect_witness_cosignatures(&witnesses, request, 2, DEFAULT_WITNESS_TIMEOUT).await; + let (cosigs, met) = extract_cosignatures(&results, 2); + assert!(met); + assert_eq!(cosigs.len(), 2); +} + +#[tokio::test] +async fn collect_cosignatures_quorum_not_met() { + let witnesses: Vec> = vec![ + Box::new(MockWitness { + name: "w1".into(), + should_fail: true, + delay: None, + }), + Box::new(MockWitness { + name: "w2".into(), + should_fail: true, + delay: None, + }), + Box::new(MockWitness { + name: "w3".into(), + should_fail: false, + delay: None, + }), + ]; + + let request = CosignRequest { + old_size: 0, + consistency_proof: None, + signed_checkpoint: make_test_checkpoint(), + }; + + let results = + collect_witness_cosignatures(&witnesses, request, 2, DEFAULT_WITNESS_TIMEOUT).await; + let (cosigs, met) = extract_cosignatures(&results, 2); + assert!(!met); + assert_eq!(cosigs.len(), 1); +} + +#[tokio::test] +async fn collect_cosignatures_handles_timeout() { + let witnesses: Vec> = vec![ + Box::new(MockWitness { + name: "w1".into(), + should_fail: false, + delay: None, + }), + Box::new(MockWitness { + name: "slow".into(), + should_fail: false, + delay: Some(Duration::from_secs(10)), + }), + ]; + + let request = CosignRequest { + old_size: 0, + consistency_proof: None, + signed_checkpoint: make_test_checkpoint(), + }; + + // quorum=2 forces waiting for both; slow witness times out + let results = + collect_witness_cosignatures(&witnesses, request, 2, Duration::from_millis(100)).await; + + let successes = results + .iter() + .filter(|r| 
matches!(r, WitnessResult::Success(_))) + .count(); + let failures = results + .iter() + .filter(|r| matches!(r, WitnessResult::Failed { .. })) + .count(); + + assert_eq!(successes, 1); + assert_eq!(failures, 1); + + let (_, met) = extract_cosignatures(&results, 2); + assert!(!met); +} diff --git a/crates/auths-transparency/tests/integration.rs b/crates/auths-transparency/tests/integration.rs new file mode 100644 index 00000000..dc861bfb --- /dev/null +++ b/crates/auths-transparency/tests/integration.rs @@ -0,0 +1,2 @@ +#[cfg(not(target_arch = "wasm32"))] +mod cases; diff --git a/crates/xtask/src/check_clippy_sync.rs b/crates/xtask/src/check_clippy_sync.rs index 75fbc3ef..68d13da3 100644 --- a/crates/xtask/src/check_clippy_sync.rs +++ b/crates/xtask/src/check_clippy_sync.rs @@ -158,7 +158,7 @@ mod tests { #[test] fn test_is_deferred() { - assert!(is_deferred("auths_verifier::IdentityDID::new_unchecked")); + assert!(!is_deferred("auths_verifier::IdentityDID::new_unchecked")); assert!(!is_deferred("chrono::offset::Utc::now")); assert!(!is_deferred("std::fs::read")); } diff --git a/docs/contributing/architecture-decisions.md b/docs/contributing/architecture-decisions.md index 5467c089..995ec2c0 100644 --- a/docs/contributing/architecture-decisions.md +++ b/docs/contributing/architecture-decisions.md @@ -11,6 +11,7 @@ This page summarizes the key architectural decisions in Auths and links to the f | [ADR-003](#adr-003-tiered-cache-and-write-contention) | Tiered cache and write-contention mitigation | Accepted | 2026-02-27 | | [ADR-004](#adr-004-async-executor-protection) | Async executor protection | Accepted | 2026-02-27 | | [ADR-005](#adr-005-ed25519-only-for-hsm) | Ed25519-only for HSM | Accepted | 2026-03-05 | +| [ADR-006](#adr-006-c2sp-tlog-tiles-transparency-log) | C2SP tlog-tiles transparency log | Accepted | 2026-03-13 | ## ADR format @@ -155,6 +156,94 @@ KERI events are stored as Git commits (ADR-002), giving the KEL content-addresse - Cloud HSM adapters (AWS 
CloudHSM, Azure, GCP) also gated behind multi-curve. - Monitor Apple exposing Ed25519 Secure Enclave APIs publicly (Platform SSO already uses it internally). +## ADR-006: C2SP tlog-tiles transparency log + +**Status:** Accepted + +**Context:** + +KERI Key Event Logs stored in Git (ADR-002) provide per-identity tamper evidence, but they don't answer a global question: "has the registry ever presented different views of the same identity to different parties?" This is the split-view attack — the log operator shows one version of history to the verifier and a different version to the auditor. [Certificate Transparency](https://certificate.transparency.dev/) solved this for TLS with append-only Merkle trees and independent witnesses. Auths needed the same guarantee for identity operations. + +Three options were evaluated: + +1. **Rekor (Sigstore's log).** Production-proven, but introduces a Sigstore infrastructure dependency and an OIDC trust requirement — contradicting the self-sovereign design. +2. **Custom append-only log.** Full control, but requires designing a tile format, proof serialization, witness protocol, and client caching from scratch. +3. **C2SP tlog-tiles specification.** An open spec on [Tiled Transparency Logs](https://github.com/C2SP/C2SP/blob/main/tlog-tiles.md) from the C2SP (Cryptographic Specification Project) that defines Merkle tree tiling, checkpoint signed notes, and witness cosignature formats. Already used by Go's `sumdb` and Sigsum. + +**Decision:** Implement C2SP tlog-tiles as the `auths-transparency` crate. Use the spec's tile layout, signed note format, and witness cosignature protocol. Build the Merkle tree using [RFC 6962](https://www.rfc-editor.org/rfc/rfc6962.html) hash functions ([SHA-256 with domain-separated leaf/node prefixes](https://github.com/C2SP/C2SP/blob/main/tlog-tiles.md#merkle-tree)). 
+ +**Architecture:** + +The crate is split into two feature tiers: + +- **No features (WASM-safe):** Core types (`MerkleHash`, `LogOrigin`), Merkle math (`hash_leaf`, `hash_children`, `compute_root`), proof verification (`verify_inclusion`, `verify_consistency`), signed note parsing, tile path encoding. This compiles to WASM for in-browser verification. +- **`native` feature:** `TileStore` trait, `FsTileStore` implementation, `WitnessClient` trait, cosignature collection, offline bundle verification. This runs in the registry server and CLI. + +Key types: + +``` +Checkpoint { origin, size, root, timestamp } + → SignedCheckpoint { checkpoint, log_signature, log_public_key, witnesses[] } + → WitnessCosignature { witness_key_id, witness_name, signature, timestamp } + +Entry { sequence, entry_type, content, actor_did, timestamp, signature } + → EntryType: Register | Rotate | DeviceBind | DeviceRevoke | Attest | NamespaceClaim | OrgAddMember | ... + → EntryContent: typed body specific to each EntryType + +InclusionProof { leaf_index, tree_size, hashes[] } +ConsistencyProof { old_size, new_size, hashes[] } +``` + +The Merkle tree uses tiles — fixed-size blocks of 256 hashes (2^8, per C2SP `TILE_HEIGHT=8`). Tile paths follow C2SP encoding: `tile/{level}/{index}` with 3-digit zero-padded segments. Full tiles are immutable and cached aggressively; partial tiles have short TTLs. + +**Witness protocol:** + +Witnesses are independent servers that verify checkpoint consistency before cosigning. The protocol: + +1. Sequencer produces a new `SignedCheckpoint` after appending entries. +2. Background task fans out `CosignRequest` to configured witness endpoints. +3. Each witness verifies the consistency proof from its last-seen size to the new size. +4. Witnesses return `CosignResponse` with a timestamped Ed25519 cosignature (algorithm byte `0x04`). +5. When quorum is met, the witnessed checkpoint is cached for serving via `GET /v1/log/checkpoint`. 
+ +The witness quorum, endpoint list, and per-witness timeout are configurable. Witnesses that fail or timeout are skipped — the system degrades gracefully to log-signed-only checkpoints. + +**Entry signing:** + +Every mutation to registry state is recorded as a log entry. The sequencer: + +1. Receives the entry content and actor signature. +2. Validates the entry (signature, authorization, deduplication). +3. Computes the leaf hash: `SHA-256(0x00 || canonical_json(entry))`. +4. Appends the leaf to the Merkle tree, updates tiles. +5. Signs a new checkpoint over the updated root. +6. Materializes the entry to Postgres for query serving. + +Deduplication uses an in-memory LRU cache keyed by `(actor_did, content_hash)` with a 60-second TTL. + +**Consequences:** + +Positive: +- Split-view attacks are detectable by any party that monitors checkpoints from multiple vantage points. +- The WASM-safe core means browsers can verify inclusion proofs without trusting the server. +- C2SP compatibility means existing tlog tooling (Go's `tlog` package, Sigsum monitors) can audit the Auths log with minimal adaptation. +- Tile-based storage enables efficient CDN caching — full tiles are immutable, so `Cache-Control: immutable` applies. + +Negative: +- SHA-256 for Merkle hashing (required by RFC 6962 / C2SP) differs from Blake3 used in KERI SAIDs. Two hash functions in the system, requiring `blake3` and `sha2` crates. That's ~50KB of compiled code, so treating it as negligible bloat. +- The sequencer is a single-writer bottleneck. Horizontal scaling requires a distributed sequencer (future work). +- Witness liveness affects checkpoint freshness but not correctness — a distinction that requires documentation for operators. + +**Trade-offs vs. 
alternatives:** + +| | C2SP tlog-tiles (chosen) | Rekor | Custom log | +|---|---|---|---| +| Spec compliance | C2SP, RFC 6962 | Sigstore-specific | None | +| WASM verification | Yes | No (gRPC client) | Depends on impl | +| Infrastructure dependency | None (self-hosted) | Sigstore infra | None | +| Witness protocol | C2SP cosignature | N/A (centralized) | Custom | +| Ecosystem tooling | Go sumdb, Sigsum | Sigstore clients | None | + --- ## STRIDE threat model diff --git a/docs/contributing/oo_cloud_boundaries.md b/docs/contributing/oo_cloud_boundaries.md new file mode 100644 index 00000000..2a1dd392 --- /dev/null +++ b/docs/contributing/oo_cloud_boundaries.md @@ -0,0 +1,144 @@ +# Open-Source / Cloud Boundaries + +How the open-source `auths` repo and the proprietary `auths-cloud` repo relate, where code belongs, and how to keep the boundary clean. + +## Two repos, one product + +``` +auths (open-source, MIT) auths-cloud (proprietary) +├── auths-crypto ├── auths-idp (IdP verification) +├── auths-verifier ├── auths-cloud-sdk (cloud business logic) +├── auths-core ├── auths-cloud-cli (cloud CLI presentation) +├── auths-id ├── auths-registry-server +├── auths-sdk ├── auths-auth-server +├── auths-storage ├── auths-oidc-bridge +├── auths-cli ├── auths-scim-server +└── ... └── auths-cache +``` + +`auths` is the self-contained identity system: key management, signing, verification, KERI, Git storage. A developer can use it without ever touching `auths-cloud`. + +`auths-cloud` adds enterprise features: IdP binding (OIDC/SAML), hosted registry, OIDC token bridge, SCIM provisioning. It depends on published `auths` crates from crates.io but never the reverse. + +## Dependency direction + +``` +auths-cloud crates + │ + │ depends on (via crates.io) + ▼ +auths crates +``` + +**The open-source repo must never depend on or reference the cloud repo.** This is the single most important rule. 
If a cloud feature needs something in core, the core change must be independently useful and merged first. + +## Layer model + +Both repos follow the same three-layer pattern: + +| Layer | auths | auths-cloud | Responsibility | +|-------|-------|-------------|----------------| +| Domain | `auths-id`, `auths-core`, `auths-verifier` | `auths-idp` | Types, traits, pure logic. No I/O. | +| SDK | `auths-sdk` | `auths-cloud-sdk` | Orchestration. Calls domain functions, injects time, wires ports. No direct I/O -- uses trait abstractions. | +| Presentation | `auths-cli` | `auths-cloud-cli` | Argument parsing, user interaction, display. Owns concrete I/O implementations. | + +Dependencies flow strictly downward: **Presentation -> SDK -> Domain**. Never upward. + +## Where code belongs + +**Put it in `auths` (open-source) when:** +- It's useful without an enterprise IdP (verification, signing, KERI, DID resolution) +- It's a type that must travel across the boundary (e.g., `SealType::IdpBinding`, `IdpBindingSummary`) +- It's read-side infrastructure (the verifier surfaces IdP bindings anchored in the KEL, but doesn't validate the IdP itself) + +**Put it in `auths-cloud` (proprietary) when:** +- It requires enterprise IdP credentials or protocols (OIDC client secrets, SAML metadata) +- It's a cloud service (registry, auth server, OIDC bridge) +- It's write-side IdP logic (verifying tokens, creating bindings) + +**Rule of thumb:** If removing the cloud repo would break the feature, it belongs in cloud. If the feature still works (just without enterprise IdP data), the core parts belong in open-source. + +## Cross-boundary types + +Some types must exist in both repos. 
These live in the open-source crate that needs them, with minimal dependencies: + +| Type | Lives in | Why | +|------|----------|-----| +| `SealType::IdpBinding` | `auths-id` | KEL must recognize the seal variant during replay | +| `IdpBindingSummary` | `auths-verifier` | Verification reports include binding data (WASM/FFI compatible) | +| `IDP_BINDING_SEAL_TYPE` | `auths-verifier` | Constant for seal type matching | +| `IdpBindingAttestation` | `auths-idp` (cloud) | Full attestation with enterprise fields -- cloud only | +| `IdpVerifier` trait | `auths-idp` (cloud) | Provider-specific verification -- cloud only | + +The pattern: **minimal summary types in open-source, full implementation types in cloud.** + +## CLI delegation pattern + +The core `auths-cli` provides a stub for cloud commands that delegates to the `auths-cloud` binary: + +``` +$ auths id bind-idp --provider okta ... + + auths-cli (open-source) + │ + │ checks: is `auths-cloud` on $PATH? + │ + ├─ YES -> spawns `auths-cloud id bind-idp --provider okta ...` + │ (forwards all arguments, inherits stdio) + │ + └─ NO -> prints: "IdP binding requires Auths Cloud." + "Learn more: https://auths.dev/cloud" +``` + +This keeps enterprise dependencies out of the open-source binary while making the feature discoverable. The core CLI never imports cloud crates -- it only checks for an external binary. + +## SDK port pattern + +Both SDKs use port traits to abstract I/O, keeping business logic testable: + +```rust +// auths-cloud-sdk/src/ports.rs +#[async_trait] +pub trait BrowserOpener: Send + Sync { + fn open_url(&self, url: &str) -> Result<(), BindIdpError>; +} + +#[async_trait] +pub trait CallbackServer: Send + Sync { + async fn start_and_wait_for_callback(&self, port: Option) + -> Result; +} +``` + +Concrete implementations (`SystemBrowserOpener`, `LocalCallbackServer`) live in the CLI crate, not the SDK. Tests use mock implementations. 
+ +## Versioning and publishing + +The cloud repo depends on published versions of core crates: + +```toml +# auths-cloud/Cargo.toml +auths-id = "0.0.1-rc.3" # from crates.io +auths-verifier = "0.0.1-rc.3" +``` + +When a core change is needed for a cloud feature: +1. Make the core change in `auths`, merge to main +2. Publish the affected crate(s) to crates.io +3. Bump the version in `auths-cloud/Cargo.toml` +4. Build the cloud feature on top + +Never use `path = "../auths/crates/..."` dependencies in cloud. The published version is the contract. + +## Checklist for cross-boundary work + +When building a feature that spans both repos: + +- [ ] Core types/traits merged and published first +- [ ] Cloud crate depends on published core version, not path +- [ ] No `auths-cloud` imports in any `auths` crate +- [ ] Domain logic has no direct I/O (file, network, browser) +- [ ] SDK orchestrates via injected ports, receives `now: DateTime` +- [ ] CLI owns concrete I/O and calls SDK functions +- [ ] Stub command in core CLI delegates to cloud binary (no enterprise deps in open-source) +- [ ] Summary/read-side types in open-source, full implementation types in cloud diff --git a/packages/auths-python/Cargo.lock b/packages/auths-python/Cargo.lock index 1466f22d..695b9514 100644 --- a/packages/auths-python/Cargo.lock +++ b/packages/auths-python/Cargo.lock @@ -315,6 +315,7 @@ dependencies = [ "auths-id", "auths-policy", "auths-telemetry", + "auths-transparency", "auths-verifier", "base64", "chrono", @@ -369,6 +370,26 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "auths-transparency" +version = "0.0.1-rc.8" +dependencies = [ + "async-trait", + "auths-crypto", + "auths-verifier", + "base64", + "chrono", + "futures", + "hex", + "json-canon", + "ring", + "serde", + "serde_json", + "sha2", + "thiserror 2.0.18", + "tokio", +] + [[package]] name = "auths-utils" version = "0.0.1-rc.8"