diff --git a/.agent/rules/solidity_zksync.md b/.agent/rules/solidity_zksync.md new file mode 100644 index 0000000..642f108 --- /dev/null +++ b/.agent/rules/solidity_zksync.md @@ -0,0 +1,33 @@ +# Solidity & ZkSync Development Standards + +## Toolchain & Environment +- **Primary Tool**: `forge` (ZkSync fork). Use for compilation, testing, and generic scripting. +- **Secondary Tool**: `hardhat`. Use only when `forge` encounters compatibility issues (e.g., complex deployments, specific plugin needs). +- **Network Target**: ZkSync Era (Layer 2). +- **Solidity Version**: `^0.8.20` (or `0.8.24` if strictly supported by the zk-compiler). + +## Modern Solidity Best Practices +- **Safety First**: + - **Checks-Effects-Interactions (CEI)** pattern must be strictly followed. + - When a contract requires an owner (e.g., admin-configurable parameters), prefer `Ownable2Step` over `Ownable`. Do **not** add ownership to contracts that don't need it — many contracts are fully permissionless by design. + - Prefer `ReentrancyGuard` for external calls where appropriate. +- **Gas & Efficiency**: + - Use **Custom Errors** (`error MyError();`) instead of `require` strings. + - Use `mapping` over arrays for membership checks where possible. + - Minimize on-chain storage; use events for off-chain indexing. + +## Testing Standards +- **Framework**: Foundry (Forge). +- **Methodology**: + - **Unit Tests**: Comprehensive coverage for all functions. + - **Fuzz Testing**: Required for arithmetic and purely functional logic. + - **Invariant Testing**: Define invariants for stateful system properties. +- **Naming Convention**: + - `test_Description` + - `testFuzz_Description` + - `test_RevertIf_Condition` + +## ZkSync Specifics +- **System Contracts**: Be aware of ZkSync system contracts (e.g., `ContractDeployer`, `L2EthToken`) when interacting with low-level features. +- **Gas Model**: Account for ZkSync's different gas metering if performing low-level optimization. 
+- **Compiler Differences**: Be mindful of differences between `solc` and `zksolc` (e.g., `create2` address derivation). diff --git a/.cspell.json b/.cspell.json index c990957..04ce2bc 100644 --- a/.cspell.json +++ b/.cspell.json @@ -12,7 +12,8 @@ "deployments-zk", "cache_hardhat-zk", "zkout", - "clk-gateway/src/validators.test.ts" + "clk-gateway/src/validators.test.ts", + "src/swarms/doc/iso3166-2" ], "ignoreWords": [ "NODL", @@ -60,6 +61,21 @@ "Frontends", "testuser", "testhandle", - "douglasacost" + "douglasacost", + "IBEACON", + "AABBCCDD", + "SSTORE", + "Permissionless", + "Reentrancy", + "SFID", + "EXTCODECOPY", + "solady", + "SLOAD", + "Bitmask", + "mstore", + "MBOND", + "USCA", + "USNY", + "usca" ] } diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000..37ae67c --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,50 @@ +# Solidity & ZkSync Development Standards + +## Toolchain & Environment + +- **Primary Tool**: `forge` (ZkSync fork). Use for compilation, testing, and generic scripting. +- **Secondary Tool**: `hardhat`. Use only when `forge` encounters compatibility issues (e.g., complex deployments, specific plugin needs). +- **Network Target**: ZkSync Era (Layer 2). +- **Solidity Version**: `^0.8.20` (or `0.8.24` if strictly supported by the zk-compiler). + +## Modern Solidity Best Practices + +- **Safety First**: + - **Checks-Effects-Interactions (CEI)** pattern must be strictly followed. + - Use `Ownable2Step` over `Ownable` for privileged access. + - Prefer `ReentrancyGuard` for external calls where appropriate. +- **Gas & Efficiency**: + - Use **Custom Errors** (`error MyError();`) instead of `require` strings. + - Use `mapping` over arrays for membership checks where possible. + - Minimize on-chain storage; use events for off-chain indexing. + +## Testing Standards + +- **Framework**: Foundry (Forge). 
+- **Methodology**: + - **Unit Tests**: Comprehensive coverage for all functions. + - **Fuzz Testing**: Required for arithmetic and purely functional logic. + - **Invariant Testing**: Define invariants for stateful system properties. +- **Naming Convention**: + - `test_Description` + - `testFuzz_Description` + - `test_RevertIf_Condition` + +## ZkSync Specifics + +- **System Contracts**: Be aware of ZkSync system contracts (e.g., `ContractDeployer`, `L2EthToken`) when interacting with low-level features. +- **Gas Model**: Account for ZkSync's different gas metering if performing low-level optimization. +- **Compiler Differences**: Be mindful of differences between `solc` and `zksolc` (e.g., `create2` address derivation). + +## L1-Only Contracts (No --zksync flag) + +The following contracts use opcodes/patterns incompatible with ZkSync Era and must be built/tested **without** the `--zksync` flag: + +- **SwarmRegistryL1**: Uses `SSTORE2` (relies on `EXTCODECOPY` which is unsupported on ZkSync). 
+ +For these contracts, use: + +```bash +forge build --match-path src/swarms/SwarmRegistryL1.sol +forge test --match-path test/SwarmRegistryL1.t.sol +``` diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 64ac137..1ceaaf3 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -34,4 +34,4 @@ jobs: run: yarn lint - name: Run tests - run: forge test --zksync + run: forge test diff --git a/.gitmodules b/.gitmodules index 9540dda..c6c1a45 100644 --- a/.gitmodules +++ b/.gitmodules @@ -10,3 +10,6 @@ [submodule "lib/era-contracts"] path = lib/era-contracts url = https://github.com/matter-labs/era-contracts +[submodule "lib/solady"] + path = lib/solady + url = https://github.com/vectorized/solady diff --git a/.vscode/settings.json b/.vscode/settings.json index 4d04fd2..8ab6c21 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -13,5 +13,8 @@ "editor.formatOnSave": true, "[solidity]": { "editor.defaultFormatter": "JuanBlanco.solidity" + }, + "chat.tools.terminal.autoApprove": { + "forge": true } } diff --git a/foundry.lock b/foundry.lock new file mode 100644 index 0000000..7a3effd --- /dev/null +++ b/foundry.lock @@ -0,0 +1,20 @@ +{ + "lib/zksync-storage-proofs": { + "rev": "4b20401ce44c1ec966a29d893694f65db885304b" + }, + "lib/openzeppelin-contracts": { + "rev": "e4f70216d759d8e6a64144a9e1f7bbeed78e7079" + }, + "lib/solady": { + "tag": { + "name": "v0.1.26", + "rev": "acd959aa4bd04720d640bf4e6a5c71037510cc4b" + } + }, + "lib/forge-std": { + "rev": "1eea5bae12ae557d589f9f0f0edae2faa47cb262" + }, + "lib/era-contracts": { + "rev": "84d5e3716f645909e8144c7d50af9dd6dd9ded62" + } +} \ No newline at end of file diff --git a/lib/solady b/lib/solady new file mode 160000 index 0000000..acd959a --- /dev/null +++ b/lib/solady @@ -0,0 +1 @@ +Subproject commit acd959aa4bd04720d640bf4e6a5c71037510cc4b diff --git a/logs/deploy_l1_bridge.log b/logs/deploy_l1_bridge.log new file mode 100644 index 0000000..b3ac3d4 --- 
/dev/null +++ b/logs/deploy_l1_bridge.log @@ -0,0 +1,32 @@ +Compiling 1 files with Solc 0.8.26 +Solc 0.8.26 finished in 2.60s +Compiler run successful! +Script ran successfully. + +== Logs == + Deployed L1Bridge at 0x2D02b651Ea9630351719c8c55210e042e940d69a + Granted MINTER_ROLE on NodlL1(0x6dd0E17ec6fE56c5f58a0Fe2Bb813B9b5cc25990) to bridge + +## Setting up 1 EVM. + +========================== + +Chain 1 + +Estimated gas price: 0.222068762 gwei + +Estimated total gas used for script: 2685066 + +Estimated amount required: 0.000596269282508292 ETH + +========================== + + +========================== + +ONCHAIN EXECUTION COMPLETE & SUCCESSFUL. + +Transactions saved to: /Users/alex/Documents/rollup/broadcast/DeployL1Bridge.s.sol/1/run-latest.json + +Sensitive values saved to: /Users/alex/Documents/rollup/cache/DeployL1Bridge.s.sol/1/run-latest.json + diff --git a/logs/deploy_l1_nodl.log b/logs/deploy_l1_nodl.log new file mode 100644 index 0000000..ae13ee1 --- /dev/null +++ b/logs/deploy_l1_nodl.log @@ -0,0 +1,29 @@ +No files changed, compilation skipped +Script ran successfully. + +== Logs == + Deployed L1Nodl at 0x6dd0E17ec6fE56c5f58a0Fe2Bb813B9b5cc25990 + +## Setting up 1 EVM. + +========================== + +Chain 1 + +Estimated gas price: 0.251645298 gwei + +Estimated total gas used for script: 4998146 + +Estimated amount required: 0.001257759939617508 ETH + +========================== + + +========================== + +ONCHAIN EXECUTION COMPLETE & SUCCESSFUL. + +Transactions saved to: /Users/alex/Documents/rollup/broadcast/DeployL1Nodl.s.sol/1/run-latest.json + +Sensitive values saved to: /Users/alex/Documents/rollup/cache/DeployL1Nodl.s.sol/1/run-latest.json + diff --git a/logs/deploy_l2_bridge.log b/logs/deploy_l2_bridge.log new file mode 100644 index 0000000..5047416 --- /dev/null +++ b/logs/deploy_l2_bridge.log @@ -0,0 +1,98 @@ +Compiling 1 files with Solc 0.8.26 +Solc 0.8.26 finished in 1.70s +Compiler run successful! 
+ +Compiling 1 files with zksolc and solc 0.8.26 +zksolc and solc 0.8.26 finished in 4.05s +Compiler run successful with warnings: +Warning +ZKsync Era comes with native account abstraction support, and therefore the initiator of a +transaction might be different from the contract calling your code. It is highly recommended NOT +to rely on tx.origin, but use msg.sender instead. +Learn more about Account Abstraction at https://docs.zksync.io/build/developer-reference/account-abstraction/ +You may disable this warning with: + a. `suppressedWarnings = ["txorigin"]` in standard JSON. + b. `--suppress-warnings txorigin` in the CLI. + --> lib/era-contracts/l1-contracts/contracts/vendor/AddressAliasHelper.sol:56:42 | + 56 | _recipient = _prevMsgSender == tx.origin + | ^^^^^^^^^ + +Warning +EraVM does not use bytecode for contract deployment. Instead, it refers to contracts using their bytecode hashes. +In order to deploy a contract, please use the `new` operator in Solidity instead of raw 'create'/'create2' in assembly. +In Solidity v0.6 and older, it can be a false-positive warning if there is 'create(' or 'create2(' in comments within assembly. +Learn more about CREATE/CREATE2 EraVM limitations at https://docs.zksync.io/zksync-protocol/differences/evm-instructions#create-create2 +You may disable this warning with: + 1. `suppressedWarnings = ["assemblycreate"]` in standard JSON. + 2. `--suppress-warnings assemblycreate` in the CLI. + --> lib/forge-std/src/StdCheats.sol:494:19 | + 494 | addr := create(0, add(bytecode, 0x20), mload(bytecode)) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Warning +EraVM does not use bytecode for contract deployment. Instead, it refers to contracts using their bytecode hashes. +In order to deploy a contract, please use the `new` operator in Solidity instead of raw 'create'/'create2' in assembly. +In Solidity v0.6 and older, it can be a false-positive warning if there is 'create(' or 'create2(' in comments within assembly. 
+Learn more about CREATE/CREATE2 EraVM limitations at https://docs.zksync.io/zksync-protocol/differences/evm-instructions#create-create2 +You may disable this warning with: + 1. `suppressedWarnings = ["assemblycreate"]` in standard JSON. + 2. `--suppress-warnings assemblycreate` in the CLI. + --> lib/forge-std/src/StdCheats.sol:504:19 | + 504 | addr := create(0, add(bytecode, 0x20), mload(bytecode)) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Warning +EraVM does not use bytecode for contract deployment. Instead, it refers to contracts using their bytecode hashes. +In order to deploy a contract, please use the `new` operator in Solidity instead of raw 'create'/'create2' in assembly. +In Solidity v0.6 and older, it can be a false-positive warning if there is 'create(' or 'create2(' in comments within assembly. +Learn more about CREATE/CREATE2 EraVM limitations at https://docs.zksync.io/zksync-protocol/differences/evm-instructions#create-create2 +You may disable this warning with: + 1. `suppressedWarnings = ["assemblycreate"]` in standard JSON. + 2. `--suppress-warnings assemblycreate` in the CLI. + --> lib/forge-std/src/StdCheats.sol:515:19 | + 515 | addr := create(val, add(bytecode, 0x20), mload(bytecode)) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Warning +EraVM does not use bytecode for contract deployment. Instead, it refers to contracts using their bytecode hashes. +In order to deploy a contract, please use the `new` operator in Solidity instead of raw 'create'/'create2' in assembly. +In Solidity v0.6 and older, it can be a false-positive warning if there is 'create(' or 'create2(' in comments within assembly. +Learn more about CREATE/CREATE2 EraVM limitations at https://docs.zksync.io/zksync-protocol/differences/evm-instructions#create-create2 +You may disable this warning with: + 1. `suppressedWarnings = ["assemblycreate"]` in standard JSON. + 2. `--suppress-warnings assemblycreate` in the CLI. 
+ --> lib/forge-std/src/StdCheats.sol:525:19 | + 525 | addr := create(val, add(bytecode, 0x20), mload(bytecode)) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +2025-10-15T00:35:52.095529Z ERROR backendhandler: failed to get block err=failed to get block; error sending request for url (https://mainnet.era.zksync.io/); operation timed out number=65260273 +2025-10-15T00:35:52.096034Z ERROR sharedbackend: Failed to send/recv `block_hash` err=failed to get block hash for 65260273: failed to get block; error sending request for url (https://mainnet.era.zksync.io/); operation timed out number=65260273 +Script ran successfully. + +== Logs == + Deployed L2Bridge at 0x2c1B65dA72d5Cf19b41dE6eDcCFB7DD83d1B529E + Granted MINTER_ROLE on NODL(0xBD4372e44c5eE654dd838304006E1f0f69983154) to bridge + +## Setting up 1 EVM. + +========================== + +Chain 324 + +Estimated gas price: 0.090500001 gwei + +Estimated total gas used for script: 209410861 + +Estimated amount required: 0.018951683129910861 ETH + +========================== + + +========================== + +ONCHAIN EXECUTION COMPLETE & SUCCESSFUL. 
+ +Transactions saved to: /Users/alex/Documents/rollup/broadcast/DeployL2Bridge.s.sol/324/run-latest.json + +Sensitive values saved to: /Users/alex/Documents/rollup/cache/DeployL2Bridge.s.sol/324/run-latest.json + diff --git a/remappings.txt b/remappings.txt index 1e95077..53468b3 100644 --- a/remappings.txt +++ b/remappings.txt @@ -1 +1,2 @@ -@openzeppelin=lib/openzeppelin-contracts/ \ No newline at end of file +@openzeppelin=lib/openzeppelin-contracts/ +solady/=lib/solady/src/ \ No newline at end of file diff --git a/src/swarms/FleetIdentity.sol b/src/swarms/FleetIdentity.sol new file mode 100644 index 0000000..0dba18b --- /dev/null +++ b/src/swarms/FleetIdentity.sol @@ -0,0 +1,1208 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear + +pragma solidity ^0.8.24; + +import {ERC721} from "@openzeppelin/contracts/token/ERC721/ERC721.sol"; +import {ERC721Enumerable} from "@openzeppelin/contracts/token/ERC721/extensions/ERC721Enumerable.sol"; +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; +import {ReentrancyGuard} from "@openzeppelin/contracts/utils/ReentrancyGuard.sol"; + +/** + * @title FleetIdentity + * @notice ERC-721 with ERC721Enumerable representing ownership of a BLE fleet, + * secured by an ERC-20 bond organized into geometric tiers. + * + * @dev **Two-level geographic registration** + * + * Fleets register at exactly one level: + * - Country — regionKey = countryCode (ISO 3166-1 numeric, 1-999) + * - Admin Area — regionKey = (countryCode << 10) | adminCode (>= 1024) + * + * Each regionKey has its **own independent tier namespace** — tier indices + * start at 0 for every region. The first fleet in any region always pays + * the level-appropriate bond (LOCAL: BASE_BOND, COUNTRY: BASE_BOND * 16). 
+ * + * **Economic Model** + * + * - Tier capacity: 4 members per tier (unified across levels) + * - Local bond: BASE_BOND * 2^tier + * - Country bond: BASE_BOND * COUNTRY_BOND_MULTIPLIER * 2^tier (16× local) + * + * Country fleets pay 16× more but appear in all admin-area bundles within + * their country. This economic difference provides locals a significant + * advantage: a local can reach tier 4 for the same cost a country player + * pays for tier 0. Bundle slots are filled by simple tier-descent priority: + * higher tier first, locals before country within each tier. + * + * EdgeBeaconScanner discovery uses 2-level fallback: + * 1. Admin area (highest priority) + * 2. Country (lower priority) + * + * On-chain indexes track which countries and admin areas have active fleets, + * enabling EdgeBeaconScanner enumeration without off-chain indexers. + * + * **TokenID Encoding** + * + * TokenID = (regionKey << 128) | uuid + * - Bits 0-127: UUID (bytes16 Proximity UUID) + * - Bits 128-159: Region key (32-bit country or admin-area code) + * + * This allows the same UUID to be registered in multiple regions, + * each with a distinct token. 
Region and UUID can be extracted: + * - uuid = bytes16(uint128(tokenId)) + * - region = uint32(tokenId >> 128) + */ +contract FleetIdentity is ERC721Enumerable, ReentrancyGuard { + using SafeERC20 for IERC20; + + // ────────────────────────────────────────────── + // Errors + // ────────────────────────────────────────────── + error InvalidUUID(); + error NotTokenOwner(); + error MaxTiersReached(); + error TierFull(); + error TargetTierNotHigher(); + error TargetTierNotLower(); + error TargetTierSameAsCurrent(); + error InvalidCountryCode(); + error InvalidAdminCode(); + error AdminAreaRequired(); + error UuidOwnerMismatch(); + error UuidLevelMismatch(); + error UuidAlreadyOwned(); + error UuidNotOwned(); + error NotUuidOwner(); + error CannotUnregisterMultipleTokens(); + error AlreadyRegistered(); + error NotOperator(); + error OperatorNotAllowedForOwnedOnly(); + + // ────────────────────────────────────────────── + // Enums + // ────────────────────────────────────────────── + + /// @notice Registration level for a UUID. + enum RegistrationLevel { + None, // 0 - not registered (default) + Owned, // 1 - owned but not registered in any region + Local, // 2 - admin area (local) level + Country // 3 - country level + } + + // ────────────────────────────────────────────── + // Constants & Immutables + // ────────────────────────────────────────────── + + /// @notice Unified tier capacity for all levels. + uint256 public constant TIER_CAPACITY = 4; + + /// @notice Bond multiplier for country-level registration (16× local). + uint256 public constant COUNTRY_BOND_MULTIPLIER = 16; + + /// @notice Hard cap on tier count per region. + /// @dev Derived from anti-spam analysis: with a bond doubling per tier + /// and capacity 4, a spammer spending half the total token supply + /// against a BASE_BOND set 10 000× too low fills ~20 tiers. + /// 24 provides comfortable headroom. 
+ uint256 public constant MAX_TIERS = 24; + + /// @notice Maximum UUIDs returned by buildHighestBondedUuidBundle. + uint256 public constant MAX_BONDED_UUID_BUNDLE_SIZE = 20; + + /// @notice ISO 3166-1 numeric upper bound for country codes. + uint16 internal constant MAX_COUNTRY_CODE = 999; + + /// @notice Upper bound for admin-area codes within a country. + /// @dev Set to 255 to cover all real-world countries (UK has ~172, the highest). + /// Dense indices from ISO 3166-2 mappings range 0-254, stored as adminCode 1-255. + uint16 internal constant MAX_ADMIN_CODE = 255; + + /// @dev Bit shift for packing countryCode into an admin-area region key. + uint256 private constant ADMIN_SHIFT = 10; + /// @dev Bitmask for extracting adminCode from an admin-area region key. + uint32 private constant ADMIN_CODE_MASK = 0x3FF; + + /// @notice Region key for owned-only UUIDs (not registered in any region). + uint32 public constant OWNED_REGION_KEY = 0; + + /// @notice The ERC-20 token used for bonds (immutable, e.g. NODL). + IERC20 public immutable BOND_TOKEN; + + /// @notice Base bond for tier 0 in any region. Tier K requires BASE_BOND * 2^K. + uint256 public immutable BASE_BOND; + + // ────────────────────────────────────────────── + // Region-namespaced tier data + // ────────────────────────────────────────────── + + /// @notice regionKey -> number of tiers opened in that region. + mapping(uint32 => uint256) public regionTierCount; + + /// @notice regionKey -> tierIndex -> list of token IDs. + mapping(uint32 => mapping(uint256 => uint256[])) internal _regionTierMembers; + + /// @notice Token ID -> index within its tier's member array (for O(1) removal). + mapping(uint256 => uint256) internal _indexInTier; + + // ────────────────────────────────────────────── + // Fleet data + // ────────────────────────────────────────────── + + /// @notice Token ID -> tier index (within its region) the fleet belongs to. 
+ mapping(uint256 => uint256) public fleetTier; + + // ────────────────────────────────────────────── + // UUID ownership tracking + // ────────────────────────────────────────────── + + /// @notice UUID -> address that first registered a token for this UUID. + /// All subsequent registrations for the same UUID must come from this address. + mapping(bytes16 => address) public uuidOwner; + + /// @notice UUID -> count of active tokens for this UUID (across all regions). + /// When this reaches 0, uuidOwner is cleared. + mapping(bytes16 => uint256) public uuidTokenCount; + + /// @notice UUID -> registration level. + /// All tokens for a UUID must be at the same level. + mapping(bytes16 => RegistrationLevel) public uuidLevel; + + /// @notice UUID -> operator address for tier maintenance. + /// If address(0), the uuidOwner acts as operator. + /// Operator can only be set for registered UUIDs (Local or Country level). + /// The operator pays/receives tier bond differentials; owner pays BASE_BOND. + mapping(bytes16 => address) public uuidOperator; + + // ────────────────────────────────────────────── + // On-chain region indexes + // ────────────────────────────────────────────── + + /// @dev Set of country codes with at least one active fleet. + uint16[] internal _activeCountries; + mapping(uint16 => uint256) internal _activeCountryIndex; // value = index+1 (0 = not present) + + /// @dev Set of admin-area region keys with at least one active fleet. 
+ uint32[] internal _activeAdminAreas; + mapping(uint32 => uint256) internal _activeAdminAreaIndex; // value = index+1 (0 = not present) + + // ────────────────────────────────────────────── + // Events + // ────────────────────────────────────────────── + + event FleetRegistered( + address indexed owner, + bytes16 indexed uuid, + uint256 indexed tokenId, + uint32 regionKey, + uint256 tierIndex, + uint256 bondAmount, + address operator + ); + event OperatorSet( + bytes16 indexed uuid, + address indexed oldOperator, + address indexed newOperator, + uint256 tierExcessTransferred + ); + event FleetPromoted( + uint256 indexed tokenId, uint256 indexed fromTier, uint256 indexed toTier, uint256 additionalBond + ); + event FleetDemoted(uint256 indexed tokenId, uint256 indexed fromTier, uint256 indexed toTier, uint256 bondRefund); + event FleetBurned( + address indexed owner, uint256 indexed tokenId, uint32 indexed regionKey, uint256 tierIndex, uint256 bondRefund + ); + event UuidClaimed(address indexed owner, bytes16 indexed uuid, uint256 tokenId, uint256 bond); + event UuidUnregistered( + address indexed owner, bytes16 indexed uuid, uint256 oldTokenId, uint256 newTokenId, uint256 refund + ); + event UuidReleased(address indexed owner, bytes16 indexed uuid, uint256 refund); + + // ────────────────────────────────────────────── + // Constructor + // ────────────────────────────────────────────── + + /// @param _bondToken Address of the ERC-20 token used for bonds. + /// @param _baseBond Base bond for tier 0 in any region. + constructor(address _bondToken, uint256 _baseBond) ERC721("Swarm Fleet Identity", "SFID") { + BOND_TOKEN = IERC20(_bondToken); + BASE_BOND = _baseBond; + } + + // ══════════════════════════════════════════════ + // Registration: Country (explicit tier only — use countryInclusionHint) + // ══════════════════════════════════════════════ + + /// @notice Register a fleet under a country into a specific tier. 
+ /// @dev No auto-assign: the cheapest-inclusion tier requires scanning all + /// admin areas in the country (unbounded), so callers must query + /// `countryInclusionHint(countryCode)` off-chain and supply the tier. + /// @param countryCode ISO 3166-1 numeric country code (1-999). + function registerFleetCountry(bytes16 uuid, uint16 countryCode, uint256 targetTier) + external + nonReentrant + returns (uint256 tokenId) + { + if (uuid == bytes16(0)) revert InvalidUUID(); + if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode(); + uint32 regionKey = uint32(countryCode); + _validateExplicitTier(regionKey, targetTier); + tokenId = _register(uuid, regionKey, targetTier, address(0)); + } + + /// @notice Register a fleet under a country with a designated operator. + /// @dev The owner pays BASE_BOND, the operator pays tier excess. + /// Setting operator to address(0) or msg.sender makes owner the operator. + /// @param uuid Proximity UUID for the fleet. + /// @param countryCode ISO 3166-1 numeric country code (1-999). + /// @param targetTier Target tier for registration. + /// @param operator Address responsible for tier maintenance. address(0) = owner. + function registerFleetCountryWithOperator( + bytes16 uuid, + uint16 countryCode, + uint256 targetTier, + address operator + ) external nonReentrant returns (uint256 tokenId) { + if (uuid == bytes16(0)) revert InvalidUUID(); + if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode(); + uint32 regionKey = uint32(countryCode); + _validateExplicitTier(regionKey, targetTier); + tokenId = _register(uuid, regionKey, targetTier, operator); + } + + // ══════════════════════════════════════════════ + // Registration: Admin Area (local) + // ══════════════════════════════════════════════ + + /// @notice Register a fleet under a country + admin area into a specific tier. 
+ /// @dev Use `localInclusionHint(countryCode, adminCode)` to find the cheapest + /// tier that guarantees bundle inclusion. + function registerFleetLocal(bytes16 uuid, uint16 countryCode, uint16 adminCode, uint256 targetTier) + external + nonReentrant + returns (uint256 tokenId) + { + if (uuid == bytes16(0)) revert InvalidUUID(); + if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode(); + if (adminCode == 0 || adminCode > MAX_ADMIN_CODE) revert InvalidAdminCode(); + uint32 regionKey = makeAdminRegion(countryCode, adminCode); + _validateExplicitTier(regionKey, targetTier); + tokenId = _register(uuid, regionKey, targetTier, address(0)); + } + + /// @notice Register a fleet under a country + admin area with a designated operator. + /// @dev The owner pays BASE_BOND, the operator pays tier excess. + /// Setting operator to address(0) or msg.sender makes owner the operator. + /// @param uuid Proximity UUID for the fleet. + /// @param countryCode ISO 3166-1 numeric country code (1-999). + /// @param adminCode Admin area code (1-255). + /// @param targetTier Target tier for registration. + /// @param operator Address responsible for tier maintenance. address(0) = owner. 
+ function registerFleetLocalWithOperator( + bytes16 uuid, + uint16 countryCode, + uint16 adminCode, + uint256 targetTier, + address operator + ) external nonReentrant returns (uint256 tokenId) { + if (uuid == bytes16(0)) revert InvalidUUID(); + if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode(); + if (adminCode == 0 || adminCode > MAX_ADMIN_CODE) revert InvalidAdminCode(); + uint32 regionKey = makeAdminRegion(countryCode, adminCode); + _validateExplicitTier(regionKey, targetTier); + tokenId = _register(uuid, regionKey, targetTier, operator); + } + + // ══════════════════════════════════════════════ + // Promote / Demote (region-aware) + // ══════════════════════════════════════════════ + + /// @notice Promotes a fleet to the next tier within its region. + /// Only callable by the effective operator (or owner if no operator set). + function promote(uint256 tokenId) external nonReentrant { + _promote(tokenId, fleetTier[tokenId] + 1); + } + + /// @notice Moves a fleet to a different tier within its region. + /// If targetTier > current tier, promotes (pulls additional bond from operator). + /// If targetTier < current tier, demotes (refunds bond difference to operator). + /// Only callable by the effective operator (or owner if no operator set). + function reassignTier(uint256 tokenId, uint256 targetTier) external nonReentrant { + uint256 currentTier = fleetTier[tokenId]; + if (targetTier == currentTier) revert TargetTierSameAsCurrent(); + if (targetTier > currentTier) { + _promote(tokenId, targetTier); + } else { + _demote(tokenId, targetTier); + } + } + + // ══════════════════════════════════════════════ + // Operator Management + // ══════════════════════════════════════════════ + + /// @notice Sets or changes the operator for a UUID. + /// The operator is responsible for tier maintenance (promote/demote). 
+ /// When changing operators, the new operator must pay the old operator + /// for all accumulated tier bond excess across all registered regions. + /// @dev Only the UUID owner can call this. Cannot be called for owned-only UUIDs. + /// Setting operator to owner address or address(0) clears the explicit operator. + /// @param uuid The UUID to set the operator for. + /// @param newOperator The new operator address. Use address(0) or owner to clear. + function setOperator(bytes16 uuid, address newOperator) external nonReentrant { + // Only owner can set operator + if (uuidOwner[uuid] != msg.sender) revert NotUuidOwner(); + + // Cannot set operator for owned-only UUIDs (no tiers to manage) + RegistrationLevel level = uuidLevel[uuid]; + if (level == RegistrationLevel.None || level == RegistrationLevel.Owned) { + revert OperatorNotAllowedForOwnedOnly(); + } + + address oldOperator = operatorOf(uuid); + + // Normalize: if newOperator is owner, store as address(0) + address storedOperator = (newOperator == msg.sender) ? address(0) : newOperator; + address effectiveNewOperator = (storedOperator == address(0)) ? msg.sender : storedOperator; + + // Calculate tier excess to transfer between operators + uint256 tierExcess = _computeTotalTierExcess(uuid); + + // Effects: Update operator + uuidOperator[uuid] = storedOperator; + + // Interactions: Transfer tier bond excess from new operator to old operator + if (tierExcess > 0 && oldOperator != effectiveNewOperator) { + // Pull from new operator + _pullBond(effectiveNewOperator, tierExcess); + // Refund to old operator + _refundBond(oldOperator, tierExcess); + } + + emit OperatorSet(uuid, oldOperator, effectiveNewOperator, tierExcess); + } + + // ══════════════════════════════════════════════ + // Burn + // ══════════════════════════════════════════════ + + /// @notice Burns the fleet NFT and refunds the bond. + /// For owned-only tokens: full BASE_BOND refund to owner. 
+ /// For registered fleets: BASE_BOND to owner, tier excess to operator. + /// Only the token owner can burn. + function burn(uint256 tokenId) external nonReentrant { + address tokenHolder = ownerOf(tokenId); + if (tokenHolder != msg.sender) revert NotTokenOwner(); + + uint32 region = tokenRegion(tokenId); + bytes16 uuid = tokenUuid(tokenId); + address owner = uuidOwner[uuid]; + uint256 tier; + uint256 ownerRefund; + uint256 operatorRefund; + + if (region == OWNED_REGION_KEY) { + // Owned-only token: no tier structures, just BASE_BOND to owner + ownerRefund = BASE_BOND; + operatorRefund = 0; + tier = 0; + _burn(tokenId); + } else { + // Registered fleet: split refund between owner and operator + tier = fleetTier[tokenId]; + uint256 totalBond = tierBond(tier, _isCountryRegion(region)); + ownerRefund = BASE_BOND; + operatorRefund = totalBond - BASE_BOND; + + address operator = operatorOf(uuid); + + _cleanupFleetFromTier(tokenId, region, tier); + _burn(tokenId); + + // Refund tier excess to operator + _refundBond(operator, operatorRefund); + } + + _decrementUuidCount(uuid); + _refundBond(owner, ownerRefund); + + emit FleetBurned(tokenHolder, tokenId, region, tier, ownerRefund + operatorRefund); + } + + // ══════════════════════════════════════════════ + // UUID Ownership (Owned-Only Mode) + // ══════════════════════════════════════════════ + + /// @notice Claim ownership of a UUID without registering in any region. + /// Costs BASE_BOND. The UUID can later be registered via registerFleetLocal/Country. + /// @param uuid The Proximity UUID to claim. + /// @return tokenId The token ID for the owned-only UUID (region=0). 
+ function claimUuid(bytes16 uuid) external nonReentrant returns (uint256 tokenId) { + if (uuid == bytes16(0)) revert InvalidUUID(); + if (uuidOwner[uuid] != address(0)) revert UuidAlreadyOwned(); + + // Set ownership + uuidOwner[uuid] = msg.sender; + uuidLevel[uuid] = RegistrationLevel.Owned; + uuidTokenCount[uuid] = 1; + + // Mint token with region=0 + tokenId = uint256(uint128(uuid)); + _mint(msg.sender, tokenId); + + _pullBond(msg.sender, BASE_BOND); + + emit UuidClaimed(msg.sender, uuid, tokenId, BASE_BOND); + } + + /// @notice Move a registered fleet back to owned-only mode, receiving a partial refund. + /// Tier excess is refunded to the operator. Only works with a single token. + /// @param tokenId The fleet token to unregister. + /// @return newTokenId The new owned-only token ID (region=0). + function unregisterToOwned(uint256 tokenId) external nonReentrant returns (uint256 newTokenId) { + address tokenOwner = ownerOf(tokenId); + if (tokenOwner != msg.sender) revert NotTokenOwner(); + + bytes16 uuid = tokenUuid(tokenId); + uint32 region = tokenRegion(tokenId); + + // Must be a registered fleet, not already owned-only + if (region == OWNED_REGION_KEY) revert UuidNotOwned(); + + // Must be the only token for this UUID + if (uuidTokenCount[uuid] > 1) revert CannotUnregisterMultipleTokens(); + + uint256 tier = fleetTier[tokenId]; + uint256 currentBond = tierBond(tier, _isCountryRegion(region)); + uint256 refund = currentBond - BASE_BOND; + + // Get operator before clearing state + address operator = operatorOf(uuid); + + // Effects: Remove from region + _cleanupFleetFromTier(tokenId, region, tier); + _burn(tokenId); + + // Update level to Owned (resets level for future registration flexibility) + uuidLevel[uuid] = RegistrationLevel.Owned; + // Clear operator (owned-only has no tiers to manage) + delete uuidOperator[uuid]; + + // Mint owned-only token + newTokenId = uint256(uint128(uuid)); + _mint(msg.sender, newTokenId); + + // Refund tier excess to operator 
(not owner) + _refundBond(operator, refund); + + emit UuidUnregistered(msg.sender, uuid, tokenId, newTokenId, refund); + } + + /// @notice Release an owned-only UUID, refunding the BASE_BOND. + /// After release, the UUID can be claimed by anyone. + /// @param uuid The UUID to release (must be in owned-only state). + function releaseUuid(bytes16 uuid) external nonReentrant { + if (uuidLevel[uuid] != RegistrationLevel.Owned) revert UuidNotOwned(); + if (uuidOwner[uuid] != msg.sender) revert NotUuidOwner(); + + // Get the token ID for this owned-only UUID + uint256 tokenId = uint256(uint128(uuid)); + address tokenOwner = ownerOf(tokenId); + + // Effects + _burn(tokenId); + _clearUuidOwnership(uuid); + + // Interaction + _refundBond(tokenOwner, BASE_BOND); + + emit UuidReleased(tokenOwner, uuid, BASE_BOND); + } + + // ══════════════════════════════════════════════ + // Views: Bond & tier helpers + // ══════════════════════════════════════════════ + + /// @notice Bond required for tier K. + /// Local (admin area): BASE_BOND * 2^K + /// Country: BASE_BOND * COUNTRY_BOND_MULTIPLIER * 2^K (8× local) + function tierBond(uint256 tier, bool isCountry) public view returns (uint256) { + uint256 base = BASE_BOND << tier; + return isCountry ? base * COUNTRY_BOND_MULTIPLIER : base; + } + + /// @notice Returns the cheapest tier that guarantees a **local** fleet + /// appears in `buildHighestBondedUuidBundle` for (countryCode, adminCode). + /// Bounded: O(MAX_TIERS). 
+    function localInclusionHint(uint16 countryCode, uint16 adminCode)
+        external
+        view
+        returns (uint256 inclusionTier, uint256 bond)
+    {
+        if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode();
+        if (adminCode == 0 || adminCode > MAX_ADMIN_CODE) revert InvalidAdminCode();
+        // Candidate joins the admin-area (local) region: isCountry = false.
+        inclusionTier = _findCheapestInclusionTier(countryCode, adminCode, false);
+        bond = tierBond(inclusionTier, false);
+    }
+
+    /// @notice Returns the cheapest tier that guarantees a **country** fleet
+    ///         appears in every `buildHighestBondedUuidBundle` query within
+    ///         the country (across all active admin areas).
+    /// @dev Unbounded view — iterates over all active admin areas in the
+    ///      country. Free off-chain; callers pass the result to
+    ///      `registerFleetCountry(uuid, cc, tier)`.
+    /// @param countryCode ISO 3166-1 numeric country code (1..MAX_COUNTRY_CODE).
+    /// @return inclusionTier The max over all in-country locations of the per-location cheapest tier.
+    /// @return bond Country-level bond for `inclusionTier` (8x local via COUNTRY_BOND_MULTIPLIER).
+    function countryInclusionHint(uint16 countryCode) external view returns (uint256 inclusionTier, uint256 bond) {
+        if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode();
+
+        // Check the country-only location (no admin area active).
+        inclusionTier = _findCheapestInclusionTier(countryCode, 0, true);
+
+        // Scan all active admin areas belonging to this country.
+        // Taking the max guarantees inclusion at every location's bundle.
+        for (uint256 i = 0; i < _activeAdminAreas.length; ++i) {
+            uint32 rk = _activeAdminAreas[i];
+            if (_countryFromRegion(rk) != countryCode) continue;
+            uint16 admin = _adminFromRegion(rk);
+            uint256 t = _findCheapestInclusionTier(countryCode, admin, true);
+            if (t > inclusionTier) inclusionTier = t;
+        }
+        bond = tierBond(inclusionTier, true);
+    }
+
+    /// @notice Highest non-empty tier in a region, or 0 if none.
+    /// @dev Ambiguous sentinel: a return of 0 also means "tier 0 is the highest
+    ///      non-empty tier". Callers that need to distinguish an empty region
+    ///      should consult `regionTierCount[regionKey]` (0 means no members).
+    function highestActiveTier(uint32 regionKey) external view returns (uint256) {
+        uint256 tierCount = regionTierCount[regionKey];
+        if (tierCount == 0) return 0;
+        // regionTierCount is maintained as (highest occupied tier + 1) by _addToTier/_trimTierCount.
+        return tierCount - 1;
+    }
+
+    /// @notice Number of members in a specific tier of a region.
+    function tierMemberCount(uint32 regionKey, uint256 tier) external view returns (uint256) {
+        return _regionTierMembers[regionKey][tier].length;
+    }
+
+    /// @notice All token IDs in a specific tier of a region.
+    /// @dev Copies the full storage array to memory — intended for off-chain reads.
+    function getTierMembers(uint32 regionKey, uint256 tier) external view returns (uint256[] memory) {
+        return _regionTierMembers[regionKey][tier];
+    }
+
+    /// @notice All UUIDs in a specific tier of a region.
+    /// @dev Derived from member token IDs; order matches `getTierMembers`.
+    function getTierUuids(uint32 regionKey, uint256 tier) external view returns (bytes16[] memory uuids) {
+        uint256[] storage members = _regionTierMembers[regionKey][tier];
+        uuids = new bytes16[](members.length);
+        for (uint256 i = 0; i < members.length; ++i) {
+            uuids[i] = tokenUuid(members[i]);
+        }
+    }
+
+    /// @notice UUID for a token ID (extracts lower 128 bits).
+    /// @dev Inverse of the low half of `computeTokenId`.
+    function tokenUuid(uint256 tokenId) public pure returns (bytes16) {
+        return bytes16(uint128(tokenId));
+    }
+
+    /// @notice Region key encoded in a token ID (extracts bits 128-159).
+    /// @dev Bits above 159 are always zero for IDs produced by `computeTokenId`.
+    function tokenRegion(uint256 tokenId) public pure returns (uint32) {
+        return uint32(tokenId >> 128);
+    }
+
+    /// @notice Computes the deterministic token ID for a uuid+region pair.
+    /// @dev Layout: [zero: bits 160-255][regionKey: bits 128-159][uuid: bits 0-127].
+    ///      One token per (uuid, region) pair — re-registering the same pair
+    ///      would collide on this ID and revert in `_mint`.
+    function computeTokenId(bytes16 uuid, uint32 regionKey) public pure returns (uint256) {
+        return (uint256(regionKey) << 128) | uint256(uint128(uuid));
+    }
+
+    /// @notice Bond amount for a token. Returns 0 for nonexistent tokens.
+    /// @param tokenId The token to query.
+    /// @return The bond held for this token: BASE_BOND for owned-only tokens,
+    ///         otherwise the tier bond for the token's tier and region level.
+    function bonds(uint256 tokenId) external view returns (uint256) {
+        if (_ownerOf(tokenId) == address(0)) return 0;
+        uint32 region = tokenRegion(tokenId);
+        if (region == OWNED_REGION_KEY) return BASE_BOND;
+        return tierBond(fleetTier[tokenId], _isCountryRegion(region));
+    }
+
+    /// @notice Returns true if the UUID is in owned-only state (claimed but not registered).
+    function isOwnedOnly(bytes16 uuid) external view returns (bool) {
+        return uuidLevel[uuid] == RegistrationLevel.Owned;
+    }
+
+    /// @notice Returns the effective operator for a UUID.
+ /// If no explicit operator is set, returns the uuidOwner (owner acts as operator). + /// Returns address(0) if UUID is not registered. + /// @param uuid The UUID to query. + /// @return operator The effective operator address responsible for tier maintenance. + function operatorOf(bytes16 uuid) public view returns (address operator) { + operator = uuidOperator[uuid]; + if (operator == address(0)) { + operator = uuidOwner[uuid]; + } + } + + // ══════════════════════════════════════════════ + // Views: EdgeBeaconScanner discovery + // ══════════════════════════════════════════════ + + /// @notice Builds a priority-ordered bundle of up to 20 UUIDs for an EdgeBeaconScanner, + /// merging the highest-bonded tiers across admin-area and country levels. + /// + /// @dev **Priority Rules:** + /// 1. Higher bond tier always beats lower bond tier + /// 2. Within same tier: local (admin area) beats country + /// 3. Within same tier + level: earlier registration wins + /// + /// **Economic Fairness:** Country fleets pay 8× more (COUNTRY_BOND_MULTIPLIER) + /// than local fleets at the same tier. This means a local can reach tier 3 + /// for the same cost a country player pays for tier 0, giving locals a + /// significant economic advantage when competing for bundle slots. + /// + /// @param countryCode EdgeBeaconScanner country (must be > 0). + /// @param adminCode EdgeBeaconScanner admin area (must be > 0). + /// @return uuids The merged UUID bundle (up to 20). + /// @return count Actual number of UUIDs returned. 
+ function buildHighestBondedUuidBundle(uint16 countryCode, uint16 adminCode) + external + view + returns (bytes16[] memory uuids, uint256 count) + { + if (countryCode == 0) revert InvalidCountryCode(); + if (adminCode == 0) revert AdminAreaRequired(); + + uint32 countryKey = uint32(countryCode); + uint32 adminKey = makeAdminRegion(countryCode, adminCode); + + (uuids, count, , ) = _buildHighestBondedUuidBundle(countryKey, adminKey); + } + + /// @notice Builds a bundle containing ONLY country-level fleets for a country. + /// Use this when no admin areas are active to verify country fleet positions. + /// + /// @dev When no admin areas exist in a country, EdgeBeaconScanners are not yet + /// active there. This function lets country fleet owners inspect their + /// competitive position before scanners come online. + /// + /// The returned bundle represents the country-only contribution to any + /// future admin-area bundle. Local fleets (when they appear) will have + /// priority over country fleets at the same tier. + /// + /// @param countryCode ISO 3166-1 numeric country code (1-999). + /// @return uuids The country-only UUID bundle (up to 20). + /// @return count Actual number of UUIDs returned. + function buildCountryOnlyBundle(uint16 countryCode) + external + view + returns (bytes16[] memory uuids, uint256 count) + { + if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode(); + + uint32 countryKey = uint32(countryCode); + // Use a virtual admin region with no members (adminCode=0) + uint32 adminKey = makeAdminRegion(countryCode, 0); + + (uuids, count, , ) = _buildHighestBondedUuidBundle(countryKey, adminKey); + } + + /// @dev Internal bundle builder that returns additional state for `_findCheapestInclusionTier`. + /// + /// Builds a priority-ordered bundle by descending from highestTier to tier 0, + /// including admin-area members before country members at each tier. 
+ /// + /// @return uuids The UUIDs included in the bundle (trimmed to actual count). + /// @return count Number of UUIDs in the bundle. + /// @return highestTier The highest tier with any registered members. + /// @return lowestTier The lowest tier processed (may be > 0 if bundle filled early). + function _buildHighestBondedUuidBundle(uint32 countryKey, uint32 adminKey) + internal + view + returns (bytes16[] memory uuids, uint256 count, uint256 highestTier, uint256 lowestTier) + { + highestTier = _findMaxTierIndex(countryKey, adminKey); + + uuids = new bytes16[](MAX_BONDED_UUID_BUNDLE_SIZE); + + // Simple tier-descent: at each tier, locals first, then country + for (lowestTier = highestTier + 1; lowestTier > 0 && count < MAX_BONDED_UUID_BUNDLE_SIZE;) { + unchecked { --lowestTier; } + + // Include local (admin area) members first + count = _appendTierUuids(adminKey, lowestTier, uuids, count); + + // Include country members + count = _appendTierUuids(countryKey, lowestTier, uuids, count); + } + + // Trim array to actual size + assembly { + mstore(uuids, count) + } + } + + /// @dev Appends UUIDs from a region's tier to the bundle array. + /// If the tier has no members (empty region or tier beyond regionTierCount), + /// this is a no-op. Returns the updated count. + function _appendTierUuids( + uint32 regionKey, + uint256 tier, + bytes16[] memory uuids, + uint256 count + ) internal view returns (uint256) { + uint256[] storage members = _regionTierMembers[regionKey][tier]; + uint256 len = members.length; + uint256 room = MAX_BONDED_UUID_BUNDLE_SIZE - count; + uint256 toInclude = len < room ? len : room; + + for (uint256 i = 0; i < toInclude; ++i) { + uuids[count] = tokenUuid(members[i]); + unchecked { ++count; } + } + return count; + } + + // ══════════════════════════════════════════════ + // Views: Region indexes + // ══════════════════════════════════════════════ + + /// @notice Returns all country codes with at least one active fleet. 
+ function getActiveCountries() external view returns (uint16[] memory) { + return _activeCountries; + } + + /// @notice Returns all admin-area region keys with at least one active fleet. + function getActiveAdminAreas() external view returns (uint32[] memory) { + return _activeAdminAreas; + } + + /// @notice Builds an admin-area region key from country + admin codes. + /// @dev Country region key is simply uint32(countryCode) - no helper needed. + function makeAdminRegion(uint16 countryCode, uint16 adminCode) public pure returns (uint32) { + return (uint32(countryCode) << uint32(ADMIN_SHIFT)) | uint32(adminCode); + } + + // ══════════════════════════════════════════════ + // Internals + // ══════════════════════════════════════════════ + + // -- Region key encoding -- + + /// @dev Extracts the country code from an admin-area region key. + function _countryFromRegion(uint32 adminRegion) internal pure returns (uint16) { + return uint16(adminRegion >> uint32(ADMIN_SHIFT)); + } + + /// @dev Extracts the admin code from an admin-area region key. + function _adminFromRegion(uint32 adminRegion) internal pure returns (uint16) { + return uint16(adminRegion & ADMIN_CODE_MASK); + } + + /// @dev Returns true if the region key represents a country-level registration. + /// Region 0 (owned-only) is not a country region. + function _isCountryRegion(uint32 regionKey) internal pure returns (bool) { + return regionKey > 0 && regionKey <= MAX_COUNTRY_CODE; + } + + // -- Bond transfer helpers -- + + /// @dev Pulls bond tokens from an address (CEI: call after state changes). + function _pullBond(address from, uint256 amount) internal { + if (amount > 0) { + BOND_TOKEN.safeTransferFrom(from, address(this), amount); + } + } + + /// @dev Refunds bond tokens to an address (CEI: call after state changes). 
+    function _refundBond(address to, uint256 amount) internal {
+        // Zero-amount transfers are skipped: avoids useless external calls and
+        // tokens that revert on transfer(0).
+        if (amount > 0) {
+            BOND_TOKEN.safeTransfer(to, amount);
+        }
+    }
+
+    // -- UUID ownership helpers --
+
+    /// @dev Clears all UUID ownership state. Used when last token for a UUID is burned.
+    ///      After this, the UUID returns to RegistrationLevel.None and can be claimed anew.
+    function _clearUuidOwnership(bytes16 uuid) internal {
+        delete uuidOwner[uuid];
+        delete uuidTokenCount[uuid];
+        delete uuidLevel[uuid];
+        delete uuidOperator[uuid];
+    }
+
+    /// @dev Decrements UUID token count. Clears ownership if count reaches zero.
+    ///      Solidity 0.8 checked arithmetic: panics (reverts) if the count is
+    ///      already 0 — callers must only invoke this for UUIDs with live tokens.
+    /// @return newCount The new token count after decrement.
+    function _decrementUuidCount(bytes16 uuid) internal returns (uint256 newCount) {
+        newCount = uuidTokenCount[uuid] - 1;
+        if (newCount == 0) {
+            _clearUuidOwnership(uuid);
+        } else {
+            uuidTokenCount[uuid] = newCount;
+        }
+    }
+
+    /// @dev Computes total tier bond excess for a UUID across all registered regions.
+    ///      Tier excess = sum of (tierBond - BASE_BOND) for each token.
+    ///      This represents the amount the operator has paid beyond the base ownership bond.
+    /// @param uuid The UUID to compute tier excess for.
+    /// @return excess Total tier bond excess across all tokens for this UUID.
+    function _computeTotalTierExcess(bytes16 uuid) internal view returns (uint256 excess) {
+        address owner = uuidOwner[uuid];
+        if (owner == address(0)) return 0;
+
+        uint256 tokenCount = uuidTokenCount[uuid];
+        if (tokenCount == 0) return 0;
+
+        // Iterate through owner's tokens to find those belonging to this UUID.
+        // NOTE(review): this only scans tokens currently *held by* uuidOwner via
+        // ERC721Enumerable's tokenOfOwnerByIndex. Registered-fleet tokens are
+        // transferable, and _update() only re-syncs uuidOwner for owned-only
+        // tokens — so a fleet token transferred away from the owner would be
+        // missed here, undercounting the excess charged in setOperator().
+        // Confirm that fleet-token transfers are restricted, or scan by token ID.
+        uint256 ownerBalance = balanceOf(owner);
+        uint256 found = 0;
+
+        // Early exit once all tokenCount tokens for this UUID have been found.
+        for (uint256 i = 0; i < ownerBalance && found < tokenCount; ++i) {
+            uint256 tokenId = tokenOfOwnerByIndex(owner, i);
+            bytes16 tokenUuidVal = tokenUuid(tokenId);
+
+            if (tokenUuidVal == uuid) {
+                uint32 region = tokenRegion(tokenId);
+                // Skip owned-only tokens (region == 0): they carry only BASE_BOND, no excess.
+                if (region != OWNED_REGION_KEY) {
+                    uint256 tier = fleetTier[tokenId];
+                    bool isCountry = _isCountryRegion(region);
+                    uint256 tokenBond = tierBond(tier, isCountry);
+                    excess += tokenBond - BASE_BOND;
+                }
+                ++found;
+            }
+        }
+    }
+
+    // -- Tier cleanup helpers --
+
+    /// @dev Removes a fleet from its tier and cleans up associated state.
+    ///      Does NOT burn the token - caller must handle that.
+    ///      Also trims the region's tier count and drops the region from the
+    ///      active-region index if it is now empty.
+    function _cleanupFleetFromTier(uint256 tokenId, uint32 region, uint256 tier) internal {
+        _removeFromTier(tokenId, region, tier);
+        delete fleetTier[tokenId];
+        delete _indexInTier[tokenId];
+        _trimTierCount(region);
+        _removeFromRegionIndex(region);
+    }
+
+    // -- Registration helpers --
+
+    /// @dev Mints a fleet token and sets up tier membership. Does NOT handle bonds or UUID ownership.
+    ///      Token ID is deterministic (computeTokenId), so a duplicate (uuid, region)
+    ///      registration reverts inside _mint.
+    /// @return tokenId The newly minted token ID.
+    function _mintFleetToken(bytes16 uuid, uint32 region, uint256 tier) internal returns (uint256 tokenId) {
+        tokenId = computeTokenId(uuid, region);
+        fleetTier[tokenId] = tier;
+        _addToTier(tokenId, region, tier);
+        _addToRegionIndex(region);
+        _mint(msg.sender, tokenId);
+    }
+
+    /// @dev Shared registration logic. Handles fresh, Owned → Registered, and multi-region registrations.
+    ///      Supports split bond payment: owner pays BASE_BOND, operator pays tier excess.
+ /// @param uuid The Proximity UUID to register. + /// @param region The region key (country or admin area). + /// @param tier Target tier for registration. + /// @param operator Operator address. address(0) or msg.sender = owner acts as operator. + function _register(bytes16 uuid, uint32 region, uint256 tier, address operator) internal returns (uint256 tokenId) { + RegistrationLevel existingLevel = uuidLevel[uuid]; + bool isCountry = _isCountryRegion(region); + RegistrationLevel targetLevel = isCountry ? RegistrationLevel.Country : RegistrationLevel.Local; + + // Normalize operator: address(0) or msg.sender means owner is operator + address storedOperator = (operator == address(0) || operator == msg.sender) ? address(0) : operator; + address effectiveOperator = (storedOperator == address(0)) ? msg.sender : storedOperator; + + if (existingLevel == RegistrationLevel.Owned) { + // Owned → Registered transition: burn owned token, credit BASE_BOND already paid + if (uuidOwner[uuid] != msg.sender) revert UuidOwnerMismatch(); + + _burn(uint256(uint128(uuid))); // Burn owned-only token + uuidLevel[uuid] = targetLevel; + uuidOperator[uuid] = storedOperator; + + tokenId = _mintFleetToken(uuid, region, tier); + + // Tier excess goes from operator (BASE_BOND was already paid by owner) + uint256 tierExcess = tierBond(tier, isCountry) - BASE_BOND; + _pullBond(effectiveOperator, tierExcess); + + emit FleetRegistered(msg.sender, uuid, tokenId, region, tier, tierExcess, effectiveOperator); + } else if (existingLevel == RegistrationLevel.None) { + // Fresh registration: set UUID ownership + uuidOwner[uuid] = msg.sender; + uuidLevel[uuid] = targetLevel; + uuidTokenCount[uuid] = 1; + uuidOperator[uuid] = storedOperator; + + tokenId = _mintFleetToken(uuid, region, tier); + + // Owner pays BASE_BOND, operator pays tier excess + uint256 fullBond = tierBond(tier, isCountry); + uint256 tierExcess = fullBond - BASE_BOND; + _pullBond(msg.sender, BASE_BOND); + _pullBond(effectiveOperator, 
tierExcess); + + emit FleetRegistered(msg.sender, uuid, tokenId, region, tier, fullBond, effectiveOperator); + } else { + // Multi-region registration: same owner, same level, use existing operator + if (uuidOwner[uuid] != msg.sender) revert UuidOwnerMismatch(); + if (existingLevel != targetLevel) revert UuidLevelMismatch(); + + uuidTokenCount[uuid]++; + + tokenId = _mintFleetToken(uuid, region, tier); + + // For multi-region, use existing operator and they pay full tierBond for new region + address existingOperator = operatorOf(uuid); + uint256 bond = tierBond(tier, isCountry); + + // Owner pays BASE_BOND, operator pays tier excess + uint256 tierExcess = bond - BASE_BOND; + _pullBond(msg.sender, BASE_BOND); + _pullBond(existingOperator, tierExcess); + + emit FleetRegistered(msg.sender, uuid, tokenId, region, tier, bond, existingOperator); + } + } + + /// @dev Shared promotion logic. Only operator can call. + function _promote(uint256 tokenId, uint256 targetTier) internal { + bytes16 uuid = tokenUuid(tokenId); + address operator = operatorOf(uuid); + if (operator != msg.sender) revert NotOperator(); + + uint32 region = tokenRegion(tokenId); + uint256 currentTier = fleetTier[tokenId]; + if (targetTier <= currentTier) revert TargetTierNotHigher(); + if (targetTier >= MAX_TIERS) revert MaxTiersReached(); + if (_regionTierMembers[region][targetTier].length >= TIER_CAPACITY) revert TierFull(); + + bool isCountry = _isCountryRegion(region); + uint256 currentBond = tierBond(currentTier, isCountry); + uint256 targetBond = tierBond(targetTier, isCountry); + uint256 additionalBond = targetBond - currentBond; + + // Effects + _removeFromTier(tokenId, region, currentTier); + fleetTier[tokenId] = targetTier; + _addToTier(tokenId, region, targetTier); + + // Interaction: pull from operator + _pullBond(operator, additionalBond); + + emit FleetPromoted(tokenId, currentTier, targetTier, additionalBond); + } + + /// @dev Shared demotion logic. Refunds bond difference to operator. 
+ function _demote(uint256 tokenId, uint256 targetTier) internal { + bytes16 uuid = tokenUuid(tokenId); + address operator = operatorOf(uuid); + if (operator != msg.sender) revert NotOperator(); + + uint32 region = tokenRegion(tokenId); + uint256 currentTier = fleetTier[tokenId]; + if (targetTier >= currentTier) revert TargetTierNotLower(); + if (_regionTierMembers[region][targetTier].length >= TIER_CAPACITY) revert TierFull(); + + bool isCountry = _isCountryRegion(region); + uint256 currentBond = tierBond(currentTier, isCountry); + uint256 targetBond = tierBond(targetTier, isCountry); + uint256 refund = currentBond - targetBond; + + // Effects + _removeFromTier(tokenId, region, currentTier); + fleetTier[tokenId] = targetTier; + _addToTier(tokenId, region, targetTier); + _trimTierCount(region); + + // Interaction: refund to operator + _refundBond(operator, refund); + + emit FleetDemoted(tokenId, currentTier, targetTier, refund); + } + + /// @dev Validates that a tier is available for registration (pure validation, no state changes). + function _validateExplicitTier(uint32 region, uint256 targetTier) internal view { + if (targetTier >= MAX_TIERS) revert MaxTiersReached(); + if (_regionTierMembers[region][targetTier].length >= TIER_CAPACITY) revert TierFull(); + } + + // -- Bundle-level helpers (shared by buildHighestBondedUuidBundle & inclusion hints) -- + + /// @dev Finds the highest active tier index across both bundle levels. + function _findMaxTierIndex(uint32 countryKey, uint32 adminKey) + internal + view + returns (uint256 maxTierIndex) + { + uint256 adminTiers = regionTierCount[adminKey]; + uint256 countryTiers = regionTierCount[countryKey]; + + uint256 maxTier = adminTiers > 0 ? 
adminTiers - 1 : 0; + if (countryTiers > 0 && countryTiers - 1 > maxTier) maxTier = countryTiers - 1; + return maxTier; + } + + // -- Inclusion-tier logic -- + + /// @dev Uses `_buildHighestBondedUuidBundle` to determine the cheapest tier at + /// `candidateRegion` that guarantees bundle inclusion. Bounded: O(MAX_TIERS). + /// + /// Walks from the bundle's lowestTier upward, "unwinding" the bundle count + /// by subtracting both regions' contributions at each tier. Returns the first + /// tier where: + /// (a) The tier has capacity (< TIER_CAPACITY members). + /// (b) The unwound count shows room in the bundle (< MAX_BONDED_UUID_BUNDLE_SIZE). + /// + /// If no existing tier qualifies and highestTier + 1 < MAX_TIERS, returns + /// highestTier + 1 (joining above current max guarantees inclusion). + /// + /// @param countryCode The country code for the bundle location. + /// @param adminCode The admin area code (0 for country-only bundles). + /// @param isCountry True if candidate is joining country region, false for admin. + function _findCheapestInclusionTier(uint16 countryCode, uint16 adminCode, bool isCountry) + internal + view + returns (uint256) + { + uint32 countryKey = uint32(countryCode); + uint32 adminKey = makeAdminRegion(countryCode, adminCode); + uint32 candidateRegion = isCountry ? countryKey : adminKey; + + (, uint256 count, uint256 highestTier, uint256 lowestTier) = _buildHighestBondedUuidBundle(countryKey, adminKey); + + // Walk from lowestTier upward, unwinding the bundle count at each tier. + // Subtracting both regions' contributions simulates "what if we built the + // bundle stopping at this tier instead". + for (uint256 tier = lowestTier; tier <= highestTier; ++tier) { + bool tierHasCapacity = _regionTierMembers[candidateRegion][tier].length < TIER_CAPACITY; + bool bundleHasRoom = count < MAX_BONDED_UUID_BUNDLE_SIZE; + + if (tierHasCapacity && bundleHasRoom) { + return tier; + } + + // Unwind: subtract both regions' contributions at this tier. 
+ // Use saturating subtraction to handle edge cases gracefully. + uint256 adminMembers = _regionTierMembers[adminKey][tier].length; + uint256 countryMembers = _regionTierMembers[countryKey][tier].length; + uint256 tierTotal = adminMembers + countryMembers; + count = tierTotal > count ? 0 : count - tierTotal; + } + + // No fit in existing tiers — try joining above current max. + if (highestTier < MAX_TIERS - 1) { + return highestTier + 1; + } + + revert MaxTiersReached(); + } + + /// @dev Appends a token to a region's tier member array and records its index. + /// Updates regionTierCount if this opens a new highest tier. + function _addToTier(uint256 tokenId, uint32 region, uint256 tier) internal { + _regionTierMembers[region][tier].push(tokenId); + _indexInTier[tokenId] = _regionTierMembers[region][tier].length - 1; + + // Update tier count if we're opening a new tier + if (tier >= regionTierCount[region]) { + regionTierCount[region] = tier + 1; + } + } + + /// @dev Swap-and-pop removal from a region's tier member array. + function _removeFromTier(uint256 tokenId, uint32 region, uint256 tier) internal { + uint256[] storage members = _regionTierMembers[region][tier]; + uint256 idx = _indexInTier[tokenId]; + uint256 lastIdx = members.length - 1; + + if (idx != lastIdx) { + uint256 lastTokenId = members[lastIdx]; + members[idx] = lastTokenId; + _indexInTier[lastTokenId] = idx; + } + members.pop(); + } + + /// @dev Shrinks regionTierCount so the top tier is always non-empty. + function _trimTierCount(uint32 region) internal { + uint256 tierCount = regionTierCount[region]; + while (tierCount > 0 && _regionTierMembers[region][tierCount - 1].length == 0) { + tierCount--; + } + regionTierCount[region] = tierCount; + } + + // -- Region index maintenance -- + + /// @dev Adds a region to the appropriate index set if not already present. 
+ function _addToRegionIndex(uint32 region) internal { + if (_isCountryRegion(region)) { + // Country + uint16 cc = uint16(region); + if (_activeCountryIndex[cc] == 0) { + _activeCountries.push(cc); + _activeCountryIndex[cc] = _activeCountries.length; // 1-indexed + } + } else { + // Admin area + if (_activeAdminAreaIndex[region] == 0) { + _activeAdminAreas.push(region); + _activeAdminAreaIndex[region] = _activeAdminAreas.length; + } + } + } + + /// @dev Removes a region from the index set if the region is now completely empty. + function _removeFromRegionIndex(uint32 region) internal { + if (regionTierCount[region] > 0) return; // still has fleets + + if (_isCountryRegion(region)) { + uint16 cc = uint16(region); + uint256 oneIdx = _activeCountryIndex[cc]; + if (oneIdx > 0) { + uint256 lastIdx = _activeCountries.length - 1; + uint256 removeIdx = oneIdx - 1; + if (removeIdx != lastIdx) { + uint16 lastCountryCode = _activeCountries[lastIdx]; + _activeCountries[removeIdx] = lastCountryCode; + _activeCountryIndex[lastCountryCode] = oneIdx; + } + _activeCountries.pop(); + delete _activeCountryIndex[cc]; + } + } else { + uint256 oneIdx = _activeAdminAreaIndex[region]; + if (oneIdx > 0) { + uint256 lastIdx = _activeAdminAreas.length - 1; + uint256 removeIdx = oneIdx - 1; + if (removeIdx != lastIdx) { + uint32 lastAdminArea = _activeAdminAreas[lastIdx]; + _activeAdminAreas[removeIdx] = lastAdminArea; + _activeAdminAreaIndex[lastAdminArea] = oneIdx; + } + _activeAdminAreas.pop(); + delete _activeAdminAreaIndex[region]; + } + } + } + + // ────────────────────────────────────────────── + // Overrides required by ERC721Enumerable + // ────────────────────────────────────────────── + + function _update(address to, uint256 tokenId, address auth) internal override(ERC721Enumerable) returns (address) { + address from = super._update(to, tokenId, auth); + + // For owned-only tokens, transfer uuidOwner when the token is transferred + // This allows marketplace trading of owned-only 
UUIDs + uint32 region = tokenRegion(tokenId); + if (region == OWNED_REGION_KEY && from != address(0) && to != address(0)) { + uuidOwner[tokenUuid(tokenId)] = to; + } + + return from; + } + + function _increaseBalance(address account, uint128 value) internal override(ERC721Enumerable) { + super._increaseBalance(account, value); + } + + function supportsInterface(bytes4 interfaceId) public view override(ERC721Enumerable) returns (bool) { + return super.supportsInterface(interfaceId); + } +} diff --git a/src/swarms/ServiceProvider.sol b/src/swarms/ServiceProvider.sol new file mode 100644 index 0000000..e4a777b --- /dev/null +++ b/src/swarms/ServiceProvider.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear + +pragma solidity ^0.8.24; + +import {ERC721} from "@openzeppelin/contracts/token/ERC721/ERC721.sol"; + +/** + * @title ServiceProvider + * @notice Permissionless ERC-721 representing ownership of a service endpoint URL. + * @dev TokenID = keccak256(url), guaranteeing one owner per URL. + */ +contract ServiceProvider is ERC721 { + error EmptyURL(); + error NotTokenOwner(); + + // Maps TokenID -> Provider URL + mapping(uint256 => string) public providerUrls; + + event ProviderRegistered(address indexed owner, string url, uint256 indexed tokenId); + event ProviderBurned(address indexed owner, uint256 indexed tokenId); + + constructor() ERC721("Swarm Service Provider", "SSV") {} + + /// @notice Mints a new provider NFT for the given URL. + /// @param url The backend service URL (must be unique). + /// @return tokenId The deterministic token ID derived from `url`. + function registerProvider(string calldata url) external returns (uint256 tokenId) { + if (bytes(url).length == 0) { + revert EmptyURL(); + } + + tokenId = uint256(keccak256(bytes(url))); + + providerUrls[tokenId] = url; + + _mint(msg.sender, tokenId); + + emit ProviderRegistered(msg.sender, url, tokenId); + } + + /// @notice Burns the provider NFT. Caller must be the token owner. 
+ /// @param tokenId The provider token ID to burn. + function burn(uint256 tokenId) external { + if (ownerOf(tokenId) != msg.sender) { + revert NotTokenOwner(); + } + + delete providerUrls[tokenId]; + + _burn(tokenId); + + emit ProviderBurned(msg.sender, tokenId); + } +} diff --git a/src/swarms/SwarmRegistryL1.sol b/src/swarms/SwarmRegistryL1.sol new file mode 100644 index 0000000..b9bf4f7 --- /dev/null +++ b/src/swarms/SwarmRegistryL1.sol @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear + +pragma solidity ^0.8.24; + +// NOTE: SSTORE2 is not compatible with ZkSync Era due to EXTCODECOPY limitation. +// For ZkSync deployment, consider using chunked storage or calldata alternatives. +import {SSTORE2} from "solady/utils/SSTORE2.sol"; +import {ReentrancyGuard} from "@openzeppelin/contracts/utils/ReentrancyGuard.sol"; +import {FleetIdentity} from "./FleetIdentity.sol"; +import {ServiceProvider} from "./ServiceProvider.sol"; + +/** + * @title SwarmRegistryL1 + * @notice Permissionless BLE swarm registry optimized for Ethereum L1 (uses SSTORE2 for filter storage). + * @dev Not compatible with ZkSync Era — use SwarmRegistryUniversal instead. + * + * Swarms are defined for a **fleet UUID** (not a token ID), allowing swarms to be + * registered for any UUID that has been claimed/registered in FleetIdentity, + * regardless of whether it's assigned to a region or is in "owned-only" mode. + * This decouples swarm management from geographic tier placement. 
+ */ +contract SwarmRegistryL1 is ReentrancyGuard { + error InvalidFingerprintSize(); + error InvalidFilterSize(); + error InvalidUuid(); + error NotUuidOwner(); + error ProviderDoesNotExist(); + error NotProviderOwner(); + error SwarmNotFound(); + error InvalidSwarmData(); + error SwarmAlreadyExists(); + error SwarmNotOrphaned(); + error SwarmOrphaned(); + + enum SwarmStatus { + REGISTERED, + ACCEPTED, + REJECTED + } + + // Internal Schema version for Tag ID construction + enum TagType { + IBEACON_PAYLOAD_ONLY, // 0x00: proxUUID || major || minor + IBEACON_INCLUDES_MAC, // 0x01: proxUUID || major || minor || MAC (Normalized) + VENDOR_ID, // 0x02: companyID || hash(vendorBytes) + GENERIC // 0x03 + + } + + struct Swarm { + bytes16 fleetUuid; // Fleet UUID (not token ID) - allows swarms for any registered UUID + uint256 providerId; // The Service Provider TokenID + address filterPointer; // SSTORE2 pointer + uint8 fingerprintSize; + TagType tagType; + SwarmStatus status; + } + + uint8 public constant MAX_FINGERPRINT_SIZE = 16; + + FleetIdentity public immutable FLEET_CONTRACT; + + ServiceProvider public immutable PROVIDER_CONTRACT; + + // SwarmID -> Swarm + mapping(uint256 => Swarm) public swarms; + + // UUID -> List of SwarmIDs (keyed by fleet UUID, not token ID) + mapping(bytes16 => uint256[]) public uuidSwarms; + + // SwarmID -> index in uuidSwarms[fleetUuid] (for O(1) removal) + mapping(uint256 => uint256) public swarmIndexInUuid; + + event SwarmRegistered(uint256 indexed swarmId, bytes16 indexed fleetUuid, uint256 indexed providerId, address owner); + event SwarmStatusChanged(uint256 indexed swarmId, SwarmStatus status); + event SwarmFilterUpdated(uint256 indexed swarmId, address indexed owner, uint32 filterSize); + event SwarmProviderUpdated(uint256 indexed swarmId, uint256 indexed oldProvider, uint256 indexed newProvider); + event SwarmDeleted(uint256 indexed swarmId, bytes16 indexed fleetUuid, address indexed owner); + event SwarmPurged(uint256 indexed 
swarmId, bytes16 indexed fleetUuid, address indexed purgedBy); + + /// @notice Derives a deterministic swarm ID. Callable off-chain to predict IDs before registration. + /// @return swarmId keccak256(fleetUuid, providerId, filterData) + function computeSwarmId(bytes16 fleetUuid, uint256 providerId, bytes calldata filterData) + public + pure + returns (uint256) + { + return uint256(keccak256(abi.encode(fleetUuid, providerId, filterData))); + } + + constructor(address _fleetContract, address _providerContract) { + if (_fleetContract == address(0) || _providerContract == address(0)) { + revert InvalidSwarmData(); + } + FLEET_CONTRACT = FleetIdentity(_fleetContract); + PROVIDER_CONTRACT = ServiceProvider(_providerContract); + } + + /// @notice Registers a new swarm. Caller must own the fleet UUID (via FleetIdentity.uuidOwner). + /// @param fleetUuid Fleet UUID (bytes16) - the UUID must be registered in FleetIdentity. + /// @param providerId Service provider token ID. + /// @param filterData XOR filter blob (1–24 576 bytes). + /// @param fingerprintSize Fingerprint width in bits (1–16). + /// @param tagType Tag identity schema. + /// @return swarmId Deterministic ID for this swarm. 
+ function registerSwarm( + bytes16 fleetUuid, + uint256 providerId, + bytes calldata filterData, + uint8 fingerprintSize, + TagType tagType + ) external nonReentrant returns (uint256 swarmId) { + if (fleetUuid == bytes16(0)) { + revert InvalidUuid(); + } + if (fingerprintSize == 0 || fingerprintSize > MAX_FINGERPRINT_SIZE) { + revert InvalidFingerprintSize(); + } + if (filterData.length == 0 || filterData.length > 24576) { + revert InvalidFilterSize(); + } + + // Check UUID ownership - works for any registered UUID regardless of region + if (FLEET_CONTRACT.uuidOwner(fleetUuid) != msg.sender) { + revert NotUuidOwner(); + } + if (PROVIDER_CONTRACT.ownerOf(providerId) == address(0)) { + revert ProviderDoesNotExist(); + } + + swarmId = computeSwarmId(fleetUuid, providerId, filterData); + + if (swarms[swarmId].filterPointer != address(0)) { + revert SwarmAlreadyExists(); + } + + Swarm storage s = swarms[swarmId]; + s.fleetUuid = fleetUuid; + s.providerId = providerId; + s.fingerprintSize = fingerprintSize; + s.tagType = tagType; + s.status = SwarmStatus.REGISTERED; + + uuidSwarms[fleetUuid].push(swarmId); + swarmIndexInUuid[swarmId] = uuidSwarms[fleetUuid].length - 1; + + s.filterPointer = SSTORE2.write(filterData); + + emit SwarmRegistered(swarmId, fleetUuid, providerId, msg.sender); + } + + /// @notice Approves a swarm. Caller must own the provider NFT. + /// @param swarmId The swarm to accept. + function acceptSwarm(uint256 swarmId) external { + Swarm storage s = swarms[swarmId]; + if (s.filterPointer == address(0)) revert SwarmNotFound(); + + (bool fleetValid, bool providerValid) = isSwarmValid(swarmId); + if (!fleetValid || !providerValid) revert SwarmOrphaned(); + + if (PROVIDER_CONTRACT.ownerOf(s.providerId) != msg.sender) { + revert NotProviderOwner(); + } + s.status = SwarmStatus.ACCEPTED; + emit SwarmStatusChanged(swarmId, SwarmStatus.ACCEPTED); + } + + /// @notice Rejects a swarm. Caller must own the provider NFT. + /// @param swarmId The swarm to reject. 
    function rejectSwarm(uint256 swarmId) external {
        Swarm storage s = swarms[swarmId];
        // A zero filter pointer means the swarm was never registered (or was deleted).
        if (s.filterPointer == address(0)) revert SwarmNotFound();

        // Orphaned swarms (released UUID or burned provider NFT) cannot change status.
        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
        if (!fleetValid || !providerValid) revert SwarmOrphaned();

        // Only the current provider-NFT holder may reject.
        if (PROVIDER_CONTRACT.ownerOf(s.providerId) != msg.sender) {
            revert NotProviderOwner();
        }
        s.status = SwarmStatus.REJECTED;
        emit SwarmStatusChanged(swarmId, SwarmStatus.REJECTED);
    }

    /// @notice Replaces the XOR filter. Resets status to REGISTERED. Caller must own the fleet UUID.
    /// @param swarmId The swarm to update.
    /// @param newFilterData Replacement filter blob.
    function updateSwarmFilter(uint256 swarmId, bytes calldata newFilterData) external nonReentrant {
        Swarm storage s = swarms[swarmId];
        if (s.filterPointer == address(0)) {
            revert SwarmNotFound();
        }
        if (FLEET_CONTRACT.uuidOwner(s.fleetUuid) != msg.sender) {
            revert NotUuidOwner();
        }
        // Same 24 KiB bound enforced at registration.
        if (newFilterData.length == 0 || newFilterData.length > 24576) {
            revert InvalidFilterSize();
        }

        // Any filter change invalidates a prior ACCEPTED/REJECTED verdict;
        // the provider must re-approve.
        s.status = SwarmStatus.REGISTERED;

        // NOTE: the old SSTORE2 data contract is abandoned, not destroyed;
        // its bytecode persists on-chain (see "Deletion Behavior" in the docs).
        s.filterPointer = SSTORE2.write(newFilterData);

        emit SwarmFilterUpdated(swarmId, msg.sender, uint32(newFilterData.length));
    }

    /// @notice Reassigns the service provider. Resets status to REGISTERED. Caller must own the fleet UUID.
    /// @param swarmId The swarm to update.
    /// @param newProviderId New provider token ID.
+ function updateSwarmProvider(uint256 swarmId, uint256 newProviderId) external { + Swarm storage s = swarms[swarmId]; + if (s.filterPointer == address(0)) { + revert SwarmNotFound(); + } + if (FLEET_CONTRACT.uuidOwner(s.fleetUuid) != msg.sender) { + revert NotUuidOwner(); + } + if (PROVIDER_CONTRACT.ownerOf(newProviderId) == address(0)) { + revert ProviderDoesNotExist(); + } + + uint256 oldProvider = s.providerId; + + s.providerId = newProviderId; + + s.status = SwarmStatus.REGISTERED; + + emit SwarmProviderUpdated(swarmId, oldProvider, newProviderId); + } + + /// @notice Permanently deletes a swarm. Caller must own the fleet UUID. + /// @param swarmId The swarm to delete. + function deleteSwarm(uint256 swarmId) external { + Swarm storage s = swarms[swarmId]; + if (s.filterPointer == address(0)) { + revert SwarmNotFound(); + } + if (FLEET_CONTRACT.uuidOwner(s.fleetUuid) != msg.sender) { + revert NotUuidOwner(); + } + + bytes16 fleetUuid = s.fleetUuid; + + _removeFromUuidSwarms(fleetUuid, swarmId); + + delete swarms[swarmId]; + + emit SwarmDeleted(swarmId, fleetUuid, msg.sender); + } + + /// @notice Returns whether the swarm's fleet UUID and provider NFT are still valid. + /// @param swarmId The swarm to check. + /// @return fleetValid True if the fleet UUID is still owned (uuidOwner != address(0)). + /// @return providerValid True if the provider NFT exists. + function isSwarmValid(uint256 swarmId) public view returns (bool fleetValid, bool providerValid) { + Swarm storage s = swarms[swarmId]; + if (s.filterPointer == address(0)) revert SwarmNotFound(); + + // Fleet is valid if UUID is still owned (not released) + fleetValid = FLEET_CONTRACT.uuidOwner(s.fleetUuid) != address(0); + + try PROVIDER_CONTRACT.ownerOf(s.providerId) returns (address) { + providerValid = true; + } catch { + providerValid = false; + } + } + + /// @notice Permissionless-ly removes a swarm whose fleet UUID has been released or provider NFT has been burned. 
+ /// @param swarmId The orphaned swarm to purge. + function purgeOrphanedSwarm(uint256 swarmId) external { + Swarm storage s = swarms[swarmId]; + if (s.filterPointer == address(0)) revert SwarmNotFound(); + + (bool fleetValid, bool providerValid) = isSwarmValid(swarmId); + if (fleetValid && providerValid) revert SwarmNotOrphaned(); + + bytes16 fleetUuid = s.fleetUuid; + + _removeFromUuidSwarms(fleetUuid, swarmId); + + delete swarms[swarmId]; + + emit SwarmPurged(swarmId, fleetUuid, msg.sender); + } + + /// @notice Tests tag membership against the swarm's XOR filter. + /// @param swarmId The swarm to query. + /// @param tagHash keccak256 of the tag identity bytes (caller must pre-normalize per tagType). + /// @return isValid True if the tag passes the XOR filter check. + function checkMembership(uint256 swarmId, bytes32 tagHash) external view returns (bool isValid) { + Swarm storage s = swarms[swarmId]; + if (s.filterPointer == address(0)) { + revert SwarmNotFound(); + } + + // Reject queries against orphaned swarms + (bool fleetValid, bool providerValid) = isSwarmValid(swarmId); + if (!fleetValid || !providerValid) revert SwarmOrphaned(); + + uint256 dataLen; + address pointer = s.filterPointer; + assembly { + dataLen := extcodesize(pointer) + } + + // SSTORE2 adds 1 byte overhead (0x00), So actual data length = codeSize - 1. + if (dataLen > 0) { + unchecked { + --dataLen; + } + } + + // 2. 
Calculate M (number of slots) + uint256 m = (dataLen * 8) / s.fingerprintSize; + if (m == 0) return false; + + bytes32 h = tagHash; + + uint32 h1 = uint32(uint256(h)) % uint32(m); + uint32 h2 = uint32(uint256(h) >> 32) % uint32(m); + uint32 h3 = uint32(uint256(h) >> 64) % uint32(m); + + uint256 fpMask = (1 << s.fingerprintSize) - 1; + uint256 expectedFp = (uint256(h) >> 96) & fpMask; + + uint256 f1 = _readFingerprint(pointer, h1, s.fingerprintSize); + uint256 f2 = _readFingerprint(pointer, h2, s.fingerprintSize); + uint256 f3 = _readFingerprint(pointer, h3, s.fingerprintSize); + + return (f1 ^ f2 ^ f3) == expectedFp; + } + + /** + * @dev O(1) removal of a swarm from its UUID's swarm list using index tracking. + */ + function _removeFromUuidSwarms(bytes16 fleetUuid, uint256 swarmId) internal { + uint256[] storage arr = uuidSwarms[fleetUuid]; + uint256 index = swarmIndexInUuid[swarmId]; + uint256 lastId = arr[arr.length - 1]; + + arr[index] = lastId; + swarmIndexInUuid[lastId] = index; + arr.pop(); + delete swarmIndexInUuid[swarmId]; + } + + /** + * @dev Reads a packed fingerprint of arbitrary bit size from SSTORE2 blob. + * @param pointer The contract address storing data. + * @param index The slot index. + * @param bits The bit size of the fingerprint. + */ + function _readFingerprint(address pointer, uint256 index, uint8 bits) internal view returns (uint256) { + uint256 bitOffset = index * bits; + uint256 startByte = bitOffset / 8; + uint256 endByte = (bitOffset + bits - 1) / 8; + + // Read raw bytes. SSTORE2 uses 0-based index relative to data. 
+ bytes memory chunk = SSTORE2.read(pointer, startByte, endByte + 1); + + // Convert chunk to uint256 + uint256 raw; + for (uint256 i = 0; i < chunk.length;) { + raw = (raw << 8) | uint8(chunk[i]); + unchecked { + ++i; + } + } + + uint256 totalBitsRead = chunk.length * 8; + uint256 localStart = bitOffset % 8; + uint256 shiftRight = totalBitsRead - (localStart + bits); + + return (raw >> shiftRight) & ((1 << bits) - 1); + } +} diff --git a/src/swarms/SwarmRegistryUniversal.sol b/src/swarms/SwarmRegistryUniversal.sol new file mode 100644 index 0000000..cce4316 --- /dev/null +++ b/src/swarms/SwarmRegistryUniversal.sol @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear + +pragma solidity ^0.8.24; + +import {ReentrancyGuard} from "@openzeppelin/contracts/utils/ReentrancyGuard.sol"; +import {FleetIdentity} from "./FleetIdentity.sol"; +import {ServiceProvider} from "./ServiceProvider.sol"; + +/** + * @title SwarmRegistryUniversal + * @notice Permissionless BLE swarm registry compatible with all EVM chains (including ZkSync Era). + * @dev Uses native `bytes` storage for cross-chain compatibility. + * + * Swarms are defined for a **fleet UUID** (not a token ID), allowing swarms to be + * registered for any UUID that has been claimed/registered in FleetIdentity, + * regardless of whether it's assigned to a region or is in "owned-only" mode. + * This decouples swarm management from geographic tier placement. 
+ */ +contract SwarmRegistryUniversal is ReentrancyGuard { + error InvalidFingerprintSize(); + error InvalidFilterSize(); + error InvalidUuid(); + error NotUuidOwner(); + error ProviderDoesNotExist(); + error NotProviderOwner(); + error SwarmNotFound(); + error InvalidSwarmData(); + error FilterTooLarge(); + error SwarmAlreadyExists(); + error SwarmNotOrphaned(); + error SwarmOrphaned(); + + enum SwarmStatus { + REGISTERED, + ACCEPTED, + REJECTED + } + + enum TagType { + IBEACON_PAYLOAD_ONLY, // 0x00: proxUUID || major || minor + IBEACON_INCLUDES_MAC, // 0x01: proxUUID || major || minor || MAC (Normalized) + VENDOR_ID, // 0x02: companyID || hash(vendorBytes) + GENERIC // 0x03 + + } + + struct Swarm { + bytes16 fleetUuid; // Fleet UUID (not token ID) - allows swarms for any registered UUID + uint256 providerId; + uint32 filterLength; // Length of filter in bytes (max ~4GB, practically limited) + uint8 fingerprintSize; + TagType tagType; + SwarmStatus status; + } + + uint8 public constant MAX_FINGERPRINT_SIZE = 16; + + /// @notice Maximum filter size per swarm (24KB - fits in ~15M gas on cold write) + uint32 public constant MAX_FILTER_SIZE = 24576; + + FleetIdentity public immutable FLEET_CONTRACT; + + ServiceProvider public immutable PROVIDER_CONTRACT; + + /// @notice SwarmID -> Swarm metadata + mapping(uint256 => Swarm) public swarms; + + /// @notice SwarmID -> XOR filter data (stored as bytes) + mapping(uint256 => bytes) internal filterData; + + /// @notice UUID -> List of SwarmIDs (keyed by fleet UUID, not token ID) + mapping(bytes16 => uint256[]) public uuidSwarms; + + /// @notice SwarmID -> index in uuidSwarms[fleetUuid] (for O(1) removal) + mapping(uint256 => uint256) public swarmIndexInUuid; + + event SwarmRegistered( + uint256 indexed swarmId, bytes16 indexed fleetUuid, uint256 indexed providerId, address owner, uint32 filterSize + ); + + event SwarmStatusChanged(uint256 indexed swarmId, SwarmStatus status); + event SwarmFilterUpdated(uint256 indexed 
swarmId, address indexed owner, uint32 filterSize); + event SwarmProviderUpdated(uint256 indexed swarmId, uint256 indexed oldProvider, uint256 indexed newProvider); + event SwarmDeleted(uint256 indexed swarmId, bytes16 indexed fleetUuid, address indexed owner); + event SwarmPurged(uint256 indexed swarmId, bytes16 indexed fleetUuid, address indexed purgedBy); + + /// @notice Derives a deterministic swarm ID. Callable off-chain to predict IDs before registration. + /// @return swarmId keccak256(fleetUuid, providerId, filter) + function computeSwarmId(bytes16 fleetUuid, uint256 providerId, bytes calldata filter) public pure returns (uint256) { + return uint256(keccak256(abi.encode(fleetUuid, providerId, filter))); + } + + constructor(address _fleetContract, address _providerContract) { + if (_fleetContract == address(0) || _providerContract == address(0)) { + revert InvalidSwarmData(); + } + FLEET_CONTRACT = FleetIdentity(_fleetContract); + PROVIDER_CONTRACT = ServiceProvider(_providerContract); + } + + /// @notice Registers a new swarm. Caller must own the fleet UUID (via FleetIdentity.uuidOwner). + /// @param fleetUuid Fleet UUID (bytes16) - the UUID must be registered in FleetIdentity. + /// @param providerId Service provider token ID. + /// @param filter XOR filter blob (1–24 576 bytes). + /// @param fingerprintSize Fingerprint width in bits (1–16). + /// @param tagType Tag identity schema. + /// @return swarmId Deterministic ID for this swarm. 
+ function registerSwarm( + bytes16 fleetUuid, + uint256 providerId, + bytes calldata filter, + uint8 fingerprintSize, + TagType tagType + ) external nonReentrant returns (uint256 swarmId) { + if (fleetUuid == bytes16(0)) { + revert InvalidUuid(); + } + if (fingerprintSize == 0 || fingerprintSize > MAX_FINGERPRINT_SIZE) { + revert InvalidFingerprintSize(); + } + if (filter.length == 0) { + revert InvalidFilterSize(); + } + if (filter.length > MAX_FILTER_SIZE) { + revert FilterTooLarge(); + } + + // Check UUID ownership - works for any registered UUID regardless of region + if (FLEET_CONTRACT.uuidOwner(fleetUuid) != msg.sender) { + revert NotUuidOwner(); + } + if (PROVIDER_CONTRACT.ownerOf(providerId) == address(0)) { + revert ProviderDoesNotExist(); + } + + swarmId = computeSwarmId(fleetUuid, providerId, filter); + + if (swarms[swarmId].filterLength != 0) { + revert SwarmAlreadyExists(); + } + + Swarm storage s = swarms[swarmId]; + s.fleetUuid = fleetUuid; + s.providerId = providerId; + s.filterLength = uint32(filter.length); + s.fingerprintSize = fingerprintSize; + s.tagType = tagType; + s.status = SwarmStatus.REGISTERED; + + filterData[swarmId] = filter; + + uuidSwarms[fleetUuid].push(swarmId); + swarmIndexInUuid[swarmId] = uuidSwarms[fleetUuid].length - 1; + + emit SwarmRegistered(swarmId, fleetUuid, providerId, msg.sender, uint32(filter.length)); + } + + /// @notice Approves a swarm. Caller must own the provider NFT. + /// @param swarmId The swarm to accept. + function acceptSwarm(uint256 swarmId) external { + Swarm storage s = swarms[swarmId]; + if (s.filterLength == 0) revert SwarmNotFound(); + + (bool fleetValid, bool providerValid) = isSwarmValid(swarmId); + if (!fleetValid || !providerValid) revert SwarmOrphaned(); + + if (PROVIDER_CONTRACT.ownerOf(s.providerId) != msg.sender) { + revert NotProviderOwner(); + } + s.status = SwarmStatus.ACCEPTED; + emit SwarmStatusChanged(swarmId, SwarmStatus.ACCEPTED); + } + + /// @notice Rejects a swarm. 
Caller must own the provider NFT. + /// @param swarmId The swarm to reject. + function rejectSwarm(uint256 swarmId) external { + Swarm storage s = swarms[swarmId]; + if (s.filterLength == 0) revert SwarmNotFound(); + + (bool fleetValid, bool providerValid) = isSwarmValid(swarmId); + if (!fleetValid || !providerValid) revert SwarmOrphaned(); + + if (PROVIDER_CONTRACT.ownerOf(s.providerId) != msg.sender) { + revert NotProviderOwner(); + } + s.status = SwarmStatus.REJECTED; + emit SwarmStatusChanged(swarmId, SwarmStatus.REJECTED); + } + + /// @notice Replaces the XOR filter. Resets status to REGISTERED. Caller must own the fleet UUID. + /// @param swarmId The swarm to update. + /// @param newFilterData Replacement filter blob. + function updateSwarmFilter(uint256 swarmId, bytes calldata newFilterData) external nonReentrant { + Swarm storage s = swarms[swarmId]; + if (s.filterLength == 0) { + revert SwarmNotFound(); + } + if (FLEET_CONTRACT.uuidOwner(s.fleetUuid) != msg.sender) { + revert NotUuidOwner(); + } + if (newFilterData.length == 0) { + revert InvalidFilterSize(); + } + if (newFilterData.length > MAX_FILTER_SIZE) { + revert FilterTooLarge(); + } + + s.filterLength = uint32(newFilterData.length); + s.status = SwarmStatus.REGISTERED; + filterData[swarmId] = newFilterData; + + emit SwarmFilterUpdated(swarmId, msg.sender, uint32(newFilterData.length)); + } + + /// @notice Reassigns the service provider. Resets status to REGISTERED. Caller must own the fleet UUID. + /// @param swarmId The swarm to update. + /// @param newProviderId New provider token ID. 
+ function updateSwarmProvider(uint256 swarmId, uint256 newProviderId) external { + Swarm storage s = swarms[swarmId]; + if (s.filterLength == 0) { + revert SwarmNotFound(); + } + if (FLEET_CONTRACT.uuidOwner(s.fleetUuid) != msg.sender) { + revert NotUuidOwner(); + } + if (PROVIDER_CONTRACT.ownerOf(newProviderId) == address(0)) { + revert ProviderDoesNotExist(); + } + + uint256 oldProvider = s.providerId; + + // Effects — update provider and reset status + s.providerId = newProviderId; + s.status = SwarmStatus.REGISTERED; + + emit SwarmProviderUpdated(swarmId, oldProvider, newProviderId); + } + + /// @notice Permanently deletes a swarm. Caller must own the fleet UUID. + /// @param swarmId The swarm to delete. + function deleteSwarm(uint256 swarmId) external { + Swarm storage s = swarms[swarmId]; + if (s.filterLength == 0) { + revert SwarmNotFound(); + } + if (FLEET_CONTRACT.uuidOwner(s.fleetUuid) != msg.sender) { + revert NotUuidOwner(); + } + + bytes16 fleetUuid = s.fleetUuid; + + _removeFromUuidSwarms(fleetUuid, swarmId); + + delete swarms[swarmId]; + delete filterData[swarmId]; + + emit SwarmDeleted(swarmId, fleetUuid, msg.sender); + } + + /// @notice Returns whether the swarm's fleet UUID and provider NFT are still valid. + /// @param swarmId The swarm to check. + /// @return fleetValid True if the fleet UUID is still owned (uuidOwner != address(0)). + /// @return providerValid True if the provider NFT exists. 
    function isSwarmValid(uint256 swarmId) public view returns (bool fleetValid, bool providerValid) {
        Swarm storage s = swarms[swarmId];
        // filterLength == 0 marks a non-existent (or deleted) swarm.
        if (s.filterLength == 0) revert SwarmNotFound();

        // Fleet is valid if UUID is still owned (not released)
        fleetValid = FLEET_CONTRACT.uuidOwner(s.fleetUuid) != address(0);

        // ownerOf reverts for burned/non-existent tokens; translate into a bool.
        try PROVIDER_CONTRACT.ownerOf(s.providerId) returns (address) {
            providerValid = true;
        } catch {
            providerValid = false;
        }
    }

    /// @notice Permissionlessly removes a swarm whose fleet UUID has been released or provider NFT has been burned.
    /// @param swarmId The orphaned swarm to purge.
    function purgeOrphanedSwarm(uint256 swarmId) external {
        Swarm storage s = swarms[swarmId];
        if (s.filterLength == 0) revert SwarmNotFound();

        // Purge is allowed only when at least one of the two anchors
        // (fleet UUID ownership, provider NFT) is gone.
        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
        if (fleetValid && providerValid) revert SwarmNotOrphaned();

        // Cache before `delete` clears the struct.
        bytes16 fleetUuid = s.fleetUuid;

        _removeFromUuidSwarms(fleetUuid, swarmId);

        // Full cleanup: metadata and filter bytes.
        delete swarms[swarmId];
        delete filterData[swarmId];

        emit SwarmPurged(swarmId, fleetUuid, msg.sender);
    }

    /// @notice Tests tag membership against the swarm's XOR filter.
    /// @param swarmId The swarm to query.
    /// @param tagHash keccak256 of the tag identity bytes (caller must pre-normalize per tagType).
    /// @return isValid True if the tag passes the XOR filter check.
    function checkMembership(uint256 swarmId, bytes32 tagHash) external view returns (bool isValid) {
        Swarm storage s = swarms[swarmId];
        if (s.filterLength == 0) {
            revert SwarmNotFound();
        }

        // Reject queries against orphaned swarms
        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
        if (!fleetValid || !providerValid) revert SwarmOrphaned();

        bytes storage filter = filterData[swarmId];
        uint256 dataLen = s.filterLength;

        // Calculate M (number of fingerprint slots)
        uint256 m = (dataLen * 8) / s.fingerprintSize;
        if (m == 0) return false;

        // Derive 3 indices and expected fingerprint from hash
        // (3-wise XOR filter: slot indices from hash bits 0-31, 32-63, 64-95;
        // expected fingerprint from bits 96 upward).
        uint32 h1 = uint32(uint256(tagHash)) % uint32(m);
        uint32 h2 = uint32(uint256(tagHash) >> 32) % uint32(m);
        uint32 h3 = uint32(uint256(tagHash) >> 64) % uint32(m);

        uint256 fpMask = (1 << s.fingerprintSize) - 1;
        uint256 expectedFp = (uint256(tagHash) >> 96) & fpMask;

        // Read and XOR fingerprints
        uint256 f1 = _readFingerprint(filter, h1, s.fingerprintSize);
        uint256 f2 = _readFingerprint(filter, h2, s.fingerprintSize);
        uint256 f3 = _readFingerprint(filter, h3, s.fingerprintSize);

        // Membership holds when the XOR of the three slot fingerprints equals
        // the fingerprint derived from the tag hash (probabilistic — false
        // positives possible, no false negatives).
        return (f1 ^ f2 ^ f3) == expectedFp;
    }

    /// @notice Returns the raw XOR filter bytes for a swarm.
    /// @param swarmId The swarm to query.
    /// @return filter The XOR filter blob.
    function getFilterData(uint256 swarmId) external view returns (bytes memory filter) {
        if (swarms[swarmId].filterLength == 0) {
            revert SwarmNotFound();
        }
        return filterData[swarmId];
    }

    /**
     * @dev O(1) removal of a swarm from its UUID's swarm list using index tracking.
+ */ + function _removeFromUuidSwarms(bytes16 fleetUuid, uint256 swarmId) internal { + uint256[] storage arr = uuidSwarms[fleetUuid]; + uint256 index = swarmIndexInUuid[swarmId]; + uint256 lastId = arr[arr.length - 1]; + + arr[index] = lastId; + swarmIndexInUuid[lastId] = index; + arr.pop(); + delete swarmIndexInUuid[swarmId]; + } + + /** + * @dev Reads a packed fingerprint from storage bytes. + * @param filter The filter bytes in storage. + * @param index The fingerprint slot index. + * @param bits The fingerprint size in bits. + */ + function _readFingerprint(bytes storage filter, uint256 index, uint8 bits) internal view returns (uint256) { + uint256 bitOffset = index * bits; + uint256 startByte = bitOffset / 8; + uint256 endByte = (bitOffset + bits - 1) / 8; + + // Read bytes and assemble into uint256 + uint256 raw; + for (uint256 i = startByte; i <= endByte;) { + raw = (raw << 8) | uint8(filter[i]); + unchecked { + ++i; + } + } + + // Extract the fingerprint bits + uint256 totalBitsRead = (endByte - startByte + 1) * 8; + uint256 localStart = bitOffset % 8; + uint256 shiftRight = totalBitsRead - (localStart + bits); + + return (raw >> shiftRight) & ((1 << bits) - 1); + } +} diff --git a/src/swarms/doc/README.md b/src/swarms/doc/README.md new file mode 100644 index 0000000..16ee026 --- /dev/null +++ b/src/swarms/doc/README.md @@ -0,0 +1,163 @@ +# Swarm System Technical Specification + +BLE tag registry enabling decentralized device discovery using cryptographic membership proofs. Individual tags within a swarm are not enumerated on-chain. + +## Architecture + +```mermaid +graph TB + subgraph NFTs["Identity Layer (ERC-721)"] + FI["FleetIdentity
SFID
tokenId = (regionKey << 128) | uuid"] + SP["ServiceProvider
SSV
tokenId = keccak256(url)"] + end + + subgraph Registries["Registry Layer"] + REG["SwarmRegistry
L1: SSTORE2 filter storage
Universal: native bytes storage"] + end + + subgraph Actors + FO(("Fleet
Owner")) + PRV(("Service
Provider")) + ANY(("Client /
Purger")) + end + + FO -- "registerFleet* / claimUuid" --> FI + FO -- "registerSwarm / update / delete" --> REG + PRV -- "registerProvider(url)" --> SP + PRV -- "acceptSwarm / rejectSwarm" --> REG + ANY -- "buildHighestBondedUuidBundle /
checkMembership / purge" --> REG + + REG -. "uuidOwner(fleetUuid)" .-> FI + REG -. "ownerOf(providerId)" .-> SP + + style FI fill:#4a9eff,color:#fff + style SP fill:#4a9eff,color:#fff + style REG fill:#ff9f43,color:#fff + style FO fill:#2ecc71,color:#fff + style PRV fill:#2ecc71,color:#fff + style ANY fill:#95a5a6,color:#fff +``` + +## Core Components + +| Contract | Role | Identity | Token | +| :------------------------- | :----------------------------- | :----------------------------------------- | :---- | +| **FleetIdentity** | Fleet registry (ERC-721) | `(regionKey << 128) \| uuid` | SFID | +| **ServiceProvider** | Backend URL registry (ERC-721) | `keccak256(url)` | SSV | +| **SwarmRegistryL1** | Tag group registry (L1) | `keccak256(fleetUuid, providerId, filter)` | — | +| **SwarmRegistryUniversal** | Tag group registry (ZkSync+) | `keccak256(fleetUuid, providerId, filter)` | — | + +All contracts are **permissionless**—access control via NFT ownership. FleetIdentity requires ERC-20 bond (anti-spam). + +## Key Concepts + +### Swarm + +A group of ~10k-20k BLE tags represented by an XOR filter. Tags are never enumerated on-chain; membership is verified via cryptographic filter. + +### UUID Ownership + +UUIDs (iBeacon Proximity UUID) have ownership levels: + +| Level | Region Key | Bond | Description | +| :------ | :--------- | :---------------------- | :-------------------------- | +| Owned | 0 | BASE_BOND | Reserved, not in any region | +| Local | ≥1024 | BASE_BOND × 2^tier | Registered in admin area | +| Country | 1-999 | BASE_BOND × 16 × 2^tier | Registered at country level | + +### Geographic Tiers + +Each region has independent tier competition: + +- **Tier capacity**: 4 members per tier +- **Max tiers**: 24 per region +- **Bundle size**: Up to 20 UUIDs returned to clients + +Country fleets pay 16× more but appear in all admin-area bundles within their country. 
+ +### Operator Delegation + +UUID owners can delegate tier maintenance to an **operator**: + +- **Default**: `operatorOf(uuid)` returns the UUID owner +- **Delegation**: UUID owner calls `setOperator(uuid, operator)` +- **Bond Split**: Owner pays BASE_BOND, operator pays tier excess +- **Permissions**: Only operator can promote/demote; owner retains burn rights +- **Transfer**: When changing operators, tier bonds transfer atomically + +This enables cold-wallet ownership with hot-wallet tier management. + +### Token ID Encoding + +``` +tokenId = (regionKey << 128) | uint256(uint128(uuid)) +``` + +- Bits 0-127: UUID +- Bits 128-159: Region key + +## Privacy Model + +The system provides **non-enumerating** tag verification—individual tags aren't listed on-chain; membership is proven via XOR filter. + +| Data | Visibility | Notes | +| :---------- | :--------------- | :------------------------------------------- | +| UUID | Public | Required for iOS background beacon detection | +| Major/Minor | Filter-protected | Hashed, not enumerated | +| MAC address | Android-only | iOS does not expose BLE MAC addresses | + +**Limitation**: UUID must be public for iOS `CLBeaconRegion` background monitoring. The system protects the specific Major/Minor combinations within that UUID's swarm. 
+ +## Documentation + +| Document | Description | +| :--------------------------------------------- | :------------------------------------------------ | +| [data-model.md](data-model.md) | Contract interfaces, enums, storage layout | +| [fleet-registration.md](fleet-registration.md) | Fleet & UUID registration, tier economics | +| [swarm-operations.md](swarm-operations.md) | Swarm registration, filters, provider approval | +| [lifecycle.md](lifecycle.md) | State machines, updates, deletion, orphan cleanup | +| [discovery.md](discovery.md) | Client discovery flows, tag hash construction | +| [maintenance.md](maintenance.md) | Bundle inclusion monitoring, tier optimization | +| [iso3166-reference.md](iso3166-reference.md) | ISO 3166-1/2 codes and admin area mappings | + +## End-to-End Flow + +```mermaid +sequenceDiagram + participant FO as Fleet Owner + participant PO as Provider Owner + participant FI as FleetIdentity + participant SR as SwarmRegistry + participant SP as ServiceProvider + participant Client as EdgeBeaconScanner + + Note over FO: 1. Register fleet + FO->>FI: registerFleetLocal(uuid, cc, admin, tier) + + Note over FO: 2. Register provider + PO->>SP: registerProvider(url) + + Note over FO: 3. Register swarm + FO->>SR: registerSwarm(uuid, providerId, filter, ...) + + Note over FO: 4. Provider approves + PO->>SR: acceptSwarm(swarmId) + + Note over Client: 5. 
Client discovers + Client->>FI: buildHighestBondedUuidBundle(cc, admin) + Client->>SR: uuidSwarms(uuid, 0) + Client->>SR: checkMembership(swarmId, tagHash) + Client->>SP: providerUrls(providerId) + Note over Client: Connect to service URL +``` + +## Storage Variants + +| Variant | Chain | Filter Storage | Deletion Behavior | +| :------------------------- | :------------------ | :-------------------------- | :-------------------------------- | +| **SwarmRegistryL1** | Ethereum L1 | SSTORE2 (contract bytecode) | Struct cleared; bytecode persists | +| **SwarmRegistryUniversal** | ZkSync Era, all EVM | `mapping(uint256 => bytes)` | Full deletion, gas refund | + +--- + +_For implementation details, see individual documentation pages._ diff --git a/src/swarms/doc/assistant-guide.md b/src/swarms/doc/assistant-guide.md new file mode 100644 index 0000000..1a7376c --- /dev/null +++ b/src/swarms/doc/assistant-guide.md @@ -0,0 +1,432 @@ +# Swarm System Architecture & Implementation Guide + +> **Context for AI Agents**: This document outlines the architecture, constraints, and operational logic of the Swarm Smart Contract system. Use this context when modifying contracts, writing SDKs, or debugging verifiers. + +## 1. System Overview + +The Swarm System is a **non-enumerating** registry for **BLE (Bluetooth Low Energy)** tag swarms. It allows Fleet Owners to manage large sets of tags (~10k-20k) and link them to Service Providers (Backend URLs) using cryptographic membership proofs—individual tags are never listed on-chain. + +Two registry variants exist for different deployment targets: + +- **`SwarmRegistryL1`** — Ethereum L1, uses SSTORE2 (contract bytecode) for gas-efficient filter storage. Not compatible with ZkSync Era. +- **`SwarmRegistryUniversal`** — All EVM chains including ZkSync Era, uses native `bytes` storage. 
+ +### Core Components + +| Contract | Role | Key Identity | Token | +| :--------------------------- | :---------------------------------- | :----------------------------------------- | :---- | +| **`FleetIdentity`** | Fleet Registry (ERC-721 Enumerable) | `(regionKey << 128) \| uint128(uuid)` | SFID | +| **`ServiceProvider`** | Service Registry (ERC-721) | `keccak256(url)` | SSV | +| **`SwarmRegistryL1`** | Swarm Registry (L1) | `keccak256(fleetUuid, providerId, filter)` | — | +| **`SwarmRegistryUniversal`** | Swarm Registry (Universal) | `keccak256(fleetUuid, providerId, filter)` | — | + +All contracts are **permissionless** — access control is enforced through NFT ownership rather than admin roles. `FleetIdentity` additionally requires an ERC-20 bond (e.g. NODL) to register a fleet, acting as an anti-spam / anti-abuse mechanism. + +Both NFT contracts support **burning** — the token owner can call `burn(tokenId)` to destroy their NFT. Burning a `FleetIdentity` token refunds the tier bond to the owner. Burning either NFT makes any swarms referencing that token _orphaned_. + +### FleetIdentity: Two-Level Geographic Registration + +`FleetIdentity` implements a **two-level geographic registration** system: + +- **Country Level** — `regionKey = countryCode` (ISO 3166-1 numeric, 1-999) +- **Admin Area (Local) Level** — `regionKey = (countryCode << 10) | adminCode` (>= 1024) + +Each region has its own independent tier namespace. The first fleet in any region always pays the level-appropriate base bond. + +**TokenID Encoding:** + +``` +tokenId = (regionKey << 128) | uint256(uint128(uuid)) +``` + +- Bits 0-127: UUID (Proximity UUID as bytes16) +- Bits 128-159: Region key (country or admin-area code) + +This allows the same UUID to be registered in multiple regions, each with a distinct token. 
+
+### Economic Model (Tier System)
+
+| Parameter | Value |
+| :------------------ | :--------------------------------------------------------- |
+| **Tier Capacity** | 4 members per tier |
+| **Max Tiers** | 24 per region |
+| **Local Bond** | `BASE_BOND * 2^tier` |
+| **Country Bond** | `BASE_BOND * COUNTRY_BOND_MULTIPLIER * 2^tier` (16× local) |
+| **Max Bundle Size** | 20 UUIDs |
+
+Country fleets pay 16× more but appear in all admin-area bundles within their country. This economic difference provides locals a significant advantage: a local can reach tier 4 for the same cost a country player pays for tier 0 (since 2^4 = 16 = COUNTRY_BOND_MULTIPLIER).
+
+### UUID Ownership Model
+
+UUIDs have an ownership model with registration levels:
+
+| Level | Value | Description |
+| :-------- | :---- | :--------------------------------------- |
+| `None` | 0 | Not registered (default) |
+| `Owned` | 1 | Claimed but not registered in any region |
+| `Local` | 2 | Registered at admin area level |
+| `Country` | 3 | Registered at country level |
+
+- **UUID Owner**: The address that first registered a token for a UUID. All subsequent registrations must come from this address.
+- **Multi-Region**: The same UUID can have multiple tokens in different regions (all at the same level, all by the same owner).
+- **Transfer**: Owned-only tokens transfer `uuidOwner` when the NFT is transferred.
+
+---
+
+## 2. Operational Workflows
+
+### A. Provider Setup (One-Time)
+
+**Service Provider** calls `ServiceProvider.registerProvider("https://cms.example.com")`. Receives `providerTokenId` (= `keccak256(url)`).
+
+### B. Fleet Registration Options
+
+Fleet Owners have multiple paths to register fleets:
+
+#### B1. Direct Registration (Country Level)
+
+```solidity
+// 1. Approve bond token
+NODL.approve(fleetIdentityAddress, requiredBond);
+
+// 2. Get inclusion hint (off-chain call - free)
+(uint256 tier, uint256 bond) = fleetIdentity.countryInclusionHint(840); // US = 840
+
+// 3. 
Register at the recommended tier +uint256 tokenId = fleetIdentity.registerFleetCountry(uuid, 840, tier); +// Returns tokenId = (840 << 128) | uint128(uuid) +``` + +#### B2. Direct Registration (Local/Admin Area Level) + +```solidity +// 1. Approve bond token +NODL.approve(fleetIdentityAddress, requiredBond); + +// 2. Get inclusion hint (off-chain call - free) +(uint256 tier, uint256 bond) = fleetIdentity.localInclusionHint(840, 5); // US, California + +// 3. Register at the recommended tier +uint256 tokenId = fleetIdentity.registerFleetLocal(uuid, 840, 5, tier); +// Returns tokenId = ((840 << 10 | 5) << 128) | uint128(uuid) +``` + +#### B3. Claim-First Flow (Reserve UUID, Register Later) + +```solidity +// 1. Claim UUID ownership (costs BASE_BOND) +NODL.approve(fleetIdentityAddress, BASE_BOND); +uint256 ownedTokenId = fleetIdentity.claimUuid(uuid); +// Returns tokenId = uint128(uuid) (regionKey = 0) + +// 2. Later: Register from owned state (burns owned token, mints regional token) +// Only pays incremental bond (tier bond - BASE_BOND already paid) +uint256 tokenId = fleetIdentity.registerFleetLocal(uuid, 840, 5, targetTier); +``` + +### C. Fleet Tier Management + +Fleets can promote or demote within their region: + +```solidity +// Promote to next tier (pulls additional bond) +fleetIdentity.promote(tokenId); + +// Reassign to any tier (promotes or demotes) +fleetIdentity.reassignTier(tokenId, targetTier); +// If targetTier > current: pulls additional bond +// If targetTier < current: refunds bond difference +``` + +### D. Unregister to Owned State + +A fleet with a **single token** can unregister back to owned-only state: + +```solidity +// Returns to owned state, refunds (tierBond - BASE_BOND) +uint256 ownedTokenId = fleetIdentity.unregisterToOwned(tokenId); +// Reverts if UUID has multiple tokens (multi-region registration) +``` + +### E. 
Operator Delegation + +UUID owners can delegate tier management to an operator wallet: + +```solidity +// Set operator at registration time (owner pays BASE_BOND, operator pays tier excess) +fleetIdentity.registerFleetLocalWithOperator(uuid, 840, 5, tier, operatorAddress); + +// Or set operator after registration (transfers tier bonds atomically) +fleetIdentity.setOperator(uuid, operatorAddress); + +// Check current operator (returns owner if none set) +address manager = fleetIdentity.operatorOf(uuid); + +// Clear operator (reverts to owner-managed) +fleetIdentity.setOperator(uuid, address(0)); +``` + +**Key Points:** + +- Operator handles `promote()` and `reassignTier()` calls +- Owner retains `burn()` rights and `setOperator()` control +- Tier excess bonds transfer between operators when changing +- Cannot set operator for owned-only UUIDs (must be registered) + +### F. Release UUID Ownership + +An owned-only UUID can be fully released, refunding BASE_BOND: + +```solidity +// Must be in Owned state (not registered in any region) +fleetIdentity.releaseUuid(uuid); +// Clears uuidOwner, allows anyone to claim the UUID +``` + +### G. Burn Fleet Token + +```solidity +// Burns token and refunds bonds (split between owner and operator) +fleetIdentity.burn(tokenId); +// For owned-only tokens: refunds BASE_BOND to owner +// For registered tokens: refunds BASE_BOND to owner, tier excess to operator +``` + +### H. Swarm Registration (Per Batch of Tags) + +A Fleet Owner groups tags into a "Swarm" (chunk of ~10k-20k tags) and registers them. + +1. **Construct `TagID`s**: Generate the unique ID for every tag in the swarm (see "Tag Schemas" below). +2. **Build XOR Filter**: Create a binary XOR filter (Peeling Algorithm) containing the hashes of all `TagID`s. +3. **(Optional) Predict Swarm ID**: Call `computeSwarmId(fleetUuid, providerId, filterData)` off-chain to obtain the deterministic ID before submitting the transaction. +4. 
**Register**: + ```solidity + swarmRegistry.registerSwarm( + fleetUuid, + providerId, + filterData, + 16, // Fingerprint size in bits (1–16) + TagType.IBEACON_INCLUDES_MAC // or PAYLOAD_ONLY, VENDOR_ID, GENERIC + ); + // Returns the deterministic swarmId + ``` + +### I. Swarm Approval Flow + +After registration a swarm starts in `REGISTERED` status and requires provider approval: + +1. **Provider approves**: `swarmRegistry.acceptSwarm(swarmId)` → status becomes `ACCEPTED`. +2. **Provider rejects**: `swarmRegistry.rejectSwarm(swarmId)` → status becomes `REJECTED`. + +Only the owner of the provider NFT (`providerId`) can accept or reject. + +### J. Swarm Updates + +The fleet owner can modify a swarm at any time. Both operations reset status to `REGISTERED`, requiring fresh provider approval: + +- **Replace the XOR filter**: `swarmRegistry.updateSwarmFilter(swarmId, newFilterData)` +- **Change service provider**: `swarmRegistry.updateSwarmProvider(swarmId, newProviderId)` + +### K. Swarm Deletion + +The fleet owner can permanently remove a swarm: + +```solidity +swarmRegistry.deleteSwarm(swarmId); +``` + +### L. Orphan Detection & Cleanup + +When a fleet or provider NFT is burned, swarms referencing it become _orphaned_: + +- **Check validity**: `swarmRegistry.isSwarmValid(swarmId)` returns `(fleetValid, providerValid)`. +- **Purge**: Anyone can call `swarmRegistry.purgeOrphanedSwarm(swarmId)` to remove stale state. The caller receives the SSTORE gas refund as an incentive. +- **Guards**: `acceptSwarm`, `rejectSwarm`, and `checkMembership` all revert with `SwarmOrphaned()` if the swarm's NFTs have been burned. + +--- + +## 3. Off-Chain Logic: Filter & Tag Construction + +### Tag Schemas (`TagType`) + +The system supports different ways of constructing the unique `TagID` based on the hardware capabilities. 
+ +**Enum: `TagType`** + +- **`0x00`: IBEACON_PAYLOAD_ONLY** + - **Format**: `UUID (16b) || Major (2b) || Minor (2b)` + - **Use Case**: When Major/Minor pairs are globally unique (standard iBeacon). +- **`0x01`: IBEACON_INCLUDES_MAC** + - **Format**: `UUID (16b) || Major (2b) || Minor (2b) || MAC (6b)` + - **Use Case**: Anti-spoofing logic or Shared Major/Minor fleets. + - **CRITICAL: MAC Normalization Rule**: + - If MAC is **Public/Static** (Address Type bits `00`): Use the **Real MAC Address**. + - If MAC is **Random/Private** (Address Type bits `01` or `11`): Replace with `FF:FF:FF:FF:FF:FF`. + - _Why?_ To support rotating privacy MACs while still validating "It's a privacy tag". +- **`0x02`: VENDOR_ID** + - **Format**: `companyID || hash(vendorBytes)` + - **Use Case**: Non-iBeacon BLE devices identified by Bluetooth SIG company ID. +- **`0x03`: GENERIC** + - **Use Case**: Catch-all for custom tag identity schemes. + +### Filter Construction (The Math) + +To verify membership on-chain, the contract uses **3-hash XOR logic**. + +1. **Input**: `h = keccak256(TagID)` (where TagID is constructed via schema above). +2. **Indices** (M = number of fingerprint slots = `filterLength * 8 / fingerprintSize`): + - `h1 = uint32(h) % M` + - `h2 = uint32(h >> 32) % M` + - `h3 = uint32(h >> 64) % M` +3. **Fingerprint**: `fp = (h >> 96) & ((1 << fingerprintSize) - 1)` +4. **Verification**: `Filter[h1] ^ Filter[h2] ^ Filter[h3] == fp` + +### Swarm ID Derivation + +Swarm IDs are **deterministic** — derived from the swarm's core identity: + +``` +swarmId = uint256(keccak256(abi.encode(fleetUuid, providerId, filterData))) +``` + +This means the same (UUID, provider, filter) triple always produces the same ID, and duplicate registrations revert with `SwarmAlreadyExists()`. The `computeSwarmId` function is `public pure`, so it can be called off-chain at zero cost via `eth_call`. + +--- + +## 4. 
Client Discovery Flow (The "EdgeBeaconScanner" Perspective)
+
+A client (mobile phone or gateway) scans a BLE beacon and wants to find its owner and backend service.
+
+### Discovery Option A: Geographic Bundle Discovery (Recommended)
+
+Use the priority-ordered bundle based on EdgeBeaconScanner location.
+
+#### Step 1: Get Priority Bundle
+
+```solidity
+// EdgeBeaconScanner knows its location: US, California (country=840, admin=5)
+(bytes16[] memory uuids, uint256 count) = fleetIdentity.buildHighestBondedUuidBundle(840, 5);
+// Returns up to 20 UUIDs, priority-ordered:
+// 1. Higher tier first
+// 2. Local (admin area) before country within same tier
+// 3. Earlier registration within same tier+level
+```
+
+#### Step 2: Match Detected Beacon UUID
+
+```solidity
+bytes16 detectedUUID = ...; // From iBeacon advertisement
+
+for (uint256 i = 0; i < count; i++) {
+    if (uuids[i] == detectedUUID) {
+        // Found! Now find the token ID
+        // Try local region first, then country
+        uint32 localRegion = (840 << 10) | 5;
+        uint256 tokenId = fleetIdentity.computeTokenId(detectedUUID, localRegion);
+        try fleetIdentity.ownerOf(tokenId) returns (address) {
+            // Found the local fleet token
+        } catch {
+            // Not registered locally — fall back to the country-level region
+            tokenId = fleetIdentity.computeTokenId(detectedUUID, 840);
+        }
+    }
+}
+```
+
+#### Step 3: Enumerate Swarms & Check Membership
+
+Same as Option B Steps 3-5.
+
+### Discovery Option B: Direct Fleet Lookup
+
+For when you know the UUID and want to find its fleet directly. 
+
+#### Step 1: Enumerate Active Regions
+
+```solidity
+// Get all countries with active fleets
+uint16[] memory countries = fleetIdentity.getActiveCountries();
+
+// Get all admin areas with active fleets
+uint32[] memory adminAreas = fleetIdentity.getActiveAdminAreas();
+```
+
+#### Step 2: Find Fleet Token
+
+```solidity
+bytes16 uuid = ...; // From iBeacon
+
+// Try each potential region (start with user's location)
+uint32 region = (840 << 10) | 5; // US-CA
+uint256 tokenId = fleetIdentity.computeTokenId(uuid, region);
+
+try fleetIdentity.ownerOf(tokenId) returns (address owner) {
+    // Found the fleet!
+} catch {
+    // Try country-level
+    tokenId = fleetIdentity.computeTokenId(uuid, 840);
+}
+```
+
+#### Step 3: Find Swarms
+
+```solidity
+// Enumerate swarms for this UUID
+uint256[] memory swarmIds = new uint256[](100); // estimate
+for (uint256 i = 0; ; i++) {
+    try swarmRegistry.uuidSwarms(uuid, i) returns (uint256 swarmId) {
+        swarmIds[i] = swarmId;
+    } catch {
+        break; // End of array
+    }
+}
+```
+
+#### Step 4: Membership Check
+
+```solidity
+// Construct tagHash based on swarm's tagType
+(bytes16 fleetUuid, uint256 providerId, uint32 filterLen, uint8 fpSize,
+ SwarmStatus status, TagType tagType) = swarmRegistry.swarms(swarmId);
+
+// Build tagId per schema (see Section 3)
+bytes memory tagId;
+if (tagType == TagType.IBEACON_PAYLOAD_ONLY) {
+    tagId = abi.encodePacked(uuid, major, minor);
+} else if (tagType == TagType.IBEACON_INCLUDES_MAC) {
+    bytes6 normalizedMac = isRandomMac ? bytes6(0xFFFFFFFFFFFF) : realMac;
+    tagId = abi.encodePacked(uuid, major, minor, normalizedMac);
+}
+
+bytes32 tagHash = keccak256(tagId);
+bool isMember = swarmRegistry.checkMembership(swarmId, tagHash);
+```
+
+#### Step 5: Service Discovery
+
+```solidity
+if (isMember && status == SwarmStatus.ACCEPTED) {
+    string memory url = serviceProvider.providerUrls(providerId);
+    // Connect to url
+}
+```
+
+---
+
+## 5. 
Storage & Deletion Notes + +### SwarmRegistryL1 (SSTORE2) + +- Filter data is stored as **immutable contract bytecode** via SSTORE2. +- On `deleteSwarm` / `purgeOrphanedSwarm`, the struct is cleared but the deployed bytecode **cannot be erased** (accepted trade-off of the SSTORE2 pattern). + +### SwarmRegistryUniversal (native bytes) + +- Filter data is stored in a `mapping(uint256 => bytes)`. +- On `deleteSwarm` / `purgeOrphanedSwarm`, both the struct and the filter bytes are fully deleted (`delete filterData[swarmId]`), reclaiming storage. +- Exposes `getFilterData(swarmId)` for off-chain filter retrieval. + +### Deletion Performance + +Both registries use an **O(1) swap-and-pop** strategy for removing swarms from the `uuidSwarms` array, tracked via the `swarmIndexInUuid` mapping. + +--- + +**Note**: This architecture ensures that an EdgeBeaconScanner can go from **Raw Signal** → **Verified Service URL** entirely on-chain (data-wise), without a centralized indexer, while privacy of the 10,000 other tags in the swarm is preserved. 
diff --git a/src/swarms/doc/data-model.md b/src/swarms/doc/data-model.md new file mode 100644 index 0000000..36617fe --- /dev/null +++ b/src/swarms/doc/data-model.md @@ -0,0 +1,176 @@ +# Data Model & Contract Interfaces + +## Contract Classes + +```mermaid +classDiagram + class FleetIdentity { + +IERC20 BOND_TOKEN + +uint256 BASE_BOND + +uint256 TIER_CAPACITY = 4 + +uint256 MAX_TIERS = 24 + +uint256 COUNTRY_BOND_MULTIPLIER = 16 + +uint256 MAX_BONDED_UUID_BUNDLE_SIZE = 20 + +mapping uuidOwner : bytes16 → address + +mapping uuidOperator : bytes16 → address + +mapping uuidLevel : bytes16 → RegistrationLevel + +mapping uuidTokenCount : bytes16 → uint256 + +mapping regionTierCount : uint32 → uint256 + +mapping fleetTier : uint256 → uint256 + -- + +claimUuid(uuid) → tokenId + +registerFleetLocal(uuid, cc, admin, tier) → tokenId + +registerFleetLocalWithOperator(uuid, cc, admin, tier, operator) → tokenId + +registerFleetCountry(uuid, cc, tier) → tokenId + +registerFleetCountryWithOperator(uuid, cc, tier, operator) → tokenId + +promote(tokenId) + +reassignTier(tokenId, targetTier) + +unregisterToOwned(tokenId) → newTokenId + +releaseUuid(uuid) + +burn(tokenId) + +setOperator(uuid, newOperator) + +operatorOf(uuid) → address + -- + +localInclusionHint(cc, admin) → tier, bond + +countryInclusionHint(cc) → tier, bond + +buildHighestBondedUuidBundle(cc, admin) → uuids[], count + +buildCountryOnlyBundle(cc) → uuids[], count + +getActiveCountries() → uint16[] + +getActiveAdminAreas() → uint32[] + +tokenUuid(tokenId) → bytes16 + +tokenRegion(tokenId) → uint32 + +computeTokenId(uuid, region) → uint256 + +tierBond(tier, isCountry) → uint256 + } + + class ServiceProvider { + +mapping providerUrls : uint256 → string + -- + +registerProvider(url) → tokenId + +burn(tokenId) + } + + class SwarmRegistry { + +mapping swarms : uint256 → Swarm + +mapping uuidSwarms : bytes16 → uint256[] + +mapping swarmIndexInUuid : uint256 → uint256 + -- + +computeSwarmId(fleetUuid, providerId, filter) → 
swarmId + +registerSwarm(fleetUuid, providerId, filter, fpSize, tagType) → swarmId + +acceptSwarm(swarmId) + +rejectSwarm(swarmId) + +updateSwarmFilter(swarmId, newFilter) + +updateSwarmProvider(swarmId, newProviderId) + +deleteSwarm(swarmId) + +isSwarmValid(swarmId) → fleetValid, providerValid + +purgeOrphanedSwarm(swarmId) + +checkMembership(swarmId, tagHash) → bool + } +``` + +## Struct: Swarm + +```solidity +struct Swarm { + bytes16 fleetUuid; // UUID that owns this swarm + uint256 providerId; // ServiceProvider token ID + uint32 filterLength; // XOR filter byte length + uint8 fingerprintSize; // Fingerprint bits (1-16) + SwarmStatus status; // Registration state + TagType tagType; // Tag identity scheme +} +``` + +## Enumerations + +### SwarmStatus + +| Value | Description | +| :----------- | :------------------------- | +| `REGISTERED` | Awaiting provider approval | +| `ACCEPTED` | Provider approved; active | +| `REJECTED` | Provider rejected | + +### TagType + +| Value | Format | Use Case | +| :--------------------- | :------------------------------- | :--------------- | +| `IBEACON_PAYLOAD_ONLY` | UUID ∥ Major ∥ Minor (20B) | Standard iBeacon | +| `IBEACON_INCLUDES_MAC` | UUID ∥ Major ∥ Minor ∥ MAC (26B) | Anti-spoofing | +| `VENDOR_ID` | companyID ∥ hash(vendorBytes) | Non-iBeacon BLE | +| `GENERIC` | Custom | Extensible | + +### RegistrationLevel + +| Value | Region Key | Description | +| :------------ | :--------- | :----------------- | +| `None` (0) | — | Not registered | +| `Owned` (1) | 0 | Claimed, no region | +| `Local` (2) | ≥1024 | Admin area | +| `Country` (3) | 1-999 | Country-wide | + +## Region Key Encoding + +``` +Country: regionKey = countryCode (1-999) +Admin Area: regionKey = (countryCode << 10) | adminCode (≥1024) +``` + +**Token ID:** + +``` +tokenId = (regionKey << 128) | uint256(uint128(uuid)) +``` + +**Helper functions:** + +```solidity +bytes16 uuid = fleetIdentity.tokenUuid(tokenId); +uint32 region = 
fleetIdentity.tokenRegion(tokenId); +uint256 tokenId = fleetIdentity.computeTokenId(uuid, regionKey); +uint32 adminRegion = fleetIdentity.makeAdminRegion(countryCode, adminCode); +``` + +## Swarm ID Derivation + +Deterministic and collision-free: + +```solidity +swarmId = uint256(keccak256(abi.encode(fleetUuid, providerId, filterData))) +``` + +Duplicate registration reverts with `SwarmAlreadyExists()`. + +## XOR Filter Membership + +3-hash XOR verification: + +``` +Input: h = keccak256(tagId) +M = filterLength * 8 / fingerprintSize // slots + +h1 = uint32(h) % M +h2 = uint32(h >> 32) % M +h3 = uint32(h >> 64) % M +fp = (h >> 96) & ((1 << fingerprintSize) - 1) + +Valid if: Filter[h1] ^ Filter[h2] ^ Filter[h3] == fp +``` + +## Storage Notes + +### SwarmRegistryL1 + +- Filter stored as **contract bytecode** via SSTORE2 +- Gas-efficient reads (EXTCODECOPY) +- Bytecode persists after deletion (immutable) + +### SwarmRegistryUniversal + +- Filter stored in `mapping(uint256 => bytes)` +- Full deletion reclaims storage +- `getFilterData(swarmId)` for off-chain retrieval + +### Deletion Performance + +O(1) swap-and-pop via `swarmIndexInUuid` mapping. diff --git a/src/swarms/doc/discovery.md b/src/swarms/doc/discovery.md new file mode 100644 index 0000000..a71d0ef --- /dev/null +++ b/src/swarms/doc/discovery.md @@ -0,0 +1,169 @@ +# Client Discovery + +## Overview + +Clients (mobile apps, gateways) discover BLE tags and resolve them to backend services entirely on-chain. + +``` +BLE Signal → UUID Match → Swarm Lookup → Membership Check → Service URL +``` + +## Geographic Bundle Discovery (Recommended) + +Use location-based priority bundles for efficient discovery. + +```mermaid +sequenceDiagram + actor Client as EdgeBeaconScanner + participant FI as FleetIdentity + participant SR as SwarmRegistry + participant SP as ServiceProvider + + Note over Client: Location: US-California (840, 5)
Detected: UUID, Major, Minor, MAC + + Client->>+FI: buildHighestBondedUuidBundle(840, 5) + FI-->>-Client: (uuids[], count) — up to 20 UUIDs + + Note over Client: Check if detectedUUID in bundle + + Client->>+SR: uuidSwarms(uuid, 0) + SR-->>-Client: swarmId + Note over Client: Iterate until revert + + Note over Client: Build tagHash per TagType + Client->>+SR: checkMembership(swarmId, tagHash) + SR-->>-Client: true + + Client->>+SR: swarms(swarmId) + SR-->>-Client: {providerId, status: ACCEPTED, ...} + + Client->>+SP: providerUrls(providerId) + SP-->>-Client: "https://api.example.com" + + Note over Client: Connect to service +``` + +### Bundle Priority + +1. **Tier**: Higher tier first +2. **Level**: Local before country (same tier) +3. **Time**: Earlier registration (same tier+level) + +## Direct UUID Lookup + +When UUID is known but location isn't: + +```solidity +// Try regions +uint32 localRegion = (840 << 10) | 5; +uint256 tokenId = fleetIdentity.computeTokenId(uuid, localRegion); +try fleetIdentity.ownerOf(tokenId) { /* found */ } +catch { /* try country: computeTokenId(uuid, 840) */ } + +// Enumerate swarms +for (uint i = 0; ; i++) { + try swarmRegistry.uuidSwarms(uuid, i) returns (uint256 swarmId) { + // process swarmId + } catch { break; } +} +``` + +## Tag Hash Construction + +```mermaid +flowchart TD + A[Read swarm.tagType] --> B{TagType?} + + B -->|IBEACON_PAYLOAD_ONLY| C["UUID ∥ Major ∥ Minor (20B)"] + B -->|IBEACON_INCLUDES_MAC| D{MAC type?} + B -->|VENDOR_ID| E["companyID ∥ hash(vendorBytes)"] + B -->|GENERIC| F["custom scheme"] + + D -->|Public| G["UUID ∥ Major ∥ Minor ∥ realMAC (26B)"] + D -->|Random| H["UUID ∥ Major ∥ Minor ∥ FF:FF:FF:FF:FF:FF"] + + C --> I["tagHash = keccak256(tagId)"] + G --> I + H --> I + E --> I + F --> I + + I --> J["checkMembership(swarmId, tagHash)"] + + style I fill:#4a9eff,color:#fff + style J fill:#2ecc71,color:#fff +``` + +### MAC Address Types + +| Address Type Bits | MAC Type | Action | +| :---------------- | 
:------------- | :---------------------- | +| `00` | Public | Use real MAC | +| `01`, `11` | Random/Private | Use `FF:FF:FF:FF:FF:FF` | + +## Region Enumeration (Indexers) + +```solidity +// Active countries +uint16[] memory countries = fleetIdentity.getActiveCountries(); +// [840, 276, 392, ...] + +// Active admin areas +uint32[] memory adminAreas = fleetIdentity.getActiveAdminAreas(); +// [860165, 282629, ...] → (cc << 10) | admin + +// Tier data +uint256 tierCount = fleetIdentity.regionTierCount(regionKey); +uint256[] memory tokenIds = fleetIdentity.getTierMembers(regionKey, tier); +bytes16[] memory uuids = fleetIdentity.getTierUuids(regionKey, tier); +``` + +## Complete Discovery Example + +```solidity +function discoverService( + bytes16 uuid, + uint16 major, + uint16 minor, + bytes6 mac, + uint16 countryCode, + uint8 adminCode +) external view returns (string memory serviceUrl, bool found) { + // 1. Check bundle + (bytes16[] memory uuids, uint256 count) = + fleetIdentity.buildHighestBondedUuidBundle(countryCode, adminCode); + + for (uint i = 0; i < count; i++) { + if (uuids[i] != uuid) continue; + + // 2. Find swarms + for (uint j = 0; ; j++) { + uint256 swarmId; + try swarmRegistry.uuidSwarms(uuid, j) returns (uint256 id) { + swarmId = id; + } catch { break; } + + // 3. Get swarm data + (,uint256 providerId,,,SwarmStatus status, TagType tagType) = + swarmRegistry.swarms(swarmId); + + if (status != SwarmStatus.ACCEPTED) continue; + + // 4. Build tagId + bytes memory tagId; + if (tagType == TagType.IBEACON_PAYLOAD_ONLY) { + tagId = abi.encodePacked(uuid, major, minor); + } else if (tagType == TagType.IBEACON_INCLUDES_MAC) { + tagId = abi.encodePacked(uuid, major, minor, mac); + } + + // 5. 
Check membership + if (swarmRegistry.checkMembership(swarmId, keccak256(tagId))) { + return (serviceProvider.providerUrls(providerId), true); + } + } + } + + return ("", false); +} +``` diff --git a/src/swarms/doc/fleet-registration.md b/src/swarms/doc/fleet-registration.md new file mode 100644 index 0000000..a577d2b --- /dev/null +++ b/src/swarms/doc/fleet-registration.md @@ -0,0 +1,294 @@ +# Fleet Registration + +## Registration Paths + +```mermaid +stateDiagram-v2 + [*] --> None : (default) + + None --> Owned : claimUuid() + None --> Local : registerFleetLocal() + None --> Country : registerFleetCountry() + + Owned --> Local : registerFleetLocal() + Owned --> Country : registerFleetCountry() + Owned --> [*] : releaseUuid() / burn() + + Local --> Owned : unregisterToOwned() + Local --> [*] : burn() + + Country --> Owned : unregisterToOwned() + Country --> [*] : burn() +``` + +## Direct Registration + +### Local (Admin Area) + +```solidity +// 1. Approve bond +NODL.approve(fleetIdentityAddress, requiredBond); + +// 2. Get recommended tier (free off-chain call) +(uint256 tier, uint256 bond) = fleetIdentity.localInclusionHint(840, 5); + +// 3. Register +uint256 tokenId = fleetIdentity.registerFleetLocal(uuid, 840, 5, tier); +// tokenId = ((840 << 10 | 5) << 128) | uint128(uuid) +``` + +### Country + +```solidity +// 1. Approve bond +NODL.approve(fleetIdentityAddress, requiredBond); + +// 2. Get recommended tier +(uint256 tier, uint256 bond) = fleetIdentity.countryInclusionHint(840); + +// 3. Register +uint256 tokenId = fleetIdentity.registerFleetCountry(uuid, 840, tier); +// tokenId = (840 << 128) | uint128(uuid) +``` + +## Claim-First Flow + +Reserve UUID, register later: + +```solidity +// 1. Claim (costs BASE_BOND) +NODL.approve(fleetIdentityAddress, BASE_BOND); +uint256 ownedTokenId = fleetIdentity.claimUuid(uuid); +// tokenId = uint128(uuid), regionKey = 0 + +// 2. 
Later: Register (pays incremental: tierBond - BASE_BOND) +uint256 tokenId = fleetIdentity.registerFleetLocal(uuid, 840, 5, tier); +// Burns owned token, mints regional token +``` + +```mermaid +sequenceDiagram + actor FO as Fleet Owner + participant FI as FleetIdentity + participant TOKEN as BOND_TOKEN + + FO->>TOKEN: approve(FleetIdentity, BASE_BOND) + FO->>+FI: claimUuid(uuid) + FI->>TOKEN: transferFrom(owner, this, BASE_BOND) + FI-->>-FO: tokenId = uint128(uuid) + + Note over FO: Later... + + FO->>TOKEN: approve(FleetIdentity, incrementalBond) + FO->>+FI: registerFleetLocal(uuid, cc, admin, tier) + Note over FI: Burns owned token + FI->>TOKEN: transferFrom(owner, this, tierBond - BASE_BOND) + FI-->>-FO: tokenId = ((cc<<10|admin)<<128) | uuid +``` + +## Registering with Operator + +Delegate tier management to a separate wallet at registration time: + +```solidity +// Owner approves BASE_BOND +NODL.approve(fleetIdentityAddress, BASE_BOND); + +// Operator approves tier excess +// (as operator wallet or via prior approval) +uint256 tierExcess = fleetIdentity.tierBond(tier, false) - BASE_BOND; +// Operator must have approved tierExcess to FleetIdentity + +// Register with operator (pulls BASE_BOND from owner, tierExcess from operator) +uint256 tokenId = fleetIdentity.registerFleetLocalWithOperator(uuid, 840, 5, tier, operatorAddress); +``` + +```mermaid +sequenceDiagram + actor Owner + actor Operator + participant FI as FleetIdentity + participant TOKEN as BOND_TOKEN + + Owner->>TOKEN: approve(FleetIdentity, BASE_BOND) + Operator->>TOKEN: approve(FleetIdentity, tierExcess) + Owner->>+FI: registerFleetLocalWithOperator(uuid, cc, admin, tier, operator) + FI->>TOKEN: transferFrom(owner, this, BASE_BOND) + FI->>TOKEN: transferFrom(operator, this, tierExcess) + FI-->>-Owner: tokenId + + Note over Owner,Operator: Operator can now promote/demote +``` + +### Set or Change Operator Later + +```solidity +// Owner sets operator (transfers tier bonds atomically) 
+fleetIdentity.setOperator(uuid, newOperator); +// - Refunds tier excess to old operator (or owner if none set) +// - Pulls tier excess from new operator +// - Emits OperatorSet(uuid, oldOperator, newOperator, tierExcessTransferred) +``` + +### Clear Operator + +```solidity +// Owner clears operator (reverts to owner-managed) +fleetIdentity.setOperator(uuid, address(0)); +// - Refunds tier excess to old operator +// - Pulls tier excess from owner +// - operatorOf(uuid) returns owner again +``` + +## Tier Economics + +### Bond Formula + +| Level | Formula | +| :------ | :------------------------ | +| Owned | `BASE_BOND` | +| Local | `BASE_BOND × 2^tier` | +| Country | `BASE_BOND × 16 × 2^tier` | + +**Example (BASE_BOND = 100):** + +| Tier | Local | Country | +| :--- | ----: | ------: | +| 0 | 100 | 1,600 | +| 1 | 200 | 3,200 | +| 2 | 400 | 6,400 | +| 3 | 800 | 12,800 | + +### Economic Design + +- **Tier capacity**: 4 members per tier +- **Max tiers**: 24 per region +- **Bundle limit**: 20 UUIDs per location + +Country fleets pay 16× but appear in **all** admin-area bundles. Locals have cost advantage within their area. 
+ +## Tier Management + +### Promote + +```solidity +// Approve additional bond +fleetIdentity.promote(tokenId); +// Moves to currentTier + 1 +``` + +### Reassign + +```solidity +// Move to any tier +fleetIdentity.reassignTier(tokenId, targetTier); +// Promotion: pulls difference +// Demotion: refunds difference +``` + +```mermaid +sequenceDiagram + actor FO as Fleet Owner + participant FI as FleetIdentity + participant TOKEN as BOND_TOKEN + + alt Promote + FO->>TOKEN: approve(additionalBond) + FO->>+FI: reassignTier(tokenId, higherTier) + FI->>TOKEN: transferFrom(owner, this, diff) + FI-->>-FO: FleetPromoted + else Demote + FO->>+FI: reassignTier(tokenId, lowerTier) + FI->>TOKEN: transfer(owner, refund) + FI-->>-FO: FleetDemoted + end +``` + +## Unregister to Owned + +Return to owned-only state (single token required): + +```solidity +uint256 ownedTokenId = fleetIdentity.unregisterToOwned(tokenId); +// Refunds tierBond - BASE_BOND +// Reverts if UUID has multiple tokens +``` + +## Release UUID + +Fully release UUID ownership: + +```solidity +fleetIdentity.releaseUuid(uuid); +// Must be in Owned state +// Refunds BASE_BOND +// UUID can be claimed by anyone +``` + +## Multi-Region Registration + +Same UUID can have multiple tokens at the **same level**: + +```mermaid +sequenceDiagram + actor FO as Fleet Owner + participant FI as FleetIdentity + + FO->>+FI: registerFleetLocal(uuid, 840, 5, 0) + Note over FI: uuidLevel = Local, tokenCount = 1 + FI-->>-FO: tokenId_US + + FO->>+FI: registerFleetLocal(uuid, 276, 1, 0) + Note over FI: Same owner, same level, tokenCount = 2 + FI-->>-FO: tokenId_DE + + FO->>+FI: registerFleetCountry(uuid, 392, 0) + FI-->>-FO: ❌ UuidLevelMismatch() +``` + +**Constraints:** + +- All tokens must be same level (Local or Country) +- Cannot `unregisterToOwned` with multiple tokens +- Each region pays its own tier bond + +## Burning + +```solidity +fleetIdentity.burn(tokenId); +// Refunds full tier bond +// Decrements tokenCount +// Clears 
uuidOwner if last token +``` + +## Owned Token Transfer + +Owned-only tokens transfer UUID ownership: + +```solidity +// ERC-721 transfer +fleetIdentity.transferFrom(alice, bob, tokenId); +// uuidOwner[uuid] = bob +// Bob can now register to regions +``` + +Registered tokens can also transfer but do not change `uuidOwner`. + +## Inclusion Hints + +View functions that recommend cheapest tier guaranteeing bundle inclusion. + +### Local Hint + +```solidity +(uint256 tier, uint256 bond) = fleetIdentity.localInclusionHint(cc, admin); +// Simulates bundle for specific admin area +``` + +### Country Hint + +```solidity +(uint256 tier, uint256 bond) = fleetIdentity.countryInclusionHint(cc); +// Scans ALL active admin areas (unbounded, free off-chain) +// Returns tier guaranteeing inclusion everywhere +``` diff --git a/src/swarms/doc/iso3166-2/036-Australia.md b/src/swarms/doc/iso3166-2/036-Australia.md new file mode 100644 index 0000000..1e00f10 --- /dev/null +++ b/src/swarms/doc/iso3166-2/036-Australia.md @@ -0,0 +1,18 @@ +# Australia (036) + +ISO 3166-1 numeric: **036** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | ACT | Australian Capital Territory | +| 2 | NSW | New South Wales | +| 3 | NT | Northern Territory | +| 4 | QLD | Queensland | +| 5 | SA | South Australia | +| 6 | TAS | Tasmania | +| 7 | VIC | Victoria | +| 8 | WA | Western Australia | + +**Total subdivisions:** 8 diff --git a/src/swarms/doc/iso3166-2/076-Brazil.md b/src/swarms/doc/iso3166-2/076-Brazil.md new file mode 100644 index 0000000..665733d --- /dev/null +++ b/src/swarms/doc/iso3166-2/076-Brazil.md @@ -0,0 +1,37 @@ +# Brazil (076) + +ISO 3166-1 numeric: **076** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AC | Acre | +| 2 | AL | Alagoas | +| 3 | AP | Amapá | +| 4 | AM | Amazonas | +| 5 | BA | Bahia | +| 6 | CE | Ceará | +| 7 | DF | Federal District | +| 8 | ES | Espírito Santo | 
+| 9 | GO | Goiás | +| 10 | MA | Maranhão | +| 11 | MT | Mato Grosso | +| 12 | MS | Mato Grosso do Sul | +| 13 | MG | Minas Gerais | +| 14 | PA | Pará | +| 15 | PB | Paraíba | +| 16 | PR | Paraná | +| 17 | PE | Pernambuco | +| 18 | PI | Piauí | +| 19 | RJ | Rio de Janeiro | +| 20 | RN | Rio Grande do Norte | +| 21 | RS | Rio Grande do Sul | +| 22 | RO | Rondônia | +| 23 | RR | Roraima | +| 24 | SC | Santa Catarina | +| 25 | SP | São Paulo | +| 26 | SE | Sergipe | +| 27 | TO | Tocantins | + +**Total subdivisions:** 27 diff --git a/src/swarms/doc/iso3166-2/124-Canada.md b/src/swarms/doc/iso3166-2/124-Canada.md new file mode 100644 index 0000000..f55ed1e --- /dev/null +++ b/src/swarms/doc/iso3166-2/124-Canada.md @@ -0,0 +1,23 @@ +# Canada (124) + +ISO 3166-1 numeric: **124** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AB | Alberta | +| 2 | BC | British Columbia | +| 3 | MB | Manitoba | +| 4 | NB | New Brunswick | +| 5 | NL | Newfoundland and Labrador | +| 6 | NT | Northwest Territories | +| 7 | NS | Nova Scotia | +| 8 | NU | Nunavut | +| 9 | ON | Ontario | +| 10 | PE | Prince Edward Island | +| 11 | QC | Quebec | +| 12 | SK | Saskatchewan | +| 13 | YT | Yukon | + +**Total subdivisions:** 13 diff --git a/src/swarms/doc/iso3166-2/156-China.md b/src/swarms/doc/iso3166-2/156-China.md new file mode 100644 index 0000000..c934c1c --- /dev/null +++ b/src/swarms/doc/iso3166-2/156-China.md @@ -0,0 +1,44 @@ +# China (156) + +ISO 3166-1 numeric: **156** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AH | Anhui | +| 2 | BJ | Beijing | +| 3 | CQ | Chongqing | +| 4 | FJ | Fujian | +| 5 | GS | Gansu | +| 6 | GD | Guangdong | +| 7 | GX | Guangxi | +| 8 | GZ | Guizhou | +| 9 | HI | Hainan | +| 10 | HE | Hebei | +| 11 | HL | Heilongjiang | +| 12 | HA | Henan | +| 13 | HB | Hubei | +| 14 | HN | Hunan | +| 15 | JS | Jiangsu | +| 16 | JX | Jiangxi | +| 17 | JL | 
Jilin | +| 18 | LN | Liaoning | +| 19 | NM | Inner Mongolia | +| 20 | NX | Ningxia | +| 21 | QH | Qinghai | +| 22 | SN | Shaanxi | +| 23 | SD | Shandong | +| 24 | SH | Shanghai | +| 25 | SX | Shanxi | +| 26 | SC | Sichuan | +| 27 | TJ | Tianjin | +| 28 | XJ | Xinjiang | +| 29 | XZ | Tibet | +| 30 | YN | Yunnan | +| 31 | ZJ | Zhejiang | +| 32 | HK | Hong Kong | +| 33 | MO | Macao | +| 34 | TW | Taiwan | + +**Total subdivisions:** 34 diff --git a/src/swarms/doc/iso3166-2/250-France.md b/src/swarms/doc/iso3166-2/250-France.md new file mode 100644 index 0000000..1d25387 --- /dev/null +++ b/src/swarms/doc/iso3166-2/250-France.md @@ -0,0 +1,28 @@ +# France (250) + +ISO 3166-1 numeric: **250** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | ARA | Auvergne-Rhône-Alpes | +| 2 | BFC | Bourgogne-Franche-Comté | +| 3 | BRE | Brittany | +| 4 | CVL | Centre-Val de Loire | +| 5 | COR | Corsica | +| 6 | GES | Grand Est | +| 7 | HDF | Hauts-de-France | +| 8 | IDF | Île-de-France | +| 9 | NOR | Normandy | +| 10 | NAQ | Nouvelle-Aquitaine | +| 11 | OCC | Occitanie | +| 12 | PDL | Pays de la Loire | +| 13 | PAC | Provence-Alpes-Côte d'Azur | +| 14 | GP | Guadeloupe | +| 15 | MQ | Martinique | +| 16 | GF | French Guiana | +| 17 | RE | Réunion | +| 18 | YT | Mayotte | + +**Total subdivisions:** 18 diff --git a/src/swarms/doc/iso3166-2/276-Germany.md b/src/swarms/doc/iso3166-2/276-Germany.md new file mode 100644 index 0000000..dea7312 --- /dev/null +++ b/src/swarms/doc/iso3166-2/276-Germany.md @@ -0,0 +1,26 @@ +# Germany (276) + +ISO 3166-1 numeric: **276** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | BW | Baden-Württemberg | +| 2 | BY | Bavaria | +| 3 | BE | Berlin | +| 4 | BB | Brandenburg | +| 5 | HB | Bremen | +| 6 | HH | Hamburg | +| 7 | HE | Hesse | +| 8 | MV | Mecklenburg-Vorpommern | +| 9 | NI | Lower Saxony | +| 10 | NW | North Rhine-Westphalia | +| 11 | RP 
| Rhineland-Palatinate | +| 12 | SL | Saarland | +| 13 | SN | Saxony | +| 14 | ST | Saxony-Anhalt | +| 15 | SH | Schleswig-Holstein | +| 16 | TH | Thuringia | + +**Total subdivisions:** 16 diff --git a/src/swarms/doc/iso3166-2/356-India.md b/src/swarms/doc/iso3166-2/356-India.md new file mode 100644 index 0000000..b6ec245 --- /dev/null +++ b/src/swarms/doc/iso3166-2/356-India.md @@ -0,0 +1,46 @@ +# India (356) + +ISO 3166-1 numeric: **356** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AN | Andaman and Nicobar Islands | +| 2 | AP | Andhra Pradesh | +| 3 | AR | Arunachal Pradesh | +| 4 | AS | Assam | +| 5 | BR | Bihar | +| 6 | CH | Chandigarh | +| 7 | CT | Chhattisgarh | +| 8 | DH | Dadra and Nagar Haveli and Daman and Diu | +| 9 | DL | Delhi | +| 10 | GA | Goa | +| 11 | GJ | Gujarat | +| 12 | HR | Haryana | +| 13 | HP | Himachal Pradesh | +| 14 | JK | Jammu and Kashmir | +| 15 | JH | Jharkhand | +| 16 | KA | Karnataka | +| 17 | KL | Kerala | +| 18 | LA | Ladakh | +| 19 | LD | Lakshadweep | +| 20 | MP | Madhya Pradesh | +| 21 | MH | Maharashtra | +| 22 | MN | Manipur | +| 23 | ML | Meghalaya | +| 24 | MZ | Mizoram | +| 25 | NL | Nagaland | +| 26 | OR | Odisha | +| 27 | PY | Puducherry | +| 28 | PB | Punjab | +| 29 | RJ | Rajasthan | +| 30 | SK | Sikkim | +| 31 | TN | Tamil Nadu | +| 32 | TG | Telangana | +| 33 | TR | Tripura | +| 34 | UP | Uttar Pradesh | +| 35 | UT | Uttarakhand | +| 36 | WB | West Bengal | + +**Total subdivisions:** 36 diff --git a/src/swarms/doc/iso3166-2/380-Italy.md b/src/swarms/doc/iso3166-2/380-Italy.md new file mode 100644 index 0000000..06d5c5f --- /dev/null +++ b/src/swarms/doc/iso3166-2/380-Italy.md @@ -0,0 +1,30 @@ +# Italy (380) + +ISO 3166-1 numeric: **380** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | 65 | Abruzzo | +| 2 | 77 | Basilicata | +| 3 | 78 | Calabria | +| 4 | 72 | Campania | +| 5 | 45 | Emilia-Romagna 
| +| 6 | 36 | Friuli-Venezia Giulia | +| 7 | 62 | Lazio | +| 8 | 42 | Liguria | +| 9 | 25 | Lombardy | +| 10 | 57 | Marche | +| 11 | 67 | Molise | +| 12 | 21 | Piedmont | +| 13 | 75 | Apulia | +| 14 | 88 | Sardinia | +| 15 | 82 | Sicily | +| 16 | 52 | Tuscany | +| 17 | 32 | Trentino-South Tyrol | +| 18 | 55 | Umbria | +| 19 | 23 | Aosta Valley | +| 20 | 34 | Veneto | + +**Total subdivisions:** 20 diff --git a/src/swarms/doc/iso3166-2/392-Japan.md b/src/swarms/doc/iso3166-2/392-Japan.md new file mode 100644 index 0000000..d7952e8 --- /dev/null +++ b/src/swarms/doc/iso3166-2/392-Japan.md @@ -0,0 +1,57 @@ +# Japan (392) + +ISO 3166-1 numeric: **392** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | 01 | Hokkaido | +| 2 | 02 | Aomori | +| 3 | 03 | Iwate | +| 4 | 04 | Miyagi | +| 5 | 05 | Akita | +| 6 | 06 | Yamagata | +| 7 | 07 | Fukushima | +| 8 | 08 | Ibaraki | +| 9 | 09 | Tochigi | +| 10 | 10 | Gunma | +| 11 | 11 | Saitama | +| 12 | 12 | Chiba | +| 13 | 13 | Tokyo | +| 14 | 14 | Kanagawa | +| 15 | 15 | Niigata | +| 16 | 16 | Toyama | +| 17 | 17 | Ishikawa | +| 18 | 18 | Fukui | +| 19 | 19 | Yamanashi | +| 20 | 20 | Nagano | +| 21 | 21 | Gifu | +| 22 | 22 | Shizuoka | +| 23 | 23 | Aichi | +| 24 | 24 | Mie | +| 25 | 25 | Shiga | +| 26 | 26 | Kyoto | +| 27 | 27 | Osaka | +| 28 | 28 | Hyogo | +| 29 | 29 | Nara | +| 30 | 30 | Wakayama | +| 31 | 31 | Tottori | +| 32 | 32 | Shimane | +| 33 | 33 | Okayama | +| 34 | 34 | Hiroshima | +| 35 | 35 | Yamaguchi | +| 36 | 36 | Tokushima | +| 37 | 37 | Kagawa | +| 38 | 38 | Ehime | +| 39 | 39 | Kochi | +| 40 | 40 | Fukuoka | +| 41 | 41 | Saga | +| 42 | 42 | Nagasaki | +| 43 | 43 | Kumamoto | +| 44 | 44 | Oita | +| 45 | 45 | Miyazaki | +| 46 | 46 | Kagoshima | +| 47 | 47 | Okinawa | + +**Total subdivisions:** 47 diff --git a/src/swarms/doc/iso3166-2/410-South_Korea.md b/src/swarms/doc/iso3166-2/410-South_Korea.md new file mode 100644 index 0000000..fc145bc --- /dev/null +++ 
b/src/swarms/doc/iso3166-2/410-South_Korea.md @@ -0,0 +1,27 @@ +# South Korea (410) + +ISO 3166-1 numeric: **410** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | 11 | Seoul | +| 2 | 26 | Busan | +| 3 | 27 | Daegu | +| 4 | 28 | Incheon | +| 5 | 29 | Gwangju | +| 6 | 30 | Daejeon | +| 7 | 31 | Ulsan | +| 8 | 41 | Gyeonggi | +| 9 | 42 | Gangwon | +| 10 | 43 | North Chungcheong | +| 11 | 44 | South Chungcheong | +| 12 | 45 | North Jeolla | +| 13 | 46 | South Jeolla | +| 14 | 47 | North Gyeongsang | +| 15 | 48 | South Gyeongsang | +| 16 | 49 | Jeju | +| 17 | 50 | Sejong | + +**Total subdivisions:** 17 diff --git a/src/swarms/doc/iso3166-2/484-Mexico.md b/src/swarms/doc/iso3166-2/484-Mexico.md new file mode 100644 index 0000000..61b384f --- /dev/null +++ b/src/swarms/doc/iso3166-2/484-Mexico.md @@ -0,0 +1,42 @@ +# Mexico (484) + +ISO 3166-1 numeric: **484** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AGU | Aguascalientes | +| 2 | BCN | Baja California | +| 3 | BCS | Baja California Sur | +| 4 | CAM | Campeche | +| 5 | CHP | Chiapas | +| 6 | CHH | Chihuahua | +| 7 | CMX | Mexico City | +| 8 | COA | Coahuila | +| 9 | COL | Colima | +| 10 | DUR | Durango | +| 11 | GUA | Guanajuato | +| 12 | GRO | Guerrero | +| 13 | HID | Hidalgo | +| 14 | JAL | Jalisco | +| 15 | MEX | State of Mexico | +| 16 | MIC | Michoacán | +| 17 | MOR | Morelos | +| 18 | NAY | Nayarit | +| 19 | NLE | Nuevo León | +| 20 | OAX | Oaxaca | +| 21 | PUE | Puebla | +| 22 | QUE | Querétaro | +| 23 | ROO | Quintana Roo | +| 24 | SLP | San Luis Potosí | +| 25 | SIN | Sinaloa | +| 26 | SON | Sonora | +| 27 | TAB | Tabasco | +| 28 | TAM | Tamaulipas | +| 29 | TLA | Tlaxcala | +| 30 | VER | Veracruz | +| 31 | YUC | Yucatán | +| 32 | ZAC | Zacatecas | + +**Total subdivisions:** 32 diff --git a/src/swarms/doc/iso3166-2/566-Nigeria.md b/src/swarms/doc/iso3166-2/566-Nigeria.md new file mode 
100644 index 0000000..83b523c --- /dev/null +++ b/src/swarms/doc/iso3166-2/566-Nigeria.md @@ -0,0 +1,47 @@ +# Nigeria (566) + +ISO 3166-1 numeric: **566** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AB | Abia | +| 2 | FC | Abuja Federal Capital Territory | +| 3 | AD | Adamawa | +| 4 | AK | Akwa Ibom | +| 5 | AN | Anambra | +| 6 | BA | Bauchi | +| 7 | BY | Bayelsa | +| 8 | BE | Benue | +| 9 | BO | Borno | +| 10 | CR | Cross River | +| 11 | DE | Delta | +| 12 | EB | Ebonyi | +| 13 | ED | Edo | +| 14 | EK | Ekiti | +| 15 | EN | Enugu | +| 16 | GO | Gombe | +| 17 | IM | Imo | +| 18 | JI | Jigawa | +| 19 | KD | Kaduna | +| 20 | KN | Kano | +| 21 | KT | Katsina | +| 22 | KE | Kebbi | +| 23 | KO | Kogi | +| 24 | KW | Kwara | +| 25 | LA | Lagos | +| 26 | NA | Nasarawa | +| 27 | NI | Niger | +| 28 | OG | Ogun | +| 29 | ON | Ondo | +| 30 | OS | Osun | +| 31 | OY | Oyo | +| 32 | PL | Plateau | +| 33 | RI | Rivers | +| 34 | SO | Sokoto | +| 35 | TA | Taraba | +| 36 | YO | Yobe | +| 37 | ZA | Zamfara | + +**Total subdivisions:** 37 diff --git a/src/swarms/doc/iso3166-2/643-Russia.md b/src/swarms/doc/iso3166-2/643-Russia.md new file mode 100644 index 0000000..0705c6c --- /dev/null +++ b/src/swarms/doc/iso3166-2/643-Russia.md @@ -0,0 +1,93 @@ +# Russia (643) + +ISO 3166-1 numeric: **643** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AD | Adygea, Republic of | +| 2 | AL | Altai Republic | +| 3 | ALT | Altai Krai | +| 4 | AMU | Amur Oblast | +| 5 | ARK | Arkhangelsk Oblast | +| 6 | AST | Astrakhan Oblast | +| 7 | BA | Bashkortostan, Republic of | +| 8 | BEL | Belgorod Oblast | +| 9 | BRY | Bryansk Oblast | +| 10 | BU | Buryatia, Republic of | +| 11 | CE | Chechen Republic | +| 12 | CHE | Chelyabinsk Oblast | +| 13 | CHU | Chukotka Autonomous Okrug | +| 14 | CU | Chuvash Republic | +| 15 | DA | Dagestan, Republic of | +| 16 | IN | Ingushetia, Republic of | 
+| 17 | IRK | Irkutsk Oblast | +| 18 | IVA | Ivanovo Oblast | +| 19 | KB | Kabardino-Balkar Republic | +| 20 | KGD | Kaliningrad Oblast | +| 21 | KL | Kalmykia, Republic of | +| 22 | KLU | Kaluga Oblast | +| 23 | KAM | Kamchatka Krai | +| 24 | KC | Karachay-Cherkess Republic | +| 25 | KR | Karelia, Republic of | +| 26 | KEM | Kemerovo Oblast | +| 27 | KHA | Khabarovsk Krai | +| 28 | KK | Khakassia, Republic of | +| 29 | KHM | Khanty-Mansi Autonomous Okrug | +| 30 | KIR | Kirov Oblast | +| 31 | KO | Komi Republic | +| 32 | KOS | Kostroma Oblast | +| 33 | KDA | Krasnodar Krai | +| 34 | KYA | Krasnoyarsk Krai | +| 35 | KGN | Kurgan Oblast | +| 36 | KRS | Kursk Oblast | +| 37 | LEN | Leningrad Oblast | +| 38 | LIP | Lipetsk Oblast | +| 39 | MAG | Magadan Oblast | +| 40 | ME | Mari El Republic | +| 41 | MO | Mordovia, Republic of | +| 42 | MOS | Moscow Oblast | +| 43 | MOW | Moscow | +| 44 | MUR | Murmansk Oblast | +| 45 | NEN | Nenets Autonomous Okrug | +| 46 | NIZ | Nizhny Novgorod Oblast | +| 47 | NGR | Novgorod Oblast | +| 48 | NVS | Novosibirsk Oblast | +| 49 | OMS | Omsk Oblast | +| 50 | ORE | Orenburg Oblast | +| 51 | ORL | Oryol Oblast | +| 52 | PNZ | Penza Oblast | +| 53 | PER | Perm Krai | +| 54 | PRI | Primorsky Krai | +| 55 | PSK | Pskov Oblast | +| 56 | ROS | Rostov Oblast | +| 57 | RYA | Ryazan Oblast | +| 58 | SA | Sakha (Yakutia), Republic of | +| 59 | SAK | Sakhalin Oblast | +| 60 | SAM | Samara Oblast | +| 61 | SPE | Saint Petersburg | +| 62 | SAR | Saratov Oblast | +| 63 | SE | North Ossetia-Alania, Republic of | +| 64 | SMO | Smolensk Oblast | +| 65 | STA | Stavropol Krai | +| 66 | SVE | Sverdlovsk Oblast | +| 67 | TAM | Tambov Oblast | +| 68 | TA | Tatarstan, Republic of | +| 69 | TOM | Tomsk Oblast | +| 70 | TUL | Tula Oblast | +| 71 | TVE | Tver Oblast | +| 72 | TY | Tuva Republic | +| 73 | TYU | Tyumen Oblast | +| 74 | UD | Udmurt Republic | +| 75 | ULY | Ulyanovsk Oblast | +| 76 | VLA | Vladimir Oblast | +| 77 | VGG | Volgograd Oblast | +| 78 | 
VLG | Vologda Oblast | +| 79 | VOR | Voronezh Oblast | +| 80 | YAN | Yamalo-Nenets Autonomous Okrug | +| 81 | YAR | Yaroslavl Oblast | +| 82 | YEV | Jewish Autonomous Oblast | +| 83 | ZAB | Zabaykalsky Krai | + +**Total subdivisions:** 83 diff --git a/src/swarms/doc/iso3166-2/710-South_Africa.md b/src/swarms/doc/iso3166-2/710-South_Africa.md new file mode 100644 index 0000000..99a19bd --- /dev/null +++ b/src/swarms/doc/iso3166-2/710-South_Africa.md @@ -0,0 +1,19 @@ +# South Africa (710) + +ISO 3166-1 numeric: **710** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | EC | Eastern Cape | +| 2 | FS | Free State | +| 3 | GP | Gauteng | +| 4 | KZN | KwaZulu-Natal | +| 5 | LP | Limpopo | +| 6 | MP | Mpumalanga | +| 7 | NW | North West | +| 8 | NC | Northern Cape | +| 9 | WC | Western Cape | + +**Total subdivisions:** 9 diff --git a/src/swarms/doc/iso3166-2/724-Spain.md b/src/swarms/doc/iso3166-2/724-Spain.md new file mode 100644 index 0000000..6c43550 --- /dev/null +++ b/src/swarms/doc/iso3166-2/724-Spain.md @@ -0,0 +1,29 @@ +# Spain (724) + +ISO 3166-1 numeric: **724** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AN | Andalusia | +| 2 | AR | Aragon | +| 3 | AS | Asturias, Principality of | +| 4 | CN | Canary Islands | +| 5 | CB | Cantabria | +| 6 | CL | Castile and León | +| 7 | CM | Castilla-La Mancha | +| 8 | CT | Catalonia | +| 9 | CE | Ceuta | +| 10 | EX | Extremadura | +| 11 | GA | Galicia | +| 12 | IB | Balearic Islands | +| 13 | RI | La Rioja | +| 14 | MD | Community of Madrid | +| 15 | ML | Melilla | +| 16 | MC | Murcia, Region of | +| 17 | NC | Navarre, Chartered Community of | +| 18 | PV | Basque Country | +| 19 | VC | Valencian Community | + +**Total subdivisions:** 19 diff --git a/src/swarms/doc/iso3166-2/756-Switzerland.md b/src/swarms/doc/iso3166-2/756-Switzerland.md new file mode 100644 index 0000000..978590e --- /dev/null +++ 
b/src/swarms/doc/iso3166-2/756-Switzerland.md @@ -0,0 +1,36 @@ +# Switzerland (756) + +ISO 3166-1 numeric: **756** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AG | Aargau | +| 2 | AI | Appenzell Innerrhoden | +| 3 | AR | Appenzell Ausserrhoden | +| 4 | BE | Bern | +| 5 | BL | Basel-Landschaft | +| 6 | BS | Basel-Stadt | +| 7 | FR | Fribourg | +| 8 | GE | Geneva | +| 9 | GL | Glarus | +| 10 | GR | Graubünden | +| 11 | JU | Jura | +| 12 | LU | Lucerne | +| 13 | NE | Neuchâtel | +| 14 | NW | Nidwalden | +| 15 | OW | Obwalden | +| 16 | SG | St. Gallen | +| 17 | SH | Schaffhausen | +| 18 | SO | Solothurn | +| 19 | SZ | Schwyz | +| 20 | TG | Thurgau | +| 21 | TI | Ticino | +| 22 | UR | Uri | +| 23 | VD | Vaud | +| 24 | VS | Valais | +| 25 | ZG | Zug | +| 26 | ZH | Zurich | + +**Total subdivisions:** 26 diff --git a/src/swarms/doc/iso3166-2/826-United_Kingdom.md b/src/swarms/doc/iso3166-2/826-United_Kingdom.md new file mode 100644 index 0000000..96eea19 --- /dev/null +++ b/src/swarms/doc/iso3166-2/826-United_Kingdom.md @@ -0,0 +1,182 @@ +# United Kingdom (826) + +ISO 3166-1 numeric: **826** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | ENG | England | +| 2 | NIR | Northern Ireland | +| 3 | SCT | Scotland | +| 4 | WLS | Wales | +| 5 | BKM | Buckinghamshire | +| 6 | CAM | Cambridgeshire | +| 7 | CMA | Cumbria | +| 8 | DBY | Derbyshire | +| 9 | DEV | Devon | +| 10 | DOR | Dorset | +| 11 | ESX | East Sussex | +| 12 | ESS | Essex | +| 13 | GLS | Gloucestershire | +| 14 | HAM | Hampshire | +| 15 | HRT | Hertfordshire | +| 16 | KEN | Kent | +| 17 | LAN | Lancashire | +| 18 | LEC | Leicestershire | +| 19 | LIN | Lincolnshire | +| 20 | NFK | Norfolk | +| 21 | NYK | North Yorkshire | +| 22 | NTH | Northamptonshire | +| 23 | NTT | Nottinghamshire | +| 24 | OXF | Oxfordshire | +| 25 | SOM | Somerset | +| 26 | STS | Staffordshire | +| 27 | SFK | Suffolk | +| 28 
| SRY | Surrey | +| 29 | WAR | Warwickshire | +| 30 | WSX | West Sussex | +| 31 | WOR | Worcestershire | +| 32 | LND | London, City of | +| 33 | BDG | Barking and Dagenham | +| 34 | BNE | Barnet | +| 35 | BEX | Bexley | +| 36 | BEN | Brent | +| 37 | BRY | Bromley | +| 38 | CMD | Camden | +| 39 | CRY | Croydon | +| 40 | EAL | Ealing | +| 41 | ENF | Enfield | +| 42 | GRE | Greenwich | +| 43 | HCK | Hackney | +| 44 | HMF | Hammersmith and Fulham | +| 45 | HRY | Haringey | +| 46 | HRW | Harrow | +| 47 | HAV | Havering | +| 48 | HIL | Hillingdon | +| 49 | HNS | Hounslow | +| 50 | ISL | Islington | +| 51 | KEC | Kensington and Chelsea | +| 52 | KTT | Kingston upon Thames | +| 53 | LBH | Lambeth | +| 54 | LEW | Lewisham | +| 55 | MRT | Merton | +| 56 | NWM | Newham | +| 57 | RDB | Redbridge | +| 58 | RIC | Richmond upon Thames | +| 59 | SWK | Southwark | +| 60 | STN | Sutton | +| 61 | TWH | Tower Hamlets | +| 62 | WFT | Waltham Forest | +| 63 | WND | Wandsworth | +| 64 | WSM | Westminster | +| 65 | BNS | Barnsley | +| 66 | BIR | Birmingham | +| 67 | BOL | Bolton | +| 68 | BRD | Bradford | +| 69 | BRI | Brighton and Hove | +| 70 | BST | Bristol, City of | +| 71 | CAL | Calderdale | +| 72 | COV | Coventry | +| 73 | DER | Derby | +| 74 | DUD | Dudley | +| 75 | GAT | Gateshead | +| 76 | KIR | Kirklees | +| 77 | KWL | Knowsley | +| 78 | LDS | Leeds | +| 79 | LCE | Leicester | +| 80 | LIV | Liverpool | +| 81 | MAN | Manchester | +| 82 | NET | Newcastle upon Tyne | +| 83 | NTY | North Tyneside | +| 84 | OLD | Oldham | +| 85 | PTE | Peterborough | +| 86 | PLY | Plymouth | +| 87 | RCH | Rochdale | +| 88 | ROT | Rotherham | +| 89 | SLF | Salford | +| 90 | SAW | Sandwell | +| 91 | SFT | Sefton | +| 92 | SHF | Sheffield | +| 93 | SOL | Solihull | +| 94 | STY | South Tyneside | +| 95 | SHN | Southampton | +| 96 | SGC | South Gloucestershire | +| 97 | STH | Southend-on-Sea | +| 98 | SKP | Stockport | +| 99 | STE | Stoke-on-Trent | +| 100 | SND | Sunderland | +| 101 | TAM | Tameside | 
+| 102 | TRF | Trafford | +| 103 | WKF | Wakefield | +| 104 | WLL | Walsall | +| 105 | WGN | Wigan | +| 106 | WRL | Wirral | +| 107 | WLV | Wolverhampton | +| 108 | ABE | Aberdeen City | +| 109 | ABD | Aberdeenshire | +| 110 | ANS | Angus | +| 111 | AGB | Argyll and Bute | +| 112 | CLK | Clackmannanshire | +| 113 | DGY | Dumfries and Galloway | +| 114 | DND | Dundee City | +| 115 | EAY | East Ayrshire | +| 116 | EDU | East Dunbartonshire | +| 117 | ELN | East Lothian | +| 118 | ERW | East Renfrewshire | +| 119 | EDH | Edinburgh, City of | +| 120 | ELS | Eilean Siar | +| 121 | FAL | Falkirk | +| 122 | FIF | Fife | +| 123 | GLG | Glasgow City | +| 124 | HLD | Highland | +| 125 | IVC | Inverclyde | +| 126 | MLN | Midlothian | +| 127 | MRY | Moray | +| 128 | NAY | North Ayrshire | +| 129 | NLK | North Lanarkshire | +| 130 | ORK | Orkney Islands | +| 131 | PKN | Perth and Kinross | +| 132 | RFW | Renfrewshire | +| 133 | SCB | Scottish Borders | +| 134 | ZET | Shetland Islands | +| 135 | SAY | South Ayrshire | +| 136 | SLK | South Lanarkshire | +| 137 | STG | Stirling | +| 138 | WDU | West Dunbartonshire | +| 139 | WLN | West Lothian | +| 140 | BGW | Blaenau Gwent | +| 141 | BGE | Bridgend | +| 142 | CAY | Caerphilly | +| 143 | CRF | Cardiff | +| 144 | CMN | Carmarthenshire | +| 145 | CGN | Ceredigion | +| 146 | CWY | Conwy | +| 147 | DEN | Denbighshire | +| 148 | FLN | Flintshire | +| 149 | GWN | Gwynedd | +| 150 | AGY | Isle of Anglesey | +| 151 | MTY | Merthyr Tydfil | +| 152 | MON | Monmouthshire | +| 153 | NTL | Neath Port Talbot | +| 154 | NWP | Newport | +| 155 | PEM | Pembrokeshire | +| 156 | POW | Powys | +| 157 | RCT | Rhondda Cynon Taf | +| 158 | SWA | Swansea | +| 159 | TOF | Torfaen | +| 160 | VGL | Vale of Glamorgan | +| 161 | WRX | Wrexham | +| 162 | ANT | Antrim and Newtownabbey | +| 163 | ARD | Ards and North Down | +| 164 | ABC | Armagh City, Banbridge and Craigavon | +| 165 | BFS | Belfast | +| 166 | CCG | Causeway Coast and Glens | +| 167 | DRS | 
Derry City and Strabane | +| 168 | FMO | Fermanagh and Omagh | +| 169 | LBC | Lisburn and Castlereagh | +| 170 | MEA | Mid and East Antrim | +| 171 | MUL | Mid Ulster | +| 172 | NMD | Newry, Mourne and Down | + +**Total subdivisions:** 172 diff --git a/src/swarms/doc/iso3166-2/840-United_States.md b/src/swarms/doc/iso3166-2/840-United_States.md new file mode 100644 index 0000000..1259bd4 --- /dev/null +++ b/src/swarms/doc/iso3166-2/840-United_States.md @@ -0,0 +1,67 @@ +# United States (840) + +ISO 3166-1 numeric: **840** + +## Admin Area Mappings + +| Admin Code | ISO 3166-2 | Name | +|-------------|------------|------| +| 1 | AL | Alabama | +| 2 | AK | Alaska | +| 3 | AZ | Arizona | +| 4 | AR | Arkansas | +| 5 | CA | California | +| 6 | CO | Colorado | +| 7 | CT | Connecticut | +| 8 | DE | Delaware | +| 9 | FL | Florida | +| 10 | GA | Georgia | +| 11 | HI | Hawaii | +| 12 | ID | Idaho | +| 13 | IL | Illinois | +| 14 | IN | Indiana | +| 15 | IA | Iowa | +| 16 | KS | Kansas | +| 17 | KY | Kentucky | +| 18 | LA | Louisiana | +| 19 | ME | Maine | +| 20 | MD | Maryland | +| 21 | MA | Massachusetts | +| 22 | MI | Michigan | +| 23 | MN | Minnesota | +| 24 | MS | Mississippi | +| 25 | MO | Missouri | +| 26 | MT | Montana | +| 27 | NE | Nebraska | +| 28 | NV | Nevada | +| 29 | NH | New Hampshire | +| 30 | NJ | New Jersey | +| 31 | NM | New Mexico | +| 32 | NY | New York | +| 33 | NC | North Carolina | +| 34 | ND | North Dakota | +| 35 | OH | Ohio | +| 36 | OK | Oklahoma | +| 37 | OR | Oregon | +| 38 | PA | Pennsylvania | +| 39 | RI | Rhode Island | +| 40 | SC | South Carolina | +| 41 | SD | South Dakota | +| 42 | TN | Tennessee | +| 43 | TX | Texas | +| 44 | UT | Utah | +| 45 | VT | Vermont | +| 46 | VA | Virginia | +| 47 | WA | Washington | +| 48 | WV | West Virginia | +| 49 | WI | Wisconsin | +| 50 | WY | Wyoming | +| 51 | DC | District of Columbia | +| 52 | AS | American Samoa | +| 53 | GU | Guam | +| 54 | MP | Northern Mariana Islands | +| 55 | PR | Puerto Rico | +| 
56 | UM | United States Minor Outlying Islands | +| 57 | VI | Virgin Islands, U.S. | + +**Total subdivisions:** 57 diff --git a/src/swarms/doc/iso3166-reference.md b/src/swarms/doc/iso3166-reference.md new file mode 100644 index 0000000..855865d --- /dev/null +++ b/src/swarms/doc/iso3166-reference.md @@ -0,0 +1,97 @@ +# ISO 3166 Reference + +## Country Codes (ISO 3166-1 Numeric) + +FleetIdentity uses ISO 3166-1 numeric codes (1-999) for country identification. + +| Code | Country | +| :--- | :------------- | +| 124 | Canada | +| 250 | France | +| 276 | Germany | +| 392 | Japan | +| 826 | United Kingdom | +| 840 | United States | + +## Admin Area Codes + +Admin codes map ISO 3166-2 subdivisions to 1-indexed integers. + +### Region Key Encoding + +``` +Country: regionKey = countryCode +Admin Area: regionKey = (countryCode << 10) | adminCode +``` + +**Examples:** +| Location | Country | Admin | Region Key | +| :------- | ------: | ----: | ---------: | +| United States | 840 | — | 840 | +| US-California | 840 | 5 | 860,165 | +| Canada | 124 | — | 124 | +| CA-Alberta | 124 | 1 | 126,977 | + +## Admin Area Mapping Files + +The [iso3166-2/](iso3166-2/) directory contains per-country mappings. + +### File Format + +Filename: `{ISO_3166-1_numeric}-{Country_Name}.md` + +| Admin Code | ISO 3166-2 | Name | +| ---------: | :--------- | :-------------------- | +| 1 | XX | Full subdivision name | +| 2 | YY | ... 
| + +### Constraints + +- Admin codes: 1-indexed integers +- Valid range: 1-255 (covers all real-world subdivisions) +- Code 0 is invalid (reverts with `InvalidAdminCode()`) + +## United States (840) + +Selected entries from [iso3166-2/840-United_States.md](iso3166-2/840-United_States.md): + +| Admin | ISO 3166-2 | State | +| ----: | :--------- | :--------- | +| 1 | AL | Alabama | +| 5 | CA | California | +| 32 | NY | New York | +| 43 | TX | Texas | + +## Usage + +```solidity +// US-California +uint16 countryCode = 840; +uint8 adminCode = 5; +uint32 regionKey = fleetIdentity.makeAdminRegion(countryCode, adminCode); +// regionKey = (840 << 10) | 5 = 860165 + +// Register +fleetIdentity.registerFleetLocal(uuid, countryCode, adminCode, tier); +// tokenId = (860165 << 128) | uint128(uuid) +``` + +## Contract Functions + +```solidity +// Build region key +uint32 region = fleetIdentity.makeAdminRegion(countryCode, adminCode); + +// Active regions +uint16[] memory countries = fleetIdentity.getActiveCountries(); +uint32[] memory adminAreas = fleetIdentity.getActiveAdminAreas(); + +// Extract from token +uint32 region = fleetIdentity.tokenRegion(tokenId); +// If region < 1024: country-level +// If region >= 1024: adminCode = region & 0x3FF, countryCode = region >> 10 +``` + +## Data Source + +Mappings based on ISO 3166-2 standard maintained by ISO and national statistical agencies. 
diff --git a/src/swarms/doc/lifecycle.md b/src/swarms/doc/lifecycle.md new file mode 100644 index 0000000..c7aae38 --- /dev/null +++ b/src/swarms/doc/lifecycle.md @@ -0,0 +1,130 @@ +# Lifecycle & State Machines + +## UUID Registration States + +```mermaid +stateDiagram-v2 + [*] --> None + + None --> Owned : claimUuid() + None --> Local : registerFleetLocal() + None --> Country : registerFleetCountry() + + Owned --> Local : registerFleetLocal() + Owned --> Country : registerFleetCountry() + Owned --> [*] : releaseUuid() / burn() + + Local --> Owned : unregisterToOwned() + Local --> [*] : burn() all tokens + + Country --> Owned : unregisterToOwned() + Country --> [*] : burn() all tokens + + note right of Owned : regionKey = 0 + note right of Local : regionKey ≥ 1024 + note right of Country : regionKey 1-999 +``` + +### State Transitions + +| From | To | Function | Bond Effect | +| :------------ | :------ | :------------------------- | :-------------------------------------------------- | +| None | Owned | `claimUuid()` | Pull BASE_BOND from owner | +| None | Local | `registerFleetLocal()` | Pull tierBond from owner (or split with operator) | +| None | Country | `registerFleetCountry()` | Pull tierBond from owner (or split with operator) | +| Owned | Local | `registerFleetLocal()` | Pull (tierBond - BASE_BOND) from owner/operator | +| Owned | Country | `registerFleetCountry()` | Pull (tierBond - BASE_BOND) from owner/operator | +| Local/Country | Owned | `unregisterToOwned()` | Refund (tierBond - BASE_BOND) to operator, clear op | +| Owned | None | `releaseUuid()` / `burn()` | Refund BASE_BOND to owner | +| Local/Country | None | `burn()` | Refund BASE_BOND to owner, tier excess to operator | + +## Swarm Status States + +```mermaid +stateDiagram-v2 + [*] --> REGISTERED : registerSwarm() + + REGISTERED --> ACCEPTED : acceptSwarm() + REGISTERED --> REJECTED : rejectSwarm() + + ACCEPTED --> REGISTERED : updateSwarm*() + REJECTED --> REGISTERED : updateSwarm*() + + 
REGISTERED --> [*] : delete / purge + ACCEPTED --> [*] : delete / purge + REJECTED --> [*] : delete / purge +``` + +### Status Effects + +| Status | checkMembership | Provider Action Required | +| :--------- | :-------------- | :------------------------------- | +| REGISTERED | Reverts | Accept or reject | +| ACCEPTED | Works | None | +| REJECTED | Reverts | None (fleet can update to retry) | + +## Fleet Token Lifecycle + +```mermaid +sequenceDiagram + participant TOKEN as BOND_TOKEN + participant FI as FleetIdentity + participant Owner + participant Operator + + Note over FI: Registration (with operator) + FI->>TOKEN: transferFrom(owner, this, BASE_BOND) + FI->>TOKEN: transferFrom(operator, this, tierExcess) + + Note over FI: Promotion (operator pays) + FI->>TOKEN: transferFrom(operator, this, additionalBond) + + Note over FI: Demotion (operator receives) + FI->>TOKEN: transfer(operator, refund) + + Note over FI: Change Operator + FI->>TOKEN: transfer(oldOperator, tierExcess) + FI->>TOKEN: transferFrom(newOperator, this, tierExcess) + + Note over FI: Unregister to Owned + FI->>TOKEN: transfer(operator, tierExcess) + + Note over FI: Burn + FI->>TOKEN: transfer(owner, BASE_BOND) + FI->>TOKEN: transfer(operator, tierExcess) +``` + +## Orphan Lifecycle + +```mermaid +flowchart TD + ACTIVE[Swarm Active] --> BURN{NFT burned?} + BURN -->|No| ACTIVE + BURN -->|Yes| ORPHAN[Swarm Orphaned] + ORPHAN --> CHECK[isSwarmValid returns false] + CHECK --> PURGE[Anyone: purgeOrphanedSwarm] + PURGE --> DELETED[Swarm Deleted + Gas Refund] +``` + +### Orphan Guards + +These operations revert with `SwarmOrphaned()` if either NFT invalid: + +- `acceptSwarm(swarmId)` +- `rejectSwarm(swarmId)` +- `checkMembership(swarmId, tagHash)` + +## Region Index Maintenance + +```mermaid +flowchart LR + REG[registerFleet*] --> FIRST{First in region?} + FIRST -->|Yes| ADD[Add to activeCountries/activeAdminAreas] + FIRST -->|No| SKIP[Already indexed] + + BURN[burn / demotion] --> EMPTY{Region empty?} + 
EMPTY -->|Yes| REMOVE[Remove from index] + EMPTY -->|No| KEEP[Keep] +``` + +Indexes are automatically maintained—no manual intervention needed. diff --git a/src/swarms/doc/maintenance.md b/src/swarms/doc/maintenance.md new file mode 100644 index 0000000..77a1b42 --- /dev/null +++ b/src/swarms/doc/maintenance.md @@ -0,0 +1,195 @@ +# Fleet Maintenance + +## Overview + +After registration, fleet owners (or their designated operators) must monitor bundle inclusion as market conditions change: + +- New fleets registering at higher tiers +- Existing fleets promoting +- Bundle slots limited to 20 per location + +## Operator Delegation + +Fleet tier management can be delegated to an **operator**: + +```solidity +// Owner delegates to operator (transfers tier bonds) +fleetIdentity.setOperator(uuid, operatorAddress); + +// Check who manages tiers +address manager = fleetIdentity.operatorOf(uuid); // returns operator or owner +``` + +**Bond responsibilities:** + +- **Owner**: Always holds BASE_BOND +- **Operator**: Holds tier excess (tierBond - BASE_BOND) + +**Permissions:** + +- **Promote/Demote**: Only operator (or owner if no operator set) +- **Burn**: Only token holder (ERC-721 ownerOf) +- **SetOperator**: Only UUID owner + +## Maintenance Cycle + +```mermaid +flowchart TD + START([Registered]) --> CHECK{In bundle?} + + CHECK -->|Yes| OPTIMIZE{Lower tier
possible?}
+    CHECK -->|No| PROMOTE[Promote]
+
+    OPTIMIZE -->|Yes| DEMOTE[Demote → refund]
+    OPTIMIZE -->|No| WAIT
+
+    DEMOTE --> WAIT
+    PROMOTE --> WAIT
+
+    WAIT[Wait 24h] --> CHECK
+
+    style DEMOTE fill:#2ecc71,color:#fff
+    style PROMOTE fill:#ff9f43,color:#fff
+```
+
+## Check Inclusion
+
+### Local Fleets
+
+```typescript
+const [uuids, count] = await fleetIdentity.buildHighestBondedUuidBundle(
+  countryCode,
+  adminCode,
+);
+const isIncluded = uuids
+  .slice(0, count)
+  .some((u) => u.toLowerCase() === myUuid.toLowerCase());
+```
+
+### Country Fleets
+
+Must check **every** active admin area in their country:
+
+```typescript
+const adminAreas = await fleetIdentity.getActiveAdminAreas();
+const myAdminAreas = adminAreas.filter(
+  (rk) => Number(rk >> 10n) === myCountryCode,
+);
+
+const missingAreas = [];
+for (const rk of myAdminAreas) {
+  const adminCode = Number(rk & 0x3ffn);
+  const [uuids, count] = await fleetIdentity.buildHighestBondedUuidBundle(
+    myCountryCode,
+    adminCode,
+  );
+  // Case-insensitive compare, matching the local-fleet check above.
+  const included = uuids
+    .slice(0, count)
+    .some((u) => u.toLowerCase() === myUuid.toLowerCase());
+  if (!included) {
+    missingAreas.push(adminCode);
+  }
+}
+```
+
+### No Active Admin Areas
+
+When no EdgeBeaconScanners deployed yet:
+
+```typescript
+const [uuids, count] = await fleetIdentity.buildCountryOnlyBundle(countryCode);
+// Check position among country-level competitors only
+```
+
+## Get Required Tier
+
+### Local
+
+```solidity
+(uint256 tier, uint256 bond) = fleetIdentity.localInclusionHint(cc, admin);
+```
+
+### Country
+
+```solidity
+(uint256 tier, uint256 bond) = fleetIdentity.countryInclusionHint(cc);
+// Scans ALL active admin areas (unbounded view, free off-chain)
+```
+
+## Promote
+
+```mermaid
+sequenceDiagram
+    actor FO as Fleet Owner
+    participant FI as FleetIdentity
+    participant TOKEN as BOND_TOKEN
+
+    FO->>+FI: tierBond(currentTier, isCountry)
+    FI-->>-FO: currentBond
+    FO->>+FI: tierBond(targetTier, isCountry)
+    FI-->>-FO: targetBond
+
+    Note over FO: additionalBond = targetBond - currentBond
+
FO->>TOKEN: approve(FleetIdentity, additionalBond)
+    FO->>+FI: reassignTier(tokenId, targetTier)
+    FI->>TOKEN: transferFrom(...)
+    FI-->>-FO: FleetPromoted
+```
+
+### Quick Promote
+
+```solidity
+fleetIdentity.promote(tokenId);
+// Moves to currentTier + 1
+```
+
+### Handle TierFull
+
+```typescript
+let attempts = 0;
+while (attempts < 3) {
+  try {
+    await fleetIdentity.reassignTier(tokenId, requiredTier);
+    break;
+  } catch (e) {
+    if (e.message.includes("TierFull")) {
+      attempts++;
+      const [newTier] = await fleetIdentity.localInclusionHint(cc, admin);
+      requiredTier = newTier;
+      // Re-approve if needed
+    } else throw e;
+  }
+}
+```
+
+## Demote (Save Bond)
+
+No approval needed—refunds automatically:
+
+```typescript
+const [suggestedTier] = await fleetIdentity.localInclusionHint(cc, admin);
+const currentTier = await fleetIdentity.fleetTier(tokenId);
+
+if (suggestedTier < currentTier) {
+  await fleetIdentity.reassignTier(tokenId, suggestedTier);
+  // Refund deposited to owner
+}
+```
+
+## Propagation Timing
+
+| Phase                    | Duration         |
+| :----------------------- | :--------------- |
+| Transaction confirmation | ~1-2s (ZkSync)   |
+| Event indexing           | ~1-10s           |
+| Edge network sync        | Minutes to hours |
+
+**Recommendation**: 24-hour check interval.
+ +## Summary + +| Task | Method | +| :-------------------------- | :---------------------------------------- | +| Check inclusion (local) | `buildHighestBondedUuidBundle(cc, admin)` | +| Check inclusion (country) | Loop all admin areas | +| Get required tier (local) | `localInclusionHint(cc, admin)` | +| Get required tier (country) | `countryInclusionHint(cc)` | +| Calculate bond | `tierBond(tier, isCountry)` | +| Move tier | `reassignTier(tokenId, tier)` | +| Quick promote | `promote(tokenId)` | diff --git a/src/swarms/doc/swarm-operations.md b/src/swarms/doc/swarm-operations.md new file mode 100644 index 0000000..853352b --- /dev/null +++ b/src/swarms/doc/swarm-operations.md @@ -0,0 +1,199 @@ +# Swarm Operations + +## Overview + +A **Swarm** is a cryptographic representation of ~10k-20k BLE tags. Individual tags are never enumerated on-chain—membership is verified via XOR filter. + +## Registration Flow + +```mermaid +sequenceDiagram + actor FO as Fleet Owner + actor PRV as Provider Owner + participant SR as SwarmRegistry + participant FI as FleetIdentity + participant SP as ServiceProvider + + Note over FO: Build XOR filter off-chain + + FO->>+SR: registerSwarm(fleetUuid, providerId, filter, fpSize, tagType) + SR->>FI: uuidOwner(fleetUuid) + SR->>SP: ownerOf(providerId) + Note over SR: swarmId = keccak256(fleetUuid, providerId, filter) + SR-->>-FO: swarmId (status: REGISTERED) + + PRV->>+SR: acceptSwarm(swarmId) + SR->>SP: ownerOf(providerId) + SR-->>-PRV: status: ACCEPTED +``` + +### Parameters + +| Parameter | Type | Description | +| :----------- | :------ | :--------------------------- | +| `fleetUuid` | bytes16 | UUID that owns this swarm | +| `providerId` | uint256 | ServiceProvider token ID | +| `filter` | bytes | XOR filter data | +| `fpSize` | uint8 | Fingerprint size (1-16 bits) | +| `tagType` | TagType | Tag identity scheme | + +### Swarm ID + +Deterministic derivation: + +```solidity +swarmId = uint256(keccak256(abi.encode(fleetUuid, providerId, 
filter))) +``` + +Duplicate registration reverts with `SwarmAlreadyExists()`. + +## XOR Filter Construction + +### Off-Chain Steps + +1. **Build TagIDs** for all tags per TagType schema +2. **Hash each TagID**: `tagHash = keccak256(tagId)` +3. **Construct XOR filter** using Peeling Algorithm +4. **Submit filter** in `registerSwarm()` + +### TagType Schemas + +| Type | Format | Bytes | +| :--------------------- | :---------------------------- | -----: | +| `IBEACON_PAYLOAD_ONLY` | UUID ∥ Major ∥ Minor | 20 | +| `IBEACON_INCLUDES_MAC` | UUID ∥ Major ∥ Minor ∥ MAC | 26 | +| `VENDOR_ID` | companyID ∥ hash(vendorBytes) | varies | +| `GENERIC` | custom | varies | + +### MAC Normalization (IBEACON_INCLUDES_MAC) + +| MAC Type | Action | +| :---------------------- | :------------------------------- | +| Public/Static (00) | Use real MAC | +| Random/Private (01, 11) | Replace with `FF:FF:FF:FF:FF:FF` | + +This supports rotating privacy MACs while validating "it's a privacy tag." + +### Filter Membership Math + +``` +h = keccak256(tagId) +M = filterLength * 8 / fingerprintSize // slot count + +h1 = uint32(h) % M +h2 = uint32(h >> 32) % M +h3 = uint32(h >> 64) % M +fp = (h >> 96) & ((1 << fingerprintSize) - 1) + +Member if: Filter[h1] ^ Filter[h2] ^ Filter[h3] == fp +``` + +## Provider Approval + +```mermaid +stateDiagram-v2 + [*] --> REGISTERED : registerSwarm() + + REGISTERED --> ACCEPTED : acceptSwarm() + REGISTERED --> REJECTED : rejectSwarm() + + ACCEPTED --> REGISTERED : updateSwarm*() + REJECTED --> REGISTERED : updateSwarm*() + + REGISTERED --> [*] : deleteSwarm() / purge + ACCEPTED --> [*] : deleteSwarm() / purge + REJECTED --> [*] : deleteSwarm() / purge +``` + +| Action | Caller | Effect | +| :--------------------- | :------------- | :---------------- | +| `acceptSwarm(swarmId)` | Provider owner | status → ACCEPTED | +| `rejectSwarm(swarmId)` | Provider owner | status → REJECTED | + +Only `ACCEPTED` swarms pass `checkMembership()`. 
+ +## Updates + +Both operations reset status to `REGISTERED`: + +```solidity +// Replace filter +swarmRegistry.updateSwarmFilter(swarmId, newFilterData); + +// Change provider +swarmRegistry.updateSwarmProvider(swarmId, newProviderId); +``` + +```mermaid +sequenceDiagram + actor FO as Fleet Owner + participant SR as SwarmRegistry + participant FI as FleetIdentity + + FO->>+SR: updateSwarmFilter(swarmId, newFilter) + SR->>FI: uuidOwner(fleetUuid) + Note over SR: status → REGISTERED + SR-->>-FO: ✓ (requires re-approval) +``` + +## Deletion + +```solidity +swarmRegistry.deleteSwarm(swarmId); +``` + +- Removes from `uuidSwarms[]` (O(1) swap-and-pop) +- Deletes `swarms[swarmId]` +- Universal variant: deletes `filterData[swarmId]` + +## Orphan Handling + +When fleet or provider NFT is burned, referencing swarms become **orphaned**. + +### Detection + +```solidity +(bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); +// Returns (false, _) if UUID has no owner +// Returns (_, false) if provider NFT burned +``` + +### Cleanup + +Anyone can purge orphaned swarms: + +```solidity +swarmRegistry.purgeOrphanedSwarm(swarmId); +// Gas refund incentive +``` + +```mermaid +sequenceDiagram + actor Anyone as Purger + participant SR as SwarmRegistry + + Anyone->>+SR: isSwarmValid(swarmId) + SR-->>-Anyone: (false, true) + + Anyone->>+SR: purgeOrphanedSwarm(swarmId) + Note over SR: Verify orphaned + Note over SR: Delete swarm data + SR-->>-Anyone: SwarmPurged event + gas refund +``` + +### Guards + +Operations revert with `SwarmOrphaned()` if either NFT is invalid: + +- `acceptSwarm()` +- `rejectSwarm()` +- `checkMembership()` + +## Storage Variants + +| Variant | Filter Storage | Deletion | +| :------------ | :-------------------------- | :-------------------------------- | +| **L1** | SSTORE2 (contract bytecode) | Struct cleared; bytecode persists | +| **Universal** | `mapping(uint256 => bytes)` | Full deletion | + +Universal exposes `getFilterData(swarmId)` 
for off-chain retrieval. diff --git a/test/FleetIdentity.t.sol b/test/FleetIdentity.t.sol new file mode 100644 index 0000000..e0ed401 --- /dev/null +++ b/test/FleetIdentity.t.sol @@ -0,0 +1,3394 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import "forge-std/Test.sol"; +import "../src/swarms/FleetIdentity.sol"; +import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +/// @dev Minimal ERC-20 mock with public mint for testing. +contract MockERC20 is ERC20 { + constructor() ERC20("Mock Bond Token", "MBOND") {} + + function mint(address to, uint256 amount) external { + _mint(to, amount); + } +} + +/// @dev ERC-20 that returns false on transfer instead of reverting. +contract BadERC20 is ERC20 { + bool public shouldFail; + + constructor() ERC20("Bad Token", "BAD") {} + + function mint(address to, uint256 amount) external { + _mint(to, amount); + } + + function setFail(bool _fail) external { + shouldFail = _fail; + } + + function transfer(address to, uint256 amount) public override returns (bool) { + if (shouldFail) return false; + return super.transfer(to, amount); + } + + function transferFrom(address from, address to, uint256 amount) public override returns (bool) { + if (shouldFail) return false; + return super.transferFrom(from, to, amount); + } +} + +contract FleetIdentityTest is Test { + FleetIdentity fleet; + MockERC20 bondToken; + + address alice = address(0xA); + address bob = address(0xB); + address carol = address(0xC); + + bytes16 constant UUID_1 = bytes16(keccak256("fleet-alpha")); + bytes16 constant UUID_2 = bytes16(keccak256("fleet-bravo")); + bytes16 constant UUID_3 = bytes16(keccak256("fleet-charlie")); + + uint256 constant BASE_BOND = 100 ether; + + uint16 constant US = 840; + uint16 constant DE = 276; + uint16 constant FR = 250; + uint16 constant JP = 392; + uint16 constant ADMIN_CA = 1; + uint16 constant ADMIN_NY = 2; + + event FleetRegistered( + address indexed owner, + bytes16 indexed uuid, + uint256 indexed 
tokenId, + uint32 regionKey, + uint256 tierIndex, + uint256 bondAmount, + address operator + ); + event OperatorSet( + bytes16 indexed uuid, + address indexed oldOperator, + address indexed newOperator, + uint256 tierExcessTransferred + ); + event FleetPromoted( + uint256 indexed tokenId, uint256 indexed fromTier, uint256 indexed toTier, uint256 additionalBond + ); + event FleetDemoted(uint256 indexed tokenId, uint256 indexed fromTier, uint256 indexed toTier, uint256 bondRefund); + event FleetBurned( + address indexed owner, uint256 indexed tokenId, uint32 indexed regionKey, uint256 tierIndex, uint256 bondRefund + ); + + function setUp() public { + bondToken = new MockERC20(); + fleet = new FleetIdentity(address(bondToken), BASE_BOND); + + // Mint enough for all 24 tiers (tier 23 bond = BASE_BOND * 2^23 ≈ 838M ether) + // Total for 8 members across 24 tiers ≈ 13.4 billion ether + bondToken.mint(alice, 100_000_000_000_000 ether); + bondToken.mint(bob, 100_000_000_000_000 ether); + bondToken.mint(carol, 100_000_000_000_000 ether); + + vm.prank(alice); + bondToken.approve(address(fleet), type(uint256).max); + vm.prank(bob); + bondToken.approve(address(fleet), type(uint256).max); + vm.prank(carol); + bondToken.approve(address(fleet), type(uint256).max); + } + + // --- Helpers --- + + /// @dev Compute tokenId from (uuid, region) using new encoding + function _tokenId(bytes16 uuid, uint32 region) internal pure returns (uint256) { + return (uint256(region) << 128) | uint256(uint128(uuid)); + } + + /// @dev Given a UUID from buildBundle, find tokenId by checking local first, then country + function _findTokenId(bytes16 uuid, uint16 cc, uint16 admin) internal view returns (uint256) { + uint32 localRegion = (uint32(cc) << 10) | uint32(admin); + uint256 localTokenId = _tokenId(uuid, localRegion); + // Check if local token exists by trying to get its owner + try fleet.ownerOf(localTokenId) returns (address) { + return localTokenId; + } catch { + uint32 countryRegion = 
uint32(cc); + return _tokenId(uuid, countryRegion); + } + } + + function _uuid(uint256 i) internal pure returns (bytes16) { + return bytes16(keccak256(abi.encodePacked("fleet-", i))); + } + + function _regionUS() internal pure returns (uint32) { + return uint32(US); + } + + function _regionDE() internal pure returns (uint32) { + return uint32(DE); + } + + function _regionUSCA() internal pure returns (uint32) { + return (uint32(US) << 10) | uint32(ADMIN_CA); + } + + function _regionUSNY() internal pure returns (uint32) { + return (uint32(US) << 10) | uint32(ADMIN_NY); + } + + function _makeAdminRegion(uint16 cc, uint16 admin) internal pure returns (uint32) { + return (uint32(cc) << 10) | uint32(admin); + } + + function _registerNCountry(address owner, uint16 cc, uint256 count, uint256 startSeed) + internal + returns (uint256[] memory ids) + { + ids = new uint256[](count); + for (uint256 i = 0; i < count; i++) { + vm.prank(owner); + ids[i] = fleet.registerFleetCountry(_uuid(startSeed + i), cc, i / 4); // TIER_CAPACITY = 4 + } + } + + function _registerNCountryAt(address owner, uint16 cc, uint256 count, uint256 startSeed, uint256 tier) + internal + returns (uint256[] memory ids) + { + ids = new uint256[](count); + for (uint256 i = 0; i < count; i++) { + vm.prank(owner); + ids[i] = fleet.registerFleetCountry(_uuid(startSeed + i), cc, tier); + } + } + + function _registerNLocal(address owner, uint16 cc, uint16 admin, uint256 count, uint256 startSeed) + internal + returns (uint256[] memory ids) + { + ids = new uint256[](count); + for (uint256 i = 0; i < count; i++) { + vm.prank(owner); + ids[i] = fleet.registerFleetLocal(_uuid(startSeed + i), cc, admin, i / 4); // TIER_CAPACITY = 4 + } + } + + function _registerNLocalAt(address owner, uint16 cc, uint16 admin, uint256 count, uint256 startSeed, uint256 tier) + internal + returns (uint256[] memory ids) + { + ids = new uint256[](count); + for (uint256 i = 0; i < count; i++) { + vm.prank(owner); + ids[i] = 
fleet.registerFleetLocal(_uuid(startSeed + i), cc, admin, tier); + } + } + + // --- Constructor --- + + function test_constructor_setsImmutables() public view { + assertEq(address(fleet.BOND_TOKEN()), address(bondToken)); + assertEq(fleet.BASE_BOND(), BASE_BOND); + assertEq(fleet.name(), "Swarm Fleet Identity"); + assertEq(fleet.symbol(), "SFID"); + } + + function test_constructor_constants() public view { + assertEq(fleet.TIER_CAPACITY(), 4); + assertEq(fleet.MAX_TIERS(), 24); + assertEq(fleet.MAX_BONDED_UUID_BUNDLE_SIZE(), 20); + assertEq(fleet.COUNTRY_BOND_MULTIPLIER(), 16); + } + + // --- tierBond --- + + function test_tierBond_local_tier0() public view { + // Local regions get 1× multiplier + assertEq(fleet.tierBond(0, false), BASE_BOND); + } + + function test_tierBond_country_tier0() public view { + // Country regions get 16x multiplier + assertEq(fleet.tierBond(0, true), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); + } + + function test_tierBond_local_tier1() public view { + assertEq(fleet.tierBond(1, false), BASE_BOND * 2); + } + + function test_tierBond_country_tier1() public view { + assertEq(fleet.tierBond(1, true), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER() * 2); + } + + function test_tierBond_geometricProgression() public view { + for (uint256 i = 1; i <= 5; i++) { + assertEq(fleet.tierBond(i, false), fleet.tierBond(i - 1, false) * 2); + assertEq(fleet.tierBond(i, true), fleet.tierBond(i - 1, true) * 2); + } + } + + // --- registerFleetCountry --- + + function test_registerFleetCountry_auto_setsRegionAndTier() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 0); + + assertEq(fleet.tokenRegion(tokenId), _regionUS()); + assertEq(fleet.fleetTier(tokenId), 0); + assertEq(fleet.bonds(tokenId), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); // Country gets 16x multiplier + assertEq(fleet.regionTierCount(_regionUS()), 1); + } + + function test_registerFleetCountry_explicit_tier() public { + vm.prank(alice); + uint256 
tokenId = fleet.registerFleetCountry(UUID_1, US, 3); + + assertEq(fleet.fleetTier(tokenId), 3); + assertEq(fleet.bonds(tokenId), fleet.tierBond(3, true)); + assertEq(fleet.regionTierCount(_regionUS()), 4); + } + + function test_RevertIf_registerFleetCountry_invalidCode_zero() public { + vm.prank(alice); + vm.expectRevert(FleetIdentity.InvalidCountryCode.selector); + fleet.registerFleetCountry(UUID_1, 0, 0); + } + + function test_RevertIf_registerFleetCountry_invalidCode_over999() public { + vm.prank(alice); + vm.expectRevert(FleetIdentity.InvalidCountryCode.selector); + fleet.registerFleetCountry(UUID_1, 1000, 0); + } + + // --- registerFleetLocal --- + + function test_registerFleetLocal_setsRegionAndTier() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(fleet.tokenRegion(tokenId), _regionUSCA()); + assertEq(fleet.fleetTier(tokenId), 0); + assertEq(fleet.bonds(tokenId), BASE_BOND); + } + + function test_registerFleetLocal_explicit_tier() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + + assertEq(fleet.fleetTier(tokenId), 2); + assertEq(fleet.bonds(tokenId), fleet.tierBond(2, false)); + } + + function test_RevertIf_registerFleetLocal_invalidCountry() public { + vm.prank(alice); + vm.expectRevert(FleetIdentity.InvalidCountryCode.selector); + fleet.registerFleetLocal(UUID_1, 0, ADMIN_CA, 0); + } + + function test_RevertIf_registerFleetLocal_invalidAdmin_zero() public { + vm.prank(alice); + vm.expectRevert(FleetIdentity.InvalidAdminCode.selector); + fleet.registerFleetLocal(UUID_1, US, 0, 0); + } + + function test_RevertIf_registerFleetLocal_invalidAdmin_over4095() public { + vm.prank(alice); + vm.expectRevert(FleetIdentity.InvalidAdminCode.selector); + fleet.registerFleetLocal(UUID_1, US, 4096, 0); + } + + // --- Per-region independent tier indexing (KEY REQUIREMENT) --- + + function test_perRegionTiers_firstFleetInEachLevelPaysBondWithMultiplier() 
public { + // Country level pays 16x multiplier + vm.prank(alice); + uint256 c1 = fleet.registerFleetCountry(UUID_1, US, 0); + // Local level pays 1× multiplier + vm.prank(alice); + uint256 l1 = fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + + assertEq(fleet.fleetTier(c1), 0); + assertEq(fleet.fleetTier(l1), 0); + + assertEq(fleet.bonds(c1), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); // Country gets 16× multiplier + assertEq(fleet.bonds(l1), BASE_BOND); // Local gets 1× multiplier + } + + function test_perRegionTiers_fillOneRegionDoesNotAffectOthers() public { + // Fill US country tier 0 with 4 fleets + _registerNCountryAt(alice, US, 4, 0, 0); + assertEq(fleet.regionTierCount(_regionUS()), 1); + assertEq(fleet.tierMemberCount(_regionUS(), 0), 4); + + // Next US country fleet goes to tier 1 + vm.prank(bob); + uint256 us21 = fleet.registerFleetCountry(_uuid(100), US, 1); + assertEq(fleet.fleetTier(us21), 1); + assertEq(fleet.bonds(us21), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER() * 2); // Country tier 1: 16× * 2^1 + + // DE country is independent - can still join tier 0 + vm.prank(bob); + uint256 de1 = fleet.registerFleetCountry(_uuid(200), DE, 0); + assertEq(fleet.fleetTier(de1), 0); + assertEq(fleet.bonds(de1), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); + assertEq(fleet.regionTierCount(_regionDE()), 1); + + // US local is independent - can still join tier 0 + vm.prank(bob); + uint256 usca1 = fleet.registerFleetLocal(_uuid(300), US, ADMIN_CA, 0); + assertEq(fleet.fleetTier(usca1), 0); + assertEq(fleet.bonds(usca1), BASE_BOND); + } + + function test_perRegionTiers_twoCountriesIndependent() public { + // Register 4 US country fleets at tier 0 + _registerNCountryAt(alice, US, 4, 0, 0); + assertEq(fleet.tierMemberCount(_regionUS(), 0), 4); + + // Next US country fleet explicitly goes to tier 1 + vm.prank(bob); + uint256 us21 = fleet.registerFleetCountry(_uuid(500), US, 1); + assertEq(fleet.fleetTier(us21), 1); + assertEq(fleet.bonds(us21), BASE_BOND * 
fleet.COUNTRY_BOND_MULTIPLIER() * 2); // Country tier 1: 16× * 2^1 + + // DE country is independent - can still join tier 0 + vm.prank(bob); + uint256 de1 = fleet.registerFleetCountry(_uuid(600), DE, 0); + assertEq(fleet.fleetTier(de1), 0); + assertEq(fleet.bonds(de1), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); // Country tier 0: 16× * 2^0 + } + + function test_perRegionTiers_twoAdminAreasIndependent() public { + // Register 4 local fleets at tier 0 in US/CA + _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0); + assertEq(fleet.tierMemberCount(_regionUSCA(), 0), 4); + + // NY is independent - can still join tier 0 + vm.prank(bob); + uint256 ny1 = fleet.registerFleetLocal(_uuid(500), US, ADMIN_NY, 0); + assertEq(fleet.fleetTier(ny1), 0); + assertEq(fleet.bonds(ny1), BASE_BOND); + } + + // --- Local inclusion hint tier logic --- + + function test_localInclusionHint_emptyRegionReturnsTier0() public { + // No fleets anywhere — localInclusionHint returns tier 0. + (uint256 inclusionTier,) = fleet.localInclusionHint(US, ADMIN_CA); + assertEq(inclusionTier, 0); + + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, inclusionTier); + assertEq(fleet.fleetTier(tokenId), 0); + assertEq(fleet.regionTierCount(_regionUSCA()), 1); + } + + function test_localInclusionHint_returnsCheapestInclusionTier() public { + // Fill admin-area tier 0 (4 members) so tier 0 is full. + _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0); + + // localInclusionHint should return tier 1 (cheapest tier with capacity). 
+ (uint256 inclusionTier,) = fleet.localInclusionHint(US, ADMIN_CA); + assertEq(inclusionTier, 1); + + vm.prank(bob); + uint256 tokenId = fleet.registerFleetLocal(_uuid(100), US, ADMIN_CA, inclusionTier); + assertEq(fleet.fleetTier(tokenId), 1); + assertEq(fleet.regionTierCount(_regionUSCA()), 2); + } + + // --- promote --- + + function test_promote_next_movesToNextTierInRegion() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 0); + + vm.prank(alice); + fleet.promote(tokenId); + + assertEq(fleet.fleetTier(tokenId), 1); + assertEq(fleet.bonds(tokenId), fleet.tierBond(1, true)); + } + + function test_promote_next_pullsBondDifference() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + uint256 balBefore = bondToken.balanceOf(alice); + uint256 diff = fleet.tierBond(1, false) - fleet.tierBond(0, false); + + vm.prank(alice); + fleet.promote(tokenId); + + assertEq(bondToken.balanceOf(alice), balBefore - diff); + } + + function test_reassignTier_promotesWhenTargetHigher() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(alice); + fleet.reassignTier(tokenId, 3); + + assertEq(fleet.fleetTier(tokenId), 3); + assertEq(fleet.bonds(tokenId), fleet.tierBond(3, false)); + assertEq(fleet.regionTierCount(_regionUSCA()), 4); + } + + function test_promote_emitsEvent() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + uint256 diff = fleet.tierBond(1, false) - fleet.tierBond(0, false); + + vm.expectEmit(true, true, true, true); + emit FleetPromoted(tokenId, 0, 1, diff); + + vm.prank(alice); + fleet.promote(tokenId); + } + + function test_RevertIf_promote_notOperator() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(bob); + vm.expectRevert(FleetIdentity.NotOperator.selector); + fleet.promote(tokenId); + } + + function 
test_RevertIf_reassignTier_targetSameAsCurrent() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + + vm.prank(alice); + vm.expectRevert(FleetIdentity.TargetTierSameAsCurrent.selector); + fleet.reassignTier(tokenId, 2); + } + + function test_RevertIf_promote_targetTierFull() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Fill tier 1 with 4 members + for (uint256 i = 0; i < 4; i++) { + vm.prank(bob); + fleet.registerFleetLocal(_uuid(50 + i), US, ADMIN_CA, 1); + } + + vm.prank(alice); + vm.expectRevert(FleetIdentity.TierFull.selector); + fleet.promote(tokenId); + } + + function test_RevertIf_reassignTier_exceedsMaxTiers() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(alice); + vm.expectRevert(FleetIdentity.MaxTiersReached.selector); + fleet.reassignTier(tokenId, 50); + } + + // --- reassignTier (demote direction) --- + + function test_reassignTier_demotesWhenTargetLower() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountry(UUID_1, DE, 3); + + vm.prank(alice); + fleet.reassignTier(tokenId, 1); + + assertEq(fleet.fleetTier(tokenId), 1); + assertEq(fleet.bonds(tokenId), fleet.tierBond(1, true)); + } + + function test_reassignTier_demoteRefundsBondDifference() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 3); + + uint256 balBefore = bondToken.balanceOf(alice); + uint256 refund = fleet.tierBond(3, false) - fleet.tierBond(1, false); + + vm.prank(alice); + fleet.reassignTier(tokenId, 1); + + assertEq(bondToken.balanceOf(alice), balBefore + refund); + } + + function test_reassignTier_demoteEmitsEvent() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 3); + uint256 refund = fleet.tierBond(3, false) - fleet.tierBond(1, false); + + vm.expectEmit(true, true, true, true); + emit 
FleetDemoted(tokenId, 3, 1, refund); + + vm.prank(alice); + fleet.reassignTier(tokenId, 1); + } + + function test_reassignTier_demoteTrimsTierCountWhenTopEmpties() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 3); + assertEq(fleet.regionTierCount(_regionUSCA()), 4); + + vm.prank(alice); + fleet.reassignTier(tokenId, 0); + assertEq(fleet.regionTierCount(_regionUSCA()), 1); + } + + function test_RevertIf_reassignTier_demoteNotOperator() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + + vm.prank(bob); + vm.expectRevert(FleetIdentity.NotOperator.selector); + fleet.reassignTier(tokenId, 0); + } + + function test_RevertIf_reassignTier_demoteTargetTierFull() public { + _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0); + + vm.prank(bob); + uint256 tokenId = fleet.registerFleetLocal(_uuid(100), US, ADMIN_CA, 2); + + vm.prank(bob); + vm.expectRevert(FleetIdentity.TierFull.selector); + fleet.reassignTier(tokenId, 0); + } + + function test_RevertIf_reassignTier_promoteNotOperator() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(bob); + vm.expectRevert(FleetIdentity.NotOperator.selector); + fleet.reassignTier(tokenId, 3); + } + + // --- burn --- + + function test_burn_refundsTierBond() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + uint256 balBefore = bondToken.balanceOf(alice); + + vm.prank(alice); + fleet.burn(tokenId); + + assertEq(bondToken.balanceOf(alice), balBefore + BASE_BOND); + assertEq(bondToken.balanceOf(address(fleet)), 0); + assertEq(fleet.bonds(tokenId), 0); + } + + function test_burn_emitsEvent() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.expectEmit(true, true, true, true); + emit FleetBurned(alice, tokenId, _regionUSCA(), 0, BASE_BOND); + + vm.prank(alice); + fleet.burn(tokenId); + } 
+ + function test_burn_trimsTierCount() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 3); + assertEq(fleet.regionTierCount(_regionUS()), 4); + + vm.prank(alice); + fleet.burn(tokenId); + assertEq(fleet.regionTierCount(_regionUS()), 0); + } + + function test_burn_allowsReregistration_sameRegion() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(alice); + fleet.burn(tokenId); + + // Same UUID can be re-registered in same region, same tokenId + vm.prank(bob); + uint256 newId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + assertEq(newId, tokenId); + assertEq(fleet.tokenRegion(newId), _regionUSCA()); + } + + function test_multiRegion_sameUuidCanRegisterInDifferentRegions() public { + // Same UUID can be registered in multiple regions simultaneously (by SAME owner, SAME level) + vm.prank(alice); + uint256 localId1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(alice); + uint256 localId2 = fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + + // Different tokenIds for different regions + assertTrue(localId1 != localId2, "Different regions should have different tokenIds"); + + // Both have same UUID but different regions + assertEq(fleet.tokenUuid(localId1), UUID_1); + assertEq(fleet.tokenUuid(localId2), UUID_1); + assertEq(fleet.tokenRegion(localId1), _regionUSCA()); + assertEq(fleet.tokenRegion(localId2), _makeAdminRegion(DE, ADMIN_CA)); + + // Both owned by alice + assertEq(fleet.ownerOf(localId1), alice); + assertEq(fleet.ownerOf(localId2), alice); + } + + function test_RevertIf_burn_notOwner() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(bob); + vm.expectRevert(FleetIdentity.NotTokenOwner.selector); + fleet.burn(tokenId); + } + + // --- localInclusionHint --- + + function test_localInclusionHint_emptyRegion() public view { + (uint256 tier, uint256 bond) = 
fleet.localInclusionHint(US, ADMIN_CA); + assertEq(tier, 0); + assertEq(bond, BASE_BOND); + } + + function test_localInclusionHint_afterFillingAdminTier0() public { + _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0); + + // Admin tier 0 full → cheapest inclusion is tier 1. + (uint256 tier, uint256 bond) = fleet.localInclusionHint(US, ADMIN_CA); + assertEq(tier, 1); + assertEq(bond, BASE_BOND * 2); + } + + // --- highestActiveTier --- + + function test_highestActiveTier_noFleets() public view { + assertEq(fleet.highestActiveTier(_regionUS()), 0); + assertEq(fleet.highestActiveTier(_regionUSCA()), 0); + } + + function test_highestActiveTier_afterRegistrations() public { + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 3); + assertEq(fleet.highestActiveTier(_regionUS()), 3); + + // Different region still at 0 + assertEq(fleet.highestActiveTier(_regionDE()), 0); + } + + // --- EdgeBeaconScanner helpers --- + + function test_tierMemberCount_perRegion() public { + _registerNLocalAt(alice, US, ADMIN_CA, 3, 0, 0); + _registerNCountryAt(bob, US, 4, 100, 0); + + assertEq(fleet.tierMemberCount(_regionUSCA(), 0), 3); + assertEq(fleet.tierMemberCount(_regionUS(), 0), 4); + } + + function test_getTierMembers_perRegion() public { + vm.prank(alice); + uint256 usId = fleet.registerFleetCountry(UUID_1, US, 0); + + vm.prank(bob); + uint256 uscaId = fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + + uint256[] memory usMembers = fleet.getTierMembers(_regionUS(), 0); + assertEq(usMembers.length, 1); + assertEq(usMembers[0], usId); + + uint256[] memory uscaMembers = fleet.getTierMembers(_regionUSCA(), 0); + assertEq(uscaMembers.length, 1); + assertEq(uscaMembers[0], uscaId); + } + + function test_getTierUuids_perRegion() public { + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + + vm.prank(bob); + fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + + bytes16[] memory usUUIDs = fleet.getTierUuids(_regionUS(), 0); + assertEq(usUUIDs.length, 1); + 
assertEq(usUUIDs[0], UUID_1); + + bytes16[] memory uscaUUIDs = fleet.getTierUuids(_regionUSCA(), 0); + assertEq(uscaUUIDs.length, 1); + assertEq(uscaUUIDs[0], UUID_2); + } + + // --- Region indexes --- + + function test_activeCountries_addedOnRegistration() public { + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + vm.prank(bob); + fleet.registerFleetCountry(UUID_2, DE, 0); + + uint16[] memory countries = fleet.getActiveCountries(); + assertEq(countries.length, 2); + } + + function test_activeCountries_removedWhenAllBurned() public { + vm.prank(alice); + uint256 id1 = fleet.registerFleetCountry(UUID_1, US, 0); + + uint16[] memory before_ = fleet.getActiveCountries(); + assertEq(before_.length, 1); + + vm.prank(alice); + fleet.burn(id1); + + uint16[] memory after_ = fleet.getActiveCountries(); + assertEq(after_.length, 0); + } + + function test_activeCountries_notDuplicated() public { + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + vm.prank(bob); + fleet.registerFleetCountry(UUID_2, US, 0); + + uint16[] memory countries = fleet.getActiveCountries(); + assertEq(countries.length, 1); + assertEq(countries[0], US); + } + + function test_activeAdminAreas_trackedCorrectly() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + vm.prank(bob); + fleet.registerFleetLocal(UUID_2, US, ADMIN_NY, 0); + + uint32[] memory areas = fleet.getActiveAdminAreas(); + assertEq(areas.length, 2); + } + + function test_activeAdminAreas_removedWhenAllBurned() public { + vm.prank(alice); + uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(fleet.getActiveAdminAreas().length, 1); + + vm.prank(alice); + fleet.burn(id1); + + assertEq(fleet.getActiveAdminAreas().length, 0); + } + + // --- Region key helpers --- + + function test_makeAdminRegion() public view { + assertEq(fleet.makeAdminRegion(US, ADMIN_CA), (uint32(US) << 10) | uint32(ADMIN_CA)); + } + + function test_regionKeyNoOverlap_countryVsAdmin() 
public pure { + uint32 maxCountry = 999; + uint32 minAdmin = (uint32(1) << 10) | uint32(1); + assertTrue(minAdmin > maxCountry); + } + + // --- tokenUuid / bonds --- + + function test_tokenUuid_roundTrip() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + assertEq(fleet.tokenUuid(tokenId), UUID_1); + } + + function test_bonds_returnsTierBond() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + assertEq(fleet.bonds(tokenId), BASE_BOND); + } + + function test_bonds_zeroForNonexistentToken() public view { + assertEq(fleet.bonds(99999), 0); + } + + // --- ERC721Enumerable --- + + function test_enumerable_totalSupply() public { + assertEq(fleet.totalSupply(), 0); + + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + assertEq(fleet.totalSupply(), 1); + + vm.prank(bob); + fleet.registerFleetCountry(UUID_2, DE, 0); + assertEq(fleet.totalSupply(), 2); + + vm.prank(carol); + fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0); + assertEq(fleet.totalSupply(), 3); + } + + function test_enumerable_supportsInterface() public view { + assertTrue(fleet.supportsInterface(0x780e9d63)); + assertTrue(fleet.supportsInterface(0x80ac58cd)); + assertTrue(fleet.supportsInterface(0x01ffc9a7)); + } + + // --- Bond accounting --- + + function test_bondAccounting_acrossRegions() public { + vm.prank(alice); + uint256 c1 = fleet.registerFleetCountry(UUID_1, US, 0); + vm.prank(bob); + uint256 c2 = fleet.registerFleetCountry(UUID_2, DE, 0); + vm.prank(carol); + uint256 l1 = fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0); + + // c1 and c2 are country (16x multiplier), l1 is local (1× multiplier) + assertEq(bondToken.balanceOf(address(fleet)), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER() + BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER() + BASE_BOND); + + vm.prank(bob); + fleet.burn(c2); + assertEq(bondToken.balanceOf(address(fleet)), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER() + BASE_BOND); 
+ + vm.prank(alice); + fleet.burn(c1); + vm.prank(carol); + fleet.burn(l1); + assertEq(bondToken.balanceOf(address(fleet)), 0); + } + + function test_bondAccounting_reassignTierRoundTrip() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + uint256 balStart = bondToken.balanceOf(alice); + + vm.prank(alice); + fleet.reassignTier(tokenId, 3); + + vm.prank(alice); + fleet.reassignTier(tokenId, 0); + + assertEq(bondToken.balanceOf(alice), balStart); + assertEq(fleet.bonds(tokenId), BASE_BOND); + } + + // --- ERC-20 edge case --- + + function test_RevertIf_bondToken_transferFromReturnsFalse() public { + BadERC20 badToken = new BadERC20(); + FleetIdentity f = new FleetIdentity(address(badToken), BASE_BOND); + + badToken.mint(alice, 1_000 ether); + vm.prank(alice); + badToken.approve(address(f), type(uint256).max); + + badToken.setFail(true); + + vm.prank(alice); + vm.expectRevert(); + f.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + } + + // --- Transfer preserves region and tier --- + + function test_transfer_regionAndTierStayWithToken() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 2); + + vm.prank(alice); + fleet.transferFrom(alice, bob, tokenId); + + assertEq(fleet.tokenRegion(tokenId), _regionUS()); + assertEq(fleet.fleetTier(tokenId), 2); + assertEq(fleet.bonds(tokenId), fleet.tierBond(2, true)); + + // After transfer, bob holds the token but alice is still uuidOwner/operator. + // On burn, alice (as uuidOwner) gets BASE_BOND, and alice (as operator) gets tier excess. 
+ uint256 aliceBefore = bondToken.balanceOf(alice); + vm.prank(bob); + fleet.burn(tokenId); + assertEq(bondToken.balanceOf(alice), aliceBefore + fleet.tierBond(2, true)); + } + + // --- Tier lifecycle --- + + function test_tierLifecycle_fillBurnBackfillPerRegion() public { + // Register 4 US country fleets at tier 0 (fills capacity) + uint256[] memory usIds = _registerNCountryAt(alice, US, 4, 0, 0); + assertEq(fleet.tierMemberCount(_regionUS(), 0), 4); + + // Next country fleet goes to tier 1 + vm.prank(bob); + uint256 us5 = fleet.registerFleetCountry(_uuid(100), US, 1); + assertEq(fleet.fleetTier(us5), 1); + + // Burn from tier 0 — now tier 0 has 3, tier 1 has 1. + vm.prank(alice); + fleet.burn(usIds[3]); + + // Explicitly register into tier 1. + vm.prank(carol); + uint256 backfill = fleet.registerFleetCountry(_uuid(200), US, 1); + assertEq(fleet.fleetTier(backfill), 1); + assertEq(fleet.tierMemberCount(_regionUS(), 1), 2); + } + + // --- Edge cases --- + + function test_zeroBaseBond_allowsRegistration() public { + FleetIdentity f = new FleetIdentity(address(bondToken), 0); + vm.prank(alice); + bondToken.approve(address(f), type(uint256).max); + + vm.prank(alice); + uint256 tokenId = f.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + assertEq(f.bonds(tokenId), 0); + + vm.prank(alice); + f.burn(tokenId); + } + + // --- Fuzz Tests --- + + function testFuzz_registerFleetCountry_validCountryCodes(uint16 cc) public { + cc = uint16(bound(cc, 1, 999)); + + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountry(UUID_1, cc, 0); + + assertEq(fleet.tokenRegion(tokenId), uint32(cc)); + assertEq(fleet.fleetTier(tokenId), 0); + assertEq(fleet.bonds(tokenId), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); // Country gets 16x multiplier + } + + function testFuzz_registerFleetLocal_validCodes(uint16 cc, uint16 admin) public { + cc = uint16(bound(cc, 1, 999)); + admin = uint16(bound(admin, 1, 255)); + + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, cc, 
admin, 0); + + uint32 expectedRegion = (uint32(cc) << 10) | uint32(admin); + assertEq(fleet.tokenRegion(tokenId), expectedRegion); + assertEq(fleet.fleetTier(tokenId), 0); + assertEq(fleet.bonds(tokenId), BASE_BOND); + } + + function testFuzz_promote_onlyOperator(address caller) public { + vm.assume(caller != alice); + vm.assume(caller != address(0)); + + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(caller); + vm.expectRevert(FleetIdentity.NotOperator.selector); + fleet.promote(tokenId); + } + + function testFuzz_burn_onlyOwner(address caller) public { + vm.assume(caller != alice); + vm.assume(caller != address(0)); + + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(caller); + vm.expectRevert(FleetIdentity.NotTokenOwner.selector); + fleet.burn(tokenId); + } + + // ══════════════════════════════════════════════ + // UUID Ownership Enforcement Tests + // ══════════════════════════════════════════════ + + function test_uuidOwner_setOnFirstRegistration() public { + assertEq(fleet.uuidOwner(UUID_1), address(0), "No owner before registration"); + assertEq(fleet.uuidTokenCount(UUID_1), 0, "No tokens before registration"); + + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(fleet.uuidOwner(UUID_1), alice, "Alice is UUID owner after registration"); + assertEq(fleet.uuidTokenCount(UUID_1), 1, "Token count is 1 after registration"); + } + + function test_uuidOwner_sameOwnerCanRegisterMultipleRegions() public { + // Alice registers UUID_1 in first region (same level across all) + vm.prank(alice); + uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Alice can register same UUID in second region (same level) + vm.prank(alice); + uint256 id2 = fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + + // And a third region (same level) + vm.prank(alice); + uint256 id3 = fleet.registerFleetLocal(UUID_1, FR, ADMIN_CA, 0); 
+ + assertEq(fleet.uuidOwner(UUID_1), alice, "Alice is still UUID owner"); + assertEq(fleet.uuidTokenCount(UUID_1), 3, "Token count is 3"); + assertEq(fleet.ownerOf(id1), alice); + assertEq(fleet.ownerOf(id2), alice); + assertEq(fleet.ownerOf(id3), alice); + } + + function test_RevertIf_differentOwnerRegistersSameUuid_local() public { + // Alice registers UUID_1 first + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Bob tries to register same UUID in different region → revert + vm.prank(bob); + vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector); + fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + } + + function test_RevertIf_differentOwnerRegistersSameUuid_country() public { + // Alice registers UUID_1 first + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + + // Bob tries to register same UUID in different country → revert + vm.prank(bob); + vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector); + fleet.registerFleetCountry(UUID_1, DE, 0); + } + + function test_RevertIf_differentOwnerRegistersSameUuid_crossLevel() public { + // Alice registers UUID_1 at country level + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + + // Bob tries to register same UUID at local level → revert + vm.prank(bob); + vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector); + fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + } + + function test_uuidOwner_clearedWhenAllTokensBurned() public { + // Alice registers UUID_1 in one region + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(fleet.uuidOwner(UUID_1), alice); + assertEq(fleet.uuidTokenCount(UUID_1), 1); + + // Burn the token + vm.prank(alice); + fleet.burn(tokenId); + + // UUID owner should be cleared + assertEq(fleet.uuidOwner(UUID_1), address(0), "UUID owner cleared after all tokens burned"); + assertEq(fleet.uuidTokenCount(UUID_1), 0, "Token count is 0 after all burned"); + } + + function 
test_uuidOwner_notClearedWhileTokensRemain() public { + // Alice registers UUID_1 in two regions (same level) + vm.prank(alice); + uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(alice); + uint256 id2 = fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + + assertEq(fleet.uuidTokenCount(UUID_1), 2); + + // Burn first token + vm.prank(alice); + fleet.burn(id1); + + // UUID owner should still be alice (one token remains) + assertEq(fleet.uuidOwner(UUID_1), alice, "UUID owner still alice with remaining token"); + assertEq(fleet.uuidTokenCount(UUID_1), 1, "Token count decremented to 1"); + + // Burn second token + vm.prank(alice); + fleet.burn(id2); + + // Now UUID owner should be cleared + assertEq(fleet.uuidOwner(UUID_1), address(0), "UUID owner cleared after all burned"); + assertEq(fleet.uuidTokenCount(UUID_1), 0); + } + + function test_uuidOwner_differentUuidsHaveDifferentOwners() public { + // Alice registers UUID_1 + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Bob registers UUID_2 (different UUID, no conflict) + vm.prank(bob); + fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + + assertEq(fleet.uuidOwner(UUID_1), alice); + assertEq(fleet.uuidOwner(UUID_2), bob); + } + + function test_uuidOwner_canReRegisterAfterBurningAll() public { + // Alice registers and burns UUID_1 + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + vm.prank(alice); + fleet.burn(tokenId); + + // Bob can now register the same UUID (uuid owner was cleared) + vm.prank(bob); + uint256 newTokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(fleet.uuidOwner(UUID_1), bob, "Bob is now UUID owner"); + assertEq(fleet.uuidTokenCount(UUID_1), 1); + assertEq(fleet.ownerOf(newTokenId), bob); + } + + function test_uuidOwner_transferDoesNotChangeUuidOwner() public { + // Alice registers UUID_1 + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); 
+ + assertEq(fleet.uuidOwner(UUID_1), alice); + + // Alice transfers to Bob + vm.prank(alice); + fleet.transferFrom(alice, bob, tokenId); + + // Token owner changed but UUID owner did not + assertEq(fleet.ownerOf(tokenId), bob); + assertEq(fleet.uuidOwner(UUID_1), alice, "UUID owner still alice after transfer"); + } + + function test_RevertIf_transferRecipientTriesToRegisterSameUuid() public { + // Alice registers UUID_1 + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Alice transfers to Bob + vm.prank(alice); + fleet.transferFrom(alice, bob, tokenId); + + // Bob now owns tokenId, but cannot register NEW tokens for UUID_1 + vm.prank(bob); + vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector); + fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + } + + function test_uuidOwner_originalOwnerCanStillRegisterAfterTransfer() public { + // Alice registers UUID_1 in one region + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Alice transfers to Bob + vm.prank(alice); + fleet.transferFrom(alice, bob, tokenId); + + // Alice can still register UUID_1 in new regions (she's still uuidOwner, same level) + vm.prank(alice); + uint256 newTokenId = fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + + assertEq(fleet.ownerOf(newTokenId), alice); + assertEq(fleet.uuidTokenCount(UUID_1), 2); + } + + function testFuzz_uuidOwner_enforcedAcrossAllRegions(uint16 cc1, uint16 cc2, uint16 admin1, uint16 admin2) public { + cc1 = uint16(bound(cc1, 1, 999)); + cc2 = uint16(bound(cc2, 1, 999)); + admin1 = uint16(bound(admin1, 1, 255)); + admin2 = uint16(bound(admin2, 1, 255)); + + // Alice registers first + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, cc1, admin1, 0); + + // Bob cannot register same UUID anywhere + vm.prank(bob); + vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector); + fleet.registerFleetLocal(UUID_1, cc2, admin2, 0); + + vm.prank(bob); + 
vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector); + fleet.registerFleetCountry(UUID_1, cc2, 0); + } + + function testFuzz_uuidOwner_multiRegionTokenCount(uint8 regionCount) public { + regionCount = uint8(bound(regionCount, 1, 10)); + + for (uint8 i = 0; i < regionCount; i++) { + uint16 cc = uint16(1 + i); + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, cc, 0); + } + + assertEq(fleet.uuidTokenCount(UUID_1), regionCount); + assertEq(fleet.uuidOwner(UUID_1), alice); + } + + function testFuzz_uuidOwner_partialBurnPreservesOwnership(uint8 burnCount) public { + uint8 totalTokens = 5; + burnCount = uint8(bound(burnCount, 1, totalTokens - 1)); + + // Register tokens + uint256[] memory tokenIds = new uint256[](totalTokens); + for (uint8 i = 0; i < totalTokens; i++) { + uint16 cc = uint16(1 + i); + vm.prank(alice); + tokenIds[i] = fleet.registerFleetCountry(UUID_1, cc, 0); + } + + assertEq(fleet.uuidTokenCount(UUID_1), totalTokens); + + // Burn some tokens + for (uint8 i = 0; i < burnCount; i++) { + vm.prank(alice); + fleet.burn(tokenIds[i]); + } + + // Owner still alice, count decreased + assertEq(fleet.uuidOwner(UUID_1), alice); + assertEq(fleet.uuidTokenCount(UUID_1), totalTokens - burnCount); + } + + // ══════════════════════════════════════════════ + // UUID Level Enforcement Tests + // ══════════════════════════════════════════════ + + function test_uuidLevel_setOnFirstRegistration_local() public { + assertEq(uint8(fleet.uuidLevel(UUID_1)), 0, "No level before registration"); + + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(uint8(fleet.uuidLevel(UUID_1)), 2, "Level is 2 (local) after local registration"); + } + + function test_uuidLevel_setOnFirstRegistration_country() public { + assertEq(uint8(fleet.uuidLevel(UUID_1)), 0, "No level before registration"); + + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + + assertEq(uint8(fleet.uuidLevel(UUID_1)), 3, "Level is 3 (country) after country 
registration"); + } + + function test_RevertIf_crossLevelRegistration_localThenCountry() public { + // Alice registers UUID_1 at local level + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Alice tries to register same UUID at country level → revert + vm.prank(alice); + vm.expectRevert(FleetIdentity.UuidLevelMismatch.selector); + fleet.registerFleetCountry(UUID_1, DE, 0); + } + + function test_RevertIf_crossLevelRegistration_countryThenLocal() public { + // Alice registers UUID_1 at country level + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + + // Alice tries to register same UUID at local level → revert + vm.prank(alice); + vm.expectRevert(FleetIdentity.UuidLevelMismatch.selector); + fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + } + + function test_uuidLevel_clearedOnLastTokenBurn() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(uint8(fleet.uuidLevel(UUID_1)), 2); + + vm.prank(alice); + fleet.burn(tokenId); + + assertEq(uint8(fleet.uuidLevel(UUID_1)), 0, "Level cleared after all tokens burned"); + } + + function test_uuidLevel_notClearedWhileTokensRemain() public { + vm.prank(alice); + uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + + assertEq(uint8(fleet.uuidLevel(UUID_1)), 2); + + vm.prank(alice); + fleet.burn(id1); + + assertEq(uint8(fleet.uuidLevel(UUID_1)), 2, "Level preserved while tokens remain"); + } + + function test_uuidLevel_canChangeLevelAfterBurningAll() public { + // Register as local + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + assertEq(uint8(fleet.uuidLevel(UUID_1)), 2); + + // Burn + vm.prank(alice); + fleet.burn(tokenId); + + // Now can register as country + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + assertEq(uint8(fleet.uuidLevel(UUID_1)), 3); + } + + // 
══════════════════════════════════════════════ + // Owned-Only Mode Tests + // ══════════════════════════════════════════════ + + function test_claimUuid_basic() public { + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + vm.prank(alice); + uint256 tokenId = fleet.claimUuid(UUID_1); + + // Token minted + assertEq(fleet.ownerOf(tokenId), alice); + assertEq(fleet.tokenUuid(tokenId), UUID_1); + assertEq(fleet.tokenRegion(tokenId), 0); // OWNED_REGION_KEY + + // UUID ownership set + assertEq(fleet.uuidOwner(UUID_1), alice); + assertEq(fleet.uuidTokenCount(UUID_1), 1); + assertTrue(fleet.isOwnedOnly(UUID_1)); + assertEq(uint8(fleet.uuidLevel(UUID_1)), 1); // Owned + + // Bond pulled + assertEq(aliceBalanceBefore - bondToken.balanceOf(alice), BASE_BOND); + + // bonds() returns BASE_BOND for owned-only + assertEq(fleet.bonds(tokenId), BASE_BOND); + } + + function test_RevertIf_claimUuid_alreadyOwned() public { + vm.prank(alice); + fleet.claimUuid(UUID_1); + + vm.prank(bob); + vm.expectRevert(FleetIdentity.UuidAlreadyOwned.selector); + fleet.claimUuid(UUID_1); + } + + function test_RevertIf_claimUuid_alreadyRegistered() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(bob); + vm.expectRevert(FleetIdentity.UuidAlreadyOwned.selector); + fleet.claimUuid(UUID_1); + } + + function test_RevertIf_claimUuid_invalidUuid() public { + vm.prank(alice); + vm.expectRevert(FleetIdentity.InvalidUUID.selector); + fleet.claimUuid(bytes16(0)); + } + + function test_registerFromOwned_local() public { + // First claim + vm.prank(alice); + uint256 ownedTokenId = fleet.claimUuid(UUID_1); + + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + // Register from owned state + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Old owned token burned + vm.expectRevert(); + fleet.ownerOf(ownedTokenId); + + // New token exists + assertEq(fleet.ownerOf(tokenId), alice); + 
assertEq(fleet.tokenRegion(tokenId), _regionUSCA()); + assertEq(fleet.fleetTier(tokenId), 0); + + // UUID state updated + assertEq(fleet.uuidOwner(UUID_1), alice); + assertEq(fleet.uuidTokenCount(UUID_1), 1); // still 1 + assertFalse(fleet.isOwnedOnly(UUID_1)); + assertEq(uint8(fleet.uuidLevel(UUID_1)), 2); // Local + + // Only incremental bond pulled (tier 0 local = BASE_BOND, already paid BASE_BOND) + assertEq(aliceBalanceBefore - bondToken.balanceOf(alice), 0); + } + + function test_registerFromOwned_country() public { + vm.prank(alice); + fleet.claimUuid(UUID_1); + + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 0); + + assertEq(fleet.ownerOf(tokenId), alice); + assertEq(fleet.tokenRegion(tokenId), uint32(US)); + assertEq(uint8(fleet.uuidLevel(UUID_1)), 3); // Country + + // Incremental bond: country tier 0 = 16*BASE_BOND, already paid BASE_BOND, so 15*BASE_BOND + assertEq(aliceBalanceBefore - bondToken.balanceOf(alice), 15 * BASE_BOND); + } + + function test_registerFromOwned_higherTier() public { + vm.prank(alice); + fleet.claimUuid(UUID_1); + + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + // Register at tier 2 local (4*BASE_BOND) + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + + // Incremental: 4*BASE_BOND - BASE_BOND = 3*BASE_BOND + assertEq(aliceBalanceBefore - bondToken.balanceOf(alice), 3 * BASE_BOND); + } + + function test_unregisterToOwned_basic() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + vm.prank(alice); + uint256 ownedTokenId = fleet.unregisterToOwned(tokenId); + + // Old token burned + vm.expectRevert(); + fleet.ownerOf(tokenId); + + // New owned-only token exists + assertEq(fleet.ownerOf(ownedTokenId), alice); + assertEq(fleet.tokenRegion(ownedTokenId), 0); + + // UUID state updated + 
assertTrue(fleet.isOwnedOnly(UUID_1)); + assertEq(uint8(fleet.uuidLevel(UUID_1)), 1); // Owned + + // No refund for tier 0 local (BASE_BOND - BASE_BOND = 0) + assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, 0); + } + + function test_unregisterToOwned_withRefund() public { + // Register at tier 2 local (4*BASE_BOND) + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + vm.prank(alice); + fleet.unregisterToOwned(tokenId); + + // Refund: 4*BASE_BOND - BASE_BOND = 3*BASE_BOND + assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, 3 * BASE_BOND); + } + + function test_unregisterToOwned_fromCountry() public { + // Register country tier 0 (16*BASE_BOND) + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 0); + + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + vm.prank(alice); + fleet.unregisterToOwned(tokenId); + + // Refund: 16*BASE_BOND - BASE_BOND = 15*BASE_BOND + assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, 15 * BASE_BOND); + + // Level reset to Owned + assertEq(uint8(fleet.uuidLevel(UUID_1)), 1); + } + + function test_RevertIf_unregisterToOwned_multipleTokens() public { + vm.prank(alice); + uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0); + + vm.prank(alice); + vm.expectRevert(FleetIdentity.CannotUnregisterMultipleTokens.selector); + fleet.unregisterToOwned(id1); + } + + function test_RevertIf_unregisterToOwned_alreadyOwned() public { + vm.prank(alice); + uint256 tokenId = fleet.claimUuid(UUID_1); + + vm.prank(alice); + vm.expectRevert(FleetIdentity.UuidNotOwned.selector); + fleet.unregisterToOwned(tokenId); + } + + function test_releaseUuid_basic() public { + vm.prank(alice); + uint256 tokenId = fleet.claimUuid(UUID_1); + + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + vm.prank(alice); + 
fleet.releaseUuid(UUID_1); + + // Token burned + vm.expectRevert(); + fleet.ownerOf(tokenId); + + // UUID cleared + assertEq(fleet.uuidOwner(UUID_1), address(0)); + assertEq(fleet.uuidTokenCount(UUID_1), 0); + assertEq(uint8(fleet.uuidLevel(UUID_1)), 0); // None + + // Refund received + assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, BASE_BOND); + } + + function test_releaseUuid_afterTransfer() public { + vm.prank(alice); + uint256 tokenId = fleet.claimUuid(UUID_1); + + // Transfer to bob + vm.prank(alice); + fleet.transferFrom(alice, bob, tokenId); + + // uuidOwner should have updated + assertEq(fleet.uuidOwner(UUID_1), bob); + + // Alice cannot release + vm.prank(alice); + vm.expectRevert(FleetIdentity.NotUuidOwner.selector); + fleet.releaseUuid(UUID_1); + + // Bob can release + uint256 bobBalanceBefore = bondToken.balanceOf(bob); + vm.prank(bob); + fleet.releaseUuid(UUID_1); + assertEq(bondToken.balanceOf(bob) - bobBalanceBefore, BASE_BOND); + } + + function test_RevertIf_releaseUuid_notOwned() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(alice); + vm.expectRevert(FleetIdentity.UuidNotOwned.selector); + fleet.releaseUuid(UUID_1); + } + + function test_ownedOnly_transfer_updatesUuidOwner() public { + vm.prank(alice); + uint256 tokenId = fleet.claimUuid(UUID_1); + + assertEq(fleet.uuidOwner(UUID_1), alice); + + vm.prank(alice); + fleet.transferFrom(alice, bob, tokenId); + + // uuidOwner updated on transfer for owned-only tokens + assertEq(fleet.uuidOwner(UUID_1), bob); + assertEq(fleet.ownerOf(tokenId), bob); + } + + function test_ownedOnly_notInBundle() public { + // Claim some UUIDs as owned-only + vm.prank(alice); + fleet.claimUuid(UUID_1); + vm.prank(alice); + fleet.claimUuid(UUID_2); + + // Bundle should be empty + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 0); + + // Now register one + vm.prank(alice); + 
fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Bundle should contain only the registered one + (uuids, count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 1); + assertEq(uuids[0], UUID_1); + } + + function test_burn_ownedOnly() public { + vm.prank(alice); + uint256 tokenId = fleet.claimUuid(UUID_1); + + uint256 aliceBalanceBefore = bondToken.balanceOf(alice); + + vm.prank(alice); + fleet.burn(tokenId); + + // Token burned + vm.expectRevert(); + fleet.ownerOf(tokenId); + + // UUID cleared + assertEq(fleet.uuidOwner(UUID_1), address(0)); + + // Refund received + assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, BASE_BOND); + } + + function test_ownedOnly_canReRegisterAfterRelease() public { + vm.prank(alice); + fleet.claimUuid(UUID_1); + + vm.prank(alice); + fleet.releaseUuid(UUID_1); + + // Bob can now claim or register + vm.prank(bob); + uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(fleet.ownerOf(tokenId), bob); + assertEq(fleet.uuidOwner(UUID_1), bob); + } + + function test_migration_viaUnregisterAndReregister() public { + // This test shows the new migration pattern using unregisterToOwned + + // Register local in US + vm.prank(alice); + uint256 oldTokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + uint256 aliceBalanceAfterRegister = bondToken.balanceOf(alice); + + // Unregister to owned (no refund at tier 0 local) + vm.prank(alice); + fleet.unregisterToOwned(oldTokenId); + + // Re-register in DE as country (pays 16*BASE_BOND - BASE_BOND = 15*BASE_BOND) + vm.prank(alice); + uint256 newTokenId = fleet.registerFleetCountry(UUID_1, DE, 0); + + assertEq(fleet.ownerOf(newTokenId), alice); + assertEq(fleet.tokenRegion(newTokenId), uint32(DE)); + assertEq(uint8(fleet.uuidLevel(UUID_1)), 3); // Country + + // Net bond change: 15*BASE_BOND additional + assertEq(aliceBalanceAfterRegister - bondToken.balanceOf(alice), 15 * BASE_BOND); + } + + function 
testFuzz_tierBond_geometric(uint256 tier) public view { + tier = bound(tier, 0, 10); + uint256 expected = BASE_BOND; + for (uint256 i = 0; i < tier; i++) { + expected *= 2; + } + // Local regions get 1× multiplier + assertEq(fleet.tierBond(tier, false), expected); + // Country regions get 16x multiplier + assertEq(fleet.tierBond(tier, true), expected * fleet.COUNTRY_BOND_MULTIPLIER()); + } + + function testFuzz_perRegionTiers_newRegionAlwaysStartsAtTier0(uint16 cc) public { + cc = uint16(bound(cc, 1, 999)); + vm.assume(cc != US); // Skip US since we fill it below + + // Fill one country with 8 fleets + _registerNCountry(alice, US, 8, 0); + assertEq(fleet.regionTierCount(_regionUS()), 2); + + // New country should start at tier 0 regardless of other regions + vm.prank(bob); + uint256 tokenId = fleet.registerFleetCountry(_uuid(999), cc, 0); + assertEq(fleet.fleetTier(tokenId), 0); + assertEq(fleet.bonds(tokenId), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); // Country gets 16x multiplier + } + + function testFuzz_tierAssignment_autoFillsSequentiallyPerRegion(uint8 count) public { + count = uint8(bound(count, 1, 40)); + + for (uint256 i = 0; i < count; i++) { + uint256 expectedTier = i / 4; // TIER_CAPACITY = 4 + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocal(_uuid(i + 300), US, ADMIN_CA, expectedTier); + + assertEq(fleet.fleetTier(tokenId), expectedTier); + } + + uint256 expectedTiers = (uint256(count) + 3) / 4; // TIER_CAPACITY = 4 + assertEq(fleet.regionTierCount(_regionUSCA()), expectedTiers); + } + + // --- Invariants --- + + function test_invariant_contractBalanceEqualsSumOfBonds() public { + vm.prank(alice); + uint256 id1 = fleet.registerFleetCountry(UUID_1, US, 0); + vm.prank(bob); + uint256 id2 = fleet.registerFleetCountry(UUID_2, DE, 0); + vm.prank(carol); + uint256 id3 = fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0); + + uint256 sumBonds = fleet.bonds(id1) + fleet.bonds(id2) + fleet.bonds(id3); + 
assertEq(bondToken.balanceOf(address(fleet)), sumBonds); + + vm.prank(alice); + fleet.burn(id1); + + assertEq(bondToken.balanceOf(address(fleet)), fleet.bonds(id2) + fleet.bonds(id3)); + } + + function test_invariant_contractBalanceAfterReassignTierBurn() public { + vm.prank(alice); + uint256 id1 = fleet.registerFleetCountry(UUID_1, US, 0); + vm.prank(bob); + uint256 id2 = fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + vm.prank(carol); + uint256 id3 = fleet.registerFleetLocal(UUID_3, DE, ADMIN_NY, 0); + + vm.prank(alice); + fleet.reassignTier(id1, 3); + + vm.prank(alice); + fleet.reassignTier(id1, 1); + + uint256 expected = fleet.bonds(id1) + fleet.bonds(id2) + fleet.bonds(id3); + assertEq(bondToken.balanceOf(address(fleet)), expected); + + vm.prank(alice); + fleet.burn(id1); + vm.prank(bob); + fleet.burn(id2); + vm.prank(carol); + fleet.burn(id3); + + assertEq(bondToken.balanceOf(address(fleet)), 0); + } + + // --- countryInclusionHint --- + + function test_countryInclusionHint_emptyReturnsZero() public view { + (uint256 tier, uint256 bond) = fleet.countryInclusionHint(US); + assertEq(tier, 0); + assertEq(bond, BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); // Country pays 16x multiplier + } + + function test_countryInclusionHint_onlyCountryFleets() public { + _registerNCountryAt(alice, US, 4, 1000, 0); // fills tier 0 (TIER_CAPACITY=4) + vm.prank(bob); + fleet.registerFleetCountry(_uuid(9000), US, 1); // tier 1 + + // Tier 0 is full → cheapest inclusion = tier 1. 
+ (uint256 tier, uint256 bond) = fleet.countryInclusionHint(US); + assertEq(tier, 1); + assertEq(bond, BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER() * 2); // Country pays 16x multiplier, tier 1 = 2× base + } + + function test_countryInclusionHint_adminAreaCreatesPressure() public { + // Country US: tier 0 with 1 member + vm.prank(alice); + fleet.registerFleetCountry(_uuid(1000), US, 0); + + // US-CA: push to tier 3 (1 member at tier 3) + vm.prank(bob); + fleet.registerFleetLocal(_uuid(2000), US, ADMIN_CA, 3); + + // Country fleet needs to be included in bundle(US, ADMIN_CA). + // Simulation: cursor 3→0. At cursor 3: admin=1 (fits). At cursor 0: admin=0, country=1+1=2 (fits). + // Country tier 0 with 2 members: 2 <= 20-1 = 19. Fits. + // So cheapest = 0 (tier 0 has room: 1/4). + (uint256 tier,) = fleet.countryInclusionHint(US); + assertEq(tier, 0); + } + + function test_countryInclusionHint_multipleAdminAreas_takesMax() public { + // US-CA: fill admin tier 0 (4) + fill country tier 0 (4) = 8 + _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0); + _registerNCountryAt(alice, US, 4, 100, 0); + // US-NY: light (3 admin) + _registerNLocal(alice, US, ADMIN_NY, 3, 200); + + // Country tier 0 has 4/4 members → tier 0 is full. + // Even though the bundle has room, the tier capacity is exhausted. + // So cheapest inclusion tier for a country fleet = 1. + (uint256 tier,) = fleet.countryInclusionHint(US); + assertEq(tier, 1); + } + + function test_countryInclusionHint_ignoresOtherCountries() public { + // DE admin area at tier 5 — should NOT affect US hint + vm.prank(alice); + fleet.registerFleetLocal(_uuid(1000), DE, 1, 5); + + // US-CA at tier 1 + vm.prank(bob); + fleet.registerFleetLocal(_uuid(2000), US, ADMIN_CA, 1); + + (uint256 usTier,) = fleet.countryInclusionHint(US); + // US country fleet needs inclusion in bundle(US, ADMIN_CA). + // Admin has 1 at tier 1. Country at tier 0: +1=1, fits. 
+ assertEq(usTier, 0); + } + + function test_countryInclusionHint_afterBurn_updates() public { + vm.prank(alice); + uint256 id = fleet.registerFleetLocal(_uuid(1000), US, ADMIN_CA, 3); + + vm.prank(alice); + fleet.burn(id); + + (uint256 after_,) = fleet.countryInclusionHint(US); + assertEq(after_, 0); + } + + function test_countryInclusionHint_registrantCanActOnHint() public { + // Fill up to create pressure + _registerNLocal(alice, US, ADMIN_CA, 8, 0); + _registerNCountry(alice, US, 8, 100); + + (uint256 inclusionTier, uint256 hintBond) = fleet.countryInclusionHint(US); + + // Bob registers at country level at the hinted tier + vm.prank(bob); + fleet.registerFleetCountry(_uuid(2000), US, inclusionTier); + + uint256 tokenId = _tokenId(_uuid(2000), _regionUS()); + assertEq(fleet.fleetTier(tokenId), inclusionTier); + assertEq(fleet.bonds(tokenId), hintBond); + + // Bundle for US-CA includes Bob's fleet + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertGt(count, 0); + bool foundCountry; + for (uint256 i = 0; i < count; i++) { + if (uuids[i] == _uuid(2000)) foundCountry = true; + } + assertTrue(foundCountry, "Country fleet should appear in bundle"); + } + + // --- buildHighestBondedUuidBundle (shared-cursor fair-stop) --- + + // ── Empty / Single-level basics ── + + function test_buildBundle_emptyReturnsZero() public view { + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 0); + } + + function test_RevertIf_buildBundle_adminCodeZero() public { + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + + vm.expectRevert(FleetIdentity.AdminAreaRequired.selector); + fleet.buildHighestBondedUuidBundle(US, 0); + } + + function test_buildBundle_singleCountry() public { + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 1); + assertEq(uuids[0], UUID_1); 
+ } + + function test_buildBundle_singleLocal() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 1); + assertEq(uuids[0], UUID_1); + } + + // ── Same cursor, both levels at tier 0 ── + + function test_buildBundle_bothLevelsTied_levelPriorityOrder() public { + // Both at tier 0 → shared cursor 0 → level priority: local, country + vm.prank(alice); + fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 2); + assertEq(uuids[0], UUID_2); // local first + assertEq(uuids[1], UUID_1); // country second + } + + function test_buildBundle_2LevelsTier0_fullCapacity() public { + // 4 local + 4 country at tier 0 = 8 + // Bundle fits all since max is 20 + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0); + _registerNCountryAt(alice, US, 4, 2000, 0); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 8); + } + + function test_buildBundle_2LevelsTier0_partialFill() public { + // 3 local + 2 country = 5 + _registerNLocalAt(alice, US, ADMIN_CA, 3, 1000, 0); + _registerNCountryAt(alice, US, 2, 2000, 0); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 5); + } + + // ── Bond priority: higher tier index = higher bond = comes first ── + + function test_buildBundle_higherBondFirst() public { + // Country: promote to tier 2 (bond=16*4*BASE) + vm.prank(alice); + uint256 usId = fleet.registerFleetCountry(UUID_1, US, 0); + vm.prank(alice); + fleet.reassignTier(usId, 2); + // Local: tier 0 (bond=BASE) + vm.prank(alice); + fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 2); + 
assertEq(uuids[0], UUID_1); // highest bond first (country tier 2) + assertEq(uuids[1], UUID_2); // local tier 0 + } + + function test_buildBundle_multiTierDescendingBond() public { + // Local tier 2 (bond=4*BASE) + vm.prank(alice); + uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + vm.prank(alice); + fleet.reassignTier(id1, 2); + + // Country tier 1 (bond=16*2*BASE) + vm.prank(alice); + uint256 id2 = fleet.registerFleetCountry(UUID_2, US, 0); + vm.prank(alice); + fleet.reassignTier(id2, 1); + + // Local tier 0 (bond=BASE) + vm.prank(alice); + fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 3); + assertEq(uuids[0], UUID_1); // local tier 2: bond=4*BASE + assertEq(uuids[1], UUID_2); // country tier 1: bond=32*BASE (but added after local at cursor) + } + + function test_buildBundle_multiTierMultiLevel_correctOrder() public { + // Admin: tier 0 (4 members) + tier 1 (1 member) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 8000, 0); + vm.prank(alice); + fleet.registerFleetLocal(_uuid(8100), US, ADMIN_CA, 1); + + // Country: promote to tier 1 (bond=16*2*BASE) + vm.prank(alice); + uint256 countryId = fleet.registerFleetCountry(_uuid(8200), US, 0); + vm.prank(alice); + fleet.reassignTier(countryId, 1); + + // Country: promote to tier 2 (bond=16*4*BASE) + vm.prank(alice); + uint256 country2Id = fleet.registerFleetCountry(_uuid(8300), US, 0); + vm.prank(alice); + fleet.reassignTier(country2Id, 2); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=2: country(1)→include. Count=1. + // Cursor=1: local(1)+country(1)→include. Count=3. + // Cursor=0: local(4)→include. Count=7. 
+ assertEq(count, 7); + assertEq(uuids[0], fleet.tokenUuid(country2Id)); // tier 2 first + } + + // ── All-or-nothing ── + + function test_buildBundle_allOrNothing_tierSkippedWhenDoesNotFit() public { + // Fill room so that at a cursor position a tier can't fit. + // Admin tier 1: 4 members + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(5100 + i), US, ADMIN_CA, 1); + } + // Country tier 1: 4 members + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(6100 + i), US, 1); + } + + // Tier 0: local(4), country(3) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 5000, 0); + _registerNCountryAt(alice, US, 3, 6000, 0); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=1: local(4)+country(4)=8. Count=8, room=12. + // Cursor=0: local(4)≤12→include[count=12,room=8]. country(3)≤8→include[count=15,room=5]. + assertEq(count, 15); + } + + function test_buildBundle_allOrNothing_noPartialCollection() public { + // Room=3, tier has 5 members → some members skipped. + // Local tier 1: 4 members + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(2000 + i), US, ADMIN_CA, 1); + } + // Country tier 1: 4 members + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(3000 + i), US, 1); + } + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=1: local(4)+country(4)=8. Count=8. + // Cursor=0: all empty at tier 0. Done. + assertEq(count, 8); + } + + function test_buildBundle_partialInclusion_fillsRemainingSlots() public { + // With partial inclusion: bundle fills remaining slots. 
+ // Country tier 0: 4 members + _registerNCountryAt(alice, US, 4, 0, 0); + + // Local: 4 at tier 0 + 4 at tier 1 (TIER_CAPACITY = 4) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 5000, 0); + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(5100 + i), US, ADMIN_CA, 1); + } + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=1: local(4)=4. Count=4, room=16. + // Cursor=0: local(4)≤16→include 4[count=8,room=12]. country(4)≤12→include 4. + // Final count=12. + assertEq(count, 12); + + // Verify country UUIDs ARE in the result + uint256 countryCount; + for (uint256 i = 0; i < count; i++) { + uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA); + uint32 region = fleet.tokenRegion(tokenId); + if (region == _regionUS()) countryCount++; + } + assertEq(countryCount, 4, "4 country members included"); + } + + // ── Partial inclusion (replaces all-or-nothing + fair-stop) ── + + function test_buildBundle_partialInclusion_fillsBundleCompletely() public { + // With partial inclusion, we fill the bundle completely by including + // as many members as fit, in array order. + + // Consume 6 slots at tier 1. + for (uint256 i = 0; i < 3; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1); + } + for (uint256 i = 0; i < 3; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(2000 + i), US, 1); + } + + // Tier 0: full capacities (TIER_CAPACITY = 4). + _registerNLocalAt(alice, US, ADMIN_CA, 4, 3000, 0); + _registerNCountryAt(alice, US, 4, 4000, 0); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=1: local(3)+country(3)=6. Count=6, room=14. + // Cursor=0: local(4)≤14→include 4[count=10,room=10]. + // country(4)≤10→include 4[count=14,room=6]. + assertEq(count, 14); + } + + function test_buildBundle_partialFill_localAndCountry() public { + // Two local tiers consume 8 slots, leaving 12 for cursor=0. 
+ // At cursor=0: local(4) fits. country(4) included. + + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(2000 + i), US, ADMIN_CA, 2); + } + + // Tier 0: 4 local + 4 country (TIER_CAPACITY = 4) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 3000, 0); + _registerNCountryAt(alice, US, 4, 4000, 0); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=2: local(4)→include. Count=4. + // Cursor=1: local(4)→include. Count=8, room=12. + // Cursor=0: local(4)≤12→include[count=12,room=8]. country(4)≤8→include[count=16,room=4]. + assertEq(count, 16); + } + + function test_buildBundle_partialInclusion_allLevelsPartiallyIncluded() public { + // With partial inclusion, both levels get included partially if needed. + + // Consume 8 slots at tier 1. + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(2000 + i), US, 1); + } + + // Tier 0: local=4, country=4 (TIER_CAPACITY = 4) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 3000, 0); + _registerNCountryAt(alice, US, 4, 4000, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=1: local(4)+country(4)=8. Count=8, room=12. + // Cursor=0: local(4)≤12→include 4[count=12,room=8]. + // country(4)≤8→include 4[count=16]. 
+ assertEq(count, 16); + + // Verify local tier 0 is present + bool foundLocal = false; + for (uint256 i = 0; i < count; i++) { + if (uuids[i] == _uuid(3000)) foundLocal = true; + } + assertTrue(foundLocal, "local tier 0 should be included"); + + // Count how many country tier 0 members are included + uint256 countryT0Count; + for (uint256 i = 0; i < count; i++) { + uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA); + if (fleet.tokenRegion(tokenId) == _regionUS() && fleet.fleetTier(tokenId) == 0) countryT0Count++; + } + assertEq(countryT0Count, 4, "4 country tier 0 members included"); + } + + function test_buildBundle_doesNotDescendAfterBundleFull() public { + // When cursor=1 fills bundle, cursor=0 tiers are NOT included. + + // Tier 1: local(4) + country(4) + more local(4) + more country(4) = 16 + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(2000 + i), US, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(3000 + i), US, ADMIN_CA, 2); + } + + // Tier 0: extras that might not all fit + _registerNLocalAt(alice, US, ADMIN_CA, 4, 4000, 0); + _registerNCountryAt(alice, US, 4, 5000, 0); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=1: admin(8)+country(8)+global(4)=20. Bundle full. + assertEq(count, 20); + } + + function test_buildBundle_partialInclusion_fillsAtHighTier() public { + // With TIER_CAPACITY = 4: + // Cursor=2: local(3)→include. Count=3. + // Cursor=1: local(4)+country(4)=8→include. Count=11, room=9. + // Cursor=0: local(1)≤9→include[count=12,room=8]. country(1)≤8→include[count=13,room=7]. 
+ + for (uint256 i = 0; i < 3; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 2); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(2000 + i), US, ADMIN_CA, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(3000 + i), US, 1); + } + + // Tier 0 extras (would be included with more room): + vm.prank(alice); + fleet.registerFleetLocal(_uuid(5000), US, ADMIN_CA, 0); + vm.prank(alice); + fleet.registerFleetCountry(_uuid(5001), US, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=2: local(3)→include. Count=3, room=17. + // Cursor=1: local(4)+country(4)→include. Count=11, room=9. + // Cursor=0: local(1)≤9→include[count=12,room=8]. country(1)≤8→include[count=13,room=7]. + assertEq(count, 13); + } + + function test_buildBundle_partialInclusion_higherPriorityFirst() public { + // Partial inclusion fills higher-priority levels first at each tier. + // Local gets slots before country. + + // Local tier 1: 4, Country tier 1: 4 + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(2000 + i), US, 1); + } + + // Tier 0: local=4, country=4 (TIER_CAPACITY = 4) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 3000, 0); + _registerNCountryAt(alice, US, 4, 4000, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=1: local(4)+country(4)=8. Count=8, room=12. + // Cursor=0: local(4)≤12→include 4[count=12,room=8]. country(4)≤8→include 4[count=16]. 
+ assertEq(count, 16); + + // Verify local tier 0 full inclusion (4 of 4) + uint256 localT0Count; + for (uint256 i = 0; i < count; i++) { + uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA); + if (fleet.tokenRegion(tokenId) == _regionUSCA() && fleet.fleetTier(tokenId) == 0) localT0Count++; + } + assertEq(localT0Count, 4, "4 local tier 0 included"); + } + + // ── Tie-breaker: local before country at same cursor ── + + function test_buildBundle_tieBreaker_localBeforeCountry() public { + // Room=8 after higher tiers. Local tier 0 (4) tried before country tier 0 (4). + // Local fits (4), then country (4). + + // Eat 12 room at tier 1 and 2. + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(2000 + i), US, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(3000 + i), US, ADMIN_CA, 2); + } + + // Tier 0: local=4, country=4 (TIER_CAPACITY = 4) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 4000, 0); + _registerNCountryAt(alice, US, 4, 5000, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=2: local(4)→include. Count=4, room=16. + // Cursor=1: local(4)+country(4)=8→include. Count=12, room=8. + // Cursor=0: local(4)≤8→include[count=16,room=4]. country(4)≤4→include 4[count=20,room=0]. 
+ assertEq(count, 20); + + // Verify: local(12) + country(8) + uint256 localCount; + uint256 countryCount; + for (uint256 i = 0; i < count; i++) { + uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA); + uint32 region = fleet.tokenRegion(tokenId); + if (region == _regionUS()) countryCount++; + else if (region == _regionUSCA()) localCount++; + } + assertEq(localCount, 12); // tier 0 (4) + tier 1 (4) + tier 2 (4) + assertEq(countryCount, 8); // tier 1 (4) + tier 0 (4) + } + + // ── Empty tiers and gaps ── + + function test_buildBundle_emptyTiersSkippedCleanly() public { + // Register at tier 0 then promote to tier 2, leaving tier 1 empty. + vm.prank(alice); + uint256 id = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + vm.prank(alice); + fleet.reassignTier(id, 2); + + vm.prank(alice); + fleet.registerFleetCountry(UUID_2, US, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=2: local(1)→include. Count=1. + // Cursor=1: all empty. No skip. Descend. + // Cursor=0: country(1)→include. Count=2. + assertEq(count, 2); + assertEq(uuids[0], UUID_1); + assertEq(uuids[1], UUID_2); + } + + function test_buildBundle_multipleEmptyTiersInMiddle() public { + // Local at tier 5, country at tier 0. Tiers 1-4 empty. 
+ vm.prank(alice); + uint256 id = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + vm.prank(alice); + fleet.reassignTier(id, 5); + vm.prank(alice); + fleet.registerFleetCountry(UUID_2, US, 0); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 2); + } + + function test_buildBundle_emptyTiersInMiddle_countryToo() public { + // Country: register at tier 0 and tier 2 (tier 1 empty) + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + vm.prank(alice); + fleet.registerFleetCountry(UUID_2, US, 2); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 2); + assertEq(uuids[0], UUID_2); // higher bond first + assertEq(uuids[1], UUID_1); + } + + // ── Local isolation ── + + function test_buildBundle_multipleAdminAreas_isolated() public { + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0); + _registerNLocalAt(alice, US, ADMIN_NY, 4, 2000, 0); + + (, uint256 countCA) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // CA locals + any country + assertEq(countCA, 4); + (, uint256 countNY) = fleet.buildHighestBondedUuidBundle(US, ADMIN_NY); + // NY locals + any country (same country) + assertEq(countNY, 4); + } + + // ── Single level, multiple tiers ── + + function test_buildBundle_singleLevelMultipleTiers() public { + // Only country, multiple tiers. Country fleets fill all available slots. 
+ _registerNCountryAt(alice, US, 4, 1000, 0); // tier 0: 4 members + _registerNCountryAt(alice, US, 4, 2000, 1); // tier 1: 4 members + _registerNCountryAt(alice, US, 4, 3000, 2); // tier 2: 4 members + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 12); // all country fleets included + // Verify order: tier 2 first (highest bond) + uint256[] memory t2 = fleet.getTierMembers(_regionUS(), 2); + for (uint256 i = 0; i < 4; i++) { + assertEq(uuids[i], bytes16(uint128(t2[i]))); + } + } + + function test_buildBundle_singleLevelOnlyLocal() public { + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0); + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 4); + } + + function test_buildBundle_onlyCountry() public { + // TIER_CAPACITY = 4, so split across two tiers + _registerNCountryAt(alice, US, 4, 1000, 0); + _registerNCountryAt(alice, US, 4, 1100, 1); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 8); + assertEq(uuids[0], _uuid(1100)); // tier 1 comes first (higher bond) + } + + function test_buildBundle_countryFillsSlots() public { + // Test that country fleets fill bundle slots when room is available. 
+ // + // Setup: 2 local fleets + 12 country fleets across 3 tiers + // Expected: All 14 should be included since bundle has room + _registerNLocalAt(alice, US, ADMIN_CA, 2, 1000, 0); + _registerNCountryAt(alice, US, 4, 2000, 0); // tier 0: 4 country + _registerNCountryAt(alice, US, 4, 3000, 1); // tier 1: 4 country + _registerNCountryAt(alice, US, 4, 4000, 2); // tier 2: 4 country + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + + // All 14 should be included: 2 local + 12 country + assertEq(count, 14); + + // Verify order: tier 2 country (highest bond) → tier 1 country → tier 0 local/country + // First 4 should be tier 2 country fleets + for (uint256 i = 0; i < 4; i++) { + assertEq(uuids[i], _uuid(4000 + i)); + } + } + + function test_buildBundle_localsPriorityWithinTier() public { + // When locals and country compete at same tier, locals are included first. + // + // Setup: 8 local fleets + 12 country fleets + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1100, 1); + _registerNCountryAt(alice, US, 4, 2000, 0); + _registerNCountryAt(alice, US, 4, 3000, 1); + _registerNCountryAt(alice, US, 4, 4000, 2); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + + // Total: 8 local + 12 country = 20 (bundle max) + assertEq(count, 20); + } + + // ── Shared cursor: different max tier indices per level ── + + function test_buildBundle_sharedCursor_levelsAtDifferentMaxTiers() public { + // Local at tier 3, Country at tier 1. 
+ vm.prank(alice); + uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + vm.prank(alice); + fleet.reassignTier(id1, 3); + vm.prank(alice); + uint256 id2 = fleet.registerFleetCountry(UUID_2, US, 0); + vm.prank(alice); + fleet.reassignTier(id2, 1); + vm.prank(alice); + fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 3); + assertEq(uuids[0], UUID_1); // tier 3 + assertEq(uuids[1], UUID_2); // tier 1 + assertEq(uuids[2], UUID_3); // tier 0 + } + + function test_buildBundle_sharedCursor_sameTierIndex_differentBondByRegion() public view { + // Local tier 0 = BASE_BOND, Country tier 0 = BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER() (multiplier) + assertEq(fleet.tierBond(0, false), BASE_BOND); + assertEq(fleet.tierBond(0, true), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER()); + assertEq(fleet.tierBond(1, false), BASE_BOND * 2); + assertEq(fleet.tierBond(1, true), BASE_BOND * fleet.COUNTRY_BOND_MULTIPLIER() * 2); + } + + // ── Lifecycle ── + + function test_buildBundle_afterBurn_reflects() public { + vm.prank(alice); + uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + vm.prank(bob); + fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + vm.prank(carol); + fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0); + + (, uint256 countBefore) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(countBefore, 3); + + vm.prank(alice); + fleet.burn(id1); + + (, uint256 countAfter) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(countAfter, 2); + } + + function test_buildBundle_exhaustsBothLevels() public { + vm.prank(alice); + fleet.registerFleetCountry(UUID_1, US, 0); + vm.prank(alice); + fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 2); + bool found1; + bool found2; + for (uint256 i = 0; i < count; 
i++) { + if (uuids[i] == UUID_1) found1 = true; + if (uuids[i] == UUID_2) found2 = true; + } + assertTrue(found1 && found2); + } + + function test_buildBundle_lifecycle_promotionsAndBurns() public { + vm.prank(alice); + uint256 l1 = fleet.registerFleetLocal(_uuid(100), US, ADMIN_CA, 0); + vm.prank(alice); + fleet.registerFleetLocal(_uuid(101), US, ADMIN_CA, 0); + vm.prank(alice); + fleet.registerFleetLocal(_uuid(102), US, ADMIN_CA, 0); + + vm.prank(alice); + uint256 c1 = fleet.registerFleetCountry(_uuid(200), US, 0); + vm.prank(alice); + fleet.registerFleetCountry(_uuid(201), US, 0); + + vm.prank(alice); + fleet.registerFleetLocal(_uuid(300), US, ADMIN_CA, 0); + + vm.prank(alice); + fleet.reassignTier(l1, 3); + vm.prank(alice); + fleet.reassignTier(c1, 1); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // Cursor=3: local(1)→include. Count=1. + // Cursor=2: empty. Descend. + // Cursor=1: country(1)→include. Count=2. + // Cursor=0: local(3)+country(1)=4→include. Count=6. + assertEq(count, 6); + + vm.prank(alice); + fleet.burn(l1); + + (, count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 5); + } + + // ── Cap enforcement ── + + function test_buildBundle_capsAt20() public { + // Fill local: 4+4+4 = 12 in 3 tiers + _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 100, 1); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 200, 2); + // Fill country US: 4+4 = 8 in 2 tiers (TIER_CAPACITY = 4) + _registerNCountryAt(bob, US, 4, 1000, 0); + _registerNCountryAt(bob, US, 4, 1100, 1); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 20); + } + + function test_buildBundle_exactlyFillsToCapacity() public { + // 12 local + 8 country = 20 exactly, spread across tiers (TIER_CAPACITY = 4). 
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1100, 1); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1200, 2); + _registerNCountryAt(alice, US, 4, 2000, 0); + _registerNCountryAt(alice, US, 4, 2100, 1); + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 20); + } + + function test_buildBundle_twentyOneMembers_partialInclusion() public { + // 21 total: local 12 + country 8 + 1 extra country at tier 2. + // With partial inclusion, bundle fills to 20. + // TIER_CAPACITY = 4, so spread across tiers. + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1100, 1); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1200, 2); + _registerNCountryAt(alice, US, 4, 2000, 0); + _registerNCountryAt(alice, US, 4, 2100, 1); + vm.prank(alice); + fleet.registerFleetCountry(_uuid(3000), US, 2); + + // Cursor=2: local(4)+country(1)=5. Count=5, room=15. + // Cursor=1: local(4)+country(4)=8. Count=13, room=7. + // Cursor=0: local(4)≤7→include 4[count=17,room=3]. + // country(4)>3→include 3 of 4[count=20,room=0]. 
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 20); // caps at max bundle size + } + + // ── Integrity ── + + function test_buildBundle_noDuplicateUUIDs() public { + _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0); + _registerNCountryAt(bob, US, 4, 2000, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + for (uint256 i = 0; i < count; i++) { + for (uint256 j = i + 1; j < count; j++) { + assertTrue(uuids[i] != uuids[j], "Duplicate UUID found"); + } + } + } + + function test_buildBundle_noNonExistentUUIDs() public { + _registerNLocalAt(alice, US, ADMIN_CA, 3, 1000, 0); + _registerNCountryAt(bob, US, 2, 2000, 0); + vm.prank(carol); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 6); + for (uint256 i = 0; i < count; i++) { + uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA); + assertTrue(fleet.ownerOf(tokenId) != address(0)); + } + } + + function test_buildBundle_allReturnedAreFromCorrectRegions() public { + // Verify returned UUIDs are from local or country regions. 
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0); + _registerNCountryAt(alice, US, 3, 2000, 0); + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + + uint256 localFound; + uint256 countryFound; + for (uint256 i = 0; i < count; i++) { + uint256 tid = _findTokenId(uuids[i], US, ADMIN_CA); + uint32 region = fleet.tokenRegion(tid); + if (region == _regionUSCA()) localFound++; + else if (region == _regionUS()) countryFound++; + } + assertEq(localFound, 4, "local count"); + assertEq(countryFound, 3, "country count"); + } + + // ── Fuzz ── + + function testFuzz_buildBundle_neverExceeds20(uint8 cCount, uint8 lCount) public { + cCount = uint8(bound(cCount, 0, 15)); + lCount = uint8(bound(lCount, 0, 15)); + + for (uint256 i = 0; i < cCount; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(31_000 + i), US, i / 4); + } + for (uint256 i = 0; i < lCount; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(32_000 + i), US, ADMIN_CA, i / 4); + } + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertLe(count, 20); + } + + function testFuzz_buildBundle_noDuplicates(uint8 cCount, uint8 lCount) public { + cCount = uint8(bound(cCount, 0, 12)); + lCount = uint8(bound(lCount, 0, 12)); + + for (uint256 i = 0; i < cCount; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(41_000 + i), US, i / 4); + } + for (uint256 i = 0; i < lCount; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(42_000 + i), US, ADMIN_CA, i / 4); + } + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + for (uint256 i = 0; i < count; i++) { + for (uint256 j = i + 1; j < count; j++) { + assertTrue(uuids[i] != uuids[j], "Fuzz: duplicate UUID"); + } + } + } + + function testFuzz_buildBundle_allReturnedUUIDsExist(uint8 cCount, uint8 lCount) public { + cCount = uint8(bound(cCount, 0, 12)); + lCount = uint8(bound(lCount, 0, 12)); + + for (uint256 i = 0; i < cCount; 
i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(51_000 + i), US, i / 4); + } + for (uint256 i = 0; i < lCount; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(52_000 + i), US, ADMIN_CA, i / 4); + } + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + for (uint256 i = 0; i < count; i++) { + uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA); + assertTrue(fleet.ownerOf(tokenId) != address(0), "Fuzz: UUID does not exist"); + } + } + + function testFuzz_buildBundle_partialInclusionInvariant(uint8 cCount, uint8 lCount) public { + cCount = uint8(bound(cCount, 0, 12)); + lCount = uint8(bound(lCount, 0, 12)); + + for (uint256 i = 0; i < cCount; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(61_000 + i), US, i / 4); + } + for (uint256 i = 0; i < lCount; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(62_000 + i), US, ADMIN_CA, i / 4); + } + + (bytes16[] memory uuids2, uint256 count2) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + + // With partial inclusion: for each (region, tier) group in the bundle, + // the included members should be a PREFIX of the full tier (registration order). + // We verify this by checking that included members are the first N in the tier's array. 
+ for (uint256 i = 0; i < count2; i++) { + uint256 tid = _findTokenId(uuids2[i], US, ADMIN_CA); + uint32 region = fleet.tokenRegion(tid); + uint256 tier = fleet.fleetTier(tid); + + // Count how many from this (region, tier) are in the bundle + uint256 inBundle; + for (uint256 j = 0; j < count2; j++) { + uint256 tjd = _findTokenId(uuids2[j], US, ADMIN_CA); + if (fleet.tokenRegion(tjd) == region && fleet.fleetTier(tjd) == tier) { + inBundle++; + } + } + + // Get the full tier members + uint256[] memory tierMembers = fleet.getTierMembers(region, tier); + + // The included count should be <= total tier members + assertLe(inBundle, tierMembers.length, "Fuzz: more included than exist"); + + // Verify the included members are exactly the first `inBundle` members of the tier + // (prefix property for partial inclusion) + uint256 found; + for (uint256 m = 0; m < inBundle && m < tierMembers.length; m++) { + bytes16 expectedUuid = bytes16(uint128(tierMembers[m])); + for (uint256 j = 0; j < count2; j++) { + if (uuids2[j] == expectedUuid) { + found++; + break; + } + } + } + assertEq(found, inBundle, "Fuzz: included members not a prefix of tier"); + } + } + + // ══════════════════════════════════════════════════════════════════════════════════ + // Edge Cases: _findCheapestInclusionTier & MaxTiersReached + // ══════════════════════════════════════════════════════════════════════════════════ + + /// @notice When all 24 tiers of a region are full, localInclusionHint should revert. 
+ function test_RevertIf_localInclusionHint_allTiersFull() public { + // Fill all 24 tiers of US/ADMIN_CA (4 members each = 96 fleets) + // TIER_CAPACITY = 4, MAX_TIERS = 24 + for (uint256 tier = 0; tier < 24; tier++) { + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(tier * 100 + i), US, ADMIN_CA, tier); + } + } + + // Verify all tiers are full + for (uint256 tier = 0; tier < 24; tier++) { + assertEq(fleet.tierMemberCount(fleet.makeAdminRegion(US, ADMIN_CA), tier), 4); + } + + // localInclusionHint should revert + vm.expectRevert(FleetIdentity.MaxTiersReached.selector); + fleet.localInclusionHint(US, ADMIN_CA); + } + + /// @notice When all tiers are full, registering at any tier should revert with TierFull. + function test_RevertIf_registerFleetLocal_allTiersFull() public { + // Fill all 24 tiers (TIER_CAPACITY = 4) + for (uint256 tier = 0; tier < 24; tier++) { + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(tier * 100 + i), US, ADMIN_CA, tier); + } + } + + // Registration at tier 0 (or any full tier) should revert with TierFull + vm.prank(bob); + vm.expectRevert(FleetIdentity.TierFull.selector); + fleet.registerFleetLocal(_uuid(99999), US, ADMIN_CA, 0); + } + + /// @notice countryInclusionHint reverts when all tiers in the country region are full. + function test_RevertIf_countryInclusionHint_allTiersFull() public { + // Fill all 24 tiers of country US (4 members each, TIER_CAPACITY = 4) + for (uint256 tier = 0; tier < 24; tier++) { + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(tier * 100 + i), US, tier); + } + } + + vm.expectRevert(FleetIdentity.MaxTiersReached.selector); + fleet.countryInclusionHint(US); + } + + /// @notice Proves cheapest inclusion tier can be ABOVE maxTierIndex when bundle is + /// constrained by higher-priority levels at existing tiers. 
+ /// + /// Scenario: + /// - Fill admin tiers 0, 1, 2 with 4 members each (full) + /// - Country US has 4 fleets at tier 2 (maxTierIndex) + /// - Admin tier 0-2 are FULL (4 members each), so a new fleet cannot join any. + /// - Cheapest inclusion should be tier 3 (above maxTierIndex=2). + function test_cheapestInclusionTier_aboveMaxTierIndex() public { + // Fill admin tiers 0, 1, 2 with 4 members each (TIER_CAPACITY = 4) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 4000, 0); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 5000, 1); + _registerNLocalAt(alice, US, ADMIN_CA, 4, 6000, 2); + // Country at tier 2 (sets maxTierIndex across regions) + _registerNCountryAt(alice, US, 4, 7000, 2); + + // Verify tier 2 is maxTierIndex + assertEq(fleet.regionTierCount(uint32(US)), 3); + assertEq(fleet.regionTierCount(fleet.makeAdminRegion(US, ADMIN_CA)), 3); + + // All admin tiers 0-2 are full (4 members each = TIER_CAPACITY) + assertEq(fleet.tierMemberCount(fleet.makeAdminRegion(US, ADMIN_CA), 0), 4); + assertEq(fleet.tierMemberCount(fleet.makeAdminRegion(US, ADMIN_CA), 1), 4); + assertEq(fleet.tierMemberCount(fleet.makeAdminRegion(US, ADMIN_CA), 2), 4); + + // At tiers 0-2: all tiers are full (4 members = cap), cannot join. + // At tier 3: above maxTierIndex, countBefore = 0, has room. 
+ (uint256 inclusionTier, uint256 bond) = fleet.localInclusionHint(US, ADMIN_CA); + assertEq(inclusionTier, 3, "Should recommend tier 3 (above maxTierIndex=2)"); + assertEq(bond, BASE_BOND * 8); // local tier 3 bond = BASE_BOND * 2^3 + + // Verify registration at tier 3 works + vm.prank(bob); + uint256 tokenId = fleet.registerFleetLocal(_uuid(9999), US, ADMIN_CA, 3); + assertEq(fleet.fleetTier(tokenId), 3); + + // Confirm new fleet appears in bundle at the TOP (first position) + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + // tier 3 (1) + tier 2 admin (4) + tier 2 country (4) + tier 1 admin (4) + tier 0 admin (4) = 17 + // But capped at 12 local + 8 country = 20 max. We have 13 local + 4 country = 17. + assertEq(count, 17, "tier 3 (1) + tier 2 admin (4) + country (4) + tier 1 admin (4) + tier 0 admin (4) = 17"); + assertEq(uuids[0], _uuid(9999), "Tier 3 fleet should be first in bundle"); + } + + /// @notice Edge case: bundle is full from tier maxTierIndex, and all tiers 0..maxTierIndex + /// at the candidate region are also full. The cheapest tier is above maxTierIndex. + function test_cheapestInclusionTier_aboveMaxTierIndex_candidateTiersFull() public { + // Country tier 0 has 4 fleets + _registerNCountryAt(alice, US, 4, 1000, 0); + + // Admin tier 0 has 4 fleets (full) + _registerNLocalAt(alice, US, ADMIN_CA, 4, 2000, 0); + + // Verify admin tier 0 is full + assertEq(fleet.tierMemberCount(fleet.makeAdminRegion(US, ADMIN_CA), 0), 4); + + // Admin tier 0 is full (4 members = TIER_CAPACITY), so candidate must go elsewhere. + // Cheapest inclusion tier should be 1 (above maxTierIndex=0). + (uint256 inclusionTier,) = fleet.localInclusionHint(US, ADMIN_CA); + assertEq(inclusionTier, 1, "Should recommend tier 1 since tier 0 is full"); + } + + /// @notice When going above maxTierIndex would require tier >= MAX_TIERS, revert. + /// + /// Scenario: Fill global tiers 0-23 with 4 members each (96 global fleets). 
+ /// A new LOCAL fleet cannot fit in any tier because: + /// - The bundle simulation runs through tiers 23→0 + /// - At each tier, global's 4 members + potential admin members need to fit + /// - With global filling 4 slots at every tier, and country/admin potentially + /// competing, we design a scenario where no tier works. + /// + /// Simpler approach: Fill all 24 admin tiers AND make bundle full at every tier. + function test_RevertIf_cheapestInclusionTier_exceedsMaxTiers() public { + // Fill all 24 tiers of admin area US/CA with 4 members each (TIER_CAPACITY = 4) + for (uint256 tier = 0; tier < 24; tier++) { + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetLocal(_uuid(tier * 100 + i), US, ADMIN_CA, tier); + } + } + + // Now all admin tiers 0-23 are full. A new admin fleet must go to tier 24, + // which exceeds MAX_TIERS=24 (valid tiers are 0-23). + vm.expectRevert(FleetIdentity.MaxTiersReached.selector); + fleet.localInclusionHint(US, ADMIN_CA); + } + + /// @notice Verify that when bundle is full due to higher-tier members preventing + /// lower-tier inclusion, the hint correctly identifies the cheapest viable tier. + function test_cheapestInclusionTier_bundleFullFromHigherTiers() public { + // Create a scenario where: + // - Admin tiers 0-5 are all full (4 each = TIER_CAPACITY) + // - Country tier 5 has 4 members + // Total at tier 5: 4 country + 4 admin = 8 + // All admin tiers 0-5 are full, so must go to tier 6. + + // Fill admin tiers 0-5 with 4 members each + for (uint256 tier = 0; tier <= 5; tier++) { + _registerNLocalAt(alice, US, ADMIN_CA, 4, 10000 + tier * 100, tier); + } + // Country at tier 5 + _registerNCountryAt(alice, US, 4, 11000, 5); + + // maxTierIndex = 5 + // All admin tiers 0-5 are full (4 = capacity). Cannot join any. + // At tier 6: above maxTierIndex, countBefore = 0. Has room. 
+ (uint256 inclusionTier,) = fleet.localInclusionHint(US, ADMIN_CA); + assertEq(inclusionTier, 6, "Must go above maxTierIndex=5 to tier 6"); + } + + /// @notice Verifies the bundle correctly includes a fleet registered above maxTierIndex. + function test_buildBundle_includesFleetAboveMaxTierIndex() public { + // Only country tier 0 has fleets (maxTierIndex = 0) + _registerNCountryAt(alice, US, 4, 20000, 0); + + // New admin registers at tier 2 (above maxTierIndex) + vm.prank(bob); + uint256 adminToken = fleet.registerFleetLocal(_uuid(21000), US, ADMIN_CA, 2); + + // Bundle should include admin tier 2 first (highest), then country tier 0 + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(count, 5, "Admin tier 2 (1) + Country tier 0 (4) = 5"); + + // First should be admin tier 2 + assertEq(_tokenId(uuids[0], _regionUSCA()), adminToken, "Admin tier 2 fleet should be first"); + } + + // ══════════════════════════════════════════════════════════════════════════════════ + // Demonstration: Partial inclusion prevents total tier displacement + // ══════════════════════════════════════════════════════════════════════════════════ + + /// @notice DEMONSTRATES that partial inclusion prevents the scenario where a single + /// fleet registration could push an entire tier out of the bundle. + /// + /// Scenario (2-level system: country + local): + /// BEFORE: + /// - Admin tier 0: 4 members + /// - Country tier 0: 4 members + /// - Bundle: all 8 members included (4+4=8) + /// + /// AFTER (single admin tier 1 registration): + /// - Admin tier 1: 1 member (NEW - above previous maxTierIndex) + /// - With PARTIAL INCLUSION: + /// - Tier 1: admin(1) → count=1 + /// - Tier 0: admin(4) + country(4) = 8, count=9 + /// - Final bundle: 9 members (all fit) + /// + /// Result: All original fleets remain included. 
+ function test_DEMO_partialInclusionPreventsFullDisplacement() public { + // === BEFORE STATE === + uint32 countryRegion = uint32(US); + + // Fill with admin(4) + country(4) = 8 + uint256[] memory localIds = _registerNLocalAt(alice, US, ADMIN_CA, 4, 30000, 0); // Admin tier 0: 4 + uint256[] memory countryIds = _registerNCountryAt(alice, US, 4, 31000, 0); // Country tier 0: 4 + + // Verify BEFORE: all 8 members in bundle + (bytes16[] memory uuidsBefore, uint256 countBefore) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + assertEq(countBefore, 8, "BEFORE: All 8 members should be in bundle"); + + // Verify all 4 country fleets are included BEFORE + uint256 countryCountBefore; + for (uint256 i = 0; i < countBefore; i++) { + uint256 tokenId = _findTokenId(uuidsBefore[i], US, ADMIN_CA); + if (fleet.tokenRegion(tokenId) == countryRegion) countryCountBefore++; + } + assertEq(countryCountBefore, 4, "BEFORE: All 4 country fleets in bundle"); + + // === SINGLE REGISTRATION === + // Bob registers just ONE fleet at admin tier 1 + vm.prank(bob); + fleet.registerFleetLocal(_uuid(99999), US, ADMIN_CA, 1); + + // === AFTER STATE === + (bytes16[] memory uuidsAfter, uint256 countAfter) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA); + + // Bundle now has 9 members (tier 1: 1 + tier 0: 4+4) + assertEq(countAfter, 9, "AFTER: Bundle should have 9 members"); + + // Count how many country fleets are included AFTER + uint256 countryCountAfter; + for (uint256 i = 0; i < countAfter; i++) { + uint256 tokenId = _findTokenId(uuidsAfter[i], US, ADMIN_CA); + if (fleet.tokenRegion(tokenId) == countryRegion) countryCountAfter++; + } + assertEq(countryCountAfter, 4, "AFTER: All 4 country fleets still in bundle"); + + // Verify all country fleets are still included + bool[] memory countryIncluded = new bool[](4); + for (uint256 i = 0; i < countAfter; i++) { + uint256 tokenId = _findTokenId(uuidsAfter[i], US, ADMIN_CA); + for (uint256 c = 0; c < 4; c++) { + if (tokenId == countryIds[c]) 
countryIncluded[c] = true; + } + } + assertTrue(countryIncluded[0], "First country fleet included"); + assertTrue(countryIncluded[1], "Second country fleet included"); + assertTrue(countryIncluded[2], "Third country fleet included"); + assertTrue(countryIncluded[3], "Fourth country fleet included"); + + // === IMPROVEMENT SUMMARY === + emit log_string("=== PARTIAL INCLUSION FIX DEMONSTRATED ==="); + emit log_string("A single tier-1 registration does not displace any country fleets"); + emit log_named_uint("Country fleets displaced", 0); + emit log_named_uint("Country fleets still included", 4); + } + + // ══════════════════════════════════════════════════════════════════════════════ + // buildCountryOnlyBundle tests + // ══════════════════════════════════════════════════════════════════════════════ + + function test_buildCountryOnlyBundle_emptyCountry() public view { + // No fleets registered yet + (bytes16[] memory uuids, uint256 count) = fleet.buildCountryOnlyBundle(US); + assertEq(count, 0, "Empty country should have 0 UUIDs"); + assertEq(uuids.length, 0, "Array should be trimmed to 0"); + } + + function test_buildCountryOnlyBundle_onlyCountryFleets() public { + // Register 3 country fleets at different tiers + vm.prank(alice); + fleet.registerFleetCountry(_uuid(1), US, 0); + vm.prank(alice); + fleet.registerFleetCountry(_uuid(2), US, 1); + vm.prank(alice); + fleet.registerFleetCountry(_uuid(3), US, 2); + + (bytes16[] memory uuids, uint256 count) = fleet.buildCountryOnlyBundle(US); + assertEq(count, 3, "Should include all 3 country fleets"); + + // Verify tier priority order (highest first) + assertEq(uuids[0], _uuid(3), "Tier 2 should be first"); + assertEq(uuids[1], _uuid(2), "Tier 1 should be second"); + assertEq(uuids[2], _uuid(1), "Tier 0 should be third"); + } + + function test_buildCountryOnlyBundle_excludesLocalFleets() public { + // Register country fleet + vm.prank(alice); + fleet.registerFleetCountry(_uuid(1), US, 0); + + // Register local fleet in 
same country + vm.prank(alice); + fleet.registerFleetLocal(_uuid(2), US, ADMIN_CA, 0); + + // Country-only bundle should ONLY include country fleet + (bytes16[] memory uuids, uint256 count) = fleet.buildCountryOnlyBundle(US); + assertEq(count, 1, "Should only include country fleet"); + assertEq(uuids[0], _uuid(1), "Should be the country fleet UUID"); + } + + function test_buildCountryOnlyBundle_respectsMaxBundleSize() public { + // Register 24 country fleets across 6 tiers (4 per tier = TIER_CAPACITY) + // This gives us more than MAX_BONDED_UUID_BUNDLE_SIZE (20) + for (uint256 tier = 0; tier < 6; tier++) { + for (uint256 i = 0; i < 4; i++) { + vm.prank(alice); + fleet.registerFleetCountry(_uuid(tier * 100 + i), US, tier); + } + } + + (bytes16[] memory uuids, uint256 count) = fleet.buildCountryOnlyBundle(US); + assertEq(count, 20, "Should cap at 20 UUIDs"); + assertEq(uuids.length, 20, "Array should be trimmed to 20"); + } + + function test_RevertIf_buildCountryOnlyBundle_invalidCountryCode() public { + vm.expectRevert(FleetIdentity.InvalidCountryCode.selector); + fleet.buildCountryOnlyBundle(0); + + vm.expectRevert(FleetIdentity.InvalidCountryCode.selector); + fleet.buildCountryOnlyBundle(1000); // > MAX_COUNTRY_CODE (999) + } + + function test_buildCountryOnlyBundle_multipleCountriesIndependent() public { + // Register in US (country 840) + vm.prank(alice); + fleet.registerFleetCountry(_uuid(1), US, 0); + + // Register in Germany (country 276) + vm.prank(alice); + fleet.registerFleetCountry(_uuid(2), DE, 0); + + // US bundle should only have US fleet + (bytes16[] memory usUuids, uint256 usCount) = fleet.buildCountryOnlyBundle(US); + assertEq(usCount, 1, "US should have 1 fleet"); + assertEq(usUuids[0], _uuid(1), "Should be US fleet"); + + // Germany bundle should only have Germany fleet + (bytes16[] memory deUuids, uint256 deCount) = fleet.buildCountryOnlyBundle(DE); + assertEq(deCount, 1, "Germany should have 1 fleet"); + assertEq(deUuids[0], _uuid(2), "Should be 
Germany fleet"); + } + + // ══════════════════════════════════════════════ + // Operator Tests + // ══════════════════════════════════════════════ + + function test_operatorOf_defaultsToUuidOwner() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // No operator set, should default to uuidOwner + assertEq(fleet.operatorOf(UUID_1), alice); + } + + function test_operatorOf_returnsSetOperator() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + + assertEq(fleet.operatorOf(UUID_1), bob); + } + + function test_setOperator_emitsEvent() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + // Just verify setOperator succeeds and changes state + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + assertEq(fleet.operatorOf(UUID_1), bob); + } + + function test_setOperator_transfersTierExcess() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + + uint256 tierExcess = fleet.tierBond(2, false) - BASE_BOND; + uint256 aliceBefore = bondToken.balanceOf(alice); + uint256 bobBefore = bondToken.balanceOf(bob); + + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + + // Alice gets tier excess refunded, bob pays tier excess + assertEq(bondToken.balanceOf(alice), aliceBefore + tierExcess); + assertEq(bondToken.balanceOf(bob), bobBefore - tierExcess); + } + + function test_setOperator_multiRegion_transfersAllTierExcess() public { + // Register in two local regions at different tiers + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_NY, 1); + + uint256 tierExcessFirst = fleet.tierBond(2, false) - BASE_BOND; + uint256 tierExcessSecond = fleet.tierBond(1, false) - BASE_BOND; + uint256 totalTierExcess = tierExcessFirst + tierExcessSecond; + + uint256 aliceBefore = bondToken.balanceOf(alice); + uint256 bobBefore = 
bondToken.balanceOf(bob); + + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + + assertEq(bondToken.balanceOf(alice), aliceBefore + totalTierExcess); + assertEq(bondToken.balanceOf(bob), bobBefore - totalTierExcess); + } + + function test_setOperator_zeroTierExcess_noTransfer() public { + // Register at tier 0, tierExcess = 0 + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + uint256 aliceBefore = bondToken.balanceOf(alice); + uint256 bobBefore = bondToken.balanceOf(bob); + + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + + // No tier excess, no transfer + assertEq(bondToken.balanceOf(alice), aliceBefore); + assertEq(bondToken.balanceOf(bob), bobBefore); + } + + function test_setOperator_changeOperator_transfersBetweenOperators() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + + // Set bob as operator + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + + uint256 tierExcess = fleet.tierBond(2, false) - BASE_BOND; + uint256 bobBefore = bondToken.balanceOf(bob); + uint256 carolBefore = bondToken.balanceOf(carol); + + // Change operator from bob to carol + vm.prank(alice); + fleet.setOperator(UUID_1, carol); + + assertEq(bondToken.balanceOf(bob), bobBefore + tierExcess); + assertEq(bondToken.balanceOf(carol), carolBefore - tierExcess); + } + + function test_setOperator_clearOperator_refundsToOwner() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2); + + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + + uint256 tierExcess = fleet.tierBond(2, false) - BASE_BOND; + uint256 aliceBefore = bondToken.balanceOf(alice); + uint256 bobBefore = bondToken.balanceOf(bob); + + // Clear operator (set to address(0)) + vm.prank(alice); + fleet.setOperator(UUID_1, address(0)); + + assertEq(bondToken.balanceOf(bob), bobBefore + tierExcess); + assertEq(bondToken.balanceOf(alice), aliceBefore - tierExcess); + assertEq(fleet.operatorOf(UUID_1), alice); // defaults to owner again + } + 
+ function test_RevertIf_setOperator_notUuidOwner() public { + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + vm.prank(bob); + vm.expectRevert(FleetIdentity.NotUuidOwner.selector); + fleet.setOperator(UUID_1, carol); + } + + function test_RevertIf_setOperator_ownedOnly() public { + vm.prank(alice); + fleet.claimUuid(UUID_1); + + vm.prank(alice); + vm.expectRevert(FleetIdentity.OperatorNotAllowedForOwnedOnly.selector); + fleet.setOperator(UUID_1, bob); + } + + function test_registerWithOperator_country() public { + uint256 aliceBefore = bondToken.balanceOf(alice); + uint256 bobBefore = bondToken.balanceOf(bob); + + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountryWithOperator(UUID_1, US, 2, bob); + + // Alice pays BASE_BOND, bob pays tier excess + uint256 tierExcess = fleet.tierBond(2, true) - BASE_BOND; + assertEq(bondToken.balanceOf(alice), aliceBefore - BASE_BOND); + assertEq(bondToken.balanceOf(bob), bobBefore - tierExcess); + assertEq(fleet.operatorOf(UUID_1), bob); + assertEq(fleet.uuidOwner(UUID_1), alice); + } + + function test_registerWithOperator_local() public { + uint256 aliceBefore = bondToken.balanceOf(alice); + uint256 bobBefore = bondToken.balanceOf(bob); + + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocalWithOperator(UUID_1, US, ADMIN_CA, 2, bob); + + uint256 tierExcess = fleet.tierBond(2, false) - BASE_BOND; + assertEq(bondToken.balanceOf(alice), aliceBefore - BASE_BOND); + assertEq(bondToken.balanceOf(bob), bobBefore - tierExcess); + assertEq(fleet.operatorOf(UUID_1), bob); + } + + function test_registerWithOperator_emitsEventWithOperator() public { + // Verify registration with operator sets up correctly + vm.prank(alice); + uint256 tokenId = fleet.registerFleetCountryWithOperator(UUID_1, US, 0, bob); + + assertEq(fleet.ownerOf(tokenId), alice); + assertEq(fleet.operatorOf(UUID_1), bob); + assertEq(fleet.uuidOwner(UUID_1), alice); + } + + function test_operatorCanPromote() public { + 
vm.prank(alice); + fleet.registerFleetLocalWithOperator(UUID_1, US, ADMIN_CA, 0, bob); + + uint256 bobBefore = bondToken.balanceOf(bob); + uint256 tokenId = _tokenId(UUID_1, _makeAdminRegion(US, ADMIN_CA)); + + vm.prank(bob); + fleet.promote(tokenId); + + assertEq(fleet.fleetTier(tokenId), 1); + // Bob paid the tier difference + uint256 tierDiff = fleet.tierBond(1, false) - fleet.tierBond(0, false); + assertEq(bondToken.balanceOf(bob), bobBefore - tierDiff); + } + + function test_operatorCanDemote() public { + vm.prank(alice); + fleet.registerFleetLocalWithOperator(UUID_1, US, ADMIN_CA, 2, bob); + + uint256 bobBefore = bondToken.balanceOf(bob); + uint256 tokenId = _tokenId(UUID_1, _makeAdminRegion(US, ADMIN_CA)); + + vm.prank(bob); + fleet.reassignTier(tokenId, 0); + + assertEq(fleet.fleetTier(tokenId), 0); + // Bob gets tier difference refunded + uint256 tierDiff = fleet.tierBond(2, false) - fleet.tierBond(0, false); + assertEq(bondToken.balanceOf(bob), bobBefore + tierDiff); + } + + function test_RevertIf_ownerCannotPromoteWhenOperatorSet() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocalWithOperator(UUID_1, US, ADMIN_CA, 0, bob); + + vm.prank(alice); + vm.expectRevert(FleetIdentity.NotOperator.selector); + fleet.promote(tokenId); + } + + function test_ownerCanBurnEvenWithOperator() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocalWithOperator(UUID_1, US, ADMIN_CA, 2, bob); + + uint256 aliceBefore = bondToken.balanceOf(alice); + uint256 bobBefore = bondToken.balanceOf(bob); + + vm.prank(alice); + fleet.burn(tokenId); + + // Alice gets BASE_BOND, bob gets tier excess + assertEq(bondToken.balanceOf(alice), aliceBefore + BASE_BOND); + uint256 tierExcess = fleet.tierBond(2, false) - BASE_BOND; + assertEq(bondToken.balanceOf(bob), bobBefore + tierExcess); + } + + function test_RevertIf_operatorCannotBurn() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocalWithOperator(UUID_1, US, ADMIN_CA, 0, bob); + 
+ vm.prank(bob); + vm.expectRevert(FleetIdentity.NotTokenOwner.selector); + fleet.burn(tokenId); + } + + function test_unregisterToOwned_refundsOperatorAndClearsOperator() public { + vm.prank(alice); + uint256 tokenId = fleet.registerFleetLocalWithOperator(UUID_1, US, ADMIN_CA, 2, bob); + + uint256 bobBefore = bondToken.balanceOf(bob); + + vm.prank(alice); + fleet.unregisterToOwned(tokenId); + + // Bob gets tier excess refunded + uint256 tierExcess = fleet.tierBond(2, false) - BASE_BOND; + assertEq(bondToken.balanceOf(bob), bobBefore + tierExcess); + // Operator is now cleared (defaults to owner) + assertEq(fleet.operatorOf(UUID_1), alice); + } + + function test_registerFromOwned_preservesOperator() public { + // Alice claims UUID with no operator + vm.prank(alice); + fleet.claimUuid(UUID_1); + + // She registers to local (which removes owned-only token and creates registered token) + vm.prank(alice); + uint256 registeredToken = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0); + + assertEq(fleet.operatorOf(UUID_1), alice); + + // Now alice can set an operator + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + assertEq(fleet.operatorOf(UUID_1), bob); + } + + function testFuzz_setOperator_tierExcessCalculation(uint8 tier1, uint8 tier2) public { + tier1 = uint8(bound(tier1, 0, 7)); + tier2 = uint8(bound(tier2, 0, 7)); + + // Register in two local regions + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, tier1); + vm.prank(alice); + fleet.registerFleetLocal(UUID_1, US, ADMIN_NY, tier2); + + uint256 expectedTierExcess = + (fleet.tierBond(tier1, false) - BASE_BOND) + + (fleet.tierBond(tier2, false) - BASE_BOND); + + uint256 aliceBefore = bondToken.balanceOf(alice); + uint256 bobBefore = bondToken.balanceOf(bob); + + vm.prank(alice); + fleet.setOperator(UUID_1, bob); + + assertEq(bondToken.balanceOf(alice), aliceBefore + expectedTierExcess); + assertEq(bondToken.balanceOf(bob), bobBefore - expectedTierExcess); + } +} diff --git 
a/test/FleetIdentityFairness.t.sol b/test/FleetIdentityFairness.t.sol new file mode 100644 index 0000000..f1923c5 --- /dev/null +++ b/test/FleetIdentityFairness.t.sol @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import "forge-std/Test.sol"; +import "../src/swarms/FleetIdentity.sol"; +import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +/// @dev Minimal ERC-20 mock with public mint for testing. +contract MockERC20Fairness is ERC20 { + constructor() ERC20("Mock Bond Token", "MBOND") {} + + function mint(address to, uint256 amount) external { + _mint(to, amount); + } +} + +/** + * @title FleetIdentityFairness Tests + * @notice Economic fairness analysis for FleetIdentity bundle allocation. + * + * @dev **Fairness Philosophy - Economic Advantage Model** + * + * The FleetIdentity contract uses a simple tier-descent algorithm: + * - Iterate from highest tier to lowest + * - At each tier: include local fleets first, then country fleets + * - Stop when bundle is full (20 slots) + * + * **Economic Fairness via COUNTRY_BOND_MULTIPLIER (8×)** + * + * Country fleets pay 8× more than local fleets at the same tier: + * - Local tier 0: BASE_BOND * 1 = 100 NODL + * - Country tier 0: BASE_BOND * 8 = 800 NODL + * - Local tier 3: BASE_BOND * 8 = 800 NODL (same cost!) + * + * This means a local player can reach tier 3 for the same cost a country player + * pays for tier 0. The 8× multiplier provides significant economic advantage to locals: + * + * | Tier | Local Bond | Country Bond | Country Overpay vs Local Same Tier | + * |------|------------|--------------|-----------------------------------| + * | 0 | 100 NODL | 800 NODL | 8× | + * | 1 | 200 NODL | 1600 NODL | 8× | + * | 2 | 400 NODL | 3200 NODL | 8× | + * | 3 | 800 NODL | 6400 NODL | 8× | + * + * **Priority Rules** + * + * 1. Higher tier always wins (regardless of level) + * 2. Within same tier: local beats country + * 3. 
Within same tier + level: earlier registration wins + * + * **Whale Attack Analysis** + * + * A country whale trying to dominate must pay significantly more: + * - To fill 8 country slots at tier 3: 8 × 6400 NODL = 51,200 NODL + * - 12 locals could counter at tier 3 for: 12 × 800 NODL = 9,600 NODL + * - Whale pays 5.3× more to compete at the same tier level + */ +contract FleetIdentityFairnessTest is Test { + MockERC20Fairness bondToken; + + // Test addresses representing different market participants + address[] localPlayers; + address[] countryPlayers; + address whale; + + uint256 constant BASE_BOND = 100 ether; + uint256 constant NUM_LOCAL_PLAYERS = 20; + uint256 constant NUM_COUNTRY_PLAYERS = 10; + + // Test country and admin areas + uint16 constant COUNTRY_US = 840; + uint16[] adminAreas; + uint256 constant NUM_ADMIN_AREAS = 5; + + function setUp() public { + bondToken = new MockERC20Fairness(); + + // Create test players + whale = address(0xABCDEF); + for (uint256 i = 0; i < NUM_LOCAL_PLAYERS; i++) { + localPlayers.push(address(uint160(0x1000 + i))); + } + for (uint256 i = 0; i < NUM_COUNTRY_PLAYERS; i++) { + countryPlayers.push(address(uint160(0x2000 + i))); + } + + // Create admin areas + for (uint16 i = 1; i <= NUM_ADMIN_AREAS; i++) { + adminAreas.push(i); + } + + // Fund all players generously + uint256 funding = 1_000_000_000_000 ether; + bondToken.mint(whale, funding); + for (uint256 i = 0; i < NUM_LOCAL_PLAYERS; i++) { + bondToken.mint(localPlayers[i], funding); + } + for (uint256 i = 0; i < NUM_COUNTRY_PLAYERS; i++) { + bondToken.mint(countryPlayers[i], funding); + } + } + + // ══════════════════════════════════════════════════════════════════════════════════ + // Helper Functions + // ══════════════════════════════════════════════════════════════════════════════════ + + function _deployFleet() internal returns (FleetIdentity) { + FleetIdentity fleet = new FleetIdentity(address(bondToken), BASE_BOND); + + // Approve all players + vm.prank(whale); + 
bondToken.approve(address(fleet), type(uint256).max); + for (uint256 i = 0; i < localPlayers.length; i++) { + vm.prank(localPlayers[i]); + bondToken.approve(address(fleet), type(uint256).max); + } + for (uint256 i = 0; i < countryPlayers.length; i++) { + vm.prank(countryPlayers[i]); + bondToken.approve(address(fleet), type(uint256).max); + } + + return fleet; + } + + function _uuid(uint256 seed) internal pure returns (bytes16) { + return bytes16(keccak256(abi.encodePacked("fleet-fairness-", seed))); + } + + function _makeAdminRegion(uint16 cc, uint16 admin) internal pure returns (uint32) { + return (uint32(cc) << 10) | uint32(admin); + } + + /// @dev Count how many slots in a bundle are from country vs local registrations + function _countBundleComposition(FleetIdentity fleet, uint16 cc, uint16 admin) + internal + view + returns (uint256 localCount, uint256 countryCount) + { + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(cc, admin); + uint32 countryRegion = uint32(cc); + + for (uint256 i = 0; i < count; i++) { + // Try to find token in country region first + uint256 countryTokenId = fleet.computeTokenId(uuids[i], countryRegion); + try fleet.ownerOf(countryTokenId) returns (address) { + countryCount++; + } catch { + localCount++; + } + } + } + + // ══════════════════════════════════════════════════════════════════════════════════ + // Scenario Tests: Priority & Economic Behavior + // ══════════════════════════════════════════════════════════════════════════════════ + + /** + * @notice Scenario A: Local-Heavy Market + * Many local players competing, few country players. + * Tests that locals correctly fill slots by tier-descent priority. 
+ */ + function test_scenarioA_localHeavyMarket() public { + FleetIdentity fleet = _deployFleet(); + uint16 targetAdmin = adminAreas[0]; + + // 16 local players at tiers 0-3 (4 per tier due to TIER_CAPACITY) + for (uint256 i = 0; i < 16; i++) { + vm.prank(localPlayers[i % NUM_LOCAL_PLAYERS]); + fleet.registerFleetLocal(_uuid(1000 + i), COUNTRY_US, targetAdmin, i / 4); + } + + // 4 country players at tier 0 + for (uint256 i = 0; i < 4; i++) { + vm.prank(countryPlayers[i]); + fleet.registerFleetCountry(_uuid(2000 + i), COUNTRY_US, 0); + } + + (uint256 localCount, uint256 countryCount) = _countBundleComposition(fleet, COUNTRY_US, targetAdmin); + (, uint256 totalCount) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin); + + emit log_string("=== Scenario A: Local-Heavy Market ==="); + emit log_named_uint("Total bundle size", totalCount); + emit log_named_uint("Local slots used", localCount); + emit log_named_uint("Country slots used", countryCount); + + // With tier-descent priority, all 16 locals fill first, then 4 country + assertEq(localCount, 16, "All 16 locals should be included"); + assertEq(countryCount, 4, "All 4 country should fill remaining slots"); + assertEq(totalCount, 20, "Bundle should be full"); + } + + /** + * @notice Scenario B: Country-Heavy Market + * Few local players, many country players at higher tiers. + * Tests that higher-tier country beats lower-tier local. 
+ */ + function test_scenarioB_countryHighTierDominance() public { + FleetIdentity fleet = _deployFleet(); + uint16 targetAdmin = adminAreas[0]; + + // 4 local players at tier 0 + for (uint256 i = 0; i < 4; i++) { + vm.prank(localPlayers[i]); + fleet.registerFleetLocal(_uuid(1000 + i), COUNTRY_US, targetAdmin, 0); + } + + // 12 country players at tiers 1-3 (4 per tier) + // These are at HIGHER tiers, so they come first in bundle + for (uint256 i = 0; i < 12; i++) { + vm.prank(countryPlayers[i % NUM_COUNTRY_PLAYERS]); + fleet.registerFleetCountry(_uuid(2000 + i), COUNTRY_US, (i / 4) + 1); + } + + (uint256 localCount, uint256 countryCount) = _countBundleComposition(fleet, COUNTRY_US, targetAdmin); + (, uint256 totalCount) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin); + + emit log_string("=== Scenario B: Country High-Tier Dominance ==="); + emit log_named_uint("Total bundle size", totalCount); + emit log_named_uint("Local slots used", localCount); + emit log_named_uint("Country slots used", countryCount); + + // Country at tiers 1-3 comes before locals at tier 0 + assertEq(countryCount, 12, "All 12 country (higher tiers) included first"); + assertEq(localCount, 4, "Tier-0 locals fill remaining slots"); + assertEq(totalCount, 16, "Total should equal all registered fleets"); + } + + /** + * @notice Scenario C: Same-Tier Competition + * Locals and country at the same tier. + * Tests that locals get priority within the same tier. 
+ */ + function test_scenarioC_sameTierLocalPriority() public { + FleetIdentity fleet = _deployFleet(); + uint16 targetAdmin = adminAreas[0]; + + // 4 local at tier 0 + for (uint256 i = 0; i < 4; i++) { + vm.prank(localPlayers[i]); + fleet.registerFleetLocal(_uuid(1000 + i), COUNTRY_US, targetAdmin, 0); + } + + // 4 country at tier 0 (same tier) + for (uint256 i = 0; i < 4; i++) { + vm.prank(countryPlayers[i]); + fleet.registerFleetCountry(_uuid(2000 + i), COUNTRY_US, 0); + } + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin); + + emit log_string("=== Scenario C: Same-Tier Local Priority ==="); + emit log_named_uint("Total bundle size", count); + + // First 4 should be locals (priority within same tier) + for (uint256 i = 0; i < 4; i++) { + assertEq(uuids[i], _uuid(1000 + i), "Locals should come first"); + } + // Next 4 should be country + for (uint256 i = 0; i < 4; i++) { + assertEq(uuids[4 + i], _uuid(2000 + i), "Country should follow locals"); + } + } + + /** + * @notice Scenario D: Country Whale at High Tier + * Single whale registers many high-tier country fleets. + * Tests that whale can dominate IF they outbid locals on tier level. 
+ */ + function test_scenarioD_countryWhaleHighTier() public { + FleetIdentity fleet = _deployFleet(); + uint16 targetAdmin = adminAreas[0]; + + // 12 locals at tiers 0-2 (4 per tier) + for (uint256 i = 0; i < 12; i++) { + vm.prank(localPlayers[i]); + fleet.registerFleetLocal(_uuid(1000 + i), COUNTRY_US, targetAdmin, i / 4); + } + + // Whale registers 8 country fleets at tiers 3-4 (4 per tier due to TIER_CAPACITY) + // This is above all locals (tiers 0-2) + for (uint256 i = 0; i < 8; i++) { + vm.prank(whale); + fleet.registerFleetCountry(_uuid(3000 + i), COUNTRY_US, 3 + (i / 4)); + } + + (, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin); + (uint256 localCount, uint256 countryCount) = _countBundleComposition(fleet, COUNTRY_US, targetAdmin); + + emit log_string("=== Scenario D: Country Whale at High Tier ==="); + emit log_named_uint("Total bundle size", count); + emit log_named_uint("Local slots", localCount); + emit log_named_uint("Country slots", countryCount); + + // Whale's tier-3/4 country fleets come first (highest tiers) + // Then locals at tiers 0-2 fill remaining slots + assertEq(countryCount, 8, "Whale's 8 high-tier country fleets included"); + assertEq(localCount, 12, "All 12 locals at lower tiers included"); + assertEq(count, 20, "Bundle full"); + } + + /** + * @notice Scenario E: Locals Counter Whale by Matching Tier + * Shows that locals can economically counter a country whale. + */ + function test_scenarioE_localsCounterWhale() public { + FleetIdentity fleet = _deployFleet(); + uint16 targetAdmin = adminAreas[0]; + + // Whale registers 4 country fleets at tier 3 + // Cost: 4 × (BASE_BOND × 8 × 8) = 4 × 6400 = 25,600 NODL + for (uint256 i = 0; i < 4; i++) { + vm.prank(whale); + fleet.registerFleetCountry(_uuid(3000 + i), COUNTRY_US, 3); + } + + // 4 locals match at tier 3 (same priority, but cheaper!) 
+ // Cost: 4 × (BASE_BOND × 8) = 4 × 800 = 3,200 NODL + for (uint256 i = 0; i < 4; i++) { + vm.prank(localPlayers[i]); + fleet.registerFleetLocal(_uuid(1000 + i), COUNTRY_US, targetAdmin, 3); + } + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin); + + emit log_string("=== Scenario E: Locals Counter Whale ==="); + emit log_named_uint("Total bundle size", count); + + // Locals get priority at tier 3 (same tier, local-first) + for (uint256 i = 0; i < 4; i++) { + assertEq(uuids[i], _uuid(1000 + i), "Locals come first at same tier"); + } + for (uint256 i = 0; i < 4; i++) { + assertEq(uuids[4 + i], _uuid(3000 + i), "Country follows at same tier"); + } + + // Calculate cost ratio + uint256 whaleCost = 4 * fleet.tierBond(3, true); // 25,600 NODL + uint256 localCost = 4 * fleet.tierBond(3, false); // 3,200 NODL + + emit log_named_uint("Whale total cost (ether)", whaleCost / 1 ether); + emit log_named_uint("Locals total cost (ether)", localCost / 1 ether); + emit log_named_uint("Whale overpay factor", whaleCost / localCost); + + assertEq(whaleCost / localCost, 8, "Whale pays 8x more for same tier"); + } + + // ══════════════════════════════════════════════════════════════════════════════════ + // Economic Metrics & Analysis + // ══════════════════════════════════════════════════════════════════════════════════ + + /** + * @notice Verify the 8× economic advantage constants. 
+ */ + function test_economicAdvantage_8xMultiplier() public { + FleetIdentity fleet = _deployFleet(); + + // Verify multiplier + assertEq(fleet.COUNTRY_BOND_MULTIPLIER(), 8, "Multiplier should be 8"); + + // At every tier, country pays exactly 8× local + for (uint256 tier = 0; tier < 6; tier++) { + uint256 localBond = fleet.tierBond(tier, false); + uint256 countryBond = fleet.tierBond(tier, true); + assertEq(countryBond, localBond * 8, "Country should pay 8x at every tier"); + } + } + + /** + * @notice Demonstrate that a local at tier N+3 costs the same as country at tier N. + */ + function test_economicAdvantage_localTierEquivalence() public { + FleetIdentity fleet = _deployFleet(); + + // Local tier 3 = Country tier 0 + assertEq( + fleet.tierBond(3, false), + fleet.tierBond(0, true), + "Local tier 3 should equal country tier 0" + ); + + // Local tier 4 = Country tier 1 + assertEq( + fleet.tierBond(4, false), + fleet.tierBond(1, true), + "Local tier 4 should equal country tier 1" + ); + + // Local tier 5 = Country tier 2 + assertEq( + fleet.tierBond(5, false), + fleet.tierBond(2, true), + "Local tier 5 should equal country tier 2" + ); + + emit log_string("=== Local Tier Equivalence ==="); + emit log_string("Local tier N+3 costs the same as Country tier N"); + emit log_string("This gives locals a 3-tier economic advantage"); + } + + /** + * @notice Analyze country registration efficiency across admin areas. 
+ */ + function test_economicAdvantage_multiRegionEfficiency() public { + FleetIdentity fleet = _deployFleet(); + + // Single country registration covers ALL admin areas + uint256 countryBond = fleet.tierBond(0, true); // 800 NODL + + // To cover N admin areas locally, costs N × local_bond + uint256 localPerArea = fleet.tierBond(0, false); // 100 NODL + + emit log_string("=== Multi-Region Efficiency Analysis ==="); + emit log_named_uint("Country tier-0 bond (ether)", countryBond / 1 ether); + emit log_named_uint("Local tier-0 bond per area (ether)", localPerArea / 1 ether); + + // Country is MORE efficient when covering > 8 admin areas + // Break-even: 8 local registrations = 1 country registration + uint256 breakEvenAreas = countryBond / localPerArea; + emit log_named_uint("Break-even admin areas", breakEvenAreas); + + assertEq(breakEvenAreas, 8, "Country efficient for 8+ admin areas"); + } + + /** + * @notice Bond escalation analysis showing geometric growth. + */ + function test_bondEscalationAnalysis() public { + FleetIdentity fleet = _deployFleet(); + + emit log_string(""); + emit log_string("=== BOND ESCALATION ANALYSIS ==="); + emit log_string(""); + emit log_string("Tier | Local Bond (ether) | Country Bond (ether)"); + emit log_string("-----+--------------------+---------------------"); + + for (uint256 tier = 0; tier <= 6; tier++) { + uint256 localBond = fleet.tierBond(tier, false); + uint256 countryBond = fleet.tierBond(tier, true); + + // Verify geometric progression (2× per tier) + if (tier > 0) { + assertEq(localBond, fleet.tierBond(tier - 1, false) * 2, "Local should double each tier"); + assertEq(countryBond, fleet.tierBond(tier - 1, true) * 2, "Country should double each tier"); + } + } + } + + // ══════════════════════════════════════════════════════════════════════════════════ + // Invariant Tests + // ══════════════════════════════════════════════════════════════════════════════════ + + /** + * @notice CRITICAL: Core invariants that must ALWAYS 
hold. + */ + function test_invariant_coreGuarantees() public { + FleetIdentity fleet = _deployFleet(); + + // Invariant 1: Country multiplier is exactly 8 + assertEq(fleet.COUNTRY_BOND_MULTIPLIER(), 8, "INVARIANT: Country multiplier must be 8"); + + // Invariant 2: Tier capacity allows fair competition + assertEq(fleet.TIER_CAPACITY(), 4, "INVARIANT: Tier capacity must be 4"); + + // Invariant 3: Bundle size reasonable for discovery + assertEq(fleet.MAX_BONDED_UUID_BUNDLE_SIZE(), 20, "INVARIANT: Bundle size must be 20"); + + // Invariant 4: Bond doubles per tier (geometric) + for (uint256 t = 1; t <= 5; t++) { + assertEq( + fleet.tierBond(t, false), + fleet.tierBond(t - 1, false) * 2, + "INVARIANT: Bond must double per tier" + ); + } + + emit log_string("[PASS] All core invariants verified"); + } + + /** + * @notice Bundle always respects tier-descent priority. + */ + function test_invariant_tierDescentPriority() public { + FleetIdentity fleet = _deployFleet(); + uint16 targetAdmin = adminAreas[0]; + + // Mixed setup: locals at tier 1, country at tier 2 + for (uint256 i = 0; i < 4; i++) { + vm.prank(localPlayers[i]); + fleet.registerFleetLocal(_uuid(1000 + i), COUNTRY_US, targetAdmin, 1); + } + for (uint256 i = 0; i < 4; i++) { + vm.prank(countryPlayers[i]); + fleet.registerFleetCountry(_uuid(2000 + i), COUNTRY_US, 2); + } + + (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin); + + // Tier 2 (country) must come before tier 1 (local) - higher tier wins + for (uint256 i = 0; i < 4; i++) { + assertEq(uuids[i], _uuid(2000 + i), "INVARIANT: Higher tier must come first"); + } + for (uint256 i = 0; i < 4; i++) { + assertEq(uuids[4 + i], _uuid(1000 + i), "Lower tier follows"); + } + + assertEq(count, 8); + } + + // ══════════════════════════════════════════════════════════════════════════════════ + // Fuzz Tests + // ══════════════════════════════════════════════════════════════════════════════════ + + /** + * @notice Fuzz 
test to verify bundle properties across random market conditions. + */ + function testFuzz_bundleProperties(uint8 numLocals, uint8 numCountry) public { + // Bound inputs to reasonable ranges + numLocals = uint8(bound(numLocals, 1, 16)); + numCountry = uint8(bound(numCountry, 1, 12)); + + FleetIdentity fleet = _deployFleet(); + uint16 targetAdmin = adminAreas[0]; + + // Register local players (spread across tiers for variety) + for (uint256 i = 0; i < numLocals; i++) { + vm.prank(localPlayers[i % NUM_LOCAL_PLAYERS]); + fleet.registerFleetLocal(_uuid(8000 + i), COUNTRY_US, targetAdmin, i / 4); + } + + // Register country players + for (uint256 i = 0; i < numCountry; i++) { + vm.prank(countryPlayers[i % NUM_COUNTRY_PLAYERS]); + fleet.registerFleetCountry(_uuid(9000 + i), COUNTRY_US, i / 4); + } + + // Get bundle + (, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin); + + // Properties that must always hold: + + // 1. Bundle never exceeds max size + assertLe(count, fleet.MAX_BONDED_UUID_BUNDLE_SIZE(), "Bundle must not exceed max"); + + // 2. Bundle includes as many as possible (up to registered count) + uint256 totalRegistered = uint256(numLocals) + uint256(numCountry); + uint256 expectedMax = totalRegistered < 20 ? totalRegistered : 20; + assertEq(count, expectedMax, "Bundle should maximize utilization"); + } + + /** + * @notice Fuzz that 8x multiplier always holds at any tier. 
+ */ + function testFuzz_constantMultiplier(uint8 tier) public { + tier = uint8(bound(tier, 0, 20)); + FleetIdentity fleet = _deployFleet(); + + uint256 localBond = fleet.tierBond(tier, false); + uint256 countryBond = fleet.tierBond(tier, true); + + assertEq(countryBond, localBond * 8, "8x multiplier must hold at all tiers"); + } +} diff --git a/test/ServiceProvider.t.sol b/test/ServiceProvider.t.sol new file mode 100644 index 0000000..9672dd1 --- /dev/null +++ b/test/ServiceProvider.t.sol @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import "forge-std/Test.sol"; +import "../src/swarms/ServiceProvider.sol"; + +contract ServiceProviderTest is Test { + ServiceProvider provider; + + address alice = address(0xA); + address bob = address(0xB); + + string constant URL_1 = "https://backend.swarm.example.com/api/v1"; + string constant URL_2 = "https://relay.nodle.network:8443"; + string constant URL_3 = "https://provider.third.io"; + + event ProviderRegistered(address indexed owner, string url, uint256 indexed tokenId); + event ProviderBurned(address indexed owner, uint256 indexed tokenId); + + function setUp() public { + provider = new ServiceProvider(); + } + + // ============================== + // registerProvider + // ============================== + + function test_registerProvider_mintsAndStoresURL() public { + vm.prank(alice); + uint256 tokenId = provider.registerProvider(URL_1); + + assertEq(provider.ownerOf(tokenId), alice); + assertEq(keccak256(bytes(provider.providerUrls(tokenId))), keccak256(bytes(URL_1))); + } + + function test_registerProvider_deterministicTokenId() public { + vm.prank(alice); + uint256 tokenId = provider.registerProvider(URL_1); + + assertEq(tokenId, uint256(keccak256(bytes(URL_1)))); + } + + function test_registerProvider_emitsEvent() public { + uint256 expectedTokenId = uint256(keccak256(bytes(URL_1))); + + vm.expectEmit(true, true, true, true); + emit ProviderRegistered(alice, URL_1, expectedTokenId); + 
+ vm.prank(alice); + provider.registerProvider(URL_1); + } + + function test_registerProvider_multipleProviders() public { + vm.prank(alice); + uint256 id1 = provider.registerProvider(URL_1); + + vm.prank(bob); + uint256 id2 = provider.registerProvider(URL_2); + + assertEq(provider.ownerOf(id1), alice); + assertEq(provider.ownerOf(id2), bob); + assertTrue(id1 != id2); + } + + function test_RevertIf_registerProvider_emptyURL() public { + vm.prank(alice); + vm.expectRevert(ServiceProvider.EmptyURL.selector); + provider.registerProvider(""); + } + + function test_RevertIf_registerProvider_duplicateURL() public { + vm.prank(alice); + provider.registerProvider(URL_1); + + vm.prank(bob); + vm.expectRevert(); // ERC721: token already minted + provider.registerProvider(URL_1); + } + + // ============================== + // burn + // ============================== + + function test_burn_deletesURLAndToken() public { + vm.prank(alice); + uint256 tokenId = provider.registerProvider(URL_1); + + vm.prank(alice); + provider.burn(tokenId); + + // URL mapping cleared + assertEq(bytes(provider.providerUrls(tokenId)).length, 0); + + // Token no longer exists + vm.expectRevert(); // ownerOf reverts for non-existent token + provider.ownerOf(tokenId); + } + + function test_burn_emitsEvent() public { + vm.prank(alice); + uint256 tokenId = provider.registerProvider(URL_1); + + vm.expectEmit(true, true, true, true); + emit ProviderBurned(alice, tokenId); + + vm.prank(alice); + provider.burn(tokenId); + } + + function test_RevertIf_burn_notOwner() public { + vm.prank(alice); + uint256 tokenId = provider.registerProvider(URL_1); + + vm.prank(bob); + vm.expectRevert(ServiceProvider.NotTokenOwner.selector); + provider.burn(tokenId); + } + + function test_burn_allowsReregistration() public { + vm.prank(alice); + uint256 tokenId = provider.registerProvider(URL_1); + + vm.prank(alice); + provider.burn(tokenId); + + // Same URL can now be registered by someone else + vm.prank(bob); + uint256 
newTokenId = provider.registerProvider(URL_1); + + assertEq(newTokenId, tokenId); // Same deterministic ID + assertEq(provider.ownerOf(newTokenId), bob); + } + + // ============================== + // Fuzz Tests + // ============================== + + function testFuzz_registerProvider_anyValidURL(string calldata url) public { + vm.assume(bytes(url).length > 0); + + vm.prank(alice); + uint256 tokenId = provider.registerProvider(url); + + assertEq(tokenId, uint256(keccak256(bytes(url)))); + assertEq(provider.ownerOf(tokenId), alice); + } + + function testFuzz_burn_onlyOwner(address caller) public { + vm.assume(caller != alice); + vm.assume(caller != address(0)); + + vm.prank(alice); + uint256 tokenId = provider.registerProvider(URL_1); + + vm.prank(caller); + vm.expectRevert(ServiceProvider.NotTokenOwner.selector); + provider.burn(tokenId); + } +} diff --git a/test/SwarmRegistryL1.t.sol b/test/SwarmRegistryL1.t.sol new file mode 100644 index 0000000..f97bcbe --- /dev/null +++ b/test/SwarmRegistryL1.t.sol @@ -0,0 +1,1031 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import "forge-std/Test.sol"; +import "../src/swarms/SwarmRegistryL1.sol"; +import "../src/swarms/FleetIdentity.sol"; +import "../src/swarms/ServiceProvider.sol"; +import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +contract MockBondTokenL1 is ERC20 { + constructor() ERC20("Mock Bond", "MBOND") {} + + function mint(address to, uint256 amount) external { + _mint(to, amount); + } +} + +contract SwarmRegistryL1Test is Test { + SwarmRegistryL1 swarmRegistry; + FleetIdentity fleetContract; + ServiceProvider providerContract; + MockBondTokenL1 bondToken; + + address fleetOwner = address(0x1); + address providerOwner = address(0x2); + address caller = address(0x3); + + uint256 constant FLEET_BOND = 100 ether; + + // Region constants for fleet registration + uint16 constant US = 840; + uint16 constant ADMIN_CA = 6; // California + + event SwarmRegistered(uint256 indexed 
swarmId, bytes16 indexed fleetUuid, uint256 indexed providerId, address owner); + event SwarmStatusChanged(uint256 indexed swarmId, SwarmRegistryL1.SwarmStatus status); + event SwarmFilterUpdated(uint256 indexed swarmId, address indexed owner, uint32 filterSize); + event SwarmProviderUpdated(uint256 indexed swarmId, uint256 indexed oldProvider, uint256 indexed newProvider); + event SwarmDeleted(uint256 indexed swarmId, bytes16 indexed fleetUuid, address indexed owner); + event SwarmPurged(uint256 indexed swarmId, bytes16 indexed fleetUuid, address indexed purgedBy); + + function setUp() public { + bondToken = new MockBondTokenL1(); + fleetContract = new FleetIdentity(address(bondToken), FLEET_BOND); + providerContract = new ServiceProvider(); + swarmRegistry = new SwarmRegistryL1(address(fleetContract), address(providerContract)); + + // Fund fleet owner and approve + bondToken.mint(fleetOwner, 1_000_000 ether); + vm.prank(fleetOwner); + bondToken.approve(address(fleetContract), type(uint256).max); + } + + // ============================== + // Helpers + // ============================== + + function _registerFleet(address owner, bytes memory seed) internal returns (uint256) { + vm.prank(owner); + return fleetContract.registerFleetLocal(bytes16(keccak256(seed)), US, ADMIN_CA, 0); + } + + function _getFleetUuid(uint256 fleetId) internal pure returns (bytes16) { + return bytes16(uint128(fleetId)); + } + + function _registerProvider(address owner, string memory url) internal returns (uint256) { + vm.prank(owner); + return providerContract.registerProvider(url); + } + + function _registerSwarm( + address owner, + uint256 fleetId, + uint256 providerId, + bytes memory filter, + uint8 fpSize, + SwarmRegistryL1.TagType tagType + ) internal returns (uint256) { + bytes16 fleetUuid = _getFleetUuid(fleetId); + vm.prank(owner); + return swarmRegistry.registerSwarm(fleetUuid, providerId, filter, fpSize, tagType); + } + + function getExpectedValues(bytes memory tagId, uint256 m, 
uint8 fpSize) + public + pure + returns (uint32 h1, uint32 h2, uint32 h3, uint256 fp) + { + bytes32 h = keccak256(tagId); + h1 = uint32(uint256(h)) % uint32(m); + h2 = uint32(uint256(h) >> 32) % uint32(m); + h3 = uint32(uint256(h) >> 64) % uint32(m); + uint256 fpMask = (1 << fpSize) - 1; + fp = (uint256(h) >> 96) & fpMask; + } + + function _write16Bit(bytes memory data, uint256 slotIndex, uint16 value) internal pure { + uint256 bitOffset = slotIndex * 16; + uint256 byteOffset = bitOffset / 8; + data[byteOffset] = bytes1(uint8(value >> 8)); + data[byteOffset + 1] = bytes1(uint8(value)); + } + + function _write8Bit(bytes memory data, uint256 slotIndex, uint8 value) internal pure { + data[slotIndex] = bytes1(value); + } + + // ============================== + // Constructor + // ============================== + + function test_constructor_setsImmutables() public view { + assertEq(address(swarmRegistry.FLEET_CONTRACT()), address(fleetContract)); + assertEq(address(swarmRegistry.PROVIDER_CONTRACT()), address(providerContract)); + } + + function test_RevertIf_constructor_zeroFleetAddress() public { + vm.expectRevert(SwarmRegistryL1.InvalidSwarmData.selector); + new SwarmRegistryL1(address(0), address(providerContract)); + } + + function test_RevertIf_constructor_zeroProviderAddress() public { + vm.expectRevert(SwarmRegistryL1.InvalidSwarmData.selector); + new SwarmRegistryL1(address(fleetContract), address(0)); + } + + function test_RevertIf_constructor_bothZero() public { + vm.expectRevert(SwarmRegistryL1.InvalidSwarmData.selector); + new SwarmRegistryL1(address(0), address(0)); + } + + // ============================== + // registerSwarm — happy path + // ============================== + + function test_registerSwarm_basicFlow() public { + uint256 fleetId = _registerFleet(fleetOwner, "my-fleet"); + uint256 providerId = _registerProvider(providerOwner, "https://api.example.com"); + + uint256 swarmId = _registerSwarm( + fleetOwner, fleetId, providerId, new bytes(100), 
16, SwarmRegistryL1.TagType.IBEACON_INCLUDES_MAC + ); + + // Swarm ID is deterministic hash of (fleetUuid, providerId, filter) + uint256 expectedId = swarmRegistry.computeSwarmId(_getFleetUuid(fleetId), providerId, new bytes(100)); + assertEq(swarmId, expectedId); + } + + function test_registerSwarm_storesMetadataCorrectly() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.VENDOR_ID); + + ( + bytes16 storedFleetUuid, + uint256 storedProviderId, + address filterPointer, + uint8 storedFpSize, + SwarmRegistryL1.TagType storedTagType, + SwarmRegistryL1.SwarmStatus storedStatus + ) = swarmRegistry.swarms(swarmId); + + assertEq(storedFleetUuid, _getFleetUuid(fleetId)); + assertEq(storedProviderId, providerId); + assertTrue(filterPointer != address(0)); + assertEq(storedFpSize, 8); + assertEq(uint8(storedTagType), uint8(SwarmRegistryL1.TagType.VENDOR_ID)); + assertEq(uint8(storedStatus), uint8(SwarmRegistryL1.SwarmStatus.REGISTERED)); + } + + function test_registerSwarm_deterministicId() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + bytes memory filter = new bytes(32); + + uint256 expectedId = swarmRegistry.computeSwarmId(_getFleetUuid(fleetId), providerId, filter); + + uint256 swarmId = _registerSwarm(fleetOwner, fleetId, providerId, filter, 8, SwarmRegistryL1.TagType.GENERIC); + assertEq(swarmId, expectedId); + } + + function test_RevertIf_registerSwarm_duplicateSwarm() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.SwarmAlreadyExists.selector); + 
swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(32), 8, SwarmRegistryL1.TagType.GENERIC); + } + + function test_registerSwarm_emitsSwarmRegistered() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + bytes memory filter = new bytes(50); + uint256 expectedId = swarmRegistry.computeSwarmId(_getFleetUuid(fleetId), providerId, filter); + + vm.expectEmit(true, true, true, true); + emit SwarmRegistered(expectedId, _getFleetUuid(fleetId), providerId, fleetOwner); + + _registerSwarm(fleetOwner, fleetId, providerId, filter, 16, SwarmRegistryL1.TagType.GENERIC); + } + + function test_registerSwarm_linksUuidSwarms() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + + uint256 swarmId1 = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + uint256 swarmId2 = + _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), swarmId1); + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1), swarmId2); + } + + function test_registerSwarm_allTagTypes() public { + uint256 fleetId1 = _registerFleet(fleetOwner, "f1"); + uint256 fleetId2 = _registerFleet(fleetOwner, "f2"); + uint256 fleetId3 = _registerFleet(fleetOwner, "f3"); + uint256 fleetId4 = _registerFleet(fleetOwner, "f4"); + uint256 providerId = _registerProvider(providerOwner, "url"); + + uint256 s1 = _registerSwarm( + fleetOwner, fleetId1, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.IBEACON_PAYLOAD_ONLY + ); + uint256 s2 = _registerSwarm( + fleetOwner, fleetId2, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.IBEACON_INCLUDES_MAC + ); + uint256 s3 = + _registerSwarm(fleetOwner, fleetId3, providerId, 
new bytes(32), 8, SwarmRegistryL1.TagType.VENDOR_ID); + uint256 s4 = _registerSwarm(fleetOwner, fleetId4, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.GENERIC); + + (,,,, SwarmRegistryL1.TagType t1,) = swarmRegistry.swarms(s1); + (,,,, SwarmRegistryL1.TagType t2,) = swarmRegistry.swarms(s2); + (,,,, SwarmRegistryL1.TagType t3,) = swarmRegistry.swarms(s3); + (,,,, SwarmRegistryL1.TagType t4,) = swarmRegistry.swarms(s4); + + assertEq(uint8(t1), uint8(SwarmRegistryL1.TagType.IBEACON_PAYLOAD_ONLY)); + assertEq(uint8(t2), uint8(SwarmRegistryL1.TagType.IBEACON_INCLUDES_MAC)); + assertEq(uint8(t3), uint8(SwarmRegistryL1.TagType.VENDOR_ID)); + assertEq(uint8(t4), uint8(SwarmRegistryL1.TagType.GENERIC)); + } + + // ============================== + // registerSwarm — reverts + // ============================== + + function test_RevertIf_registerSwarm_notUuidOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "my-fleet"); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryL1.NotUuidOwner.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), 1, new bytes(10), 16, SwarmRegistryL1.TagType.GENERIC); + } + + function test_RevertIf_registerSwarm_fingerprintSizeZero() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.InvalidFingerprintSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(32), 0, SwarmRegistryL1.TagType.GENERIC); + } + + function test_RevertIf_registerSwarm_fingerprintSizeExceedsMax() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.InvalidFingerprintSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(32), 17, SwarmRegistryL1.TagType.GENERIC); + } + + function 
test_RevertIf_registerSwarm_emptyFilter() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.InvalidFilterSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(0), 8, SwarmRegistryL1.TagType.GENERIC); + } + + function test_RevertIf_registerSwarm_filterTooLarge() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.InvalidFilterSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(24577), 8, SwarmRegistryL1.TagType.GENERIC); + } + + function test_registerSwarm_maxFingerprintSize() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + // fpSize=16 is MAX_FINGERPRINT_SIZE, should succeed + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(100), 16, SwarmRegistryL1.TagType.GENERIC); + assertTrue(swarmId != 0); + } + + function test_registerSwarm_maxFilterSize() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + // Exactly 24576 bytes should succeed + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(24576), 8, SwarmRegistryL1.TagType.GENERIC); + assertTrue(swarmId != 0); + } + + // ============================== + // acceptSwarm / rejectSwarm + // ============================== + + function test_acceptSwarm_setsStatusAndEmits() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.expectEmit(true, true, true, true); + 
emit SwarmStatusChanged(swarmId, SwarmRegistryL1.SwarmStatus.ACCEPTED); + + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + (,,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.ACCEPTED)); + } + + function test_rejectSwarm_setsStatusAndEmits() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.expectEmit(true, true, true, true); + emit SwarmStatusChanged(swarmId, SwarmRegistryL1.SwarmStatus.REJECTED); + + vm.prank(providerOwner); + swarmRegistry.rejectSwarm(swarmId); + + (,,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.REJECTED)); + } + + function test_RevertIf_acceptSwarm_notProviderOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryL1.NotProviderOwner.selector); + swarmRegistry.acceptSwarm(swarmId); + } + + function test_RevertIf_rejectSwarm_notProviderOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(fleetOwner); // fleet owner != provider owner + vm.expectRevert(SwarmRegistryL1.NotProviderOwner.selector); + swarmRegistry.rejectSwarm(swarmId); + } + + function test_acceptSwarm_afterReject() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = 
_registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(providerOwner); + swarmRegistry.rejectSwarm(swarmId); + + // Provider changes mind + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + (,,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.ACCEPTED)); + } + + // ============================== + // checkMembership — XOR logic + // ============================== + + function test_checkMembership_XORLogic16Bit() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "u1"); + + bytes memory tagId = hex"1122334455"; + uint8 fpSize = 16; + uint256 dataLen = 100; + uint256 m = (dataLen * 8) / fpSize; // 50 slots + + (uint32 h1, uint32 h2, uint32 h3, uint256 expectedFp) = getExpectedValues(tagId, m, fpSize); + + // Skip if collision (extremely unlikely with 50 slots) + if (h1 == h2 || h1 == h3 || h2 == h3) { + return; + } + + bytes memory filter = new bytes(dataLen); + _write16Bit(filter, h1, uint16(expectedFp)); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filter, fpSize, SwarmRegistryL1.TagType.GENERIC); + + // Positive check + assertTrue(swarmRegistry.checkMembership(swarmId, keccak256(tagId)), "Valid tag should pass"); + + // Negative check + assertFalse(swarmRegistry.checkMembership(swarmId, keccak256(hex"999999")), "Invalid tag should fail"); + } + + function test_checkMembership_XORLogic8Bit() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "u1"); + + bytes memory tagId = hex"AABBCCDD"; + uint8 fpSize = 8; + // SSTORE2 prepends 0x00 STOP byte, so on-chain: + // extcodesize = rawLen + 1, dataLen = extcodesize - 1 = rawLen + // But SSTORE2.read offsets reads by +1 (skips STOP byte), so + 
// the data bytes read on-chain map 1:1 to the bytes we pass in. + // Therefore m = (rawLen * 8) / fpSize and slot indices match directly. + uint256 rawLen = 80; + uint256 m = (rawLen * 8) / fpSize; // 80 + + (uint32 h1, uint32 h2, uint32 h3, uint256 expectedFp) = getExpectedValues(tagId, m, fpSize); + + if (h1 == h2 || h1 == h3 || h2 == h3) { + return; + } + + bytes memory filter = new bytes(rawLen); + _write8Bit(filter, h1, uint8(expectedFp)); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filter, fpSize, SwarmRegistryL1.TagType.GENERIC); + + assertTrue(swarmRegistry.checkMembership(swarmId, keccak256(tagId)), "8-bit valid tag should pass"); + assertFalse(swarmRegistry.checkMembership(swarmId, keccak256(hex"FFFFFF")), "8-bit invalid tag should fail"); + } + + function test_RevertIf_checkMembership_swarmNotFound() public { + vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector); + swarmRegistry.checkMembership(999, keccak256("anything")); + } + + function test_checkMembership_allZeroFilter_returnsConsistent() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "u1"); + + // All-zero filter: f1^f2^f3 = 0^0^0 = 0 + // Only matches if expectedFp is also 0 + bytes memory filter = new bytes(64); + uint256 swarmId = _registerSwarm(fleetOwner, fleetId, providerId, filter, 16, SwarmRegistryL1.TagType.GENERIC); + + // Some tags will match (those with expectedFp=0), most won't + // The point is it doesn't revert + swarmRegistry.checkMembership(swarmId, keccak256("test1")); + swarmRegistry.checkMembership(swarmId, keccak256("test2")); + } + + // ============================== + // Multiple swarms per fleet + // ============================== + + function test_multipleSwarms_sameFleet() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + 
uint256 providerId3 = _registerProvider(providerOwner, "url3"); + + uint256 s1 = _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(32), 8, SwarmRegistryL1.TagType.GENERIC); + uint256 s2 = + _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(64), 16, SwarmRegistryL1.TagType.VENDOR_ID); + uint256 s3 = _registerSwarm( + fleetOwner, fleetId, providerId3, new bytes(50), 12, SwarmRegistryL1.TagType.IBEACON_PAYLOAD_ONLY + ); + + // IDs are distinct hashes + assertTrue(s1 != s2 && s2 != s3 && s1 != s3); + + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), s1); + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1), s2); + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 2), s3); + } + + // ============================== + // Constants + // ============================== + + function test_constants() public view { + assertEq(swarmRegistry.MAX_FINGERPRINT_SIZE(), 16); + } + + // ============================== + // Fuzz + // ============================== + + function testFuzz_registerSwarm_validFingerprintSizes(uint8 fpSize) public { + fpSize = uint8(bound(fpSize, 1, 16)); + + uint256 fleetId = _registerFleet(fleetOwner, abi.encodePacked("fleet-", fpSize)); + uint256 providerId = _registerProvider(providerOwner, string(abi.encodePacked("url-", fpSize))); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(64), fpSize, SwarmRegistryL1.TagType.GENERIC); + + (,,, uint8 storedFp,,) = swarmRegistry.swarms(swarmId); + assertEq(storedFp, fpSize); + } + + function testFuzz_registerSwarm_invalidFingerprintSizes(uint8 fpSize) public { + vm.assume(fpSize == 0 || fpSize > 16); + + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.InvalidFingerprintSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(32), fpSize, SwarmRegistryL1.TagType.GENERIC); + 
} + + // ============================== + // updateSwarmFilter + // ============================== + + function test_updateSwarmFilter_updatesFilterAndResetsStatus() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Provider accepts + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + // Fleet owner updates filter + bytes memory newFilter = new bytes(100); + vm.expectEmit(true, true, true, true); + emit SwarmFilterUpdated(swarmId, fleetOwner, 100); + + vm.prank(fleetOwner); + swarmRegistry.updateSwarmFilter(swarmId, newFilter); + + // Status should be reset to REGISTERED + (,,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.REGISTERED)); + } + + function test_updateSwarmFilter_changesFilterPointer() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + (,, address oldPointer,,,) = swarmRegistry.swarms(swarmId); + + bytes memory newFilter = new bytes(100); + vm.prank(fleetOwner); + swarmRegistry.updateSwarmFilter(swarmId, newFilter); + + (,, address newPointer,,,) = swarmRegistry.swarms(swarmId); + assertTrue(newPointer != oldPointer); + assertTrue(newPointer != address(0)); + } + + function test_RevertIf_updateSwarmFilter_swarmNotFound() public { + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector); + swarmRegistry.updateSwarmFilter(999, new bytes(50)); + } + + function test_RevertIf_updateSwarmFilter_notFleetOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, 
"url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryL1.NotUuidOwner.selector); + swarmRegistry.updateSwarmFilter(swarmId, new bytes(100)); + } + + function test_RevertIf_updateSwarmFilter_emptyFilter() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.InvalidFilterSize.selector); + swarmRegistry.updateSwarmFilter(swarmId, new bytes(0)); + } + + function test_RevertIf_updateSwarmFilter_filterTooLarge() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.InvalidFilterSize.selector); + swarmRegistry.updateSwarmFilter(swarmId, new bytes(24577)); + } + + // ============================== + // updateSwarmProvider + // ============================== + + function test_updateSwarmProvider_updatesProviderAndResetsStatus() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Provider accepts + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + // Fleet owner updates provider + vm.expectEmit(true, true, true, true); + emit SwarmProviderUpdated(swarmId, providerId1, providerId2); + + vm.prank(fleetOwner); + swarmRegistry.updateSwarmProvider(swarmId, 
providerId2); + + // Check new provider and status reset + (, uint256 newProviderId,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(newProviderId, providerId2); + assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.REGISTERED)); + } + + function test_RevertIf_updateSwarmProvider_swarmNotFound() public { + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector); + swarmRegistry.updateSwarmProvider(999, providerId); + } + + function test_RevertIf_updateSwarmProvider_notFleetOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryL1.NotUuidOwner.selector); + swarmRegistry.updateSwarmProvider(swarmId, providerId2); + } + + function test_RevertIf_updateSwarmProvider_providerDoesNotExist() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(fleetOwner); + // ERC721 reverts before our custom error is reached + vm.expectRevert(); + swarmRegistry.updateSwarmProvider(swarmId, 99999); + } + + // ============================== + // deleteSwarm + // ============================== + + function test_deleteSwarm_removesSwarmAndEmits() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.expectEmit(true, true, true, true); 
+ emit SwarmDeleted(swarmId, _getFleetUuid(fleetId), fleetOwner); + + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarmId); + + // Swarm should be zeroed + (,, address pointer,,,) = swarmRegistry.swarms(swarmId); + assertEq(pointer, address(0)); + } + + function test_deleteSwarm_removesFromUuidSwarms() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + + uint256 swarm1 = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + uint256 swarm2 = + _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Delete first swarm + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarm1); + + // Only swarm2 should remain in fleetSwarms + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), swarm2); + vm.expectRevert(); + swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1); // Should be out of bounds + } + + function test_deleteSwarm_swapAndPop() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + uint256 providerId3 = _registerProvider(providerOwner, "url3"); + + uint256 swarm1 = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + uint256 swarm2 = + _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + uint256 swarm3 = + _registerSwarm(fleetOwner, fleetId, providerId3, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Delete middle swarm + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarm2); + + // swarm3 should be swapped to index 1 + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), swarm1); + 
assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1), swarm3); + vm.expectRevert(); + swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 2); // Should be out of bounds + } + + function test_RevertIf_deleteSwarm_swarmNotFound() public { + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector); + swarmRegistry.deleteSwarm(999); + } + + function test_RevertIf_deleteSwarm_notFleetOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryL1.NotUuidOwner.selector); + swarmRegistry.deleteSwarm(swarmId); + } + + function test_deleteSwarm_afterUpdate() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Update then delete + vm.prank(fleetOwner); + swarmRegistry.updateSwarmFilter(swarmId, new bytes(100)); + + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarmId); + + (,, address pointer,,,) = swarmRegistry.swarms(swarmId); + assertEq(pointer, address(0)); + } + + function test_deleteSwarm_updatesSwarmIndexInUuid() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 p1 = _registerProvider(providerOwner, "url1"); + uint256 p2 = _registerProvider(providerOwner, "url2"); + uint256 p3 = _registerProvider(providerOwner, "url3"); + + uint256 s1 = _registerSwarm(fleetOwner, fleetId, p1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + uint256 s2 = _registerSwarm(fleetOwner, fleetId, p2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + uint256 s3 = _registerSwarm(fleetOwner, fleetId, p3, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Verify initial indices 
+ assertEq(swarmRegistry.swarmIndexInUuid(s1), 0); + assertEq(swarmRegistry.swarmIndexInUuid(s2), 1); + assertEq(swarmRegistry.swarmIndexInUuid(s3), 2); + + // Delete s1 — s3 should be swapped to index 0 + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(s1); + + assertEq(swarmRegistry.swarmIndexInUuid(s3), 0); + assertEq(swarmRegistry.swarmIndexInUuid(s2), 1); + assertEq(swarmRegistry.swarmIndexInUuid(s1), 0); // deleted, reset to 0 + } + + // ============================== + // isSwarmValid + // ============================== + + function test_isSwarmValid_bothValid() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); + assertTrue(fleetValid); + assertTrue(providerValid); + } + + function test_isSwarmValid_providerBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Burn provider + vm.prank(providerOwner); + providerContract.burn(providerId); + + (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); + assertTrue(fleetValid); + assertFalse(providerValid); + } + + function test_isSwarmValid_fleetBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Burn fleet + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + + (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); + assertFalse(fleetValid); + assertTrue(providerValid); + 
} + + function test_isSwarmValid_bothBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + vm.prank(providerOwner); + providerContract.burn(providerId); + + (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); + assertFalse(fleetValid); + assertFalse(providerValid); + } + + function test_RevertIf_isSwarmValid_swarmNotFound() public { + vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector); + swarmRegistry.isSwarmValid(999); + } + + // ============================== + // purgeOrphanedSwarm + // ============================== + + function test_purgeOrphanedSwarm_providerBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Burn provider + vm.prank(providerOwner); + providerContract.burn(providerId); + + // Anyone can purge + vm.expectEmit(true, true, true, true); + emit SwarmPurged(swarmId, _getFleetUuid(fleetId), caller); + + vm.prank(caller); + swarmRegistry.purgeOrphanedSwarm(swarmId); + + // Swarm should be zeroed + (,, address pointer,,,) = swarmRegistry.swarms(swarmId); + assertEq(pointer, address(0)); + } + + function test_purgeOrphanedSwarm_fleetBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Burn fleet + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + + vm.prank(caller); + swarmRegistry.purgeOrphanedSwarm(swarmId); + + (,, address pointer,,,) = 
swarmRegistry.swarms(swarmId); + assertEq(pointer, address(0)); + } + + function test_purgeOrphanedSwarm_removesFromUuidSwarms() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 p1 = _registerProvider(providerOwner, "url1"); + uint256 p2 = _registerProvider(providerOwner, "url2"); + + uint256 s1 = _registerSwarm(fleetOwner, fleetId, p1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + uint256 s2 = _registerSwarm(fleetOwner, fleetId, p2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Burn provider of s1 + vm.prank(providerOwner); + providerContract.burn(p1); + + vm.prank(caller); + swarmRegistry.purgeOrphanedSwarm(s1); + + // s2 should be swapped to index 0 + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), s2); + vm.expectRevert(); + swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1); + } + + function test_RevertIf_purgeOrphanedSwarm_swarmNotFound() public { + vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector); + swarmRegistry.purgeOrphanedSwarm(999); + } + + function test_RevertIf_purgeOrphanedSwarm_swarmNotOrphaned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.expectRevert(SwarmRegistryL1.SwarmNotOrphaned.selector); + swarmRegistry.purgeOrphanedSwarm(swarmId); + } + + // ============================== + // Orphan guards on accept/reject/checkMembership + // ============================== + + function test_RevertIf_acceptSwarm_orphaned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Burn provider + vm.prank(providerOwner); + providerContract.burn(providerId); + + vm.prank(providerOwner); + 
vm.expectRevert(SwarmRegistryL1.SwarmOrphaned.selector); + swarmRegistry.acceptSwarm(swarmId); + } + + function test_RevertIf_rejectSwarm_orphaned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Burn fleet + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + + vm.prank(providerOwner); + vm.expectRevert(SwarmRegistryL1.SwarmOrphaned.selector); + swarmRegistry.rejectSwarm(swarmId); + } + + function test_RevertIf_checkMembership_orphaned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + // Burn provider + vm.prank(providerOwner); + providerContract.burn(providerId); + + vm.expectRevert(SwarmRegistryL1.SwarmOrphaned.selector); + swarmRegistry.checkMembership(swarmId, keccak256("test")); + } + + function test_RevertIf_acceptSwarm_fleetBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + + vm.prank(providerOwner); + vm.expectRevert(SwarmRegistryL1.SwarmOrphaned.selector); + swarmRegistry.acceptSwarm(swarmId); + } + + function test_purge_thenAcceptReverts() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC); + + vm.prank(providerOwner); + providerContract.burn(providerId); + + vm.prank(caller); + 
swarmRegistry.purgeOrphanedSwarm(swarmId); + + // After purge, swarm no longer exists + vm.prank(providerOwner); + vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector); + swarmRegistry.acceptSwarm(swarmId); + } +} diff --git a/test/SwarmRegistryUniversal.t.sol b/test/SwarmRegistryUniversal.t.sol new file mode 100644 index 0000000..644a1a7 --- /dev/null +++ b/test/SwarmRegistryUniversal.t.sol @@ -0,0 +1,1167 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import "forge-std/Test.sol"; +import "../src/swarms/SwarmRegistryUniversal.sol"; +import "../src/swarms/FleetIdentity.sol"; +import "../src/swarms/ServiceProvider.sol"; +import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +contract MockBondTokenUniv is ERC20 { + constructor() ERC20("Mock Bond", "MBOND") {} + + function mint(address to, uint256 amount) external { + _mint(to, amount); + } +} + +contract SwarmRegistryUniversalTest is Test { + SwarmRegistryUniversal swarmRegistry; + FleetIdentity fleetContract; + ServiceProvider providerContract; + MockBondTokenUniv bondToken; + + address fleetOwner = address(0x1); + address providerOwner = address(0x2); + address caller = address(0x3); + + uint256 constant FLEET_BOND = 100 ether; + + // Region constants for fleet registration + uint16 constant US = 840; + uint16 constant ADMIN_CA = 6; // California + + event SwarmRegistered( + uint256 indexed swarmId, bytes16 indexed fleetUuid, uint256 indexed providerId, address owner, uint32 filterSize + ); + event SwarmStatusChanged(uint256 indexed swarmId, SwarmRegistryUniversal.SwarmStatus status); + event SwarmFilterUpdated(uint256 indexed swarmId, address indexed owner, uint32 newFilterSize); + event SwarmProviderUpdated(uint256 indexed swarmId, uint256 indexed oldProviderId, uint256 indexed newProviderId); + event SwarmDeleted(uint256 indexed swarmId, bytes16 indexed fleetUuid, address indexed owner); + event SwarmPurged(uint256 indexed swarmId, bytes16 indexed fleetUuid, address indexed 
purgedBy); + + function setUp() public { + bondToken = new MockBondTokenUniv(); + fleetContract = new FleetIdentity(address(bondToken), FLEET_BOND); + providerContract = new ServiceProvider(); + swarmRegistry = new SwarmRegistryUniversal(address(fleetContract), address(providerContract)); + + // Fund fleet owner and approve + bondToken.mint(fleetOwner, 1_000_000 ether); + vm.prank(fleetOwner); + bondToken.approve(address(fleetContract), type(uint256).max); + } + + // ============================== + // Helpers + // ============================== + + function _registerFleet(address owner, bytes memory seed) internal returns (uint256) { + vm.prank(owner); + return fleetContract.registerFleetLocal(bytes16(keccak256(seed)), US, ADMIN_CA, 0); + } + + function _getFleetUuid(uint256 fleetId) internal pure returns (bytes16) { + return bytes16(uint128(fleetId)); + } + + function _registerProvider(address owner, string memory url) internal returns (uint256) { + vm.prank(owner); + return providerContract.registerProvider(url); + } + + function _registerSwarm( + address owner, + uint256 fleetId, + uint256 providerId, + bytes memory filter, + uint8 fpSize, + SwarmRegistryUniversal.TagType tagType + ) internal returns (uint256) { + bytes16 fleetUuid = _getFleetUuid(fleetId); + vm.prank(owner); + return swarmRegistry.registerSwarm(fleetUuid, providerId, filter, fpSize, tagType); + } + + function getExpectedValues(bytes memory tagId, uint256 m, uint8 fpSize) + public + pure + returns (uint32 h1, uint32 h2, uint32 h3, uint256 fp) + { + bytes32 h = keccak256(tagId); + h1 = uint32(uint256(h)) % uint32(m); + h2 = uint32(uint256(h) >> 32) % uint32(m); + h3 = uint32(uint256(h) >> 64) % uint32(m); + uint256 fpMask = (1 << fpSize) - 1; + fp = (uint256(h) >> 96) & fpMask; + } + + function _write16Bit(bytes memory data, uint256 slotIndex, uint16 value) internal pure { + uint256 byteOffset = (slotIndex * 16) / 8; + data[byteOffset] = bytes1(uint8(value >> 8)); + data[byteOffset + 1] = 
bytes1(uint8(value)); + } + + function _write8Bit(bytes memory data, uint256 slotIndex, uint8 value) internal pure { + data[slotIndex] = bytes1(value); + } + + // ============================== + // Constructor + // ============================== + + function test_constructor_setsImmutables() public view { + assertEq(address(swarmRegistry.FLEET_CONTRACT()), address(fleetContract)); + assertEq(address(swarmRegistry.PROVIDER_CONTRACT()), address(providerContract)); + } + + function test_RevertIf_constructor_zeroFleetAddress() public { + vm.expectRevert(SwarmRegistryUniversal.InvalidSwarmData.selector); + new SwarmRegistryUniversal(address(0), address(providerContract)); + } + + function test_RevertIf_constructor_zeroProviderAddress() public { + vm.expectRevert(SwarmRegistryUniversal.InvalidSwarmData.selector); + new SwarmRegistryUniversal(address(fleetContract), address(0)); + } + + function test_RevertIf_constructor_bothZero() public { + vm.expectRevert(SwarmRegistryUniversal.InvalidSwarmData.selector); + new SwarmRegistryUniversal(address(0), address(0)); + } + + // ============================== + // registerSwarm — happy path + // ============================== + + function test_registerSwarm_basicFlow() public { + uint256 fleetId = _registerFleet(fleetOwner, "my-fleet"); + uint256 providerId = _registerProvider(providerOwner, "https://api.example.com"); + + uint256 swarmId = _registerSwarm( + fleetOwner, fleetId, providerId, new bytes(100), 16, SwarmRegistryUniversal.TagType.IBEACON_INCLUDES_MAC + ); + + // Swarm ID is deterministic hash of (fleetUuid, providerId, filter) + uint256 expectedId = swarmRegistry.computeSwarmId(_getFleetUuid(fleetId), providerId, new bytes(100)); + assertEq(swarmId, expectedId); + } + + function test_registerSwarm_storesMetadataCorrectly() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, 
providerId, new bytes(50), 12, SwarmRegistryUniversal.TagType.VENDOR_ID); + + ( + bytes16 storedFleetUuid, + uint256 storedProviderId, + uint32 storedFilterLen, + uint8 storedFpSize, + SwarmRegistryUniversal.TagType storedTagType, + SwarmRegistryUniversal.SwarmStatus storedStatus + ) = swarmRegistry.swarms(swarmId); + + assertEq(storedFleetUuid, _getFleetUuid(fleetId)); + assertEq(storedProviderId, providerId); + assertEq(storedFilterLen, 50); + assertEq(storedFpSize, 12); + assertEq(uint8(storedTagType), uint8(SwarmRegistryUniversal.TagType.VENDOR_ID)); + assertEq(uint8(storedStatus), uint8(SwarmRegistryUniversal.SwarmStatus.REGISTERED)); + } + + function test_registerSwarm_storesFilterData() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + bytes memory filter = new bytes(100); + // Write some non-zero data + filter[0] = 0xAB; + filter[50] = 0xCD; + filter[99] = 0xEF; + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filter, 16, SwarmRegistryUniversal.TagType.GENERIC); + + bytes memory storedFilter = swarmRegistry.getFilterData(swarmId); + assertEq(storedFilter.length, 100); + assertEq(uint8(storedFilter[0]), 0xAB); + assertEq(uint8(storedFilter[50]), 0xCD); + assertEq(uint8(storedFilter[99]), 0xEF); + } + + function test_registerSwarm_deterministicId() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + bytes memory filter = new bytes(32); + + uint256 expectedId = swarmRegistry.computeSwarmId(_getFleetUuid(fleetId), providerId, filter); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filter, 8, SwarmRegistryUniversal.TagType.GENERIC); + assertEq(swarmId, expectedId); + } + + function test_RevertIf_registerSwarm_duplicateSwarm() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + 
_registerSwarm(fleetOwner, fleetId, providerId, new bytes(32), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.SwarmAlreadyExists.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(32), 8, SwarmRegistryUniversal.TagType.GENERIC); + } + + function test_registerSwarm_emitsSwarmRegistered() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + bytes memory filter = new bytes(50); + uint256 expectedId = swarmRegistry.computeSwarmId(_getFleetUuid(fleetId), providerId, filter); + + vm.expectEmit(true, true, true, true); + emit SwarmRegistered(expectedId, _getFleetUuid(fleetId), providerId, fleetOwner, 50); + + _registerSwarm(fleetOwner, fleetId, providerId, filter, 16, SwarmRegistryUniversal.TagType.GENERIC); + } + + function test_registerSwarm_linksUuidSwarms() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + + uint256 s1 = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + uint256 s2 = + _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), s1); + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1), s2); + } + + function test_registerSwarm_allTagTypes() public { + uint256 fleetId1 = _registerFleet(fleetOwner, "f1"); + uint256 fleetId2 = _registerFleet(fleetOwner, "f2"); + uint256 fleetId3 = _registerFleet(fleetOwner, "f3"); + uint256 fleetId4 = _registerFleet(fleetOwner, "f4"); + uint256 providerId = _registerProvider(providerOwner, "url"); + + uint256 s1 = _registerSwarm( + fleetOwner, fleetId1, providerId, new bytes(32), 8, 
SwarmRegistryUniversal.TagType.IBEACON_PAYLOAD_ONLY + ); + uint256 s2 = _registerSwarm( + fleetOwner, fleetId2, providerId, new bytes(32), 8, SwarmRegistryUniversal.TagType.IBEACON_INCLUDES_MAC + ); + uint256 s3 = + _registerSwarm(fleetOwner, fleetId3, providerId, new bytes(32), 8, SwarmRegistryUniversal.TagType.VENDOR_ID); + uint256 s4 = + _registerSwarm(fleetOwner, fleetId4, providerId, new bytes(32), 8, SwarmRegistryUniversal.TagType.GENERIC); + + (,,,, SwarmRegistryUniversal.TagType t1,) = swarmRegistry.swarms(s1); + (,,,, SwarmRegistryUniversal.TagType t2,) = swarmRegistry.swarms(s2); + (,,,, SwarmRegistryUniversal.TagType t3,) = swarmRegistry.swarms(s3); + (,,,, SwarmRegistryUniversal.TagType t4,) = swarmRegistry.swarms(s4); + + assertEq(uint8(t1), uint8(SwarmRegistryUniversal.TagType.IBEACON_PAYLOAD_ONLY)); + assertEq(uint8(t2), uint8(SwarmRegistryUniversal.TagType.IBEACON_INCLUDES_MAC)); + assertEq(uint8(t3), uint8(SwarmRegistryUniversal.TagType.VENDOR_ID)); + assertEq(uint8(t4), uint8(SwarmRegistryUniversal.TagType.GENERIC)); + } + + // ============================== + // registerSwarm — reverts + // ============================== + + function test_RevertIf_registerSwarm_notFleetOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "my-fleet"); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryUniversal.NotUuidOwner.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), 1, new bytes(10), 16, SwarmRegistryUniversal.TagType.GENERIC); + } + + function test_RevertIf_registerSwarm_fingerprintSizeZero() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.InvalidFingerprintSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(32), 0, SwarmRegistryUniversal.TagType.GENERIC); + } + + function test_RevertIf_registerSwarm_fingerprintSizeExceedsMax() public { + 
uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.InvalidFingerprintSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(32), 17, SwarmRegistryUniversal.TagType.GENERIC); + } + + function test_RevertIf_registerSwarm_emptyFilter() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.InvalidFilterSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(0), 8, SwarmRegistryUniversal.TagType.GENERIC); + } + + function test_RevertIf_registerSwarm_filterTooLarge() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.FilterTooLarge.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(24577), 8, SwarmRegistryUniversal.TagType.GENERIC); + } + + function test_registerSwarm_maxFingerprintSize() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(100), 16, SwarmRegistryUniversal.TagType.GENERIC); + assertTrue(swarmId != 0); + } + + function test_registerSwarm_maxFilterSize() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + // Exactly MAX_FILTER_SIZE (24576) should succeed + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(24576), 8, SwarmRegistryUniversal.TagType.GENERIC); + assertTrue(swarmId != 0); + } + + function test_registerSwarm_minFilterSize() public { + uint256 fleetId = 
_registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + // 1 byte filter + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(1), 8, SwarmRegistryUniversal.TagType.GENERIC); + assertTrue(swarmId != 0); + } + + // ============================== + // acceptSwarm / rejectSwarm + // ============================== + + function test_acceptSwarm_setsStatusAndEmits() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.expectEmit(true, true, true, true); + emit SwarmStatusChanged(swarmId, SwarmRegistryUniversal.SwarmStatus.ACCEPTED); + + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + (,,,,, SwarmRegistryUniversal.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryUniversal.SwarmStatus.ACCEPTED)); + } + + function test_rejectSwarm_setsStatusAndEmits() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.expectEmit(true, true, true, true); + emit SwarmStatusChanged(swarmId, SwarmRegistryUniversal.SwarmStatus.REJECTED); + + vm.prank(providerOwner); + swarmRegistry.rejectSwarm(swarmId); + + (,,,,, SwarmRegistryUniversal.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryUniversal.SwarmStatus.REJECTED)); + } + + function test_RevertIf_acceptSwarm_notProviderOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, 
SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryUniversal.NotProviderOwner.selector); + swarmRegistry.acceptSwarm(swarmId); + } + + function test_RevertIf_rejectSwarm_notProviderOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); // fleet owner != provider owner + vm.expectRevert(SwarmRegistryUniversal.NotProviderOwner.selector); + swarmRegistry.rejectSwarm(swarmId); + } + + function test_RevertIf_acceptSwarm_fleetOwnerNotProvider() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.NotProviderOwner.selector); + swarmRegistry.acceptSwarm(swarmId); + } + + function test_acceptSwarm_afterReject() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(providerOwner); + swarmRegistry.rejectSwarm(swarmId); + + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + (,,,,, SwarmRegistryUniversal.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryUniversal.SwarmStatus.ACCEPTED)); + } + + function test_rejectSwarm_afterAccept() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, 
SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + vm.prank(providerOwner); + swarmRegistry.rejectSwarm(swarmId); + + (,,,,, SwarmRegistryUniversal.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryUniversal.SwarmStatus.REJECTED)); + } + + // ============================== + // checkMembership — XOR logic + // ============================== + + function test_checkMembership_XORLogic16Bit() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "u1"); + + bytes memory tagId = hex"1122334455"; + uint8 fpSize = 16; + uint256 dataLen = 100; + uint256 m = (dataLen * 8) / fpSize; // 50 slots + + (uint32 h1, uint32 h2, uint32 h3, uint256 expectedFp) = getExpectedValues(tagId, m, fpSize); + + if (h1 == h2 || h1 == h3 || h2 == h3) { + return; + } + + bytes memory filter = new bytes(dataLen); + _write16Bit(filter, h1, uint16(expectedFp)); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filter, fpSize, SwarmRegistryUniversal.TagType.GENERIC); + + bytes32 tagHash = keccak256(tagId); + assertTrue(swarmRegistry.checkMembership(swarmId, tagHash), "Tag should be member"); + + bytes32 fakeHash = keccak256("not-a-tag"); + assertFalse(swarmRegistry.checkMembership(swarmId, fakeHash), "Fake tag should not be member"); + } + + function test_checkMembership_XORLogic8Bit() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "u1"); + + bytes memory tagId = hex"AABBCCDD"; + uint8 fpSize = 8; + uint256 dataLen = 80; + uint256 m = (dataLen * 8) / fpSize; // 80 slots + + (uint32 h1, uint32 h2, uint32 h3, uint256 expectedFp) = getExpectedValues(tagId, m, fpSize); + + if (h1 == h2 || h1 == h3 || h2 == h3) { + return; + } + + bytes memory filter = new bytes(dataLen); + _write8Bit(filter, h1, uint8(expectedFp)); + + uint256 swarmId = + 
_registerSwarm(fleetOwner, fleetId, providerId, filter, fpSize, SwarmRegistryUniversal.TagType.GENERIC); + + assertTrue(swarmRegistry.checkMembership(swarmId, keccak256(tagId)), "8-bit valid tag should pass"); + assertFalse(swarmRegistry.checkMembership(swarmId, keccak256(hex"FFFFFF")), "8-bit invalid tag should fail"); + } + + function test_RevertIf_checkMembership_swarmNotFound() public { + vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector); + swarmRegistry.checkMembership(999, keccak256("anything")); + } + + function test_checkMembership_allZeroFilter_returnsConsistent() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "u1"); + + // All-zero filter: f1^f2^f3 = 0^0^0 = 0 + bytes memory filter = new bytes(64); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filter, 16, SwarmRegistryUniversal.TagType.GENERIC); + + // Should not revert regardless of result + swarmRegistry.checkMembership(swarmId, keccak256("test1")); + swarmRegistry.checkMembership(swarmId, keccak256("test2")); + } + + // ============================== + // getFilterData + // ============================== + + function test_getFilterData_returnsCorrectData() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + bytes memory filter = new bytes(100); + filter[0] = 0xFF; + filter[99] = 0x01; + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filter, 16, SwarmRegistryUniversal.TagType.GENERIC); + + bytes memory stored = swarmRegistry.getFilterData(swarmId); + assertEq(stored.length, 100); + assertEq(uint8(stored[0]), 0xFF); + assertEq(uint8(stored[99]), 0x01); + } + + function test_RevertIf_getFilterData_swarmNotFound() public { + vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector); + swarmRegistry.getFilterData(999); + } + + // ============================== + // Multiple swarms per fleet + 
// ============================== + + function test_multipleSwarms_sameFleet() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + uint256 providerId3 = _registerProvider(providerOwner, "url3"); + + uint256 s1 = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(32), 8, SwarmRegistryUniversal.TagType.GENERIC); + uint256 s2 = _registerSwarm( + fleetOwner, fleetId, providerId2, new bytes(64), 16, SwarmRegistryUniversal.TagType.VENDOR_ID + ); + uint256 s3 = _registerSwarm( + fleetOwner, fleetId, providerId3, new bytes(50), 12, SwarmRegistryUniversal.TagType.IBEACON_PAYLOAD_ONLY + ); + + // IDs are distinct hashes + assertTrue(s1 != s2 && s2 != s3 && s1 != s3); + + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), s1); + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1), s2); + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 2), s3); + } + + // ============================== + // Constants + // ============================== + + function test_constants() public view { + assertEq(swarmRegistry.MAX_FINGERPRINT_SIZE(), 16); + assertEq(swarmRegistry.MAX_FILTER_SIZE(), 24576); + } + + // ============================== + // Fuzz + // ============================== + + function testFuzz_registerSwarm_validFingerprintSizes(uint8 fpSize) public { + fpSize = uint8(bound(fpSize, 1, 16)); + + uint256 fleetId = _registerFleet(fleetOwner, abi.encodePacked("fleet-", fpSize)); + uint256 providerId = _registerProvider(providerOwner, string(abi.encodePacked("url-", fpSize))); + + uint256 swarmId = _registerSwarm( + fleetOwner, fleetId, providerId, new bytes(64), fpSize, SwarmRegistryUniversal.TagType.GENERIC + ); + + (,,, uint8 storedFp,,) = swarmRegistry.swarms(swarmId); + assertEq(storedFp, fpSize); + } + + function testFuzz_registerSwarm_invalidFingerprintSizes(uint8 fpSize) public { + 
vm.assume(fpSize == 0 || fpSize > 16); + + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.InvalidFingerprintSize.selector); + swarmRegistry.registerSwarm(_getFleetUuid(fleetId), providerId, new bytes(32), fpSize, SwarmRegistryUniversal.TagType.GENERIC); + } + + function testFuzz_registerSwarm_filterSizeRange(uint256 size) public { + size = bound(size, 1, 24576); + + uint256 fleetId = _registerFleet(fleetOwner, abi.encodePacked("f-", size)); + uint256 providerId = _registerProvider(providerOwner, string(abi.encodePacked("url-", size))); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(size), 8, SwarmRegistryUniversal.TagType.GENERIC); + + (,, uint32 storedLen,,,) = swarmRegistry.swarms(swarmId); + assertEq(storedLen, uint32(size)); + } + + // ============================== + // updateSwarmFilter + // ============================== + + function test_updateSwarmFilter_updatesFilterAndResetsStatus() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + // Provider accepts + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + // Fleet owner updates filter + bytes memory newFilter = new bytes(100); + for (uint256 i = 0; i < 100; i++) { + newFilter[i] = bytes1(uint8(i % 256)); + } + + vm.expectEmit(true, true, true, true); + emit SwarmFilterUpdated(swarmId, fleetOwner, 100); + + vm.prank(fleetOwner); + swarmRegistry.updateSwarmFilter(swarmId, newFilter); + + // Status should be reset to REGISTERED + (,, uint32 filterLength,,, SwarmRegistryUniversal.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(uint8(status), uint8(SwarmRegistryUniversal.SwarmStatus.REGISTERED)); + 
assertEq(filterLength, 100); + } + + function test_updateSwarmFilter_changesFilterLength() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + (,, uint32 oldLen,,,) = swarmRegistry.swarms(swarmId); + assertEq(oldLen, 50); + + bytes memory newFilter = new bytes(100); + vm.prank(fleetOwner); + swarmRegistry.updateSwarmFilter(swarmId, newFilter); + + (,, uint32 newLen,,,) = swarmRegistry.swarms(swarmId); + assertEq(newLen, 100); + } + + function test_RevertIf_updateSwarmFilter_swarmNotFound() public { + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector); + swarmRegistry.updateSwarmFilter(999, new bytes(50)); + } + + function test_RevertIf_updateSwarmFilter_notFleetOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryUniversal.NotUuidOwner.selector); + swarmRegistry.updateSwarmFilter(swarmId, new bytes(100)); + } + + function test_RevertIf_updateSwarmFilter_emptyFilter() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.InvalidFilterSize.selector); + swarmRegistry.updateSwarmFilter(swarmId, new bytes(0)); + } + + function test_RevertIf_updateSwarmFilter_filterTooLarge() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + 
uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.FilterTooLarge.selector); + swarmRegistry.updateSwarmFilter(swarmId, new bytes(24577)); + } + + // ============================== + // updateSwarmProvider + // ============================== + + function test_updateSwarmProvider_updatesProviderAndResetsStatus() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + // Provider accepts + vm.prank(providerOwner); + swarmRegistry.acceptSwarm(swarmId); + + // Fleet owner updates provider + vm.expectEmit(true, true, true, true); + emit SwarmProviderUpdated(swarmId, providerId1, providerId2); + + vm.prank(fleetOwner); + swarmRegistry.updateSwarmProvider(swarmId, providerId2); + + // Check new provider and status reset + (, uint256 newProviderId,,,, SwarmRegistryUniversal.SwarmStatus status) = swarmRegistry.swarms(swarmId); + assertEq(newProviderId, providerId2); + assertEq(uint8(status), uint8(SwarmRegistryUniversal.SwarmStatus.REGISTERED)); + } + + function test_RevertIf_updateSwarmProvider_swarmNotFound() public { + uint256 providerId = _registerProvider(providerOwner, "url1"); + + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector); + swarmRegistry.updateSwarmProvider(999, providerId); + } + + function test_RevertIf_updateSwarmProvider_notFleetOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId1, new 
bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryUniversal.NotUuidOwner.selector); + swarmRegistry.updateSwarmProvider(swarmId, providerId2); + } + + function test_RevertIf_updateSwarmProvider_providerDoesNotExist() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + // ERC721 reverts before our custom error is reached + vm.expectRevert(); + swarmRegistry.updateSwarmProvider(swarmId, 99999); + } + + // ============================== + // deleteSwarm + // ============================== + + function test_deleteSwarm_removesSwarmAndEmits() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.expectEmit(true, true, true, true); + emit SwarmDeleted(swarmId, _getFleetUuid(fleetId), fleetOwner); + + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarmId); + + // Swarm should be zeroed + (bytes16 fleetUuidAfter,, uint32 filterLength,,,) = swarmRegistry.swarms(swarmId); + assertEq(fleetUuidAfter, bytes16(0)); + assertEq(filterLength, 0); + } + + function test_deleteSwarm_removesFromUuidSwarms() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + + uint256 swarm1 = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + uint256 swarm2 = + _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + // Delete first swarm + 
vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarm1); + + // Only swarm2 should remain in uuidSwarms + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), swarm2); + vm.expectRevert(); + swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1); // Should be out of bounds + } + + function test_deleteSwarm_swapAndPop() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId1 = _registerProvider(providerOwner, "url1"); + uint256 providerId2 = _registerProvider(providerOwner, "url2"); + uint256 providerId3 = _registerProvider(providerOwner, "url3"); + + uint256 swarm1 = + _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + uint256 swarm2 = + _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + uint256 swarm3 = + _registerSwarm(fleetOwner, fleetId, providerId3, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + // Delete middle swarm + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarm2); + + // swarm3 should be swapped to index 1 + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), swarm1); + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1), swarm3); + vm.expectRevert(); + swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 2); // Should be out of bounds + } + + function test_deleteSwarm_clearsFilterData() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + bytes memory filterData = new bytes(50); + for (uint256 i = 0; i < 50; i++) { + filterData[i] = bytes1(uint8(i)); + } + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filterData, 8, SwarmRegistryUniversal.TagType.GENERIC); + + // Delete swarm + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarmId); + + // filterLength should be cleared + (,, uint32 filterLength,,,) = swarmRegistry.swarms(swarmId); + assertEq(filterLength, 
0); + } + + function test_RevertIf_deleteSwarm_swarmNotFound() public { + vm.prank(fleetOwner); + vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector); + swarmRegistry.deleteSwarm(999); + } + + function test_RevertIf_deleteSwarm_notFleetOwner() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(caller); + vm.expectRevert(SwarmRegistryUniversal.NotUuidOwner.selector); + swarmRegistry.deleteSwarm(swarmId); + } + + function test_deleteSwarm_afterUpdate() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + // Update then delete + vm.prank(fleetOwner); + swarmRegistry.updateSwarmFilter(swarmId, new bytes(100)); + + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(swarmId); + + (bytes16 fleetUuidAfter,,,,,) = swarmRegistry.swarms(swarmId); + assertEq(fleetUuidAfter, bytes16(0)); + } + + function test_deleteSwarm_updatesSwarmIndexInUuid() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 p1 = _registerProvider(providerOwner, "url1"); + uint256 p2 = _registerProvider(providerOwner, "url2"); + uint256 p3 = _registerProvider(providerOwner, "url3"); + + uint256 s1 = _registerSwarm(fleetOwner, fleetId, p1, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + uint256 s2 = _registerSwarm(fleetOwner, fleetId, p2, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + uint256 s3 = _registerSwarm(fleetOwner, fleetId, p3, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + // Verify initial indices + assertEq(swarmRegistry.swarmIndexInUuid(s1), 0); + assertEq(swarmRegistry.swarmIndexInUuid(s2), 1); + 
assertEq(swarmRegistry.swarmIndexInUuid(s3), 2); + + // Delete s1 — s3 should be swapped to index 0 + vm.prank(fleetOwner); + swarmRegistry.deleteSwarm(s1); + + assertEq(swarmRegistry.swarmIndexInUuid(s3), 0); + assertEq(swarmRegistry.swarmIndexInUuid(s2), 1); + assertEq(swarmRegistry.swarmIndexInUuid(s1), 0); // deleted, reset to 0 + } + + // ============================== + // isSwarmValid + // ============================== + + function test_isSwarmValid_bothValid() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); + assertTrue(fleetValid); + assertTrue(providerValid); + } + + function test_isSwarmValid_providerBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(providerOwner); + providerContract.burn(providerId); + + (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); + assertTrue(fleetValid); + assertFalse(providerValid); + } + + function test_isSwarmValid_fleetBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + + (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); + assertFalse(fleetValid); + assertTrue(providerValid); + } + + function test_isSwarmValid_bothBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 
providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + vm.prank(providerOwner); + providerContract.burn(providerId); + + (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId); + assertFalse(fleetValid); + assertFalse(providerValid); + } + + function test_RevertIf_isSwarmValid_swarmNotFound() public { + vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector); + swarmRegistry.isSwarmValid(999); + } + + // ============================== + // purgeOrphanedSwarm + // ============================== + + function test_purgeOrphanedSwarm_providerBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(providerOwner); + providerContract.burn(providerId); + + vm.expectEmit(true, true, true, true); + emit SwarmPurged(swarmId, _getFleetUuid(fleetId), caller); + + vm.prank(caller); + swarmRegistry.purgeOrphanedSwarm(swarmId); + + (,, uint32 filterLength,,,) = swarmRegistry.swarms(swarmId); + assertEq(filterLength, 0); + } + + function test_purgeOrphanedSwarm_fleetBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + + vm.prank(caller); + swarmRegistry.purgeOrphanedSwarm(swarmId); + + (bytes16 fUuid,, uint32 filterLength,,,) = swarmRegistry.swarms(swarmId); + assertEq(fUuid, bytes16(0)); + assertEq(filterLength, 0); + } + + function test_purgeOrphanedSwarm_removesFromUuidSwarms() 
public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 p1 = _registerProvider(providerOwner, "url1"); + uint256 p2 = _registerProvider(providerOwner, "url2"); + + uint256 s1 = _registerSwarm(fleetOwner, fleetId, p1, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + uint256 s2 = _registerSwarm(fleetOwner, fleetId, p2, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + // Burn provider of s1 + vm.prank(providerOwner); + providerContract.burn(p1); + + vm.prank(caller); + swarmRegistry.purgeOrphanedSwarm(s1); + + // s2 should be swapped to index 0 + assertEq(swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 0), s2); + vm.expectRevert(); + swarmRegistry.uuidSwarms(_getFleetUuid(fleetId), 1); + } + + function test_purgeOrphanedSwarm_clearsFilterData() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + + bytes memory filter = new bytes(50); + for (uint256 i = 0; i < 50; i++) { + filter[i] = bytes1(uint8(i)); + } + + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, filter, 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(providerOwner); + providerContract.burn(providerId); + + vm.prank(caller); + swarmRegistry.purgeOrphanedSwarm(swarmId); + + // filterLength should be cleared + (,, uint32 filterLength,,,) = swarmRegistry.swarms(swarmId); + assertEq(filterLength, 0); + } + + function test_RevertIf_purgeOrphanedSwarm_swarmNotFound() public { + vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector); + swarmRegistry.purgeOrphanedSwarm(999); + } + + function test_RevertIf_purgeOrphanedSwarm_swarmNotOrphaned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + 
vm.expectRevert(SwarmRegistryUniversal.SwarmNotOrphaned.selector); + swarmRegistry.purgeOrphanedSwarm(swarmId); + } + + // ============================== + // Orphan guards on accept/reject/checkMembership + // ============================== + + function test_RevertIf_acceptSwarm_orphaned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(providerOwner); + providerContract.burn(providerId); + + vm.prank(providerOwner); + vm.expectRevert(SwarmRegistryUniversal.SwarmOrphaned.selector); + swarmRegistry.acceptSwarm(swarmId); + } + + function test_RevertIf_rejectSwarm_orphaned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + + vm.prank(providerOwner); + vm.expectRevert(SwarmRegistryUniversal.SwarmOrphaned.selector); + swarmRegistry.rejectSwarm(swarmId); + } + + function test_RevertIf_checkMembership_orphaned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(providerOwner); + providerContract.burn(providerId); + + vm.expectRevert(SwarmRegistryUniversal.SwarmOrphaned.selector); + swarmRegistry.checkMembership(swarmId, keccak256("test")); + } + + function test_RevertIf_acceptSwarm_fleetBurned() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, 
fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(fleetOwner); + fleetContract.burn(fleetId); + + vm.prank(providerOwner); + vm.expectRevert(SwarmRegistryUniversal.SwarmOrphaned.selector); + swarmRegistry.acceptSwarm(swarmId); + } + + function test_purge_thenAcceptReverts() public { + uint256 fleetId = _registerFleet(fleetOwner, "f1"); + uint256 providerId = _registerProvider(providerOwner, "url1"); + uint256 swarmId = + _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC); + + vm.prank(providerOwner); + providerContract.burn(providerId); + + vm.prank(caller); + swarmRegistry.purgeOrphanedSwarm(swarmId); + + // After purge, swarm no longer exists + vm.prank(providerOwner); + vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector); + swarmRegistry.acceptSwarm(swarmId); + } +} diff --git a/test/contentsign/BaseContentSign.t.sol b/test/contentsign/BaseContentSign.t.sol index b52438b..ef5a538 100644 --- a/test/contentsign/BaseContentSign.t.sol +++ b/test/contentsign/BaseContentSign.t.sol @@ -2,7 +2,7 @@ pragma solidity ^0.8.20; -import {Test, console} from "forge-std/Test.sol"; +import {Test} from "forge-std/Test.sol"; import {BaseContentSign} from "../../src/contentsign/BaseContentSign.sol"; contract MockContentSign is BaseContentSign { diff --git a/test/contentsign/PaymentMiddleware.t.sol b/test/contentsign/PaymentMiddleware.t.sol index 3d5c24b..5c7bb6e 100644 --- a/test/contentsign/PaymentMiddleware.t.sol +++ b/test/contentsign/PaymentMiddleware.t.sol @@ -2,14 +2,13 @@ pragma solidity ^0.8.20; -import {Test, console} from "forge-std/Test.sol"; +import {Test} from "forge-std/Test.sol"; import {BaseContentSign} from "../../src/contentsign/BaseContentSign.sol"; import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; import {IERC20Errors} from "@openzeppelin/contracts/interfaces/draft-IERC6093.sol"; import {ERC721} from 
"@openzeppelin/contracts/token/ERC721/ERC721.sol"; import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol"; import {PaymentMiddleware} from "../../src/contentsign/PaymentMiddleware.sol"; -import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; contract MockToken is ERC20 { constructor() ERC20("Mock Token", "MTK") {} diff --git a/typescript b/typescript new file mode 100644 index 0000000..00563f8 --- /dev/null +++ b/typescript @@ -0,0 +1,3 @@ +Script started on Fri Feb 13 12:41:51 2026 +% alex@Alexs-MacBook-Pro-2 rollup % [?2004h[?2004l +% alex@Alexs-MacBook-Pro-2 rollup % [?2004h \ No newline at end of file