@@ -488,6 +488,7 @@ pub fn handle_audit_challenge(
 mod tests {
     use super::*;
     use crate::replication::protocol::compute_audit_digest;
+    use crate::replication::types::NeighborSyncState;
     use crate::storage::LmdbStorageConfig;
     use tempfile::TempDir;

@@ -980,4 +981,289 @@ mod tests {
980981 "Different record bytes must produce different digests"
981982 ) ;
982983 }
+
+    // -- Scenario 29: Audit start gate ------------------------------------------
+
+    /// Scenario 29: `handle_audit_challenge` returns `Bootstrapping` when the
+    /// node is still bootstrapping: audit digests are never computed, and no
+    /// `AuditFailure` evidence is emitted by the caller.
+    ///
+    /// This is the responder-side gate. The challenger-side gate is enforced
+    /// by `check_bootstrap_drained()` in the engine loop (tested in
+    /// `bootstrap.rs`); this test confirms the complementary responder behavior.
+    #[tokio::test]
+    async fn scenario_29_audit_start_gate_during_bootstrap() {
+        let (storage, _temp) = create_test_storage().await;
+
+        // Store data so there *would* be work to audit.
+        let content = b"should not be audited during bootstrap";
+        let addr = LmdbStorage::compute_address(content);
+        storage.put(&addr, content).await.expect("put");
+
+        let challenge = make_challenge(2900, [0x29; 32], [0x29; 32], vec![addr]);
+
+        // Responder is bootstrapping → Bootstrapping response, NOT Digests.
+        let response = handle_audit_challenge(&challenge, &storage, true);
+        assert!(
+            matches!(
+                response,
+                AuditResponse::Bootstrapping { challenge_id: 2900 }
+            ),
+            "bootstrapping node must not compute digests: audit start gate"
+        );
+
+        // Responder is NOT bootstrapping → normal Digests.
+        let response = handle_audit_challenge(&challenge, &storage, false);
+        assert!(
+            matches!(response, AuditResponse::Digests { .. }),
+            "drained node should compute digests normally"
+        );
+    }
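+
+    // A minimal sketch of the responder-side gate exercised above, inferred
+    // only from how these tests call the API. The parameter names and the
+    // `challenge_id` field on the challenge struct are assumptions, not this
+    // crate's literal definitions:
+    //
+    //     pub fn handle_audit_challenge(
+    //         challenge: &AuditChallenge,
+    //         storage: &LmdbStorage,
+    //         is_bootstrapping: bool,
+    //     ) -> AuditResponse {
+    //         if is_bootstrapping {
+    //             // Gate: never touch storage, never compute a digest.
+    //             return AuditResponse::Bootstrapping {
+    //                 challenge_id: challenge.challenge_id,
+    //             };
+    //         }
+    //         // ...compute one digest per challenged key (see scenario 32)...
+    //     }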
+
+    // -- Scenario 30: Audit peer selection from sampled keys --------------------
+
+    /// Scenario 30: Key sampling respects `audit_batch_size`, and
+    /// `RepairOpportunity` filtering excludes never-synced peers.
+    ///
+    /// A full `audit_tick` requires a live network. This test verifies the two
+    /// deterministic sub-steps the function relies on:
+    /// (a) `audit_batch_size.min(all_keys.len())` caps the sample count.
+    /// (b) `PeerSyncRecord::has_repair_opportunity` gates peer eligibility.
+    #[test]
+    fn scenario_30_audit_peer_selection_from_sampled_keys() {
+        let config = ReplicationConfig::default(); // audit_batch_size = 8
+
+        // (a) Sample count is capped at audit_batch_size.
+        let many_keys = 100usize;
+        assert_eq!(
+            config.audit_batch_size.min(many_keys),
+            config.audit_batch_size,
+            "sample count should be capped at audit_batch_size when local store is larger"
+        );
+
+        let few_keys = 3usize;
+        assert_eq!(
+            config.audit_batch_size.min(few_keys),
+            few_keys,
+            "sample count should equal key count when store is smaller than batch size"
+        );
+
+        // (b) Peer eligibility via RepairOpportunity.
+        // Never synced → not eligible.
+        let never = PeerSyncRecord {
+            last_sync: None,
+            cycles_since_sync: 10,
+        };
+        assert!(!never.has_repair_opportunity());
+
+        // Synced but zero subsequent cycles → not eligible.
+        let too_soon = PeerSyncRecord {
+            last_sync: Some(Instant::now()),
+            cycles_since_sync: 0,
+        };
+        assert!(!too_soon.has_repair_opportunity());
+
+        // Synced with ≥1 subsequent cycle → eligible.
+        let eligible = PeerSyncRecord {
+            last_sync: Some(Instant::now()),
+            cycles_since_sync: 2,
+        };
+        assert!(eligible.has_repair_opportunity());
+    }
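+
+    // The three asserts above pin down the eligibility predicate. This is a
+    // sketch consistent with that asserted behavior; the real method lives
+    // elsewhere in the crate, so the body here is an inference, not a copy:
+    //
+    //     impl PeerSyncRecord {
+    //         fn has_repair_opportunity(&self) -> bool {
+    //             // Eligible only after at least one sync AND at least one
+    //             // full cycle since that sync.
+    //             self.last_sync.is_some() && self.cycles_since_sync >= 1
+    //         }
+    //     }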
+
+    // -- Scenario 32: Dynamic challenge size ------------------------------------
+
+    /// Scenario 32: The challenge key count equals `|PeerKeySet(challenged_peer)|`,
+    /// which is dynamic per round. If no eligible peer remains after filtering,
+    /// the tick is idle.
+    ///
+    /// Verified via `handle_audit_challenge`: the response digest count always
+    /// equals the number of keys in the challenge.
+    #[tokio::test]
+    async fn scenario_32_dynamic_challenge_size() {
+        let (storage, _temp) = create_test_storage().await;
+
+        // Store varying numbers of chunks.
+        let mut addrs = Vec::new();
+        for i in 0u8..5 {
+            let content = format!("dynamic challenge key {i}");
+            let addr = LmdbStorage::compute_address(content.as_bytes());
+            storage.put(&addr, content.as_bytes()).await.expect("put");
+            addrs.push(addr);
+        }
+
+        let nonce = [0x32; 32];
+        let peer_id = [0x32; 32];
+
+        // Challenge with 1 key.
+        let challenge1 = make_challenge(3201, nonce, peer_id, vec![addrs[0]]);
+        let AuditResponse::Digests { digests, .. } =
+            handle_audit_challenge(&challenge1, &storage, false)
+        else {
+            panic!("expected Digests response");
+        };
+        assert_eq!(digests.len(), 1, "|PeerKeySet| = 1 → 1 digest");
+
+        // Challenge with 3 keys.
+        let challenge3 = make_challenge(3203, nonce, peer_id, addrs[0..3].to_vec());
+        let AuditResponse::Digests { digests, .. } =
+            handle_audit_challenge(&challenge3, &storage, false)
+        else {
+            panic!("expected Digests response");
+        };
+        assert_eq!(digests.len(), 3, "|PeerKeySet| = 3 → 3 digests");
+
+        // Challenge with all 5 keys.
+        let challenge5 = make_challenge(3205, nonce, peer_id, addrs.clone());
+        let AuditResponse::Digests { digests, .. } =
+            handle_audit_challenge(&challenge5, &storage, false)
+        else {
+            panic!("expected Digests response");
+        };
+        assert_eq!(digests.len(), 5, "|PeerKeySet| = 5 → 5 digests");
+
+        // Challenge with 0 keys (idle equivalent: no work).
+        let challenge0 = make_challenge(3200, nonce, peer_id, vec![]);
+        let AuditResponse::Digests { digests, .. } =
+            handle_audit_challenge(&challenge0, &storage, false)
+        else {
+            panic!("expected Digests response");
+        };
+        assert!(digests.is_empty(), "|PeerKeySet| = 0 → 0 digests (idle)");
+    }
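+
+    // Sketch of the size invariant these assertions rely on. `challenge.keys`
+    // is an assumed field name, and the hypothetical `lookup` closure stands
+    // in for the storage read; `ABSENT_KEY_DIGEST` marks keys the responder
+    // does not hold (scenario 53 exercises that branch):
+    //
+    //     let digests: Vec<_> = challenge
+    //         .keys
+    //         .iter()
+    //         .map(|key| match lookup(key) {
+    //             Some(bytes) => compute_audit_digest(&nonce, &peer_id, key, &bytes),
+    //             None => ABSENT_KEY_DIGEST,
+    //         })
+    //         .collect();
+    //     // One digest slot per challenged key, so |digests| == |PeerKeySet|.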
+
+    // -- Scenario 47: Bootstrap claim grace period (audit) ----------------------
+
+    /// Scenario 47: The challenged peer responds with a bootstrapping claim
+    /// during an audit. `handle_audit_challenge` returns `Bootstrapping`; the
+    /// caller records `BootstrapClaimFirstSeen`. No `AuditFailure` evidence is
+    /// emitted.
+    #[tokio::test]
+    async fn scenario_47_bootstrap_claim_grace_period_audit() {
+        let (storage, _temp) = create_test_storage().await;
+
+        // Store data so there is an auditable key.
+        let content = b"bootstrap grace test";
+        let addr = LmdbStorage::compute_address(content);
+        storage.put(&addr, content).await.expect("put");
+
+        let challenge = make_challenge(4700, [0x47; 32], [0x47; 32], vec![addr]);
+
+        // Bootstrapping peer → Bootstrapping response (grace period starts).
+        let response = handle_audit_challenge(&challenge, &storage, true);
+        let challenge_id = match response {
+            AuditResponse::Bootstrapping { challenge_id } => challenge_id,
+            AuditResponse::Digests { .. } => {
+                panic!("Expected Bootstrapping response during grace period")
+            }
+        };
+        assert_eq!(challenge_id, 4700);
+
+        // The caller records BootstrapClaimFirstSeen; verify the types support it.
+        let peer = PeerId::from_bytes([0x47; 32]);
+        let mut state = NeighborSyncState::new_cycle(vec![peer]);
+        let now = Instant::now();
+        state.bootstrap_claims.entry(peer).or_insert(now);
+
+        assert!(
+            state.bootstrap_claims.contains_key(&peer),
+            "BootstrapClaimFirstSeen should be recorded after a grace-period claim"
+        );
+    }
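+
+    // Sketch of the caller-side bookkeeping mirrored by the `entry().or_insert()`
+    // call above. `grace` is an assumed configuration duration, not a value
+    // this test reads:
+    //
+    //     match response {
+    //         AuditResponse::Bootstrapping { .. } => {
+    //             // The first claim starts the clock; repeat claims keep the
+    //             // original timestamp, so the grace period cannot be reset.
+    //             let first_seen = *state
+    //                 .bootstrap_claims
+    //                 .entry(peer)
+    //                 .or_insert_with(Instant::now);
+    //             if first_seen.elapsed() > grace {
+    //                 // Grace expired: a continued claim may become evidence.
+    //             }
+    //         }
+    //         AuditResponse::Digests { .. } => { /* verify digests as usual */ }
+    //     }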
+
+    // -- Scenario 53: Audit partial per-key failure with mixed responsibility ---
+
+    /// Scenario 53: P is challenged on {K1, K2, K3}. K1 matches; K2 and K3
+    /// mismatch. Responsibility confirmation shows P is responsible for K2
+    /// but not K3, so `AuditFailure` is emitted for {K2} only.
+    ///
+    /// The full `verify_digests` + `handle_audit_failure` path requires a
+    /// `P2PNode` for network lookups. This test verifies the conceptual steps:
+    /// (1) Digest comparison correctly identifies K2 and K3 as failures.
+    /// (2) `FailureEvidence::AuditFailure` carries only confirmed keys.
+    #[tokio::test]
+    async fn scenario_53_partial_failure_mixed_responsibility() {
+        let (storage, _temp) = create_test_storage().await;
+        let nonce = [0x53; 32];
+        let peer_id = [0x53; 32];
+
+        // Store K1, K2, K3.
+        let c1 = b"scenario 53 key one";
+        let c2 = b"scenario 53 key two";
+        let c3 = b"scenario 53 key three";
+        let k1 = LmdbStorage::compute_address(c1);
+        let k2 = LmdbStorage::compute_address(c2);
+        let k3 = LmdbStorage::compute_address(c3);
+        storage.put(&k1, c1).await.expect("put k1");
+        storage.put(&k2, c2).await.expect("put k2");
+        storage.put(&k3, c3).await.expect("put k3");
+
+        // Correct digests from the challenger's local store.
+        let d1_expected = compute_audit_digest(&nonce, &peer_id, &k1, c1);
+        let d2_expected = compute_audit_digest(&nonce, &peer_id, &k2, c2);
+        let d3_expected = compute_audit_digest(&nonce, &peer_id, &k3, c3);
+
+        // Simulate the peer's response: K1 echoes correct data, K2 and K3 carry
+        // tampered data.
+        let d1_peer = compute_audit_digest(&nonce, &peer_id, &k1, c1);
+        let d2_wrong = compute_audit_digest(&nonce, &peer_id, &k2, b"tampered k2");
+        let d3_wrong = compute_audit_digest(&nonce, &peer_id, &k3, b"tampered k3");
+
+        assert_eq!(d1_peer, d1_expected, "K1 should match");
+        assert_ne!(d2_wrong, d2_expected, "K2 should mismatch");
+        assert_ne!(d3_wrong, d3_expected, "K3 should mismatch");
+
+        // Step 1: Identify failed keys (digest comparison).
+        let digests = [d1_peer, d2_wrong, d3_wrong];
+        let keys = [k1, k2, k3];
+        let contents: [&[u8]; 3] = [c1, c2, c3];
+
+        let mut failed_keys = Vec::new();
+        for (i, key) in keys.iter().enumerate() {
+            if digests[i] == ABSENT_KEY_DIGEST {
+                failed_keys.push(*key);
+                continue;
+            }
+            let expected = compute_audit_digest(&nonce, &peer_id, key, contents[i]);
+            if digests[i] != expected {
+                failed_keys.push(*key);
+            }
+        }
+
+        assert_eq!(failed_keys.len(), 2, "K2 and K3 should be in the failure set");
+        assert!(failed_keys.contains(&k2));
+        assert!(failed_keys.contains(&k3));
+        assert!(!failed_keys.contains(&k1), "K1 passed the digest check");
+
+        // Step 2: Responsibility confirmation removes K3 (P is not responsible).
+        // Simulate: P is in the closest peers for K2 but not for K3.
+        let responsible_for_k2 = true;
+        let responsible_for_k3 = false;
+        let mut confirmed = Vec::new();
+        for key in &failed_keys {
+            let is_responsible = if *key == k2 {
+                responsible_for_k2
+            } else {
+                responsible_for_k3
+            };
+            if is_responsible {
+                confirmed.push(*key);
+            }
+        }
+
+        assert_eq!(confirmed, vec![k2], "Only K2 should be in the confirmed set");
+
+        // Step 3: Construct evidence for confirmed failures only.
+        let challenged_peer = PeerId::from_bytes(peer_id);
+        let evidence = FailureEvidence::AuditFailure {
+            challenge_id: 5300,
+            challenged_peer,
+            confirmed_failed_keys: confirmed,
+            reason: AuditFailureReason::DigestMismatch,
+        };
+
+        match evidence {
+            FailureEvidence::AuditFailure {
+                confirmed_failed_keys,
+                ..
+            } => {
+                assert_eq!(
+                    confirmed_failed_keys.len(),
+                    1,
+                    "Only K2 should generate evidence"
+                );
+                assert_eq!(confirmed_failed_keys[0], k2);
+            }
+            _ => panic!("Expected AuditFailure evidence"),
+        }
+    }
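+
+    // The three steps above compose into a single filter chain. This sketch
+    // assumes hypothetical `is_responsible(peer, key)` and `content_of(key)`
+    // helpers standing in for the closest-peers lookup and local read that
+    // need a live `P2PNode` and storage handle:
+    //
+    //     let confirmed: Vec<_> = keys
+    //         .iter()
+    //         .zip(&digests)
+    //         .filter(|(key, d)| {
+    //             **d == ABSENT_KEY_DIGEST
+    //                 || **d != compute_audit_digest(&nonce, &peer_id, key, content_of(key))
+    //         })
+    //         .filter(|(key, _)| is_responsible(challenged_peer, key)) // drops K3
+    //         .map(|(key, _)| *key)
+    //         .collect();
+    //     // Evidence is emitted for `confirmed` only; here, exactly {K2}.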
 }