diff --git a/boot.sh b/boot.sh index b6a57d6..bf778db 100755 --- a/boot.sh +++ b/boot.sh @@ -28,8 +28,8 @@ download_controller_grpc() { # Try to use tag, use master if not found PROTO_URLS=( - "https://raw.githubusercontent.com/w180112/fastrg-controller/$CURRENT_TAG/proto/controller.proto" - "https://raw.githubusercontent.com/w180112/fastrg-controller/master/proto/controller.proto" + "https://raw.githubusercontent.com/FastResidentialGateway/fastrg-controller/$CURRENT_TAG/proto/controller.proto" + "https://raw.githubusercontent.com/FastResidentialGateway/fastrg-controller/master/proto/controller.proto" ) DOWNLOAD_SUCCESS=0 diff --git a/src/pppd/fsm.c b/src/pppd/fsm.c index 6097833..e40a8a9 100644 --- a/src/pppd/fsm.c +++ b/src/pppd/fsm.c @@ -709,8 +709,6 @@ STATUS A_this_layer_up(__attribute__((unused)) struct rte_timer *ppp_timer, ppp_ } else if (s_ppp_ccb->ppp_phase[s_ppp_ccb->cp].ppp_payload.ppp_protocol == rte_cpu_to_be_16(IPCP_PROTOCOL)) { rte_atomic16_set(&s_ppp_ccb->dp_start_bool, (BIT16)1); s_ppp_ccb->phase = DATA_PHASE; - rte_timer_reset(&(s_ppp_ccb->nat), fastrg_get_cycles_in_sec(), PERIODICAL, - fastrg_ccb->lcore.ctrl_thread, (rte_timer_cb_t)nat_rule_timer, s_ppp_ccb); FastRG_LOG(INFO, fastrg_ccb->fp, s_ppp_ccb, PPPLOGMSG, "User %" PRIu16 " IPCP connection establish successfully.", s_ppp_ccb->user_num); FastRG_LOG(INFO, fastrg_ccb->fp, s_ppp_ccb, PPPLOGMSG, "Now user %" PRIu16 " can start to send data via pppoe session id 0x%x and vlan is %" PRIu16 ".\n", s_ppp_ccb->user_num, rte_be_to_cpu_16(s_ppp_ccb->session_id), rte_atomic16_read(&s_ppp_ccb->vlan_id)); diff --git a/src/pppd/nat.c b/src/pppd/nat.c deleted file mode 100644 index 48a4d13..0000000 --- a/src/pppd/nat.c +++ /dev/null @@ -1,27 +0,0 @@ -#include <rte_common.h> -#include <rte_atomic.h> -#include <rte_timer.h> - -#include "pppd.h" -#include "nat.h" - -void nat_rule_timer(__attribute__((unused)) struct rte_timer *tim, ppp_ccb_t *s_ppp_ccb) -{ - addr_table_t *table = s_ppp_ccb->addr_table; - for(int i=0; i<MAX_NAT_ENTRIES; i++) { - if (rte_atomic16_read(&table[i].is_alive) > 0) { - 
rte_atomic16_sub(&table[i].is_alive, 1); - } else { - if (rte_atomic16_cmpset((volatile uint16_t *)&table[i].is_fill, - NAT_ENTRY_READY, NAT_ENTRY_FREE)) { - rte_atomic_thread_fence(rte_memory_order_acq_rel); - - if (rte_atomic16_read(&table[i].is_alive) > 0) - rte_atomic16_set(&table[i].is_fill, NAT_ENTRY_READY); - } - } - } -} diff --git a/src/pppd/nat.h b/src/pppd/nat.h index fe864bf..cd57cfd 100644 --- a/src/pppd/nat.h +++ b/src/pppd/nat.h @@ -25,7 +25,7 @@ #include "pppd.h" #include "tcp_conntrack.h" -#define NAT_ENTRY_TIMEOUT_TICKS 10 +#define NAT_ENTRY_TIMEOUT_SEC 10 #define MAX_L4_PORT_NUM 0xffff #define SYS_MAX_PORT 1000 @@ -35,7 +35,33 @@ #define NAT_ENTRY_FILLING 1 #define NAT_ENTRY_READY 2 -void nat_rule_timer(__attribute__((unused)) struct rte_timer *tim, ppp_ccb_t *s_ppp_ccb); +/** + * @fn nat_expiry_cycles + * + * @brief Compute the absolute TSC timestamp at which a new/refreshed NAT entry + * should expire (now + NAT_ENTRY_TIMEOUT_SEC seconds). + * + * @return Expiry timestamp in CPU cycles + */ +static inline U64 nat_expiry_cycles(void) +{ + return fastrg_get_cur_cycles() + (U64)NAT_ENTRY_TIMEOUT_SEC * fastrg_get_cycles_in_sec(); +} + +/** + * @fn nat_entry_is_expired + * + * @brief Check whether a READY NAT entry has passed its expiry timestamp. + * Must only be called on entries whose is_fill == NAT_ENTRY_READY. 
+ * + * @param entry Pointer to NAT address table entry + * + * @return Non-zero if the entry has expired, 0 otherwise + */ +static inline int nat_entry_is_expired(addr_table_t *entry) +{ + return fastrg_get_cur_cycles() > (U64)rte_atomic64_read(&entry->expire_at); +} /** * @fn compute_nat_table_index @@ -202,7 +228,7 @@ static inline U16 nat_learning_port_reuse(struct rte_ether_hdr *eth_hdr, entry->nat_port = nat_port; entry->tcp_state = TCP_CONNTRACK_NONE; entry->tcp_fin_flags = 0; - rte_atomic16_set(&entry->is_alive, NAT_ENTRY_TIMEOUT_TICKS); + rte_atomic64_set(&entry->expire_at, nat_expiry_cycles()); rte_atomic_thread_fence(rte_memory_order_release); rte_atomic16_set(&entry->is_fill, NAT_ENTRY_READY); @@ -223,20 +249,47 @@ static inline U16 nat_learning_port_reuse(struct rte_ether_hdr *eth_hdr, /* Case 2: Entry is READY - safe to read */ if (entry_state == NAT_ENTRY_READY) { - /* Same flow already exists - return existing nat_port */ + /* Same flow already exists - refresh expiry and return */ if (nat_entry_same_flow(entry, nat_port, src_ip, src_port, dst_ip, dst_port)) { - rte_atomic16_set(&entry->is_alive, NAT_ENTRY_TIMEOUT_TICKS); + rte_atomic64_set(&entry->expire_at, nat_expiry_cycles()); return entry->nat_port; } - /* Same (nat_port, dst_ip, dst_port) but different source = CONFLICT - * This means another source already uses this nat_port for this destination - * Must try a different nat_port */ + /* Entry is expired - try to evict and reuse this slot */ + if (nat_entry_is_expired(entry)) { + if (rte_atomic16_cmpset((volatile uint16_t *)&entry->is_fill, + NAT_ENTRY_READY, NAT_ENTRY_FILLING)) { + if (nat_entry_is_expired(entry)) { + /* Still expired after CAS: evict and reuse */ + rte_ether_addr_copy(ð_hdr->src_addr, &entry->mac_addr); + entry->src_ip = src_ip; + entry->dst_ip = dst_ip; + entry->src_port = src_port; + entry->dst_port = dst_port; + entry->nat_port = nat_port; + entry->tcp_state = TCP_CONNTRACK_NONE; + entry->tcp_fin_flags = 0; + 
rte_atomic64_set(&entry->expire_at, nat_expiry_cycles()); + rte_atomic_thread_fence(rte_memory_order_release); + rte_atomic16_set(&entry->is_fill, NAT_ENTRY_READY); + return entry->nat_port; + } + /* Entry was refreshed between our check and CAS: restore it */ + rte_atomic16_set(&entry->is_fill, NAT_ENTRY_READY); + } + /* CAS failed or entry restored: advance to next slot */ + table_idx++; + if (table_idx >= MAX_NAT_ENTRIES) + table_idx = 0; + continue; + } + + /* Active conflict: same (nat_port, dst_ip, dst_port), different source */ if (nat_entry_matches_key(entry, nat_port, dst_ip, dst_port)) break; } - /* Case 4: Hash collision (different key, same bucket) - try next slot */ + /* Case 3: Hash collision (active entry, different key) - try next slot */ table_idx++; if (table_idx >= MAX_NAT_ENTRIES) table_idx = 0; @@ -293,8 +346,18 @@ static inline addr_table_t *nat_reverse_lookup(U16 nat_port, U32 remote_ip, U16 /* Entry is ready - safe to read */ rte_atomic_thread_fence(rte_memory_order_acquire); + /* Evict expired entries encountered during the walk */ + if (nat_entry_is_expired(entry)) { + rte_atomic16_cmpset((volatile uint16_t *)&entry->is_fill, + NAT_ENTRY_READY, NAT_ENTRY_FREE); + table_idx++; + if (table_idx >= MAX_NAT_ENTRIES) + table_idx = 0; + continue; + } + if (nat_entry_matches_key(entry, nat_port, remote_ip, remote_port)) { - rte_atomic16_set(&entry->is_alive, NAT_ENTRY_TIMEOUT_TICKS); + rte_atomic64_set(&entry->expire_at, nat_expiry_cycles()); return entry; } diff --git a/src/pppd/pppd.c b/src/pppd/pppd.c index 5e74a77..106ed5c 100644 --- a/src/pppd/pppd.c +++ b/src/pppd/pppd.c @@ -45,7 +45,6 @@ void PPP_bye(ppp_ccb_t *s_ppp_ccb) rte_timer_stop(&(s_ppp_ccb->ppp)); rte_timer_stop(&(s_ppp_ccb->pppoe)); rte_timer_stop(&(s_ppp_ccb->ppp_alive)); - rte_timer_stop(&s_ppp_ccb->nat); rte_atomic16_cmpset((volatile uint16_t *)&s_ppp_ccb->dp_start_bool.cnt, (BIT16)1, (BIT16)0); switch(s_ppp_ccb->phase) { case END_PHASE: @@ -130,13 +129,12 @@ STATUS 
ppp_init_config_by_user(FastRG_t *fastrg_ccb, ppp_ccb_t *ppp_ccb, U16 ccb ppp_ccb->magic_num = rte_cpu_to_be_32((rand() % 0xFFFFFFFE) + 1); ppp_ccb->identifier = 0x0; for(int j=0; j<MAX_NAT_ENTRIES; j++) { - rte_atomic16_init(&ppp_ccb->addr_table[j].is_alive); rte_atomic16_init(&ppp_ccb->addr_table[j].is_fill); + rte_atomic64_init(&ppp_ccb->addr_table[j].expire_at); } memset(ppp_ccb->PPP_dst_mac.addr_bytes, 0, ETH_ALEN); rte_timer_init(&(ppp_ccb->pppoe)); rte_timer_init(&(ppp_ccb->ppp)); - rte_timer_init(&(ppp_ccb->nat)); rte_timer_init(&(ppp_ccb->ppp_alive)); rte_timer_init(&(ppp_ccb->etcd_pppoe_status_timer)); rte_atomic16_init(&ppp_ccb->dp_start_bool); @@ -625,7 +623,6 @@ void exit_ppp(ppp_ccb_t *ppp_ccb) rte_timer_stop(&(ppp_ccb->ppp)); rte_timer_stop(&(ppp_ccb->pppoe)); rte_timer_stop(&(ppp_ccb->ppp_alive)); - rte_timer_stop(&ppp_ccb->nat); fastrg_ccb->cur_user--; ppp_ccb->phase = END_PHASE; ppp_ccb->ppp_phase[0].state = S_INIT; diff --git a/src/pppd/pppd.h b/src/pppd/pppd.h index ca6e929..dc8f396 100644 --- a/src/pppd/pppd.h +++ b/src/pppd/pppd.h @@ -58,8 +58,8 @@ typedef struct addr_table { U16 nat_port; U8 tcp_state; // TCP conntrack state (tcp_conntrack_state_t), 0 = NONE U8 tcp_fin_flags; // bitmask: bit0 = originator FIN, bit1 = responder FIN - rte_atomic16_t is_fill; // is this entry filled or not - rte_atomic16_t is_alive; // counter for checking entry alive or not every second + rte_atomic16_t is_fill; // is this entry filled or not + rte_atomic64_t expire_at; // absolute TSC timestamp when entry expires }__rte_cache_aligned addr_table_t; /** @@ -97,7 +97,6 @@ typedef struct { arp_pending_queue_t arp_pq; /* ARP pending queue for unresolved port-fwd destinations */ struct rte_timer pppoe; /* pppoe timer */ struct rte_timer ppp; /* ppp timer */ - struct rte_timer nat; /* nat table timer */ struct rte_timer ppp_alive; /* PPP connection checking timer */ struct rte_timer etcd_pppoe_status_timer; /* etcd pppoe status checking timer */ rte_atomic64_t pppoes_rx_bytes; diff --git a/src/pppd/tcp_conntrack.c 
b/src/pppd/tcp_conntrack.c index f82d4af..61b21a4 100644 --- a/src/pppd/tcp_conntrack.c +++ b/src/pppd/tcp_conntrack.c @@ -22,55 +22,64 @@ static STATUS tcp_act_timeout_none(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_NONE); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_NONE * fastrg_get_cycles_in_sec()); return SUCCESS; } static STATUS tcp_act_timeout_syn_sent(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_SYN_SENT); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_SYN_SENT * fastrg_get_cycles_in_sec()); return SUCCESS; } static STATUS tcp_act_timeout_syn_recv(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_SYN_RECV); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_SYN_RECV * fastrg_get_cycles_in_sec()); return SUCCESS; } static STATUS tcp_act_timeout_established(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_ESTABLISHED); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_ESTABLISHED * fastrg_get_cycles_in_sec()); return SUCCESS; } static STATUS tcp_act_timeout_fin_wait(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_FIN_WAIT); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_FIN_WAIT * fastrg_get_cycles_in_sec()); return SUCCESS; } static STATUS tcp_act_timeout_close_wait(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_CLOSE_WAIT); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_CLOSE_WAIT * fastrg_get_cycles_in_sec()); return SUCCESS; } static STATUS 
tcp_act_timeout_last_ack(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_LAST_ACK); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_LAST_ACK * fastrg_get_cycles_in_sec()); return SUCCESS; } static STATUS tcp_act_timeout_time_wait(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_TIME_WAIT); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_TIME_WAIT * fastrg_get_cycles_in_sec()); return SUCCESS; } static STATUS tcp_act_timeout_close(struct addr_table *entry) { - rte_atomic16_set(&((addr_table_t *)entry)->is_alive, TCP_TIMEOUT_CLOSE); + rte_atomic64_set(&((addr_table_t *)entry)->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_CLOSE * fastrg_get_cycles_in_sec()); return SUCCESS; } @@ -86,6 +95,12 @@ static STATUS tcp_act_set_fin_resp(struct addr_table *entry) return SUCCESS; } +static STATUS tcp_act_reset_fin_flags(struct addr_table *entry) +{ + ((addr_table_t *)entry)->tcp_fin_flags = 0; + return SUCCESS; +} + /*////////////////////////////////////////////////////////////////////////////////// STATE EVENT NEXT-STATE HANDLERS Splitting TCP_EV_FIN into TCP_EV_FIN_ORIG / TCP_EV_FIN_RESP eliminates @@ -134,9 +149,12 @@ static tcp_conntrack_state_tbl_t tcp_conntrack_tbl[] = { /* TIME_WAIT: both FINs acked, waiting 2*MSL */ { TCP_CONNTRACK_TIME_WAIT, TCP_EV_ACK, TCP_CONNTRACK_TIME_WAIT, { tcp_act_timeout_time_wait, NULL } }, /* late ACK retransmit */ { TCP_CONNTRACK_TIME_WAIT, TCP_EV_RST, TCP_CONNTRACK_CLOSE, { tcp_act_timeout_close, NULL } }, + { TCP_CONNTRACK_TIME_WAIT, TCP_EV_SYN, TCP_CONNTRACK_SYN_SENT, { tcp_act_reset_fin_flags, tcp_act_timeout_syn_sent, NULL } }, /* new connection reuse during 2MSL */ /* CLOSE: RST or fully closed */ { TCP_CONNTRACK_CLOSE, TCP_EV_RST, TCP_CONNTRACK_CLOSE, { tcp_act_timeout_close, NULL } }, + { TCP_CONNTRACK_CLOSE, TCP_EV_SYN, 
TCP_CONNTRACK_SYN_SENT, { tcp_act_reset_fin_flags, tcp_act_timeout_syn_sent, NULL } }, /* new connection from originator */ + { TCP_CONNTRACK_CLOSE, TCP_EV_SYN_ACK, TCP_CONNTRACK_SYN_RECV, { tcp_act_reset_fin_flags, tcp_act_timeout_syn_recv, NULL } }, /* new passive connection */ /* Sentinel */ { TCP_CONNTRACK_INVLD, 0, 0, { NULL } }, diff --git a/src/pppd/tcp_conntrack.h b/src/pppd/tcp_conntrack.h index a07b427..c147a93 100644 --- a/src/pppd/tcp_conntrack.h +++ b/src/pppd/tcp_conntrack.h @@ -39,7 +39,7 @@ typedef enum { TCP_EV_INVLD, /* Sentinel */ } tcp_conntrack_event_t; -/*--------- TIMEOUT VALUES (in timer ticks, 1 tick = 1 second) ----------*/ +/*--------- TIMEOUT VALUES (in seconds, used to compute absolute expiry timestamp) ----------*/ #define TCP_TIMEOUT_NONE 10 #define TCP_TIMEOUT_SYN_SENT 30 #define TCP_TIMEOUT_SYN_RECV 30 diff --git a/unit_test/pppd/tcp_conntrack_test.c b/unit_test/pppd/tcp_conntrack_test.c index 16d00f8..3a74718 100644 --- a/unit_test/pppd/tcp_conntrack_test.c +++ b/unit_test/pppd/tcp_conntrack_test.c @@ -21,7 +21,8 @@ static void init_entry(addr_table_t *entry, U8 initial_state) entry->tcp_state = initial_state; entry->tcp_fin_flags = 0; rte_atomic16_set(&entry->is_fill, NAT_ENTRY_READY); - rte_atomic16_set(&entry->is_alive, TCP_TIMEOUT_NONE); + rte_atomic64_set(&entry->expire_at, + fastrg_get_cur_cycles() + (U64)TCP_TIMEOUT_NONE * fastrg_get_cycles_in_sec()); } /** @@ -55,6 +56,19 @@ static void test_three_way_handshake(void) "expected ESTABLISHED(%d), got %d", TCP_CONNTRACK_ESTABLISHED, entry.tcp_state); } +/* + * Helper: verify expire_at was set to approximately (now + expected_secs * hz). + * Returns 1 if in range, 0 otherwise. 
+ */ +static int expire_at_approx(U64 before_cycles, U64 expire_at, U64 expected_secs) +{ + U64 hz = fastrg_get_cycles_in_sec(); + U64 tolerance = hz / 1000; /* 1 ms tolerance */ + U64 expected = (U64)expected_secs * hz; + return expire_at >= before_cycles + expected && + expire_at <= before_cycles + expected + tolerance; +} + /** * Test 2: Timeout values per state */ @@ -64,41 +78,47 @@ static void test_timeout_values(void) printf("=================================\n\n"); addr_table_t entry; + U64 before; /* SYN_SENT timeout */ init_entry(&entry, TCP_CONNTRACK_NONE); + before = fastrg_get_cur_cycles(); tcp_conntrack_fsm(&entry, RTE_TCP_SYN_FLAG, FALSE); - TEST_ASSERT(rte_atomic16_read(&entry.is_alive) == TCP_TIMEOUT_SYN_SENT, + TEST_ASSERT(expire_at_approx(before, rte_atomic64_read(&entry.expire_at), TCP_TIMEOUT_SYN_SENT), "SYN_SENT timeout", - "expected %d, got %d", TCP_TIMEOUT_SYN_SENT, rte_atomic16_read(&entry.is_alive)); + "expire_at not set to ~%d seconds", TCP_TIMEOUT_SYN_SENT); /* ESTABLISHED timeout */ init_entry(&entry, TCP_CONNTRACK_SYN_RECV); + before = fastrg_get_cur_cycles(); tcp_conntrack_fsm(&entry, RTE_TCP_ACK_FLAG, FALSE); - TEST_ASSERT(rte_atomic16_read(&entry.is_alive) == TCP_TIMEOUT_ESTABLISHED, + TEST_ASSERT(expire_at_approx(before, rte_atomic64_read(&entry.expire_at), TCP_TIMEOUT_ESTABLISHED), "ESTABLISHED timeout", - "expected %d, got %d", TCP_TIMEOUT_ESTABLISHED, rte_atomic16_read(&entry.is_alive)); + "expire_at not set to ~%d seconds", TCP_TIMEOUT_ESTABLISHED); /* CLOSE timeout (via RST) */ init_entry(&entry, TCP_CONNTRACK_ESTABLISHED); + before = fastrg_get_cur_cycles(); tcp_conntrack_fsm(&entry, RTE_TCP_RST_FLAG, FALSE); - TEST_ASSERT(rte_atomic16_read(&entry.is_alive) == TCP_TIMEOUT_CLOSE, + TEST_ASSERT(expire_at_approx(before, rte_atomic64_read(&entry.expire_at), TCP_TIMEOUT_CLOSE), "CLOSE timeout after RST", - "expected %d, got %d", TCP_TIMEOUT_CLOSE, rte_atomic16_read(&entry.is_alive)); + "expire_at not set to ~%d seconds", 
TCP_TIMEOUT_CLOSE); /* FIN_WAIT timeout */ init_entry(&entry, TCP_CONNTRACK_ESTABLISHED); + before = fastrg_get_cur_cycles(); tcp_conntrack_fsm(&entry, RTE_TCP_FIN_FLAG, FALSE); - TEST_ASSERT(rte_atomic16_read(&entry.is_alive) == TCP_TIMEOUT_FIN_WAIT, + TEST_ASSERT(expire_at_approx(before, rte_atomic64_read(&entry.expire_at), TCP_TIMEOUT_FIN_WAIT), "FIN_WAIT timeout", - "expected %d, got %d", TCP_TIMEOUT_FIN_WAIT, rte_atomic16_read(&entry.is_alive)); + "expire_at not set to ~%d seconds", TCP_TIMEOUT_FIN_WAIT); /* CLOSE_WAIT timeout */ init_entry(&entry, TCP_CONNTRACK_ESTABLISHED); + before = fastrg_get_cur_cycles(); tcp_conntrack_fsm(&entry, RTE_TCP_FIN_FLAG, TRUE); - TEST_ASSERT(rte_atomic16_read(&entry.is_alive) == TCP_TIMEOUT_CLOSE_WAIT, + TEST_ASSERT(expire_at_approx(before, rte_atomic64_read(&entry.expire_at), TCP_TIMEOUT_CLOSE_WAIT), "CLOSE_WAIT timeout", - "expected %d, got %d", TCP_TIMEOUT_CLOSE_WAIT, rte_atomic16_read(&entry.is_alive)); + "expire_at not set to ~%d seconds", TCP_TIMEOUT_CLOSE_WAIT); } /** @@ -275,15 +295,18 @@ static void test_syn_retransmit(void) addr_table_t entry; init_entry(&entry, TCP_CONNTRACK_SYN_SENT); - rte_atomic16_set(&entry.is_alive, 5); /* simulate partial timeout */ + /* simulate a partially elapsed timeout (5 seconds remaining) */ + rte_atomic64_set(&entry.expire_at, + fastrg_get_cur_cycles() + 5ULL * fastrg_get_cycles_in_sec()); + U64 before = fastrg_get_cur_cycles(); tcp_conntrack_fsm(&entry, RTE_TCP_SYN_FLAG, FALSE); TEST_ASSERT(entry.tcp_state == TCP_CONNTRACK_SYN_SENT, "SYN retransmit stays SYN_SENT", "expected SYN_SENT(%d), got %d", TCP_CONNTRACK_SYN_SENT, entry.tcp_state); - TEST_ASSERT(rte_atomic16_read(&entry.is_alive) == TCP_TIMEOUT_SYN_SENT, + TEST_ASSERT(expire_at_approx(before, rte_atomic64_read(&entry.expire_at), TCP_TIMEOUT_SYN_SENT), "SYN retransmit refreshes timeout", - "expected %d, got %d", TCP_TIMEOUT_SYN_SENT, rte_atomic16_read(&entry.is_alive)); + "expire_at not reset to ~%d seconds", 
TCP_TIMEOUT_SYN_SENT); } /** @@ -317,6 +340,72 @@ static void test_direction_aware_fin(void) "expected TIME_WAIT(%d), got %d", TCP_CONNTRACK_TIME_WAIT, entry.tcp_state); } +/** + * Test 11: New connection reuse from CLOSE state + * CLOSE + SYN → SYN_SENT (fin_flags cleared) + * CLOSE + SYN_ACK → SYN_RECV (passive open, fin_flags cleared) + */ +static void test_close_new_connection(void) +{ + printf("\nTesting new connection from CLOSE state:\n"); + printf("=========================================\n\n"); + + addr_table_t entry; + + /* CLOSE + SYN → SYN_SENT, fin_flags cleared */ + init_entry(&entry, TCP_CONNTRACK_CLOSE); + entry.tcp_fin_flags = TCP_FIN_FLAG_ORIGINATOR | TCP_FIN_FLAG_RESPONDER; + U64 before = fastrg_get_cur_cycles(); + tcp_conntrack_fsm(&entry, RTE_TCP_SYN_FLAG, FALSE); + TEST_ASSERT(entry.tcp_state == TCP_CONNTRACK_SYN_SENT, + "CLOSE + SYN → SYN_SENT", + "expected SYN_SENT(%d), got %d", TCP_CONNTRACK_SYN_SENT, entry.tcp_state); + TEST_ASSERT(entry.tcp_fin_flags == 0, + "CLOSE + SYN clears fin_flags", + "expected 0, got %d", entry.tcp_fin_flags); + TEST_ASSERT(expire_at_approx(before, rte_atomic64_read(&entry.expire_at), TCP_TIMEOUT_SYN_SENT), + "CLOSE + SYN sets SYN_SENT timeout", + "expire_at not set to ~%d seconds", TCP_TIMEOUT_SYN_SENT); + + /* CLOSE + SYN_ACK → SYN_RECV (passive open), fin_flags cleared */ + init_entry(&entry, TCP_CONNTRACK_CLOSE); + entry.tcp_fin_flags = TCP_FIN_FLAG_ORIGINATOR | TCP_FIN_FLAG_RESPONDER; + before = fastrg_get_cur_cycles(); + tcp_conntrack_fsm(&entry, RTE_TCP_SYN_FLAG | RTE_TCP_ACK_FLAG, TRUE); + TEST_ASSERT(entry.tcp_state == TCP_CONNTRACK_SYN_RECV, + "CLOSE + SYN_ACK → SYN_RECV", + "expected SYN_RECV(%d), got %d", TCP_CONNTRACK_SYN_RECV, entry.tcp_state); + TEST_ASSERT(entry.tcp_fin_flags == 0, + "CLOSE + SYN_ACK clears fin_flags", + "expected 0, got %d", entry.tcp_fin_flags); +} + +/** + * Test 12: New connection reuse from TIME_WAIT state + * TIME_WAIT + SYN → SYN_SENT (fin_flags cleared) + */ +static void 
test_time_wait_new_connection(void) +{ + printf("\nTesting new connection from TIME_WAIT state:\n"); + printf("=============================================\n\n"); + + addr_table_t entry; + init_entry(&entry, TCP_CONNTRACK_TIME_WAIT); + entry.tcp_fin_flags = TCP_FIN_FLAG_ORIGINATOR | TCP_FIN_FLAG_RESPONDER; + + U64 before = fastrg_get_cur_cycles(); + tcp_conntrack_fsm(&entry, RTE_TCP_SYN_FLAG, FALSE); + TEST_ASSERT(entry.tcp_state == TCP_CONNTRACK_SYN_SENT, + "TIME_WAIT + SYN → SYN_SENT", + "expected SYN_SENT(%d), got %d", TCP_CONNTRACK_SYN_SENT, entry.tcp_state); + TEST_ASSERT(entry.tcp_fin_flags == 0, + "TIME_WAIT + SYN clears fin_flags", + "expected 0, got %d", entry.tcp_fin_flags); + TEST_ASSERT(expire_at_approx(before, rte_atomic64_read(&entry.expire_at), TCP_TIMEOUT_SYN_SENT), + "TIME_WAIT + SYN sets SYN_SENT timeout", + "expire_at not set to ~%d seconds", TCP_TIMEOUT_SYN_SENT); +} + void test_tcp_conntrack(FastRG_t *fastrg_ccb, U32 *total_tests, U32 *total_pass) { (void)fastrg_ccb; @@ -334,6 +423,8 @@ void test_tcp_conntrack(FastRG_t *fastrg_ccb, U32 *total_tests, U32 *total_pass) test_simultaneous_close(); test_syn_retransmit(); test_direction_aware_fin(); + test_close_new_connection(); + test_time_wait_new_connection(); *total_tests += test_count; *total_pass += pass_count;