diff --git a/README.md b/README.md index e73bbbd..6aedcbe 100644 --- a/README.md +++ b/README.md @@ -25,16 +25,13 @@ next-generation intelligent vehicles: manned and unmanned aircraft, spacecraft, - Support for redundant network interfaces with seamless interface aggregation and zero fail-over delay. - Robust message reassembler supporting highly distorted datagram streams: out-of-order fragments, fragment/message deduplication, interleaving, variable MTU, ... -- Packet loss mitigation via: - - reliable topics (retransmit until acknowledged; callback notifications for successful/failed deliveries). - - redundant interfaces (packet lost on one interface may be received on another, transparent to the application); - Heap not required (but supported); the library can be used with fixed-size block pool allocators. - Detailed time complexity and memory requirement models for the benefit of real-time high-integrity applications. -- Scalable: designed to handle thousands of topics and hundreds of concurrent transfers with minimal resources. +- Scalable: designed to handle thousands of subjects and hundreds of concurrent transfers with minimal resources. - Runs anywhere out of the box, including extremely resource-constrained baremetal environments with ~100K ROM/RAM. No porting required. - Partial MISRA C compliance (reach out to ). -- Full implementation in a single C file with only 2k lines of straightforward C99! +- Full implementation in a single C file with less than 2k lines of straightforward C99! - Extensive verification suite. ## Usage @@ -71,7 +68,7 @@ standards-compliant C99 compiler is available. ### v3.0 -- WORK IN PROGRESS -The library has been redesigned from scratch to support Cyphal v1.1, named topics, and reliable transfers. +The library has been redesigned from scratch to support Cyphal v1.1 and named topics. No porting guide is provided since the changes are too significant; please refer to the new API docs in `libudpard/udpard.h`. 
diff --git a/cyphal_udp_header.dsdl b/cyphal_udp_header.dsdl index c4ce87a..42f112e 100644 --- a/cyphal_udp_header.dsdl +++ b/cyphal_udp_header.dsdl @@ -1,40 +1,31 @@ +# Cyphal/UDP provides UNRELIABLE UNORDERED DEDUPLICATED (at most one) delivery of UNICAST or MULTICAST datagrams +# with GUARANTEED INTEGRITY (messages either delivered correct or not delivered). +# # All Cyphal/UDP traffic is sent to port 9382. # The subject multicast group address is composed as 239.0.0.0 (=0xEF000000) + subject_id (23 bits). +# # All frames of a transfer must share the same field values unless otherwise noted. # Frames may arrive out-of-order, possibly interleaved with neighboring transfers; implementations must cope. +# +# The origin UID is a 64-bit globally unique identifier (e.g., EUI-64). It allows nodes to use redundant interfaces +# without source address ambiguity, and also allows live interface migration. +# +# P2P traffic is sent directly to the source endpoint of the destination node, which is discovered dynamically. +# The destination UID is not included explicitly since the IP endpoint is considered adequate for node identification. uint5 version #=2 in this version. uint3 priority # 0=highest, 7=lowest. -uint2 KIND_MSG_BEST_EFFORT = 0 # No ack must be sent. -uint2 KIND_MSG_RELIABLE = 1 # Remote must acknowledge reception by sending an ACK frame back. -uint2 KIND_ACK = 2 # Sent P2P; the transfer_id is of the acknowledged frame. Payload empty/ignored. -uint2 kind -uint6 reserved_incompat # Discard frame if any incompatibility flags are set that are not understood. +void5 # Send zero, ignore on reception. +uint3 incompatibility # Send zero, drop frame if nonzero. -void16 # Reserved for compatibility flags and fields (transmit zero, ignore on reception). - -# Payload reassembly information. -# We provide both the frame index and the frame payload offset to allow various reassembly strategies depending on the -# preferences of the implementation. 
The provided information is sufficient for zero-copy out-of-order reassembly. -# Offset 4 bytes. +uint48 transfer_id # For multi-frame reassembly and dedup. +uint64 sender_uid # Origin identifier ensures invariance to the source IP address for reassembly. -uint24 frame_index # Zero-based index of the payload fragment carried by this frame. -void8 uint32 frame_payload_offset # The offset of the frame payload relative to the start of the transfer payload. uint32 transfer_payload_size # Total for all frames. -# Transfer identification information. -# The transfer-ID is a single field that segregates transfers by topic hash and epoch (publisher sequence restarts). -# Offset 16 bytes. - -uint64 transfer_id # For multi-frame reassembly and dedup. ACK specifies the acked tfer here. -uint64 sender_uid # Origin identifier ensures invariance to the source IP address for reassembly. - -# Integrity checking information. -# Offset 32 bytes. - uint32 prefix_crc32c # crc32c(payload[0:(frame_payload_offset+payload_size)]) uint32 header_crc32c # Covers all fields above. Same as the transfer payload CRC. -# End of header at 40 bytes. Payload follows. +# Header size 32 bytes; payload follows. diff --git a/libudpard/udpard.c b/libudpard/udpard.c index e0e70a3..a5e75db 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -43,14 +43,14 @@ typedef unsigned char byte_t; ///< For compatibility with platforms where byte s /// Sessions will be garbage-collected after being idle for this long, along with unfinished transfers, if any. /// Pending slots within a live session will also be reset after this timeout to avoid storing stale data indefinitely. -#define SESSION_LIFETIME (60 * MEGA) +#define SESSION_LIFETIME (30 * MEGA) /// The maximum number of incoming transfers that can be in the state of incomplete reassembly simultaneously. /// Additional transfers will replace the oldest ones. /// This number should normally be at least as large as there are priority levels. 
More is fine but rarely useful. #define RX_SLOT_COUNT UDPARD_PRIORITY_COUNT -/// The number of most recent transfers to keep in the history for ACK retransmission and duplicate detection. +/// The number of most recent transfers to keep in the history for duplicate rejection. /// Should be a power of two to allow replacement of modulo operation with a bitwise AND. /// /// Implementation node: we used to store bitmap windows instead of a full list of recent transfer-IDs, but they @@ -70,9 +70,6 @@ static_assert((UDPARD_IPv4_SUBJECT_ID_MAX & (UDPARD_IPv4_SUBJECT_ID_MAX + 1)) == #define KILO 1000LL #define MEGA 1000000LL -/// Pending ack transfers expire after this long if not transmitted. -#define ACK_TX_DEADLINE MEGA - static size_t smaller(const size_t a, const size_t b) { return (a < b) ? a : b; } static size_t larger(const size_t a, const size_t b) { return (a > b) ? a : b; } static int64_t min_i64(const int64_t a, const int64_t b) { return (a < b) ? a : b; } @@ -111,7 +108,15 @@ static bool mem_validate(const udpard_mem_t mem) static byte_t* serialize_u32(byte_t* ptr, const uint32_t value) { - for (size_t i = 0; i < sizeof(value); i++) { + for (size_t i = 0; i < 4U; i++) { + *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); + } + return ptr; +} + +static byte_t* serialize_u48(byte_t* ptr, const uint64_t value) +{ + for (size_t i = 0; i < 6U; i++) { *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); } return ptr; @@ -119,7 +124,7 @@ static byte_t* serialize_u32(byte_t* ptr, const uint32_t value) static byte_t* serialize_u64(byte_t* ptr, const uint64_t value) { - for (size_t i = 0; i < sizeof(value); i++) { + for (size_t i = 0; i < 8U; i++) { *ptr++ = (byte_t)((byte_t)(value >> (i * 8U)) & 0xFFU); } return ptr; @@ -129,18 +134,29 @@ static const byte_t* deserialize_u32(const byte_t* ptr, uint32_t* const out_valu { UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); *out_value = 0; - for (size_t i = 0; i < sizeof(*out_value); i++) { + for (size_t i = 0; 
i < 4U; i++) { *out_value |= (uint32_t)((uint32_t)*ptr << (i * 8U)); // NOLINT(google-readability-casting) NOSONAR ptr++; } return ptr; } +static const byte_t* deserialize_u48(const byte_t* ptr, uint64_t* const out_value) +{ + UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); + *out_value = 0; + for (size_t i = 0; i < 6U; i++) { + *out_value |= ((uint64_t)*ptr << (i * 8U)); + ptr++; + } + return ptr; +} + static const byte_t* deserialize_u64(const byte_t* ptr, uint64_t* const out_value) { UDPARD_ASSERT((ptr != NULL) && (out_value != NULL)); *out_value = 0; - for (size_t i = 0; i < sizeof(*out_value); i++) { + for (size_t i = 0; i < 8U; i++) { *out_value |= ((uint64_t)*ptr << (i * 8U)); ptr++; } @@ -447,24 +463,14 @@ static void* ptr_unbias(const void* const ptr, const size_t offset) // --------------------------------------------------------------------------------------------------------------------- /// See cyphal_udp_header.dsdl for the layout. -#define HEADER_SIZE_BYTES 40U -#define HEADER_VERSION 2U -#define HEADER_FRAME_INDEX_MAX 0xFFFFFFU /// 4 GiB with 256-byte MTU; 21.6 GiB with 1384-byte MTU - -typedef enum frame_kind_t -{ - frame_msg_best, - frame_msg_reliable, - frame_ack, -} frame_kind_t; +#define HEADER_SIZE_BYTES 32U +#define HEADER_VERSION 2U /// The transfer-ID is designed to be unique per pending transfer. The uniqueness is achieved by randomization. -/// For extra entropy, P2P transfers have their transfer-ID computed as (base_counter++)+destination_uid; -/// the base counter is seeded with a random value. +/// For extra entropy, P2P transfers may have their transfer-ID mixed with the destination UID. 
typedef struct { udpard_prio_t priority; - frame_kind_t kind; uint32_t transfer_payload_size; uint64_t transfer_id; uint64_t sender_uid; @@ -472,21 +478,17 @@ typedef struct static byte_t* header_serialize(byte_t* const buffer, const meta_t meta, - const uint32_t frame_index, const uint32_t frame_payload_offset, const uint32_t prefix_crc) { - UDPARD_ASSERT((meta.kind == frame_msg_best) || (meta.kind == frame_msg_reliable) || (meta.kind == frame_ack)); + assert(meta.priority < 8U); byte_t* ptr = buffer; *ptr++ = (byte_t)(HEADER_VERSION | (meta.priority << 5U)); - *ptr++ = (byte_t)meta.kind; *ptr++ = 0; - *ptr++ = 0; - ptr = serialize_u32(ptr, frame_index & HEADER_FRAME_INDEX_MAX); + ptr = serialize_u48(ptr, meta.transfer_id); + ptr = serialize_u64(ptr, meta.sender_uid); ptr = serialize_u32(ptr, frame_payload_offset); ptr = serialize_u32(ptr, meta.transfer_payload_size); - ptr = serialize_u64(ptr, meta.transfer_id); - ptr = serialize_u64(ptr, meta.sender_uid); ptr = serialize_u32(ptr, prefix_crc); ptr = serialize_u32(ptr, crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer)); UDPARD_ASSERT((size_t)(ptr - buffer) == HEADER_SIZE_BYTES); @@ -495,49 +497,44 @@ static byte_t* header_serialize(byte_t* const buffer, static bool header_deserialize(const udpard_bytes_mut_t dgram_payload, meta_t* const out_meta, - uint32_t* const frame_index, uint32_t* const frame_payload_offset, uint32_t* const prefix_crc, udpard_bytes_t* const out_payload) { UDPARD_ASSERT(out_payload != NULL); - bool ok = (dgram_payload.size >= HEADER_SIZE_BYTES) && (dgram_payload.data != NULL) && // - (crc_full(HEADER_SIZE_BYTES, dgram_payload.data) == CRC_RESIDUE_AFTER_OUTPUT_XOR); - if (ok) { - const byte_t* ptr = dgram_payload.data; - const byte_t head = *ptr++; - const byte_t version = head & 0x1FU; - if (version == HEADER_VERSION) { - out_meta->priority = (udpard_prio_t)((byte_t)(head >> 5U) & 0x07U); - out_meta->kind = (frame_kind_t)*ptr++; - ptr += 2U; - ptr = deserialize_u32(ptr, frame_index); + if 
((dgram_payload.size < HEADER_SIZE_BYTES) || (dgram_payload.data == NULL) || // + (crc_full(HEADER_SIZE_BYTES, dgram_payload.data) != CRC_RESIDUE_AFTER_OUTPUT_XOR)) { + return false; + } + const byte_t* ptr = dgram_payload.data; + const byte_t head = *ptr++; + switch (head & 0x1FU) { + case HEADER_VERSION: { + out_meta->priority = (udpard_prio_t)((byte_t)(head >> 5U) & 0x07U); + const byte_t incompat = (*ptr++) >> 5U; + if (incompat != 0) { + return false; // Incompatible feature(s) not supported by this implementation. + } + ptr = deserialize_u48(ptr, &out_meta->transfer_id); + ptr = deserialize_u64(ptr, &out_meta->sender_uid); ptr = deserialize_u32(ptr, frame_payload_offset); ptr = deserialize_u32(ptr, &out_meta->transfer_payload_size); - ptr = deserialize_u64(ptr, &out_meta->transfer_id); - ptr = deserialize_u64(ptr, &out_meta->sender_uid); ptr = deserialize_u32(ptr, prefix_crc); (void)ptr; - // Set up the output payload view. out_payload->size = dgram_payload.size - HEADER_SIZE_BYTES; out_payload->data = (byte_t*)dgram_payload.data + HEADER_SIZE_BYTES; - // Finalize the fields. - *frame_index = HEADER_FRAME_INDEX_MAX & *frame_index; - // Validate the fields. - ok = ok && ((out_meta->kind == frame_msg_best) || (out_meta->kind == frame_msg_reliable) || - (out_meta->kind == frame_ack)); - ok = ok && (((uint64_t)*frame_payload_offset + (uint64_t)out_payload->size) <= - (uint64_t)out_meta->transfer_payload_size); - ok = ok && ((0 == *frame_index) == (0 == *frame_payload_offset)); - // The prefix-CRC of the first frame of a transfer equals the CRC of its payload. - ok = ok && ((0 < *frame_payload_offset) || (crc_full(out_payload->size, out_payload->data) == *prefix_crc)); - // ACK frame requires zero offset, single-frame transfer. 
- ok = ok && ((out_meta->kind != frame_ack) || (*frame_payload_offset == 0U)); - } else { - ok = false; + if ((*frame_payload_offset + (uint64_t)out_payload->size) > out_meta->transfer_payload_size) { + return false; + } + if ((0 == *frame_payload_offset) && (crc_full(out_payload->size, out_payload->data) != *prefix_crc)) { + return false; + } + break; } + default: + return false; } - return ok; + return true; } // --------------------------------------------------------------------------------------------------------------------- @@ -580,42 +577,23 @@ static tx_frame_t* tx_frame_new(udpard_tx_t* const tx, const udpard_mem_t mem, c return frame; } -/// The transmission scheduler maintains several indexes for the transfers in the pipeline. -/// The segregated priority queue only contains transfers that are ready for transmission. -/// The staged index contains transfers ordered by readiness for retransmission; -/// transfers that will no longer be transmitted but are retained waiting for the ack are in neither of these. -/// The deadline index contains ALL transfers, ordered by their deadlines, used for purging expired transfers. typedef struct tx_transfer_t { udpard_listed_t queue[UDPARD_IFACE_COUNT_MAX]; ///< Listed when ready for transmission. - udpard_tree_t index_transfer_id; ///< ALL transfers by transfer_id, then by seq_no. udpard_tree_t index_deadline; ///< Soonest to expire on the left. Key: deadline + transfer identity - udpard_tree_t index_staged; ///< Soonest to be ready on the left. Key: staged_until + transfer identity - udpard_listed_t agewise; ///< Listed when created; oldest at the tail. + udpard_listed_t agewise; ///< Listed when created; oldest at the tail. /// Mutable transmission state. All other fields, except for the index handles, are immutable. /// We always keep a pointer to the head, plus a cursor that scans the frames during transmission. - /// Both are NULL if the payload is destroyed. 
/// The transmission iface set is indicated by which head[] entries are non-NULL. - /// The head points to the first frame unless it is known that no (further) retransmissions are needed, - /// in which case the old head is deleted and the head points to the next frame to transmit. - tx_frame_t* head[UDPARD_IFACE_COUNT_MAX]; - tx_frame_t* cursor[UDPARD_IFACE_COUNT_MAX]; - uint_fast8_t epoch; ///< Does not overflow due to exponential backoff; e.g. 1us with epoch=48 => 9 years. - udpard_us_t staged_until; + tx_frame_t* head[UDPARD_IFACE_COUNT_MAX]; + tx_frame_t* cursor[UDPARD_IFACE_COUNT_MAX]; /// Constant transfer properties supplied by the client. - uint64_t seq_no; ///< Tie breaker; greater in later transfers; agewise orders by this. - uint64_t transfer_id; - udpard_us_t deadline; - frame_kind_t kind; - udpard_prio_t priority; - bool is_p2p; - udpard_remote_t p2p_remote; ///< Only valid if is_p2p. - - /// Application closure. - udpard_user_context_t user; - void (*feedback)(udpard_tx_t*, udpard_tx_feedback_t); + void* user; + udpard_prio_t priority; + udpard_us_t deadline; + udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX]; } tx_transfer_t; static bool tx_validate_mem_resources(const udpard_tx_mem_resources_t memory) @@ -643,49 +621,19 @@ static void tx_transfer_free_payload(tx_transfer_t* const tr) } } -/// Currently, we use a very simple implementation that ceases delivery attempts after the first acknowledgment -/// is received, similar to the CAN bus. Such mode of reliability is useful in the following scenarios: -/// -/// - With topics with a single subscriber, or sent via P2P transport (responses to published messages). -/// With a single recipient, a single acknowledgement is sufficient to guarantee delivery. -/// -/// - The application only cares about one acknowledgement (anycast), e.g., with modular redundant nodes. 
-/// -/// - The application assumes that if one copy was delivered successfully, then other copies have likely -/// succeeded as well (depends on the required reliability guarantees), similar to the CAN bus. -/// -/// TODO In the future, there are plans to extend this mechanism to track the number of acknowledgements per topic, -/// such that we can retain transfers until a specified number of acknowledgements have been received. A remote -/// node can be considered to have disappeared if it failed to acknowledge a transfer after the maximum number -/// of attempts have been made. This is somewhat similar in principle to the connection-oriented DDS/RTPS approach, -/// where pub/sub associations are established and removed automatically, transparently to the application. -static void tx_transfer_retire(udpard_tx_t* const tx, tx_transfer_t* const tr, const bool success) -{ - // Save the feedback state first before the transfer is destroyed. - const udpard_tx_feedback_t fb = { .user = tr->user, .acknowledgements = success ? 1 : 0 }; - void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t) = tr->feedback; - UDPARD_ASSERT((feedback == NULL) ? ((tr->kind == frame_msg_best) || (tr->kind == frame_ack)) - : (tr->kind == frame_msg_reliable)); - +static void tx_transfer_retire(udpard_tx_t* const tx, tx_transfer_t* const tr) +{ // Remove from all indexes and lists. for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { delist(&tx->queue[i][tr->priority], &tr->queue[i]); } delist(&tx->agewise, &tr->agewise); - (void)cavl2_remove_if(&tx->index_staged, &tr->index_staged); UDPARD_ASSERT(cavl2_is_inserted(tx->index_deadline, &tr->index_deadline)); - UDPARD_ASSERT(cavl2_is_inserted(tx->index_transfer_id, &tr->index_transfer_id)); cavl2_remove(&tx->index_deadline, &tr->index_deadline); - cavl2_remove(&tx->index_transfer_id, &tr->index_transfer_id); // Free the memory. The payload memory may already be empty depending on where we were invoked from. 
tx_transfer_free_payload(tr); mem_free(tx->memory.transfer, sizeof(tx_transfer_t), tr); - - // Finally, when the internal state is updated and consistent, invoke the feedback callback if any. - if (feedback != NULL) { - feedback(tx, fb); - } } /// When the queue is exhausted, finds a transfer to sacrifice using simple heuristics and returns it. @@ -706,52 +654,20 @@ static bool tx_ensure_queue_space(udpard_tx_t* const tx, const size_t total_fram if (tr == NULL) { break; // We may have no transfers anymore but the NIC TX driver could still be holding some frames. } - tx_transfer_retire(tx, tr, false); + tx_transfer_retire(tx, tr); tx->errors_sacrifice++; } return total_frames_needed <= (tx->enqueued_frames_limit - tx->enqueued_frames_count); } -static int32_t tx_cavl_compare_staged(const void* const user, const udpard_tree_t* const node) -{ - const tx_transfer_t* const outer = (const tx_transfer_t*)user; - const tx_transfer_t* const inner = CAVL2_TO_OWNER(node, tx_transfer_t, index_staged); // clang-format off - if (outer->staged_until < inner->staged_until) { return -1; } - if (outer->staged_until > inner->staged_until) { return +1; } - if (outer->seq_no < inner->seq_no) { return -1; } - if (outer->seq_no > inner->seq_no) { return +1; } - return 0; // clang-format on -} - static int32_t tx_cavl_compare_deadline(const void* const user, const udpard_tree_t* const node) { const tx_transfer_t* const outer = (const tx_transfer_t*)user; - const tx_transfer_t* const inner = CAVL2_TO_OWNER(node, tx_transfer_t, index_deadline); // clang-format off - if (outer->deadline < inner->deadline) { return -1; } - if (outer->deadline > inner->deadline) { return +1; } - if (outer->seq_no < inner->seq_no) { return -1; } - if (outer->seq_no > inner->seq_no) { return +1; } - return 0; // clang-format on -} - -/// Shall a transfer-ID collision occur due to PRNG faults, we want to handle it correctly, which is to allow -/// non-unique transfer-IDs in the reliable index such that they are 
co-located, then use lower bound search. -/// Lookups will then disambiguate ad-hoc. -typedef struct tx_key_transfer_id_t -{ - uint64_t transfer_id; - uint64_t seq_no; -} tx_key_transfer_id_t; - -static int32_t tx_cavl_compare_transfer_id(const void* const user, const udpard_tree_t* const node) -{ - const tx_key_transfer_id_t* const key = (const tx_key_transfer_id_t*)user; - const tx_transfer_t* const tr = CAVL2_TO_OWNER(node, tx_transfer_t, index_transfer_id); // clang-format off - if (key->transfer_id < tr->transfer_id) { return -1; } - if (key->transfer_id > tr->transfer_id) { return +1; } - if (key->seq_no < tr->seq_no) { return -1; } - if (key->seq_no > tr->seq_no) { return +1; } - return 0; // clang-format on + const tx_transfer_t* const inner = CAVL2_TO_OWNER(node, tx_transfer_t, index_deadline); + if (outer->deadline != inner->deadline) { + return (outer->deadline < inner->deadline) ? -1 : +1; + } + return (((uintptr_t)outer) < ((uintptr_t)inner)) ? -1 : +1; // deterministic tiebreak, not essential } /// True iff listed in at least one interface queue. @@ -773,12 +689,11 @@ static tx_frame_t* tx_spool(udpard_tx_t* const tx, const udpard_bytes_scattered_t payload) { UDPARD_ASSERT(mtu > 0); - uint32_t prefix_crc = CRC_INITIAL; - tx_frame_t* head = NULL; - tx_frame_t* tail = NULL; - size_t frame_index = 0U; - size_t offset = 0U; - bytes_scattered_reader_t reader = { .cursor = &payload, .position = 0U }; + uint32_t prefix_crc = CRC_INITIAL; + tx_frame_t* head = NULL; + tx_frame_t* tail = NULL; + size_t offset = 0U; + bytes_scattered_reader_t reader = { .cursor = &payload, .position = 0U }; do { // Compute the size of the next frame, allocate it and link it up in the chain. 
const size_t progress = smaller(meta.transfer_payload_size - offset, mtu); @@ -803,11 +718,10 @@ static tx_frame_t* tx_spool(udpard_tx_t* const tx, bytes_scattered_read(&reader, progress, payload_ptr); prefix_crc = crc_add(prefix_crc, progress, payload_ptr); const byte_t* const end_of_header = - header_serialize(tail->data, meta, (uint32_t)frame_index, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR); + header_serialize(tail->data, meta, (uint32_t)offset, prefix_crc ^ CRC_OUTPUT_XOR); UDPARD_ASSERT(end_of_header == payload_ptr); (void)end_of_header; // Advance the state. - ++frame_index; offset += progress; UDPARD_ASSERT(offset <= meta.transfer_payload_size); } while (offset < meta.transfer_payload_size); @@ -815,44 +729,12 @@ static tx_frame_t* tx_spool(udpard_tx_t* const tx, return head; } -/// Derives the ack timeout for an outgoing transfer. -static udpard_us_t tx_ack_timeout(const udpard_us_t baseline, const udpard_prio_t prio, const size_t attempts) -{ - UDPARD_ASSERT(baseline > 0); - UDPARD_ASSERT(prio < UDPARD_PRIORITY_COUNT); - return baseline * (1LL << smaller((size_t)prio + attempts, 62)); // NOLINT(*-signed-bitwise) -} - -/// Updates the next attempt time and inserts the transfer into the staged index, unless the next scheduled -/// transmission time is too close to the deadline, in which case no further attempts will be made. -/// When invoking for the first time, staged_until must be set to the time of the first attempt (usually now). -/// Once can deduce whether further attempts are planned by checking if the transfer is in the staged index. -/// -/// The idea is that retransmitting the transfer too close to the deadline is pointless, because -/// the ack may arrive just after the deadline and the transfer would be considered failed anyway. -/// The solution is to add a small margin before the deadline. The margin is derived using a simple heuristic, -/// which is subject to review and improvement later on (this is not an API-visible trait). 
-static void tx_stage_if(udpard_tx_t* const tx, tx_transfer_t* const tr) -{ - UDPARD_ASSERT(!cavl2_is_inserted(tx->index_staged, &tr->index_staged)); - UDPARD_ASSERT(tr->kind == frame_msg_reliable); - const uint_fast8_t epoch = tr->epoch++; - const udpard_us_t timeout = tx_ack_timeout(tx->ack_baseline_timeout, tr->priority, epoch); - tr->staged_until += timeout; - if ((tr->deadline - timeout) >= tr->staged_until) { - const udpard_tree_t* const tree_staged = - cavl2_find_or_insert(&tx->index_staged, tr, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); - UDPARD_ASSERT(tree_staged == &tr->index_staged); - (void)tree_staged; - } -} - static void tx_purge_expired_transfers(udpard_tx_t* const self, const udpard_us_t now) { while (true) { // we can use next_greater instead of doing min search every time tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_deadline), tx_transfer_t, index_deadline); if ((tr != NULL) && (now > tr->deadline)) { - tx_transfer_retire(self, tr, false); + tx_transfer_retire(self, tr); self->errors_expiration++; } else { break; @@ -860,27 +742,6 @@ static void tx_purge_expired_transfers(udpard_tx_t* const self, const udpard_us_ } } -static void tx_promote_staged_transfers(udpard_tx_t* const self, const udpard_us_t now) -{ - while (true) { // we can use next_greater instead of doing min search every time - tx_transfer_t* const tr = CAVL2_TO_OWNER(cavl2_min(self->index_staged), tx_transfer_t, index_staged); - if ((tr != NULL) && (now >= tr->staged_until)) { - // Reinsert into the staged index at the new position, when the next attempt is due (if any). - cavl2_remove(&self->index_staged, &tr->index_staged); - tx_stage_if(self, tr); - // Enqueue for transmission unless it's been there since the last attempt (stalled interface?) 
- for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - if ((tr->head[i] != NULL) && !is_listed(&self->queue[i][tr->priority], &tr->queue[i])) { - UDPARD_ASSERT(tr->cursor[i] == tr->head[i]); // must have been rewound after last attempt - enlist_head(&self->queue[i][tr->priority], &tr->queue[i]); - } - } - } else { - break; - } - } -} - /// A transfer can use the same fragments between two interfaces if /// (both have the same MTU OR the transfer fits in both MTU) AND both use the same allocator. /// Either they will share the same spool, or there is only a single frame so the MTU difference does not matter. @@ -924,24 +785,20 @@ static bool tx_push(udpard_tx_t* const tx, const udpard_us_t now, const udpard_us_t deadline, const meta_t meta, - const uint16_t iface_bitmap, - const udpard_remote_t* const p2p_remote, // only for P2P transfers + const udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX], const udpard_bytes_scattered_t payload, - void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), - const udpard_user_context_t user) + void* const user) { UDPARD_ASSERT(now <= deadline); UDPARD_ASSERT(tx != NULL); + + const uint16_t iface_bitmap = valid_ep_bitmap(endpoints); UDPARD_ASSERT((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0); UDPARD_ASSERT((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) == iface_bitmap); // Purge expired transfers before accepting a new one to make room in the queue. tx_purge_expired_transfers(tx, now); - // Promote staged transfers that are now eligible for retransmission to ensure fairness: - // if they have the same priority as the new transfer, they should get a chance to go first. - tx_promote_staged_transfers(tx, now); - // Construct the empty transfer object, without the frames for now. The frame spools will be constructed next. 
tx_transfer_t* const tr = mem_alloc(tx->memory.transfer, sizeof(tx_transfer_t)); if (tr == NULL) { @@ -949,19 +806,12 @@ static bool tx_push(udpard_tx_t* const tx, return false; } mem_zero(sizeof(*tr), tr); - tr->epoch = 0; - tr->staged_until = now; - tr->seq_no = tx->next_seq_no++; - tr->transfer_id = meta.transfer_id; - tr->deadline = deadline; - tr->kind = meta.kind; - tr->priority = meta.priority; - tr->user = user; - tr->feedback = feedback; - tr->is_p2p = (p2p_remote != NULL); - tr->p2p_remote = (p2p_remote != NULL) ? *p2p_remote : (udpard_remote_t){ 0 }; + tr->user = user; + tr->priority = meta.priority; + tr->deadline = deadline; for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { tr->head[i] = tr->cursor[i] = NULL; + tr->endpoints[i] = endpoints[i]; } // Ensure the queue has enough space. @@ -1028,126 +878,18 @@ static bool tx_push(udpard_tx_t* const tx, } } - // Add to the staged index so that it is repeatedly re-enqueued later until acknowledged or expired. - if (meta.kind == frame_msg_reliable) { - tx_stage_if(tx, tr); - } - // Add to the deadline index for expiration management. const udpard_tree_t* const tree_deadline = cavl2_find_or_insert( &tx->index_deadline, tr, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); UDPARD_ASSERT(tree_deadline == &tr->index_deadline); (void)tree_deadline; - // Add to the transfer index for incoming ack management and cancellation. - const tx_key_transfer_id_t key_id = { .transfer_id = tr->transfer_id, .seq_no = tr->seq_no }; - const udpard_tree_t* const tree_id = cavl2_find_or_insert( - &tx->index_transfer_id, &key_id, tx_cavl_compare_transfer_id, &tr->index_transfer_id, cavl2_trivial_factory); - UDPARD_ASSERT(tree_id == &tr->index_transfer_id); - (void)tree_id; - // Add to the agewise list for sacrifice management on queue exhaustion. enlist_head(&tx->agewise, &tr->agewise); return true; } -/// Handle an ACK received from a remote node. 
-static void tx_receive_ack(udpard_rx_t* const rx, const uint64_t sender_uid, const uint64_t transfer_id) -{ - if (rx->tx != NULL) { - // A transfer-ID collision is astronomically unlikely: given 10k simultaneously pending reliable transfers, - // which is outside typical usage, the probability of a collision is about 1 in 500 billion. However, we - // take into account that the PRNG used to seed the transfer-ID may be imperfect, so we add explicit collision - // handling. In all practical scenarios at most a single iteration will be needed. - const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; - tx_transfer_t* tr = - CAVL2_TO_OWNER(cavl2_lower_bound(rx->tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id), - tx_transfer_t, - index_transfer_id); - while ((tr != NULL) && (tr->transfer_id == transfer_id)) { - // Outgoing reliable P2P transfers only accept acks from the intended recipient. - // Non-P2P accept acks from any sender. - const bool destination_match = !tr->is_p2p || (tr->p2p_remote.uid == sender_uid); - if ((tr->kind == frame_msg_reliable) && destination_match) { - tx_transfer_retire(rx->tx, tr, true); - break; - } - tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id); - } - } -} - -/// Generate an ack transfer for the specified remote transfer. -/// Do nothing if an ack for the same transfer is already enqueued with equal or better endpoint coverage. -static void tx_send_ack(udpard_rx_t* const rx, - const udpard_us_t now, - const udpard_prio_t priority, - const uint64_t transfer_id, - const udpard_remote_t remote) -{ - udpard_tx_t* const tx = rx->tx; - if (tx != NULL) { - // Check if an ack for this transfer is already enqueued. - // A transfer-ID collision is astronomically unlikely: given 10k simultaneously pending reliable transfers, - // which is outside typical usage, the probability of a collision is about 1 in 500 billion. 
However, we - // take into account that the PRNG used to seed the transfer-ID may be imperfect, so we add explicit collision - // handling. In all practical scenarios at most a single iteration will be needed. - const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; - tx_transfer_t* prior = - CAVL2_TO_OWNER(cavl2_lower_bound(rx->tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id), - tx_transfer_t, - index_transfer_id); - // Scan all matches (there will be at most 1 barring extremely unlikely hash collisions) to find the ack. - while (prior != NULL) { - if (prior->transfer_id != transfer_id) { - prior = NULL; - break; // scanned all contenders, no match - } - if ((prior->kind == frame_ack) && (prior->p2p_remote.uid == remote.uid)) { - break; // match found - } - prior = CAVL2_TO_OWNER(cavl2_next_greater(&prior->index_transfer_id), tx_transfer_t, index_transfer_id); - } - - // Determine if the new ack has better return path discovery than the prior one (if any). - const uint16_t prior_ep_bitmap = (prior != NULL) ? valid_ep_bitmap(prior->p2p_remote.endpoints) : 0U; - const uint16_t new_ep_bitmap = valid_ep_bitmap(remote.endpoints); - const bool new_better = (new_ep_bitmap & (uint16_t)(~prior_ep_bitmap)) != 0U; - if (!new_better) { - return; // Can we get an ack? We have ack at home! - } - if (prior != NULL) { // avoid redundant acks for the same transfer -- replace with better one - UDPARD_ASSERT(prior->feedback == NULL); - tx_transfer_retire(tx, prior, false); // this will free up a queue slot and some memory - } - // Even if the new, better ack fails to enqueue for some reason, it's no big deal -- we will send the next one. - // The only reason it might fail is an OOM but we just freed a slot so it should be fine. - - // Enqueue the transfer. 
- const meta_t meta = { .priority = priority, - .kind = frame_ack, - .transfer_payload_size = 0, - .transfer_id = transfer_id, - .sender_uid = tx->local_uid }; - const uint32_t count = tx_push(tx, - now, - now + ACK_TX_DEADLINE, - meta, - new_ep_bitmap, - &remote, - (udpard_bytes_scattered_t){ .bytes = { .size = 0, .data = "" }, .next = NULL }, - NULL, - UDPARD_USER_CONTEXT_NULL); - UDPARD_ASSERT(count <= 1); - if (count != 1) { // ack is always a single-frame transfer - rx->errors_ack_tx++; - } - } else { - rx->errors_ack_tx++; - } -} - bool udpard_tx_new(udpard_tx_t* const self, const uint64_t local_uid, const uint64_t p2p_transfer_id_seed, @@ -1156,20 +898,16 @@ bool udpard_tx_new(udpard_tx_t* const self, const udpard_tx_vtable_t* const vtable) { const bool ok = (NULL != self) && (local_uid != 0) && tx_validate_mem_resources(memory) && (vtable != NULL) && - (vtable->eject_subject != NULL) && (vtable->eject_p2p != NULL); + (vtable->eject != NULL); if (ok) { mem_zero(sizeof(*self), self); self->vtable = vtable; self->local_uid = local_uid; - self->p2p_transfer_id = p2p_transfer_id_seed ^ local_uid; // extra entropy - self->ack_baseline_timeout = UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us; + self->p2p_transfer_id = p2p_transfer_id_seed + local_uid; // extra entropy self->enqueued_frames_limit = enqueued_frames_limit; self->enqueued_frames_count = 0; - self->next_seq_no = 0; self->memory = memory; - self->index_transfer_id = NULL; self->index_deadline = NULL; - self->index_staged = NULL; self->agewise = (udpard_list_t){ NULL, NULL }; self->user = NULL; for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { @@ -1189,22 +927,27 @@ bool udpard_tx_push(udpard_tx_t* const self, const uint16_t iface_bitmap, const udpard_prio_t priority, const uint64_t transfer_id, + const udpard_udpip_ep_t endpoint, const udpard_bytes_scattered_t payload, - void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. 
- const udpard_user_context_t user) + void* const user) { bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && ((iface_bitmap & UDPARD_IFACE_BITMAP_ALL) != 0) && (priority < UDPARD_PRIORITY_COUNT) && - ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)); + udpard_is_valid_endpoint(endpoint) && ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)); if (ok) { const meta_t meta = { .priority = priority, - .kind = (feedback != NULL) ? frame_msg_reliable : frame_msg_best, .transfer_payload_size = (uint32_t)bytes_scattered_size(payload), .transfer_id = transfer_id, .sender_uid = self->local_uid, }; - ok = tx_push(self, now, deadline, meta, iface_bitmap & UDPARD_IFACE_BITMAP_ALL, NULL, payload, feedback, user); + udpard_udpip_ep_t eps[UDPARD_IFACE_COUNT_MAX] = { 0 }; + for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { + if ((iface_bitmap & (1U << i)) != 0) { + eps[i] = endpoint; + } + } + ok = tx_push(self, now, deadline, meta, eps, payload, user); } return ok; } @@ -1213,27 +956,21 @@ bool udpard_tx_push_p2p(udpard_tx_t* const self, const udpard_us_t now, const udpard_us_t deadline, const udpard_prio_t priority, - const udpard_remote_t remote, + const udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX], const udpard_bytes_scattered_t payload, - void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. 
- const udpard_user_context_t user, - uint64_t* const out_transfer_id) + void* const user) { - const uint16_t iface_bitmap = valid_ep_bitmap(remote.endpoints); - bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && (iface_bitmap != 0) && - (priority < UDPARD_PRIORITY_COUNT) && ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)); + bool ok = (self != NULL) && (deadline >= now) && (now >= 0) && (self->local_uid != 0) && + (valid_ep_bitmap(endpoints) != 0) && (priority < UDPARD_PRIORITY_COUNT) && + ((payload.bytes.data != NULL) || (payload.bytes.size == 0U)); if (ok) { const meta_t meta = { .priority = priority, - .kind = (feedback != NULL) ? frame_msg_reliable : frame_msg_best, .transfer_payload_size = (uint32_t)bytes_scattered_size(payload), - .transfer_id = self->p2p_transfer_id++, // Shared for all remotes, hence no ack ambiguity. + .transfer_id = self->p2p_transfer_id++, .sender_uid = self->local_uid, }; - if (out_transfer_id != NULL) { - *out_transfer_id = meta.transfer_id; - } - ok = tx_push(self, now, deadline, meta, iface_bitmap, &remote, payload, feedback, user); + ok = tx_push(self, now, deadline, meta, endpoints, payload, user); } return ok; } @@ -1259,42 +996,38 @@ static void tx_eject_pending_frames(udpard_tx_t* const self, const udpard_us_t n UDPARD_ASSERT(tr->priority < UDPARD_PRIORITY_COUNT); // Eject the frame. - const tx_frame_t* const frame = tr->cursor[ifindex]; - tx_frame_t* const frame_next = frame->next; - const bool last_attempt = !cavl2_is_inserted(self->index_staged, &tr->index_staged); + const tx_frame_t* const frame = tr->cursor[ifindex]; + tx_frame_t* const frame_next = frame->next; const bool last_frame = frame_next == NULL; // if not last attempt we will have to rewind to head. 
{ udpard_tx_ejection_t ejection = { .now = now, .deadline = tr->deadline, + .destination = tr->endpoints[ifindex], .iface_index = ifindex, .dscp = self->dscp_value_per_priority[tr->priority], .datagram = tx_frame_view(frame), .user = tr->user }; // - const bool ejected = tr->is_p2p - ? self->vtable->eject_p2p(self, &ejection, tr->p2p_remote.endpoints[ifindex]) - : self->vtable->eject_subject(self, &ejection); + const bool ejected = self->vtable->eject(self, &ejection); if (!ejected) { // The easy case -- no progress was made at this time; break; // don't change anything, just try again later as-is } } // Frame ejected successfully. Update the transfer state to get ready for the next frame. - if (last_attempt) { // no need to keep frames that we will no longer use; free early to reduce pressure - UDPARD_ASSERT(tr->head[ifindex] == tr->cursor[ifindex]); - tr->head[ifindex] = frame_next; - udpard_tx_refcount_dec(tx_frame_view(frame)); - } + // No need to keep frames that we will no longer use; free early to reduce memory pressure. + UDPARD_ASSERT(tr->head[ifindex] == tr->cursor[ifindex]); + tr->head[ifindex] = frame_next; + udpard_tx_refcount_dec(tx_frame_view(frame)); tr->cursor[ifindex] = frame_next; // Finalize the transmission if this was the last frame of the transfer. 
if (last_frame) { tr->cursor[ifindex] = tr->head[ifindex]; delist(&self->queue[ifindex][tr->priority], &tr->queue[ifindex]); // no longer pending for transmission - UDPARD_ASSERT(!last_attempt || (tr->head[ifindex] == NULL)); // this iface is done with the payload - if (last_attempt && (tr->kind != frame_msg_reliable) && !tx_is_pending(self, tr)) { - UDPARD_ASSERT(tr->feedback == NULL); // non-reliable transfers have no feedback callback - tx_transfer_retire(self, tr, true); // remove early once all ifaces are done + UDPARD_ASSERT(tr->head[ifindex] == NULL); // this iface is done with the payload + if (!tx_is_pending(self, tr)) { + tx_transfer_retire(self, tr); } } } @@ -1302,9 +1035,8 @@ static void tx_eject_pending_frames(udpard_tx_t* const self, const udpard_us_t n void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16_t iface_bitmap) { - if ((self != NULL) && (now >= 0)) { // This is the main scheduler state machine update tick. - tx_purge_expired_transfers(self, now); // This may free up some memory and some queue slots. - tx_promote_staged_transfers(self, now); // This may add some new transfers to the queue. + if ((self != NULL) && (now >= 0)) { // This is the main scheduler state machine update tick. + tx_purge_expired_transfers(self, now); // This may free up some memory and some queue slots. for (uint_fast8_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { if ((iface_bitmap & (1U << i)) != 0U) { tx_eject_pending_frames(self, now, i); @@ -1313,32 +1045,6 @@ void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16 } } -bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t transfer_id, const bool reliable) -{ - bool cancelled = false; - if (self != NULL) { - // A transfer-ID collision is astronomically unlikely: given 10k simultaneously pending reliable transfers, - // which is outside typical usage, the probability of a collision is about 1 in 500 billion. 
However, we - // take into account that the PRNG used to seed the transfer-ID may be imperfect, so we add explicit collision - // handling. In all practical scenarios at most a single iteration will be needed. - const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; - tx_transfer_t* tr = - CAVL2_TO_OWNER(cavl2_lower_bound(self->index_transfer_id, &key, &tx_cavl_compare_transfer_id), - tx_transfer_t, - index_transfer_id); - while ((tr != NULL) && (tr->transfer_id == transfer_id)) { - tx_transfer_t* const next = - CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id); - if (tr->kind == (reliable ? frame_msg_reliable : frame_msg_best)) { // Cancel all matching (normally <=1). - tx_transfer_retire(self, tr, false); - cancelled = true; - } - tr = next; - } - } - return cancelled; -} - uint16_t udpard_tx_pending_ifaces(const udpard_tx_t* const self) { uint16_t bitmap = 0; @@ -1385,7 +1091,7 @@ void udpard_tx_free(udpard_tx_t* const self) { if (self != NULL) { while (self->agewise.tail != NULL) { - tx_transfer_retire(self, LIST_TAIL(self->agewise, tx_transfer_t, agewise), false); + tx_transfer_retire(self, LIST_TAIL(self->agewise, tx_transfer_t, agewise)); } } } @@ -1414,13 +1120,7 @@ void udpard_tx_free(udpard_tx_t* const self) // the overall reliability. The message reception machine always operates at the throughput and latency of the // best-performing interface at any given time with seamless failover. // -// Each session keeps track of recently received/seen transfers, which is used for ack retransmission -// if the remote end attempts to retransmit a transfer that was already fully received, and is also used for duplicate -// rejection. -// -// Acks are transmitted immediately upon successful reception of a transfer. 
If the remote end retransmits the transfer -// (e.g., if the first ack was lost or due to a spurious duplication), repeat acks are only retransmitted -// for the first frame of the transfer because we don't want to flood the network with duplicate ACKs for every +// Each session keeps track of recently received/seen transfers, which is used for duplicate rejection. // // The redundant interfaces may have distinct MTUs, so the fragment offsets and sizes may vary significantly. // The reassembler decides if a newly arrived fragment is needed based on gap/overlap detection in the fragment tree. @@ -1430,10 +1130,10 @@ void udpard_tx_free(udpard_tx_t* const self) // The reassembler prefers to keep fewer large fragments over many small fragments to reduce the overhead of // managing the fragment tree and the amount of auxiliary memory required for it. // -// The code here does a lot of linear lookups. This is intentional and is not expected to bring any performance issues -// because all loops are tightly bounded with a compile-time known maximum number of iterations that is very small -// in practice (e.g., number of slots per session, number of priority levels, number of interfaces). For small -// number of iterations this is much faster than more sophisticated lookup structures. +// The code here does some linear lookups. This is intentional and is not expected to bring any performance issues +// because all loops are tightly bounded with a compile-time known maximum number of iterations that is very small in +// practice (e.g., number of slots per session, number of priority levels, number of interfaces) and is unrollable. +// For small number of iterations this is much faster than more sophisticated lookup structures. /// All but the transfer metadata: fields that change from frame to frame within the same transfer. 
typedef struct @@ -1771,7 +1471,7 @@ typedef struct rx_session_t udpard_listed_t list_by_animation; udpard_us_t last_animated_ts; - /// Most recently received transfer-IDs, used for duplicate detection and ACK retransmission. + /// Most recently received transfer-IDs. /// The index is always in [0,RX_TRANSFER_HISTORY_COUNT), pointing to the last added (newest) entry. uint64_t history[RX_TRANSFER_HISTORY_COUNT]; uint_fast8_t history_current; @@ -1871,8 +1571,8 @@ static void rx_session_eject(rx_session_t* const self, udpard_rx_t* const rx, rx const udpard_rx_transfer_t transfer = { .timestamp = slot->ts_min, .priority = slot->priority, - .transfer_id = slot->transfer_id, .remote = self->remote, + .transfer_id = slot->transfer_id, .payload_size_stored = slot->covered_prefix, .payload_size_wire = slot->total_size, .payload = (udpard_fragment_t*)slot->fragments, @@ -1964,9 +1664,6 @@ static void rx_session_update(rx_session_t* const self, const rx_slot_update_result_t upd_res = rx_slot_update( slot, ts, self->port->memory.fragment, payload_deleter, frame, self->port->extent, &rx->errors_oom); if (upd_res == rx_slot_complete) { - if (frame->meta.kind == frame_msg_reliable) { - tx_send_ack(rx, ts, slot->priority, slot->transfer_id, self->remote); - } rx_session_eject(self, rx, slot_ref); // will destroy the slot. } else if (upd_res == rx_slot_failure) { rx->errors_transfer_malformed++; @@ -1975,11 +1672,8 @@ static void rx_session_update(rx_session_t* const self, UDPARD_ASSERT(upd_res == rx_slot_incomplete); } } - } else { // retransmit ACK if needed - if ((frame->meta.kind == frame_msg_reliable) && (frame->base.offset == 0U)) { - UDPARD_ASSERT(rx_session_is_transfer_ejected(self, frame->meta.transfer_id)); - tx_send_ack(rx, ts, frame->meta.priority, frame->meta.transfer_id, self->remote); - } + } else { + // The transfer has been seen before, so just release this duplicate payload. 
mem_free_payload(payload_deleter, frame->base.origin); } } @@ -2035,8 +1729,8 @@ static void rx_port_accept_stateless(udpard_rx_t* const rx, const udpard_rx_transfer_t transfer = { .timestamp = timestamp, .priority = frame->meta.priority, - .transfer_id = frame->meta.transfer_id, .remote = remote, + .transfer_id = frame->meta.transfer_id, .payload_size_stored = frame->base.payload.size, .payload_size_wire = frame->meta.transfer_payload_size, .payload = frag, @@ -2062,7 +1756,7 @@ static bool rx_validate_mem_resources(const udpard_rx_mem_resources_t memory) return mem_validate(memory.session) && mem_validate(memory.slot) && mem_validate(memory.fragment); } -void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx) +void udpard_rx_new(udpard_rx_t* const self) { UDPARD_ASSERT(self != NULL); mem_zero(sizeof(*self), self); @@ -2070,7 +1764,6 @@ void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx) self->errors_oom = 0; self->errors_frame_malformed = 0; self->errors_transfer_malformed = 0; - self->tx = tx; self->user = NULL; } @@ -2148,25 +1841,14 @@ bool udpard_rx_port_push(udpard_rx_t* const rx, (datagram_payload.data != NULL) && (iface_index < UDPARD_IFACE_COUNT_MAX) && (payload_deleter.vtable != NULL) && (payload_deleter.vtable->free != NULL); if (ok) { - // Parse and validate the frame. - rx_frame_t frame = { 0 }; - uint32_t frame_index = 0; - uint32_t offset_32 = 0; - bool frame_valid = header_deserialize( - datagram_payload, &frame.meta, &frame_index, &offset_32, &frame.base.crc, &frame.base.payload); - frame_valid = frame_valid && ((frame.meta.kind != frame_ack) || port->is_p2p); // ACKs only valid in P2P ports. - (void)frame_index; // currently not used by this reassembler implementation. 
+ rx_frame_t frame = { 0 }; + uint32_t offset_32 = 0; + const bool frame_valid = + header_deserialize(datagram_payload, &frame.meta, &offset_32, &frame.base.crc, &frame.base.payload); frame.base.offset = (size_t)offset_32; frame.base.origin = datagram_payload; // Take ownership of the payload. - - // Process the parsed frame. if (frame_valid) { - if (frame.meta.kind != frame_ack) { - port->vtable_private->accept(rx, port, timestamp, source_ep, &frame, payload_deleter, iface_index); - } else { - tx_receive_ack(rx, frame.meta.sender_uid, frame.meta.transfer_id); - mem_free_payload(payload_deleter, frame.base.origin); - } + port->vtable_private->accept(rx, port, timestamp, source_ep, &frame, payload_deleter, iface_index); } else { mem_free_payload(payload_deleter, frame.base.origin); rx->errors_frame_malformed++; @@ -2180,7 +1862,7 @@ bool udpard_rx_port_push(udpard_rx_t* const rx, // On a 32-bit platform, the block overhead of o1heap is 8 bytes. // Rounding up to the power of 2 results in possible allocation sizes of 8, 24, 56, 120, 248, 504, 1016, ... bytes. -static_assert((sizeof(void*) > 4) || (sizeof(tx_transfer_t) <= (256 - 8)), "tx_transfer_t is too large"); +static_assert((sizeof(void*) > 4) || (sizeof(tx_transfer_t) <= (128 - 8)), "tx_transfer_t is too large"); static_assert((sizeof(void*) > 4) || (sizeof(rx_session_t) <= (512 - 8)), "rx_session_t is too large"); static_assert((sizeof(void*) > 4) || (sizeof(rx_slot_t) <= (64 - 8)), "rx_slot_t is too large"); static_assert((sizeof(void*) > 4) || (sizeof(udpard_fragment_t) <= (64 - 8)), "udpard_fragment_t is too large"); diff --git a/libudpard/udpard.h b/libudpard/udpard.h index cdf6033..6a6324a 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -68,13 +68,13 @@ extern "C" /// RFC 791 states that hosts must be prepared to accept datagrams of up to 576 octets and it is expected that this /// library will receive non IP-fragmented datagrams thus the minimum MTU should be larger than 576. 
/// That being said, the MTU here is set to a larger value that is derived as: -/// 1500B Ethernet MTU (RFC 894) - 60B IPv4 max header - 8B UDP Header - 48B Cyphal header +/// 1500B Ethernet MTU (RFC 894) - 60B IPv4 max header - 8B UDP Header - 32B Cyphal header /// This is also the default maximum size of a single-frame transfer. /// The application can change this value at runtime as needed. -#define UDPARD_MTU_DEFAULT 1384U +#define UDPARD_MTU_DEFAULT 1400U /// MTU less than this should not be used. This value may be increased in a future version of the library. -#define UDPARD_MTU_MIN 460U +#define UDPARD_MTU_MIN 476U /// The library supports at most this many local redundant network interfaces. #define UDPARD_IFACE_COUNT_MAX 3U @@ -84,17 +84,16 @@ extern "C" /// Timestamps supplied by the application must be non-negative monotonically increasing counts of microseconds. typedef int64_t udpard_us_t; -/// See udpard_tx_t::ack_baseline_timeout. -/// This default value might be a good starting point for many applications running over a local network. -/// The baseline timeout should be greater than the expected round-trip time (RTT) between the most distant -/// nodes in the network for a message at the highest priority level. -#define UDPARD_TX_ACK_BASELINE_TIMEOUT_DEFAULT_us 16000LL - /// The subject-ID only affects the formation of the multicast UDP/IP endpoint address. /// In IPv4 networks, it is limited to 23 bits only due to the limited MAC multicast address space. /// In IPv6 networks, 32 bits are supported. #define UDPARD_IPv4_SUBJECT_ID_MAX 0x7FFFFFUL +/// Only 48 least significant bits of transfer-IDs are used. This provides sufficient random draw uniqueness +/// probability. 
+#define UDPARD_TRANSFER_ID_BITS 48U +#define UDPARD_TRANSFER_ID_MASK ((1ULL << UDPARD_TRANSFER_ID_BITS) - 1ULL) + typedef enum udpard_prio_t { udpard_prio_exceptional = 0, @@ -145,25 +144,6 @@ typedef struct udpard_bytes_mut_t void* data; } udpard_bytes_mut_t; -/// The size can be changed arbitrarily. This value is a compromise between copy size and footprint and utility. -#ifndef UDPARD_USER_CONTEXT_PTR_COUNT -#define UDPARD_USER_CONTEXT_PTR_COUNT 4 -#endif - -/// The library carries the user-provided context from inputs to outputs without interpreting it, -/// allowing the application to associate its own data with various entities inside the library. -typedef union udpard_user_context_t -{ - void* ptr[UDPARD_USER_CONTEXT_PTR_COUNT]; - unsigned char bytes[sizeof(void*) * UDPARD_USER_CONTEXT_PTR_COUNT]; -} udpard_user_context_t; -#ifdef __cplusplus -#define UDPARD_USER_CONTEXT_NULL \ - udpard_user_context_t {} -#else -#define UDPARD_USER_CONTEXT_NULL ((udpard_user_context_t){ .ptr = { NULL } }) -#endif - /// Zeros if invalid/unset/unavailable. typedef struct udpard_udpip_ep_t { @@ -190,8 +170,7 @@ typedef struct udpard_remote_t bool udpard_is_valid_endpoint(const udpard_udpip_ep_t ep); /// Returns the destination multicast UDP/IP endpoint for the given subject-ID. -/// The application should use this function when setting up subscription sockets or sending datagrams in -/// udpard_tx_vtable_t::eject_subject(). +/// The application should use this function when setting up subscription sockets or sending datagrams. /// If the subject-ID exceeds UDPARD_IPv4_SUBJECT_ID_MAX, the excessive bits are masked out. /// For P2P use the unicast node address directly instead, as provided by the RX pipeline per received transfer. udpard_udpip_ep_t udpard_make_subject_endpoint(const uint32_t subject_id); @@ -302,20 +281,6 @@ size_t udpard_fragment_gather(const udpard_fragment_t** cursor, /// | /// +---> ... 
/// -/// The RX pipeline is linked with the TX pipeline for reliable message management: the RX pipeline notifies -/// the TX when acknowledgments are received, and also enqueues outgoing acknowledgments to confirm received messages. -/// Thus the transmission pipeline is inherently remote-controlled by other nodes and one needs to keep in mind -/// that new frames may appear in the TX pipeline even while the application is idle. -/// -/// The reliable delivery mechanism informs the application about the number of remote subscribers that confirmed the -/// reception of each reliable message. The library uses heuristics to determine the number of attempts needed to -/// deliver the message, but it is guaranteed to cease attempts by the specified deadline. -/// Rudimentary congestion control is implemented by exponential backoff of retransmission intervals. -/// The reliability is chosen by the publisher on a per-message basis; as such, the same topic may carry both -/// reliable and unreliable messages depending on who is publishing at any given time. -/// -/// Reliable messages published over high-fanout topics will generate a large amount of feedback acknowledgments, -/// which must be kept in mind when designing the network. typedef struct udpard_tx_t udpard_tx_t; typedef struct udpard_tx_mem_resources_t @@ -334,16 +299,6 @@ typedef struct udpard_tx_mem_resources_t udpard_mem_t payload[UDPARD_IFACE_COUNT_MAX]; } udpard_tx_mem_resources_t; -/// Outcome notification for a reliable transfer previously scheduled for transmission. -typedef struct udpard_tx_feedback_t -{ - udpard_user_context_t user; ///< Same value that was passed to udpard_tx_push(). - - /// The number of remote nodes that acknowledged the reception of the transfer. - /// For P2P transfers, this value is either 0 (failure) or 1 (success). - uint16_t acknowledgements; -} udpard_tx_feedback_t; - /// Request to transmit a UDP datagram over the specified interface. 
/// Which interface indexes are available is determined by the user when pushing the transfer. /// If Berkeley sockets or similar API is used, the application should use a dedicated socket per redundant interface. @@ -357,16 +312,16 @@ typedef struct udpard_tx_ejection_t /// The library guarantees that now >= deadline at the time of ejection -- expired frames are purged beforehand. udpard_us_t deadline; - uint_fast8_t iface_index; ///< The interface index on which the datagram is to be transmitted. - uint_fast8_t dscp; ///< Set the DSCP field of the outgoing UDP packet to this. + udpard_udpip_ep_t destination; + uint_fast8_t iface_index; ///< The interface index on which the datagram is to be transmitted. + uint_fast8_t dscp; ///< Set the DSCP field of the outgoing UDP packet to this. /// If the datagram pointer is retained by the application, udpard_tx_refcount_inc() must be invoked on it /// to prevent it from being garbage collected. When no longer needed (e.g, upon transmission), /// udpard_tx_refcount_dec() must be invoked to release the reference. udpard_bytes_t datagram; - /// This is the same value that was passed to udpard_tx_push(). - udpard_user_context_t user; + void* user; ///< Same value that was passed to udpard_tx_push(). } udpard_tx_ejection_t; /// Virtual function table for the TX pipeline, to be provided by the application. @@ -375,14 +330,8 @@ typedef struct udpard_tx_vtable_t /// Invoked from udpard_tx_poll() to push outgoing UDP datagrams into the socket/NIC driver. /// It is GUARANTEED that ONLY udpard_tx_poll() can invoke this function; in particular, pushing new transfers /// will not trigger ejection callbacks. - /// The callback must not mutate the TX pipeline (no udpard_tx_push/cancel/free). - /// - /// The destination endpoint is provided only for P2P transfers; for multicast transfers, the application - /// must compute the endpoint using udpard_make_subject_endpoint() based on the subject-ID. 
This is because - /// the subject-ID may be changed by the consensus algorithm at any time if a collision/divergence is detected. - /// The application is expected to rely on the user context to access the topic context for subject-ID derivation. - bool (*eject_subject)(udpard_tx_t*, udpard_tx_ejection_t*); - bool (*eject_p2p)(udpard_tx_t*, udpard_tx_ejection_t*, udpard_udpip_ep_t destination); + /// The callback must not mutate the TX pipeline (no udpard_tx_push/free). + bool (*eject)(udpard_tx_t*, udpard_tx_ejection_t*); } udpard_tx_vtable_t; /// The application must create a single instance of this struct to manage the TX pipeline. @@ -407,15 +356,6 @@ struct udpard_tx_t /// able to avoid frame duplication and instead reuse each frame across all interfaces. size_t mtu[UDPARD_IFACE_COUNT_MAX]; - /// This duration is used to derive the acknowledgment timeout for reliable transfers in tx_ack_timeout(). - /// It must be a positive number of microseconds. - /// - /// The baseline timeout should be greater than the expected round-trip time (RTT) between the most distant - /// nodes in the network for a message at the highest priority level. - /// - /// A sensible default is provided at initialization, which can be overridden by the application. - udpard_us_t ack_baseline_timeout; - /// Optional user-managed mapping from the Cyphal priority level in [0,7] (highest priority at index 0) /// to the IP DSCP field value for use by the application when transmitting. By default, all entries are zero. uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_COUNT]; @@ -431,10 +371,6 @@ struct udpard_tx_t /// READ-ONLY! size_t enqueued_frames_count; - /// Starts at zero and increments with every enqueued transfer. Do not modify! - /// This is used internally as a tiebreaker in non-unique indexes. - uint64_t next_seq_no; - udpard_tx_mem_resources_t memory; /// Error counters incremented automatically when the corresponding error condition occurs. 
@@ -446,9 +382,7 @@ struct udpard_tx_t /// Internal use only, do not modify! See tx_transfer_t for details. udpard_list_t queue[UDPARD_IFACE_COUNT_MAX][UDPARD_PRIORITY_COUNT]; ///< Next to transmit at the tail. - udpard_tree_t* index_transfer_id; udpard_tree_t* index_deadline; - udpard_tree_t* index_staged; udpard_list_t agewise; ///< Oldest at the tail. /// Opaque pointer for the application use only. Not accessed by the library. @@ -459,11 +393,11 @@ struct udpard_tx_t /// and can be changed later by modifying the struct fields directly. No memory allocation is going to take place /// until the first transfer is successfully pushed via udpard_tx_push(). /// -/// The local UID should be a globally unique EUI-64 identifier assigned to the local node. It may be a random -/// EUI-64, which is especially useful for short-lived software nodes. +/// The local UID should be a globally unique EUI-64 identifier assigned to the local node. It may be a random EUI-64, +/// which is especially useful for short-lived software nodes. /// -/// The p2p_transfer_id_initial value must be chosen randomly such that it is likely to be distinct per application -/// startup. See the transfer-ID counter requirements in udpard_tx_push() for details. +/// The p2p_transfer_id_seed value must be chosen randomly such that it is likely to be distinct per startup. +/// See the transfer-ID counter requirements in udpard_tx_push() for details. Excess most significant bits are ignored. /// /// The enqueued_frames_limit should be large enough to accommodate the expected burstiness of the application traffic. /// If the limit is reached, the library will apply heuristics to sacrifice some older transfers to make room @@ -481,35 +415,22 @@ bool udpard_tx_new(udpard_tx_t* const self, /// invalidated immediately after this function returns. 
When redundant interfaces are used, the library will attempt to /// minimize the number of copies by reusing frames across interfaces with identical MTU values and memory resources. /// +/// The endpoint depends on the subject-ID and is computed using udpard_make_subject_endpoint(). +/// The endpoint must satisfy udpard_is_valid_endpoint(). +/// /// The caller shall increment the transfer-ID counter after each successful invocation of this function per subject. /// The initial value shall be chosen randomly such that it is likely to be distinct per application startup /// (embedded systems can use noinit memory sections, hash uninitialized SRAM, use timers or ADC noise, etc). /// The random starting point will ensure global uniqueness across different subjects. +/// Excess most significant bits are ignored. /// Related thread on random transfer-ID init: https://forum.opencyphal.org/t/improve-the-transfer-id-timeout/2375 /// -/// The user context value is carried through to the callbacks. It must contain enough context to allow subject-ID -/// derivation inside udpard_tx_vtable_t::eject_subject(). For example, it may contain a pointer to the topic struct. -/// -/// Returns true on success. Runtime failures increment the corresponding error counters, -/// while invocations with invalid arguments just return zero without modifying the queue state. -/// /// The enqueued transfer will be emitted over all interfaces specified in the iface_bitmap. -/// The subject-ID is computed inside the udpard_tx_vtable::eject_subject() callback at the time of transmission. -/// The subject-ID cannot be computed beforehand at the time of enqueuing because the topic->subject consensus protocol -/// may find a different subject-ID allocation between the time of enqueuing and the time of (re)transmission. /// -/// The feedback callback is set to NULL for best-effort (non-acknowledged) transfers. 
Otherwise, the transfer is -/// treated as reliable, requesting a delivery acknowledgement from remote subscribers with repeated retransmissions if -/// necessary; it is guaranteed that delivery attempts will cease no later than by the specified deadline. -/// The feedback callback is ALWAYS invoked EXACTLY ONCE per reliable transfer pushed via udpard_tx_push() successfully, -/// indicating the number of remote nodes that acknowledged the reception of the transfer. -/// The retransmission delay is increased exponentially with each retransmission attempt as a means of congestion -/// control and latency adaptation; please refer to udpard_tx_t::ack_baseline_timeout for details. +/// The user context value is carried through to the callbacks. /// -/// Beware that reliable delivery may cause message reordering. For example, when sending messages A and B, -/// and A is lost on the first attempt, the next attempt may be scheduled after B is published, -/// so that the remote sees B followed by A. Most applications tolerate it without issues; if this is not the case, -/// the subscriber should reconstruct the original message ordering. +/// Returns true on success. Runtime failures increment the corresponding error counters, +/// while invocations with invalid arguments just return zero without modifying the queue state. /// /// On success, the function allocates a single transfer state instance and a number of payload fragments. /// The time complexity is O(p + log e), where p is the transfer payload size, and e is the number of @@ -520,22 +441,20 @@ bool udpard_tx_push(udpard_tx_t* const self, const uint16_t iface_bitmap, const udpard_prio_t priority, const uint64_t transfer_id, + const udpard_udpip_ep_t endpoint, const udpard_bytes_scattered_t payload, - void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. - const udpard_user_context_t user); + void* const user); /// This is a specialization of the general push function for P2P transfers. 
/// The transfer-ID counter is managed automatically. -/// If out_transfer_id is not NULL, the assigned internal transfer-ID is stored there for use with udpard_tx_cancel_p2p. +/// Endpoints may be empty (zero) for some ifaces, in which case no transmission over those ifaces will be attempted. bool udpard_tx_push_p2p(udpard_tx_t* const self, const udpard_us_t now, const udpard_us_t deadline, const udpard_prio_t priority, - const udpard_remote_t remote, // Endpoints may be empty (zero) for some ifaces. + const udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX], const udpard_bytes_scattered_t payload, - void (*const feedback)(udpard_tx_t*, udpard_tx_feedback_t), // NULL if best-effort. - const udpard_user_context_t user, - uint64_t* const out_transfer_id); + void* const user); /// This should be invoked whenever the socket/NIC of this queue becomes ready to accept new datagrams for transmission. /// It is fine to also invoke it periodically unconditionally to drive the transmission process. @@ -545,15 +464,6 @@ bool udpard_tx_push_p2p(udpard_tx_t* const self, /// The function may deallocate memory. The time complexity is logarithmic in the number of enqueued transfers. void udpard_tx_poll(udpard_tx_t* const self, const udpard_us_t now, const uint16_t iface_bitmap); -/// Cancel a previously enqueued transfer of the specified transfer-ID and QoS. -/// If provided, the feedback callback will be invoked with success==false. -/// Not safe to call from the eject() callback. -/// Returns true if a transfer was found and cancelled, false if no such transfer was found. -/// The complexity is O(log t + f), where t is the number of enqueued transfers, -/// and f is the number of frames in the transfer. -/// The function will free the memory associated with the transfer. -bool udpard_tx_cancel(udpard_tx_t* const self, const uint64_t transfer_id, const bool reliable); - /// Returns a bitmap of interfaces that have pending transmissions. This is useful for IO multiplexing loops. 
/// Zero indicates that there are no pending transmissions. /// Which interfaces are usable is defined by the remote endpoints provided when pushing transfers. @@ -564,7 +474,7 @@ uint16_t udpard_tx_pending_ifaces(const udpard_tx_t* const self); void udpard_tx_refcount_inc(const udpard_bytes_t tx_payload_view); void udpard_tx_refcount_dec(const udpard_bytes_t tx_payload_view); -/// Drops all enqueued items; afterward, the instance is safe to discard. Reliable transfer callbacks are still invoked. +/// Drops all enqueued items; afterward, the instance is safe to discard. void udpard_tx_free(udpard_tx_t* const self); // ===================================================================================================================== @@ -605,15 +515,6 @@ typedef struct udpard_rx_t uint64_t errors_frame_malformed; ///< A received frame was malformed and thus dropped. uint64_t errors_transfer_malformed; ///< A transfer could not be reassembled correctly. - /// Incremented when an ack cannot be enqueued (including when tx is NULL). - /// If tx is available, inspect its error counters for details. - uint64_t errors_ack_tx; - - /// The transmission pipeline is needed to manage ack transmission and removal of acknowledged transfers. - /// If the application wants to only listen, the pointer may be NULL (no acks will be sent). - /// When initializing the library, the TX instance needs to be created first. - udpard_tx_t* tx; - void* user; ///< Opaque pointer for the application use only. Not accessed by the library. } udpard_rx_t; @@ -698,8 +599,8 @@ struct udpard_rx_transfer_t { udpard_us_t timestamp; udpard_prio_t priority; - uint64_t transfer_id; udpard_remote_t remote; + uint64_t transfer_id; /// The total size of the payload available to the application, in bytes, is provided for convenience; /// it is the sum of the sizes of all its fragments. 
For example, if the sender emitted a transfer of 2000 @@ -728,9 +629,7 @@ struct udpard_rx_transfer_t /// The RX instance holds no resources and can be destroyed at any time by simply freeing all its ports first /// using udpard_rx_port_free(), then discarding the instance itself. The self pointer must not be NULL. -/// The TX instance must be initialized beforehand, unless the application wants to only listen, -/// in which case it may be NULL. -void udpard_rx_new(udpard_rx_t* const self, udpard_tx_t* const tx); +void udpard_rx_new(udpard_rx_t* const self); /// Must be invoked at least every few milliseconds (more often is fine). /// If this is invoked simultaneously with rx subscription reception, diff --git a/tests/src/helpers.h b/tests/src/helpers.h index 60ee34a..4ec168c 100644 --- a/tests/src/helpers.h +++ b/tests/src/helpers.h @@ -64,14 +64,6 @@ static inline udpard_bytes_scattered_t make_scattered(const void* const data, co return out; } -// Wraps an application pointer for user context plumbing. -static inline udpard_user_context_t make_user_context(void* const obj) -{ - udpard_user_context_t out = UDPARD_USER_CONTEXT_NULL; - out.ptr[0] = obj; - return out; -} - /// The instrumented allocator tracks memory consumption, checks for heap corruption, and can be configured to fail /// allocations above a certain threshold. #define INSTRUMENTED_ALLOCATOR_CANARY_SIZE 1024U diff --git a/tests/src/test_e2e_api.cpp b/tests/src/test_e2e_api.cpp index 2f507ed..61a0963 100644 --- a/tests/src/test_e2e_api.cpp +++ b/tests/src/test_e2e_api.cpp @@ -3,438 +3,175 @@ /// Copyright Amazon.com Inc. or its affiliates. 
/// SPDX-License-Identifier: MIT -// ReSharper disable CppPassValueParameterByConstReference - #include #include "helpers.h" #include -#include +#include +#include #include namespace { struct CapturedFrame { - udpard_bytes_mut_t datagram; - uint_fast8_t iface_index; -}; - -struct FeedbackState -{ - size_t count = 0; - uint16_t acknowledgements = 0; + std::vector bytes; + uint_fast8_t iface_index = 0; + udpard_udpip_ep_t destination{}; }; -struct RxContext +struct RxState { - std::vector expected; - std::array sources{}; - uint64_t remote_uid = 0; - size_t received = 0; + std::size_t count; + uint64_t transfer_id; + std::vector payload; + udpard_remote_t remote; }; -// Refcount helpers keep captured datagrams alive. -void tx_refcount_free(void* const user, const size_t size, void* const payload) -{ - (void)user; - udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); -} - -// Shared deleter for captured TX frames. -constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free }; - -bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +// Captures each ejected datagram for manual delivery to RX. 
+bool capture_tx(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { - auto* frames = static_cast*>(tx->user); - if (frames == nullptr) { + auto* out = static_cast*>(tx->user); + if (out == nullptr) { return false; } - udpard_tx_refcount_inc(ejection->datagram); - void* const data = const_cast(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) - frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data }, - .iface_index = ejection->iface_index }); + CapturedFrame frame{}; + frame.bytes.assign(static_cast(ejection->datagram.data), + static_cast(ejection->datagram.data) + ejection->datagram.size); + frame.iface_index = ejection->iface_index; + frame.destination = ejection->destination; + out->push_back(frame); return true; } -bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - return capture_tx_frame_impl(tx, ejection); -} - -bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) -{ - return capture_tx_frame_impl(tx, ejection); -} - -void drop_frame(const CapturedFrame& frame) -{ - udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data }); -} +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx }; -void fill_random(std::vector& data) -{ - for (auto& byte : data) { - byte = static_cast(rand()) & 0xFFU; - } -} - -constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject, - .eject_p2p = &capture_tx_frame_p2p }; - -// Feedback callback records completion. -void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) -{ - auto* st = static_cast(fb.user.ptr[0]); - if (st != nullptr) { - st->count++; - st->acknowledgements = fb.acknowledgements; - } -} - -// RX callbacks validate payload and sender. +// Collects the received transfer and frees its fragment tree. 
void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { - auto* ctx = static_cast(rx->user); - TEST_ASSERT_EQUAL_UINT64(ctx->remote_uid, transfer.remote.uid); - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - if ((transfer.remote.endpoints[i].ip != 0U) || (transfer.remote.endpoints[i].port != 0U)) { - TEST_ASSERT_EQUAL_UINT32(ctx->sources[i].ip, transfer.remote.endpoints[i].ip); - TEST_ASSERT_EQUAL_UINT16(ctx->sources[i].port, transfer.remote.endpoints[i].port); - } - } - std::vector assembled(transfer.payload_size_stored); + auto* st = static_cast(rx->user); + TEST_ASSERT_NOT_NULL(st); + st->count++; + st->transfer_id = transfer.transfer_id; + st->remote = transfer.remote; + st->payload.resize(transfer.payload_size_stored); const udpard_fragment_t* cursor = transfer.payload; - const size_t gathered = udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, assembled.data()); - TEST_ASSERT_EQUAL_size_t(transfer.payload_size_stored, gathered); - TEST_ASSERT_EQUAL_size_t(ctx->expected.size(), transfer.payload_size_wire); - if (!ctx->expected.empty()) { - TEST_ASSERT_EQUAL_MEMORY(ctx->expected.data(), assembled.data(), transfer.payload_size_stored); - } + (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, st->payload.data()); udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); - ctx->received++; } -constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message }; - -// Ack port frees responses. -void on_ack_response(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) -{ - udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); -} -constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response }; +constexpr udpard_rx_port_vtable_t rx_vtable{ .on_message = &on_message }; -// Reliable delivery must survive data and ack loss. 
-// Each node uses exactly one TX and one RX instance as per the library design. -void test_reliable_delivery_under_losses() +// Allocates and sends one captured frame into RX. +void deliver_frame(const CapturedFrame& frame, + const udpard_mem_t mem, + const udpard_deleter_t del, + udpard_rx_t* const rx, + udpard_rx_port_t* const port, + const udpard_us_t ts, + const udpard_udpip_ep_t source) { - seed_prng(); - - // Allocators - one TX and one RX per node. - // Publisher node allocators. - instrumented_allocator_t pub_tx_alloc_transfer{}; - instrumented_allocator_t pub_tx_alloc_payload{}; - instrumented_allocator_t pub_rx_alloc_frag{}; - instrumented_allocator_t pub_rx_alloc_session{}; - instrumented_allocator_new(&pub_tx_alloc_transfer); - instrumented_allocator_new(&pub_tx_alloc_payload); - instrumented_allocator_new(&pub_rx_alloc_frag); - instrumented_allocator_new(&pub_rx_alloc_session); - - // Subscriber node allocators. - instrumented_allocator_t sub_tx_alloc_transfer{}; - instrumented_allocator_t sub_tx_alloc_payload{}; - instrumented_allocator_t sub_rx_alloc_frag{}; - instrumented_allocator_t sub_rx_alloc_session{}; - instrumented_allocator_new(&sub_tx_alloc_transfer); - instrumented_allocator_new(&sub_tx_alloc_payload); - instrumented_allocator_new(&sub_rx_alloc_frag); - instrumented_allocator_new(&sub_rx_alloc_session); - - // Memory resources. 
- udpard_tx_mem_resources_t pub_tx_mem{}; - pub_tx_mem.transfer = instrumented_allocator_make_resource(&pub_tx_alloc_transfer); - for (auto& res : pub_tx_mem.payload) { - res = instrumented_allocator_make_resource(&pub_tx_alloc_payload); - } - const udpard_rx_mem_resources_t pub_rx_mem{ .session = instrumented_allocator_make_resource(&pub_rx_alloc_session), - .slot = instrumented_allocator_make_resource(&pub_rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&pub_rx_alloc_frag) }; - - udpard_tx_mem_resources_t sub_tx_mem{}; - sub_tx_mem.transfer = instrumented_allocator_make_resource(&sub_tx_alloc_transfer); - for (auto& res : sub_tx_mem.payload) { - res = instrumented_allocator_make_resource(&sub_tx_alloc_payload); - } - const udpard_rx_mem_resources_t sub_rx_mem{ .session = instrumented_allocator_make_resource(&sub_rx_alloc_session), - .slot = instrumented_allocator_make_resource(&sub_rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&sub_rx_alloc_frag) }; - - // Publisher node: single TX, single RX (linked to TX for ACK processing). - constexpr uint64_t pub_uid = 0x1111222233334444ULL; - udpard_tx_t pub_tx{}; - std::vector pub_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&pub_tx, pub_uid, 10U, 64, pub_tx_mem, &tx_vtable)); - pub_tx.user = &pub_frames; - pub_tx.ack_baseline_timeout = 8000; - - udpard_rx_t pub_rx{}; - udpard_rx_new(&pub_rx, &pub_tx); - udpard_rx_port_t pub_p2p_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&pub_p2p_port, 16, pub_rx_mem, &ack_callbacks)); - - // Subscriber node: single TX, single RX (linked to TX for sending ACKs). 
- constexpr uint64_t sub_uid = 0xABCDEF0012345678ULL; - udpard_tx_t sub_tx{}; - std::vector sub_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&sub_tx, sub_uid, 77U, 8, sub_tx_mem, &tx_vtable)); - sub_tx.user = &sub_frames; - - udpard_rx_t sub_rx{}; - udpard_rx_new(&sub_rx, &sub_tx); - udpard_rx_port_t sub_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new(&sub_port, 6000, sub_rx_mem, &callbacks)); - - // Endpoints. - const std::array publisher_sources{ - udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 7400U }, - udpard_udpip_ep_t{ .ip = 0x0A000002U, .port = 7401U }, - udpard_udpip_ep_t{ .ip = 0x0A000003U, .port = 7402U }, - }; - const std::array subscriber_sources{ - udpard_udpip_ep_t{ .ip = 0x0A000010U, .port = 7600U }, - udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 7601U }, - udpard_udpip_ep_t{ .ip = 0x0A000012U, .port = 7602U }, - }; - // Payload and context. - std::vector payload(4096); - fill_random(payload); - RxContext ctx{}; - ctx.expected = payload; - ctx.sources = publisher_sources; - ctx.remote_uid = pub_uid; - sub_rx.user = &ctx; - - // Reliable transfer with staged losses. - FeedbackState fb{}; - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - pub_tx.mtu[0] = 600; - pub_tx.mtu[1] = 900; - pub_tx.mtu[2] = 500; - const udpard_us_t start = 0; - const udpard_us_t deadline = start + 200000; - const uint16_t iface_bitmap_all = UDPARD_IFACE_BITMAP_ALL; - const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - TEST_ASSERT_TRUE(udpard_tx_push(&pub_tx, - start, - deadline, - iface_bitmap_all, - udpard_prio_fast, - 1U, - payload_view, - &record_feedback, - make_user_context(&fb))); - - // Send until acked; drop first data frame and first ack. - bool first_round = true; - udpard_us_t now = start; - size_t attempts = 0; - const size_t attempt_cap = 6; - while ((fb.count == 0) && (attempts < attempt_cap)) { - // Publisher transmits topic message. 
- pub_frames.clear(); - udpard_tx_poll(&pub_tx, now, UDPARD_IFACE_BITMAP_ALL); - bool data_loss_done = false; - for (const auto& frame : pub_frames) { - const bool drop = first_round && !data_loss_done && (frame.iface_index == 1U); - if (drop) { - drop_frame(frame); - data_loss_done = true; - continue; - } - TEST_ASSERT_TRUE(udpard_rx_port_push(&sub_rx, - &sub_port, - now, - publisher_sources[frame.iface_index], - frame.datagram, - tx_payload_deleter, - frame.iface_index)); - } - udpard_rx_poll(&sub_rx, now); - - // Subscriber transmits ACKs (via sub_tx since sub_rx is linked to it). - sub_frames.clear(); - udpard_tx_poll(&sub_tx, now, UDPARD_IFACE_BITMAP_ALL); - bool ack_sent = false; - for (const auto& ack : sub_frames) { - const bool drop_ack = first_round && !ack_sent; - if (drop_ack) { - drop_frame(ack); - continue; - } - ack_sent = true; - TEST_ASSERT_TRUE(udpard_rx_port_push(&pub_rx, - &pub_p2p_port, - now, - subscriber_sources[ack.iface_index], - ack.datagram, - tx_payload_deleter, - ack.iface_index)); - } - udpard_rx_poll(&pub_rx, now); - first_round = false; - attempts++; - now += pub_tx.ack_baseline_timeout + 5000; - } - - TEST_ASSERT_EQUAL_size_t(1, fb.count); - TEST_ASSERT_EQUAL_UINT32(1, fb.acknowledgements); - TEST_ASSERT_EQUAL_size_t(1, ctx.received); - // Cleanup. 
- udpard_rx_port_free(&sub_rx, &sub_port); - udpard_rx_port_free(&pub_rx, &pub_p2p_port); - udpard_tx_free(&pub_tx); - udpard_tx_free(&sub_tx); - - TEST_ASSERT_EQUAL_size_t(0, pub_tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, pub_tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, pub_rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, pub_rx_alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, sub_tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, sub_tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, sub_rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, sub_rx_alloc_session.allocated_fragments); - - instrumented_allocator_reset(&pub_tx_alloc_transfer); - instrumented_allocator_reset(&pub_tx_alloc_payload); - instrumented_allocator_reset(&pub_rx_alloc_frag); - instrumented_allocator_reset(&pub_rx_alloc_session); - instrumented_allocator_reset(&sub_tx_alloc_transfer); - instrumented_allocator_reset(&sub_tx_alloc_payload); - instrumented_allocator_reset(&sub_rx_alloc_frag); - instrumented_allocator_reset(&sub_rx_alloc_session); + void* const dgram = mem_res_alloc(mem, frame.bytes.size()); + TEST_ASSERT_NOT_NULL(dgram); + (void)memcpy(dgram, frame.bytes.data(), frame.bytes.size()); + TEST_ASSERT_TRUE(udpard_rx_port_push( + rx, port, ts, source, udpard_bytes_mut_t{ .size = frame.bytes.size(), .data = dgram }, del, frame.iface_index)); } -// Counters must reflect expired deliveries and ack failures. -void test_reliable_stats_and_failures() +void test_subject_roundtrip() { seed_prng(); - // Expiration path. 
- instrumented_allocator_t exp_alloc_transfer{}; - instrumented_allocator_t exp_alloc_payload{}; - instrumented_allocator_new(&exp_alloc_transfer); - instrumented_allocator_new(&exp_alloc_payload); - udpard_tx_mem_resources_t exp_mem{}; - exp_mem.transfer = instrumented_allocator_make_resource(&exp_alloc_transfer); - for (auto& res : exp_mem.payload) { - res = instrumented_allocator_make_resource(&exp_alloc_payload); - } - udpard_tx_t exp_tx{}; - std::vector exp_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&exp_tx, 0x9999000011112222ULL, 2U, 4, exp_mem, &tx_vtable)); - exp_tx.user = &exp_frames; - FeedbackState fb_fail{}; - const uint16_t iface_bitmap_1 = (1U << 0U); - const udpard_bytes_scattered_t exp_payload = make_scattered("ping", 4); - TEST_ASSERT_TRUE(udpard_tx_push(&exp_tx, - 0, - 10, - iface_bitmap_1, - udpard_prio_fast, - 5U, - exp_payload, - &record_feedback, - make_user_context(&fb_fail))); - udpard_tx_poll(&exp_tx, 0, UDPARD_IFACE_BITMAP_ALL); - for (const auto& f : exp_frames) { - drop_frame(f); + // Configure TX fixture with captured ejections. 
+ instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + udpard_tx_mem_resources_t tx_mem{}; + tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); + for (auto& res : tx_mem.payload) { + res = instrumented_allocator_make_resource(&tx_alloc_payload); } - exp_frames.clear(); - udpard_tx_poll(&exp_tx, 20, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_EQUAL_size_t(1, fb_fail.count); - TEST_ASSERT_EQUAL_UINT32(0, fb_fail.acknowledgements); - TEST_ASSERT_GREATER_THAN_UINT64(0, exp_tx.errors_expiration); - udpard_tx_free(&exp_tx); - TEST_ASSERT_EQUAL_size_t(0, exp_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, exp_alloc_payload.allocated_fragments); - instrumented_allocator_reset(&exp_alloc_transfer); - instrumented_allocator_reset(&exp_alloc_payload); - - // Ack push failure increments counters. - instrumented_allocator_t rx_alloc_frag{}; + udpard_tx_t tx{}; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1010101010101010ULL, 123U, 32U, tx_mem, &tx_vtable)); + tx.mtu[0] = 256U; + tx.mtu[1] = 256U; + tx.mtu[2] = 256U; + std::vector frames; + tx.user = &frames; + + // Configure RX fixture. 
instrumented_allocator_t rx_alloc_session{}; - instrumented_allocator_t src_alloc_transfer{}; - instrumented_allocator_t src_alloc_payload{}; - instrumented_allocator_new(&rx_alloc_frag); + instrumented_allocator_t rx_alloc_fragment{}; instrumented_allocator_new(&rx_alloc_session); - instrumented_allocator_new(&src_alloc_transfer); - instrumented_allocator_new(&src_alloc_payload); - const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), - .slot = instrumented_allocator_make_resource(&rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; - udpard_tx_mem_resources_t src_mem{}; - src_mem.transfer = instrumented_allocator_make_resource(&src_alloc_transfer); - for (auto& res : src_mem.payload) { - res = instrumented_allocator_make_resource(&src_alloc_payload); + instrumented_allocator_new(&rx_alloc_fragment); + const udpard_rx_mem_resources_t rx_mem{ + .session = instrumented_allocator_make_resource(&rx_alloc_session), + .slot = instrumented_allocator_make_resource(&rx_alloc_session), + .fragment = instrumented_allocator_make_resource(&rx_alloc_fragment), + }; + const udpard_deleter_t del = instrumented_allocator_make_deleter(&rx_alloc_fragment); + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxState state{}; + udpard_rx_new(&rx); + rx.user = &state; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, rx_mem, &rx_vtable)); + + // Send one multi-frame transfer over two interfaces. 
+ std::vector payload(300U); + for (std::size_t i = 0; i < payload.size(); i++) { + payload[i] = static_cast(i); } - - udpard_tx_t src_tx{}; - std::vector src_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&src_tx, 0x5555AAAABBBBCCCCULL, 3U, 4, src_mem, &tx_vtable)); - src_tx.user = &src_frames; - udpard_rx_t rx{}; - udpard_rx_port_t port{}; - RxContext ctx{}; - ctx.remote_uid = src_tx.local_uid; - ctx.sources = { udpard_udpip_ep_t{ .ip = 0x0A000021U, .port = 7700U }, udpard_udpip_ep_t{}, udpard_udpip_ep_t{} }; - ctx.expected.assign({ 1U, 2U, 3U, 4U }); - udpard_rx_new(&rx, nullptr); - rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 64, rx_mem, &callbacks)); - - const udpard_bytes_scattered_t src_payload = make_scattered(ctx.expected.data(), ctx.expected.size()); - FeedbackState fb_ignore{}; - TEST_ASSERT_TRUE(udpard_tx_push(&src_tx, - 0, + const udpard_udpip_ep_t destination = udpard_make_subject_endpoint(1234U); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, 1000, - iface_bitmap_1, - udpard_prio_fast, - 7U, - src_payload, - &record_feedback, - make_user_context(&fb_ignore))); - udpard_tx_poll(&src_tx, 0, UDPARD_IFACE_BITMAP_ALL); - const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - for (const auto& f : src_frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push( - &rx, &port, 0, ctx.sources[f.iface_index], f.datagram, tx_payload_deleter, f.iface_index)); + 100000, + (1U << 0U) | (1U << 1U), + udpard_prio_nominal, + 55U, + destination, + make_scattered(payload.data(), payload.size()), + nullptr)); + udpard_tx_poll(&tx, 1001, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_TRUE(!frames.empty()); + + // Deliver the first interface copy only. 
+ for (const auto& frame : frames) { + if (frame.iface_index == 0U) { + deliver_frame( + frame, rx_mem.fragment, del, &rx, &port, 2000, udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 9382U }); + } } - udpard_rx_poll(&rx, 0); - TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_ack_tx); - TEST_ASSERT_EQUAL_size_t(1, ctx.received); + udpard_rx_poll(&rx, 3000); + + // Validate the received transfer. + TEST_ASSERT_EQUAL_size_t(1, state.count); + TEST_ASSERT_EQUAL_UINT64(55U, state.transfer_id); + TEST_ASSERT_EQUAL_size_t(payload.size(), state.payload.size()); + TEST_ASSERT_EQUAL_MEMORY(payload.data(), state.payload.data(), payload.size()); + TEST_ASSERT_EQUAL_UINT64(0x1010101010101010ULL, state.remote.uid); + // Release all resources. udpard_rx_port_free(&rx, &port); - udpard_tx_free(&src_tx); - TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, src_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, src_alloc_payload.allocated_fragments); - instrumented_allocator_reset(&rx_alloc_frag); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_fragment.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); instrumented_allocator_reset(&rx_alloc_session); - instrumented_allocator_reset(&src_alloc_transfer); - instrumented_allocator_reset(&src_alloc_payload); + instrumented_allocator_reset(&rx_alloc_fragment); } } // namespace -extern "C" void setUp() {} - -extern "C" void tearDown() {} +void setUp() {} +void tearDown() {} int main() { UNITY_BEGIN(); - RUN_TEST(test_reliable_delivery_under_losses); - RUN_TEST(test_reliable_stats_and_failures); + RUN_TEST(test_subject_roundtrip); return UNITY_END(); } diff --git a/tests/src/test_e2e_edge.cpp 
b/tests/src/test_e2e_edge.cpp index 261369b..6904c58 100644 --- a/tests/src/test_e2e_edge.cpp +++ b/tests/src/test_e2e_edge.cpp @@ -3,694 +3,255 @@ /// Copyright Amazon.com Inc. or its affiliates. /// SPDX-License-Identifier: MIT -// ReSharper disable CppPassValueParameterByConstReference - #include #include "helpers.h" #include -#include +#include +#include +#include #include namespace { -void on_message(udpard_rx_t* rx, udpard_rx_port_t* port, udpard_rx_transfer_t transfer); -constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message }; - -struct FbState -{ - size_t count = 0; - uint16_t acknowledgements = 0; -}; - struct CapturedFrame { - udpard_bytes_mut_t datagram; - uint_fast8_t iface_index; + std::vector bytes; + std::uint_fast8_t iface_index = 0; }; -void tx_refcount_free(void* const user, const size_t size, void* const payload) +struct RxState { - (void)user; - udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); -} - -// Shared deleter for captured TX frames. -constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free }; + std::size_t count = 0; + std::size_t payload_size_wire = 0; + std::uint64_t transfer_id = 0; + std::vector payload; +}; -bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +// Captures each TX frame into a vector. 
+bool capture_tx(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { - auto* frames = static_cast*>(tx->user); - if (frames == nullptr) { + auto* out = static_cast*>(tx->user); + if (out == nullptr) { return false; } - udpard_tx_refcount_inc(ejection->datagram); - void* const data = const_cast(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) - frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data }, - .iface_index = ejection->iface_index }); + CapturedFrame frame{}; + frame.bytes.assign(static_cast(ejection->datagram.data), + static_cast(ejection->datagram.data) + ejection->datagram.size); + frame.iface_index = ejection->iface_index; + out->push_back(std::move(frame)); return true; } -bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - return capture_tx_frame_impl(tx, ejection); -} - -bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) -{ - return capture_tx_frame_impl(tx, ejection); -} - -constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject, - .eject_p2p = &capture_tx_frame_p2p }; - -void fb_record(udpard_tx_t*, const udpard_tx_feedback_t fb) -{ - auto* st = static_cast(fb.user.ptr[0]); - if (st != nullptr) { - st->count++; - st->acknowledgements = fb.acknowledgements; - } -} - -void release_frames(std::vector& frames) -{ - for (const auto& [datagram, iface_index] : frames) { - udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); - } - frames.clear(); -} - -struct Context -{ - std::vector ids; - uint64_t expected_uid = 0; - udpard_udpip_ep_t source{}; -}; - -struct Fixture -{ - instrumented_allocator_t tx_alloc_transfer{}; - instrumented_allocator_t tx_alloc_payload{}; - instrumented_allocator_t rx_alloc_frag{}; - instrumented_allocator_t rx_alloc_session{}; - udpard_tx_t tx{}; - udpard_rx_t rx{}; - udpard_rx_port_t 
port{}; - udpard_deleter_t tx_payload_deleter{}; - std::vector frames; - Context ctx{}; - udpard_udpip_ep_t dest{}; - udpard_udpip_ep_t source{}; - - Fixture(const Fixture&) = delete; - Fixture& operator=(const Fixture&) = delete; - Fixture(Fixture&&) = delete; - Fixture& operator=(Fixture&&) = delete; - - explicit Fixture() - { - instrumented_allocator_new(&tx_alloc_transfer); - instrumented_allocator_new(&tx_alloc_payload); - instrumented_allocator_new(&rx_alloc_frag); - instrumented_allocator_new(&rx_alloc_session); - udpard_tx_mem_resources_t tx_mem{}; - tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); - for (auto& res : tx_mem.payload) { - res = instrumented_allocator_make_resource(&tx_alloc_payload); - } - const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), - .slot = instrumented_allocator_make_resource(&rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; - tx_payload_deleter = udpard_deleter_t{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - source = { .ip = 0x0A000001U, .port = 7501U }; - dest = udpard_make_subject_endpoint(222U); - - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 42U, 16, tx_mem, &tx_vtable)); - tx.user = &frames; - udpard_rx_new(&rx, nullptr); - ctx.expected_uid = tx.local_uid; - ctx.source = source; - rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024, rx_mem, &callbacks)); - } - - ~Fixture() - { - udpard_rx_port_free(&rx, &port); - udpard_tx_free(&tx); - TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); - instrumented_allocator_reset(&rx_alloc_frag); - instrumented_allocator_reset(&rx_alloc_session); - instrumented_allocator_reset(&tx_alloc_transfer); - 
instrumented_allocator_reset(&tx_alloc_payload); - } - - void push_single(const udpard_us_t ts, const uint64_t transfer_id) - { - frames.clear(); - std::array payload_buf{}; - for (size_t i = 0; i < payload_buf.size(); i++) { - payload_buf[i] = static_cast(transfer_id >> (i * 8U)); - } - const udpard_bytes_scattered_t payload = make_scattered(payload_buf.data(), payload_buf.size()); - const udpard_us_t deadline = ts + 1000000; - for (auto& mtu_value : tx.mtu) { - mtu_value = UDPARD_MTU_DEFAULT; - } - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - ts, - deadline, - iface_bitmap_1, - udpard_prio_slow, - transfer_id, - payload, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - udpard_tx_poll(&tx, ts, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_GREATER_THAN_UINT32(0U, static_cast(frames.size())); - for (const auto& [datagram, iface_index] : frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, ts, source, datagram, tx_payload_deleter, iface_index)); - } - } -}; +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx }; -/// Callbacks keep the payload memory under control. +// Stores one received transfer and frees its payload. 
void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { - auto* const ctx = static_cast(rx->user); - ctx->ids.push_back(transfer.transfer_id); - TEST_ASSERT_EQUAL_UINT64(ctx->expected_uid, transfer.remote.uid); - TEST_ASSERT_EQUAL_UINT32(ctx->source.ip, transfer.remote.endpoints[0].ip); - TEST_ASSERT_EQUAL_UINT16(ctx->source.port, transfer.remote.endpoints[0].port); + auto* st = static_cast(rx->user); + TEST_ASSERT_NOT_NULL(st); + st->count++; + st->payload_size_wire = transfer.payload_size_wire; + st->transfer_id = transfer.transfer_id; + st->payload.resize(transfer.payload_size_stored); + const udpard_fragment_t* cursor = transfer.payload; + (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, st->payload.data()); udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -/// UNORDERED mode should drop duplicates while keeping arrival order. -void test_udpard_rx_unordered_duplicates() -{ - Fixture fix{}; - udpard_us_t now = 0; - - constexpr std::array ids{ 100, 20000, 10100, 5000, 20000, 100 }; - for (const auto id : ids) { - fix.push_single(now, id); - udpard_rx_poll(&fix.rx, now); - now++; - } - udpard_rx_poll(&fix.rx, now + 100); +constexpr udpard_rx_port_vtable_t rx_vtable{ .on_message = &on_message }; - constexpr std::array expected{ 100, 20000, 10100, 5000 }; - TEST_ASSERT_EQUAL_size_t(expected.size(), fix.ctx.ids.size()); - for (size_t i = 0; i < expected.size(); i++) { - TEST_ASSERT_EQUAL_UINT64(expected[i], fix.ctx.ids[i]); +// Builds TX memory resources. +udpard_tx_mem_resources_t make_tx_mem(instrumented_allocator_t& transfer, instrumented_allocator_t& payload) +{ + udpard_tx_mem_resources_t out{}; + out.transfer = instrumented_allocator_make_resource(&transfer); + for (auto& r : out.payload) { + r = instrumented_allocator_make_resource(&payload); } + return out; } -// Feedback must fire regardless of disposal path. 
-void test_udpard_tx_feedback_always_called() +// Builds RX memory resources. +udpard_rx_mem_resources_t make_rx_mem(instrumented_allocator_t& session, instrumented_allocator_t& fragment) { - instrumented_allocator_t tx_alloc_transfer{}; - instrumented_allocator_t tx_alloc_payload{}; - instrumented_allocator_new(&tx_alloc_transfer); - instrumented_allocator_new(&tx_alloc_payload); - udpard_tx_mem_resources_t tx_mem{}; - tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); - for (auto& res : tx_mem.payload) { - res = instrumented_allocator_make_resource(&tx_alloc_payload); - } - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - - // Expiration path triggers feedback=false. - { - std::vector frames; - udpard_tx_t tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4, tx_mem, &tx_vtable)); - tx.user = &frames; - FbState fb{}; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 10, - 10, - iface_bitmap_1, - udpard_prio_fast, - 11, - make_scattered(nullptr, 0), - fb_record, - make_user_context(&fb))); - udpard_tx_poll(&tx, 11, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_EQUAL_size_t(1, fb.count); - TEST_ASSERT_EQUAL_UINT32(0, fb.acknowledgements); - release_frames(frames); - udpard_tx_free(&tx); - } - - // Sacrifice path should also emit feedback. 
- { - std::vector frames; - udpard_tx_t tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 2U, 1U, 1, tx_mem, &tx_vtable)); - tx.user = &frames; - FbState fb_old{}; - FbState fb_new{}; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_fast, - 21, - make_scattered(nullptr, 0), - fb_record, - make_user_context(&fb_old))); - (void)udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_fast, - 22, - make_scattered(nullptr, 0), - fb_record, - make_user_context(&fb_new)); - TEST_ASSERT_EQUAL_size_t(1, fb_old.count); - TEST_ASSERT_EQUAL_UINT32(0, fb_old.acknowledgements); - TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, tx.errors_sacrifice); - TEST_ASSERT_EQUAL_size_t(0, fb_new.count); - release_frames(frames); - udpard_tx_free(&tx); - } - - // Destroying a TX with pending transfers still calls feedback. - { - std::vector frames; - udpard_tx_t tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 3U, 1U, 4, tx_mem, &tx_vtable)); - tx.user = &frames; - FbState fb{}; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_1, - udpard_prio_fast, - 33, - make_scattered(nullptr, 0), - fb_record, - make_user_context(&fb))); - udpard_tx_free(&tx); - TEST_ASSERT_EQUAL_size_t(1, fb.count); - TEST_ASSERT_EQUAL_UINT32(0, fb.acknowledgements); - release_frames(frames); - } - - instrumented_allocator_reset(&tx_alloc_transfer); - instrumented_allocator_reset(&tx_alloc_payload); + return udpard_rx_mem_resources_t{ + .session = instrumented_allocator_make_resource(&session), + .slot = instrumented_allocator_make_resource(&session), + .fragment = instrumented_allocator_make_resource(&fragment), + }; } -/// P2P helper should emit frames with auto transfer-ID and proper addressing. -void test_udpard_tx_push_p2p() +// Delivers one captured frame into RX. 
+void deliver(const CapturedFrame& frame, + const udpard_mem_t mem, + const udpard_deleter_t del, + udpard_rx_t* const rx, + udpard_rx_port_t* const port, + const udpard_us_t ts) { - instrumented_allocator_t tx_alloc_transfer{}; - instrumented_allocator_t tx_alloc_payload{}; - instrumented_allocator_t rx_alloc_frag{}; - instrumented_allocator_t rx_alloc_session{}; - instrumented_allocator_new(&tx_alloc_transfer); - instrumented_allocator_new(&tx_alloc_payload); - instrumented_allocator_new(&rx_alloc_frag); - instrumented_allocator_new(&rx_alloc_session); - udpard_tx_mem_resources_t tx_mem{}; - tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); - for (auto& res : tx_mem.payload) { - res = instrumented_allocator_make_resource(&tx_alloc_payload); - } - udpard_tx_t tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x1122334455667788ULL, 5U, 8, tx_mem, &tx_vtable)); - std::vector frames; - tx.user = &frames; - - const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), - .slot = instrumented_allocator_make_resource(&rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; - udpard_rx_t rx{}; - udpard_rx_port_t port{}; - Context ctx{}; - const udpard_udpip_ep_t source{ .ip = 0x0A0000AAU, .port = 7600U }; - const udpard_udpip_ep_t dest{ .ip = 0x0A000010U, .port = 7400U }; - ctx.expected_uid = tx.local_uid; - ctx.source = source; - rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&port, 1024, rx_mem, &callbacks)); - - const uint64_t remote_uid = 0xCAFEBABECAFED00DULL; - udpard_remote_t remote{}; - remote.uid = remote_uid; - remote.endpoints[0U] = dest; - - const std::array user_payload{ 0xAAU, 0xBBU, 0xCCU }; - const udpard_bytes_scattered_t payload = make_scattered(user_payload.data(), user_payload.size()); - const udpard_us_t now = 0; - uint64_t out_tid = 0; - TEST_ASSERT_TRUE(udpard_tx_push_p2p( - &tx, now, now + 1000000, udpard_prio_nominal, remote, payload, 
nullptr, UDPARD_USER_CONTEXT_NULL, &out_tid)); - udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_FALSE(frames.empty()); - - const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - for (const auto& f : frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, now, source, f.datagram, tx_payload_deleter, f.iface_index)); - } - udpard_rx_poll(&rx, now); - TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size()); - TEST_ASSERT_EQUAL_UINT64(out_tid, ctx.ids[0]); - udpard_rx_port_free(&rx, &port); - udpard_tx_free(&tx); - TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); - instrumented_allocator_reset(&tx_alloc_transfer); - instrumented_allocator_reset(&tx_alloc_payload); - instrumented_allocator_reset(&rx_alloc_frag); - instrumented_allocator_reset(&rx_alloc_session); + void* const dgram = mem_res_alloc(mem, frame.bytes.size()); + TEST_ASSERT_NOT_NULL(dgram); + (void)std::memcpy(dgram, frame.bytes.data(), frame.bytes.size()); + TEST_ASSERT_TRUE(udpard_rx_port_push(rx, + port, + ts, + udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 9382U }, + udpard_bytes_mut_t{ .size = frame.bytes.size(), .data = dgram }, + del, + frame.iface_index)); } -/// Test TX with minimum MTU to verify fragmentation at the edge. -void test_udpard_tx_minimum_mtu() +void test_zero_payload_transfer() { + seed_prng(); + + // Configure TX and RX. 
instrumented_allocator_t tx_alloc_transfer{}; instrumented_allocator_t tx_alloc_payload{}; - instrumented_allocator_t rx_alloc_frag{}; instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t rx_alloc_fragment{}; instrumented_allocator_new(&tx_alloc_transfer); instrumented_allocator_new(&tx_alloc_payload); - instrumented_allocator_new(&rx_alloc_frag); instrumented_allocator_new(&rx_alloc_session); + instrumented_allocator_new(&rx_alloc_fragment); - udpard_tx_mem_resources_t tx_mem{}; - tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); - for (auto& res : tx_mem.payload) { - res = instrumented_allocator_make_resource(&tx_alloc_payload); - } - const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), - .slot = instrumented_allocator_make_resource(&rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; - - udpard_tx_t tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0xDEADBEEF12345678ULL, 100U, 256, tx_mem, &tx_vtable)); + udpard_tx_t tx{}; std::vector frames; - tx.user = &frames; - - // Set MTU to minimum value - for (auto& mtu : tx.mtu) { - mtu = UDPARD_MTU_MIN; - } - - udpard_rx_t rx{}; - udpard_rx_port_t port{}; - Context ctx{}; - ctx.expected_uid = tx.local_uid; - ctx.source = { .ip = 0x0A000001U, .port = 7501U }; - udpard_rx_new(&rx, nullptr); - rx.user = &ctx; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 4096, rx_mem, &callbacks)); - - // Send a payload that will require fragmentation at minimum MTU - std::array payload{}; - for (size_t i = 0; i < payload.size(); i++) { - payload[i] = static_cast(i & 0xFFU); - } - - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - - const udpard_us_t now = 0; - frames.clear(); + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, 0x1111222233334444ULL, 123U, 8U, make_tx_mem(tx_alloc_transfer, tx_alloc_payload), &tx_vtable)); + 
tx.mtu[0] = 128U; + tx.mtu[1] = 128U; + tx.mtu[2] = 128U; + tx.user = &frames; + + const auto rx_mem = make_rx_mem(rx_alloc_session, rx_alloc_fragment); + const udpard_deleter_t del = instrumented_allocator_make_deleter(&rx_alloc_fragment); + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxState state{}; + udpard_rx_new(&rx); + rx.user = &state; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, rx_mem, &rx_vtable)); + + // Send a zero-size payload transfer. TEST_ASSERT_TRUE(udpard_tx_push(&tx, - now, - now + 1000000, - iface_bitmap_1, + 100, + 10000, + 1U, udpard_prio_nominal, 1U, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); - - // With minimum MTU, we should have multiple frames - TEST_ASSERT_TRUE(frames.size() > 1); - - // Deliver frames to RX - const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - for (const auto& f : frames) { - TEST_ASSERT_TRUE( - udpard_rx_port_push(&rx, &port, now, ctx.source, f.datagram, tx_payload_deleter, f.iface_index)); - } - udpard_rx_poll(&rx, now); - - // Verify the transfer was received correctly - TEST_ASSERT_EQUAL_size_t(1, ctx.ids.size()); - TEST_ASSERT_EQUAL_UINT64(1U, ctx.ids[0]); - - // Cleanup + udpard_make_subject_endpoint(1U), + make_scattered(nullptr, 0U), + nullptr)); + udpard_tx_poll(&tx, 200, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1, frames.size()); + + // Deliver and verify. + deliver(frames.front(), rx_mem.fragment, del, &rx, &port, 300); + udpard_rx_poll(&rx, 400); + TEST_ASSERT_EQUAL_size_t(1, state.count); + TEST_ASSERT_EQUAL_size_t(0, state.payload.size()); + TEST_ASSERT_EQUAL_size_t(0, state.payload_size_wire); + + // Release all resources. 
udpard_rx_port_free(&rx, &port); udpard_tx_free(&tx); - TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_fragment.allocated_fragments); instrumented_allocator_reset(&tx_alloc_transfer); instrumented_allocator_reset(&tx_alloc_payload); - instrumented_allocator_reset(&rx_alloc_frag); instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&rx_alloc_fragment); } -/// Test with transfer-ID at uint64 boundary values (0, large values) -void test_udpard_transfer_id_boundaries() +void test_out_of_order_multiframe_reassembly() { - Fixture fix{}; - - // Test transfer-ID = 0 (first valid value) - fix.push_single(0, 0); - udpard_rx_poll(&fix.rx, 0); - TEST_ASSERT_EQUAL_size_t(1, fix.ctx.ids.size()); - TEST_ASSERT_EQUAL_UINT64(0U, fix.ctx.ids[0]); - - // Test a large transfer-ID value - fix.push_single(1, 0x7FFFFFFFFFFFFFFFULL); // Large but not at the extreme edge - udpard_rx_poll(&fix.rx, 1); - TEST_ASSERT_EQUAL_size_t(2, fix.ctx.ids.size()); - TEST_ASSERT_EQUAL_UINT64(0x7FFFFFFFFFFFFFFFULL, fix.ctx.ids[1]); - - // Test another large value to verify the history doesn't reject it - fix.push_single(2, 0x8000000000000000ULL); - udpard_rx_poll(&fix.rx, 2); - TEST_ASSERT_EQUAL_size_t(3, fix.ctx.ids.size()); - TEST_ASSERT_EQUAL_UINT64(0x8000000000000000ULL, fix.ctx.ids[2]); -} + seed_prng(); -/// Test zero extent handling - should accept transfers but truncate payload -void test_udpard_rx_zero_extent() -{ + // Configure TX and RX. 
instrumented_allocator_t tx_alloc_transfer{}; instrumented_allocator_t tx_alloc_payload{}; - instrumented_allocator_t rx_alloc_frag{}; instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t rx_alloc_fragment{}; instrumented_allocator_new(&tx_alloc_transfer); instrumented_allocator_new(&tx_alloc_payload); - instrumented_allocator_new(&rx_alloc_frag); instrumented_allocator_new(&rx_alloc_session); + instrumented_allocator_new(&rx_alloc_fragment); - udpard_tx_mem_resources_t tx_mem{}; - tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); - for (auto& res : tx_mem.payload) { - res = instrumented_allocator_make_resource(&tx_alloc_payload); - } - const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), - .slot = instrumented_allocator_make_resource(&rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; - - udpard_tx_t tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0xAAAABBBBCCCCDDDDULL, 200U, 64, tx_mem, &tx_vtable)); + udpard_tx_t tx{}; std::vector frames; - tx.user = &frames; - - udpard_rx_t rx{}; - udpard_rx_port_t port{}; - udpard_rx_new(&rx, nullptr); - - // Create port with zero extent - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 0, rx_mem, &callbacks)); - - // Track received transfers - struct ZeroExtentContext - { - size_t count = 0; - size_t payload_size_stored = 0; - size_t payload_size_wire = 0; - }; - ZeroExtentContext zctx{}; - - // Custom callback for zero extent test - struct ZeroExtentCallbacks - { - static void on_message(udpard_rx_t* const rx_arg, - udpard_rx_port_t* const port_arg, - const udpard_rx_transfer_t transfer) - { - auto* z = static_cast(rx_arg->user); - z->count++; - z->payload_size_stored = transfer.payload_size_stored; - z->payload_size_wire = transfer.payload_size_wire; - udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port_arg->memory.fragment)); - } - }; - static constexpr udpard_rx_port_vtable_t 
zero_callbacks{ .on_message = &ZeroExtentCallbacks::on_message }; - port.vtable = &zero_callbacks; - rx.user = &zctx; - - // Send a small single-frame transfer - std::array payload{}; - for (size_t i = 0; i < payload.size(); i++) { - payload[i] = static_cast(i); + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, 0xAAAABBBBCCCCDDDDULL, 321U, 32U, make_tx_mem(tx_alloc_transfer, tx_alloc_payload), &tx_vtable)); + tx.mtu[0] = 96U; + tx.mtu[1] = 96U; + tx.mtu[2] = 96U; + tx.user = &frames; + + const auto rx_mem = make_rx_mem(rx_alloc_session, rx_alloc_fragment); + const udpard_deleter_t del = instrumented_allocator_make_deleter(&rx_alloc_fragment); + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxState state{}; + udpard_rx_new(&rx); + rx.user = &state; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 4096U, rx_mem, &rx_vtable)); + + // Send a payload that spans multiple frames. + std::vector payload(280U); + for (std::size_t i = 0; i < payload.size(); i++) { + payload[i] = static_cast(i ^ 0x5AU); } - - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - const udpard_udpip_ep_t source{ .ip = 0x0A000002U, .port = 7502U }; - - const udpard_us_t now = 0; - frames.clear(); + const std::uint64_t transfer_id = 0xABCDEF0123456789ULL; TEST_ASSERT_TRUE(udpard_tx_push(&tx, - now, - now + 1000000, - iface_bitmap_1, - udpard_prio_nominal, - 5U, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_FALSE(frames.empty()); - - // Deliver to RX with zero extent - const udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - for (const auto& f : frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, now, source, f.datagram, tx_payload_deleter, f.iface_index)); + 1000, + 100000, + 1U, + udpard_prio_fast, + transfer_id, + udpard_make_subject_endpoint(55U), + make_scattered(payload.data(), 
payload.size()), + nullptr)); + udpard_tx_poll(&tx, 1001, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_TRUE(!frames.empty()); + + // Deliver frames in reverse order to exercise out-of-order reassembly. + std::reverse(frames.begin(), frames.end()); + udpard_us_t ts = 2000; + for (const auto& frame : frames) { + deliver(frame, rx_mem.fragment, del, &rx, &port, ts++); } - udpard_rx_poll(&rx, now); + udpard_rx_poll(&rx, ts + 10); - // Transfer should be received - zero extent means minimal/no truncation for single-frame - // The library may still store some payload for single-frame transfers even with zero extent - TEST_ASSERT_EQUAL_size_t(1, zctx.count); - TEST_ASSERT_TRUE(zctx.payload_size_stored <= payload.size()); // At most the original size - TEST_ASSERT_EQUAL_size_t(payload.size(), zctx.payload_size_wire); // Wire size is original + // Verify that transfer reassembled correctly. + TEST_ASSERT_EQUAL_size_t(1, state.count); + TEST_ASSERT_EQUAL_UINT64(transfer_id & UDPARD_TRANSFER_ID_MASK, state.transfer_id); + TEST_ASSERT_EQUAL_size_t(payload.size(), state.payload.size()); + TEST_ASSERT_EQUAL_MEMORY(payload.data(), state.payload.data(), payload.size()); - // Cleanup + // Release all resources. 
udpard_rx_port_free(&rx, &port); udpard_tx_free(&tx); - TEST_ASSERT_EQUAL(0, tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL(0, tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL(0, rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_fragment.allocated_fragments); instrumented_allocator_reset(&tx_alloc_transfer); instrumented_allocator_reset(&tx_alloc_payload); - instrumented_allocator_reset(&rx_alloc_frag); instrumented_allocator_reset(&rx_alloc_session); -} - -/// Test empty payload transfer (zero-size payload) -void test_udpard_empty_payload() -{ - Fixture fix{}; - - // Send an empty payload - fix.frames.clear(); - const udpard_bytes_scattered_t empty_payload = make_scattered(nullptr, 0); - const udpard_us_t deadline = 1000000; - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - - TEST_ASSERT_TRUE(udpard_tx_push(&fix.tx, - 0, - deadline, - iface_bitmap_1, - udpard_prio_nominal, - 10U, - empty_payload, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - udpard_tx_poll(&fix.tx, 0, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_FALSE(fix.frames.empty()); - - // Deliver to RX - for (const auto& f : fix.frames) { - TEST_ASSERT_TRUE( - udpard_rx_port_push(&fix.rx, &fix.port, 0, fix.source, f.datagram, fix.tx_payload_deleter, f.iface_index)); - } - udpard_rx_poll(&fix.rx, 0); - - // Empty transfer should be received - TEST_ASSERT_EQUAL_size_t(1, fix.ctx.ids.size()); - TEST_ASSERT_EQUAL_UINT64(10U, fix.ctx.ids[0]); -} - -/// Test priority levels from exceptional (0) to optional (7) -void test_udpard_all_priority_levels() -{ - Fixture fix{}; - udpard_us_t now = 0; - - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - - // Test all 8 priority levels - for (uint8_t prio = 0; prio < 
UDPARD_PRIORITY_COUNT; prio++) { - fix.frames.clear(); - std::array payload{}; - payload[0] = prio; - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - - TEST_ASSERT_TRUE(udpard_tx_push(&fix.tx, - now, - now + 1000000, - iface_bitmap_1, - static_cast(prio), - 100U + prio, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - udpard_tx_poll(&fix.tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_FALSE(fix.frames.empty()); - - for (const auto& f : fix.frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push( - &fix.rx, &fix.port, now, fix.source, f.datagram, fix.tx_payload_deleter, f.iface_index)); - } - udpard_rx_poll(&fix.rx, now); - now++; - } - - // All 8 transfers should be received - TEST_ASSERT_EQUAL_size_t(UDPARD_PRIORITY_COUNT, fix.ctx.ids.size()); - for (uint8_t prio = 0; prio < UDPARD_PRIORITY_COUNT; prio++) { - TEST_ASSERT_EQUAL_UINT64(100U + prio, fix.ctx.ids[prio]); - } + instrumented_allocator_reset(&rx_alloc_fragment); } } // namespace -extern "C" void setUp() {} - -extern "C" void tearDown() {} +void setUp() {} +void tearDown() {} int main() { UNITY_BEGIN(); - RUN_TEST(test_udpard_rx_unordered_duplicates); - RUN_TEST(test_udpard_tx_feedback_always_called); - RUN_TEST(test_udpard_tx_push_p2p); - RUN_TEST(test_udpard_tx_minimum_mtu); - RUN_TEST(test_udpard_transfer_id_boundaries); - RUN_TEST(test_udpard_rx_zero_extent); - RUN_TEST(test_udpard_empty_payload); - RUN_TEST(test_udpard_all_priority_levels); + RUN_TEST(test_zero_payload_transfer); + RUN_TEST(test_out_of_order_multiframe_reassembly); return UNITY_END(); } diff --git a/tests/src/test_e2e_random.cpp b/tests/src/test_e2e_random.cpp index 92da30b..eb3cc12 100644 --- a/tests/src/test_e2e_random.cpp +++ b/tests/src/test_e2e_random.cpp @@ -3,392 +3,221 @@ /// Copyright Amazon.com Inc. or its affiliates. 
/// SPDX-License-Identifier: MIT -// ReSharper disable CppPassValueParameterByConstReference - #include #include "helpers.h" #include #include -#include +#include +#include +#include #include #include namespace { -struct TransferKey -{ - uint64_t transfer_id; - bool operator==(const TransferKey& other) const { return transfer_id == other.transfer_id; } -}; - -struct TransferKeyHash -{ - size_t operator()(const TransferKey& key) const { return std::hash{}(key.transfer_id); } -}; - -struct ExpectedPayload -{ - std::vector payload; - size_t payload_size_wire; -}; - -struct Context +struct CapturedFrame { - std::unordered_map expected; - size_t received = 0; - size_t truncated = 0; - uint64_t remote_uid = 0; - size_t reliable_feedback_success = 0; - size_t reliable_feedback_failure = 0; - std::array remote_endpoints{}; + std::vector bytes; + std::uint_fast8_t iface_index = 0; }; -struct Arrival +struct ReceivedTransfer { - udpard_bytes_mut_t datagram; - uint_fast8_t iface_index; + std::vector payload; + std::size_t count = 0; }; -struct CapturedFrame +struct RxContext { - udpard_bytes_mut_t datagram; - uint_fast8_t iface_index; + std::unordered_map received; }; -size_t random_range(const size_t min, const size_t max) -{ - const size_t span = max - min + 1U; - return min + (static_cast(rand()) % span); -} - -void fill_random(std::vector& data) -{ - for (auto& byte : data) { - byte = static_cast(random_range(0, UINT8_MAX)); - } -} - -void shuffle_frames(std::vector& frames) +// Captures every ejected frame. +bool capture_tx(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { - for (size_t i = frames.size(); i > 1; i--) { - const size_t j = random_range(0, i - 1); - std::swap(frames[i - 1U], frames[j]); - } -} - -void tx_refcount_free(void* const user, const size_t size, void* const payload) -{ - (void)user; - udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); -} - -// Shared deleter for captured TX frames. 
-constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free }; - -bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - auto* frames = static_cast*>(tx->user); - if (frames == nullptr) { + auto* out = static_cast*>(tx->user); + if (out == nullptr) { return false; } - udpard_tx_refcount_inc(ejection->datagram); - void* const data = const_cast(ejection->datagram.data); // NOLINT(cppcoreguidelines-pro-type-const-cast) - frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data }, - .iface_index = ejection->iface_index }); + CapturedFrame frame{}; + frame.bytes.assign(static_cast(ejection->datagram.data), + static_cast(ejection->datagram.data) + ejection->datagram.size); + frame.iface_index = ejection->iface_index; + out->push_back(std::move(frame)); return true; } -bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - return capture_tx_frame_impl(tx, ejection); -} +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx }; -bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) +// Records received transfer payload by transfer-ID. 
+void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { - return capture_tx_frame_impl(tx, ejection); + auto* ctx = static_cast(rx->user); + TEST_ASSERT_NOT_NULL(ctx); + auto& rec = ctx->received[transfer.transfer_id]; + rec.count++; + rec.payload.resize(transfer.payload_size_stored); + const udpard_fragment_t* cursor = transfer.payload; + (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, rec.payload.data()); + udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_tx_frame_subject, - .eject_p2p = &capture_tx_frame_p2p }; +constexpr udpard_rx_port_vtable_t rx_vtable{ .on_message = &on_message }; -void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) +// Builds TX memory resources. +udpard_tx_mem_resources_t make_tx_mem(instrumented_allocator_t& transfer, instrumented_allocator_t& payload) { - auto* ctx = static_cast(fb.user.ptr[0]); - if (ctx != nullptr) { - if (fb.acknowledgements > 0U) { - ctx->reliable_feedback_success++; - } else { - ctx->reliable_feedback_failure++; - } + udpard_tx_mem_resources_t out{}; + out.transfer = instrumented_allocator_make_resource(&transfer); + for (auto& r : out.payload) { + r = instrumented_allocator_make_resource(&payload); } + return out; } -void on_ack_response(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) +// Builds RX memory resources. 
+udpard_rx_mem_resources_t make_rx_mem(instrumented_allocator_t& session, instrumented_allocator_t& fragment) { - udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); + return udpard_rx_mem_resources_t{ + .session = instrumented_allocator_make_resource(&session), + .slot = instrumented_allocator_make_resource(&session), + .fragment = instrumented_allocator_make_resource(&fragment), + }; } -constexpr udpard_rx_port_vtable_t ack_callbacks{ .on_message = &on_ack_response }; -void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) -{ - auto* const ctx = static_cast(rx->user); - - // Match the incoming transfer against the expected table keyed by topic hash and transfer-ID. - const TransferKey key{ .transfer_id = transfer.transfer_id }; - const auto it = ctx->expected.find(key); - if (it == ctx->expected.end()) { - udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); - return; - } - - // Gather fragments into a contiguous buffer so we can compare the stored prefix (payload may be truncated). - std::vector assembled(transfer.payload_size_stored); - const udpard_fragment_t* payload_cursor = transfer.payload; - const size_t gathered = udpard_fragment_gather(&payload_cursor, 0, transfer.payload_size_stored, assembled.data()); - TEST_ASSERT_EQUAL_size_t(transfer.payload_size_stored, gathered); - TEST_ASSERT_TRUE(transfer.payload_size_stored <= it->second.payload.size()); - TEST_ASSERT_EQUAL_size_t(it->second.payload_size_wire, transfer.payload_size_wire); - if (transfer.payload_size_stored > 0U) { - TEST_ASSERT_EQUAL_MEMORY(it->second.payload.data(), assembled.data(), transfer.payload_size_stored); - } - - // Verify remote and the return path discovery. 
- TEST_ASSERT_EQUAL_UINT64(ctx->remote_uid, transfer.remote.uid); - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - if ((transfer.remote.endpoints[i].ip != 0U) || (transfer.remote.endpoints[i].port != 0U)) { - TEST_ASSERT_EQUAL_UINT32(ctx->remote_endpoints[i].ip, transfer.remote.endpoints[i].ip); - TEST_ASSERT_EQUAL_UINT16(ctx->remote_endpoints[i].port, transfer.remote.endpoints[i].port); - } - } - if (transfer.payload_size_stored < transfer.payload_size_wire) { - ctx->truncated++; - } - - // Clean up. - udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); - ctx->expected.erase(it); - ctx->received++; +// Delivers one captured frame to RX. +void deliver(const CapturedFrame& frame, + const udpard_mem_t mem, + const udpard_deleter_t del, + udpard_rx_t* const rx, + udpard_rx_port_t* const port, + const udpard_us_t ts) +{ + void* const dgram = mem_res_alloc(mem, frame.bytes.size()); + TEST_ASSERT_NOT_NULL(dgram); + (void)std::memcpy(dgram, frame.bytes.data(), frame.bytes.size()); + TEST_ASSERT_TRUE(udpard_rx_port_push(rx, + port, + ts, + udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 9382U }, + udpard_bytes_mut_t{ .size = frame.bytes.size(), .data = dgram }, + del, + frame.iface_index)); } -constexpr udpard_rx_port_vtable_t callbacks{ .on_message = &on_message }; - -/// Randomized end-to-end TX/RX covering fragmentation, reordering, and extent-driven truncation. -void test_udpard_tx_rx_end_to_end() +void test_randomized_deduplication() { seed_prng(); + std::mt19937 prng{ static_cast(rand()) }; + std::uniform_int_distribution payload_len{ 0, 180 }; - // TX allocator setup and pipeline initialization. + // Configure TX. 
instrumented_allocator_t tx_alloc_transfer{}; - instrumented_allocator_new(&tx_alloc_transfer); instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_new(&tx_alloc_transfer); instrumented_allocator_new(&tx_alloc_payload); - udpard_tx_mem_resources_t tx_mem{}; - tx_mem.transfer = instrumented_allocator_make_resource(&tx_alloc_transfer); - for (auto& res : tx_mem.payload) { - res = instrumented_allocator_make_resource(&tx_alloc_payload); - } - udpard_tx_t tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 0x0A0B0C0D0E0F1011ULL, 123U, 256, tx_mem, &tx_vtable)); - instrumented_allocator_t ack_alloc_transfer{}; - instrumented_allocator_t ack_alloc_payload{}; - instrumented_allocator_new(&ack_alloc_transfer); - instrumented_allocator_new(&ack_alloc_payload); - udpard_tx_mem_resources_t ack_mem{}; - ack_mem.transfer = instrumented_allocator_make_resource(&ack_alloc_transfer); - for (auto& res : ack_mem.payload) { - res = instrumented_allocator_make_resource(&ack_alloc_payload); - } - udpard_tx_t ack_tx{}; - TEST_ASSERT_TRUE(udpard_tx_new(&ack_tx, 0x1020304050607080ULL, 321U, 256, ack_mem, &tx_vtable)); - - // RX allocator setup and shared RX instance with callbacks. - instrumented_allocator_t rx_alloc_frag{}; - instrumented_allocator_new(&rx_alloc_frag); + udpard_tx_t tx{}; + std::vector frames; + TEST_ASSERT_TRUE(udpard_tx_new( + &tx, 0x1010101010101010ULL, 123U, 512U, make_tx_mem(tx_alloc_transfer, tx_alloc_payload), &tx_vtable)); + tx.mtu[0] = 192U; + tx.mtu[1] = 192U; + tx.mtu[2] = 192U; + tx.user = &frames; + + // Configure RX. 
instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t rx_alloc_fragment{}; instrumented_allocator_new(&rx_alloc_session); - const udpard_rx_mem_resources_t rx_mem{ .session = instrumented_allocator_make_resource(&rx_alloc_session), - .slot = instrumented_allocator_make_resource(&rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&rx_alloc_frag) }; - udpard_rx_t rx; - udpard_rx_new(&rx, &ack_tx); - instrumented_allocator_t ack_rx_alloc_frag{}; - instrumented_allocator_t ack_rx_alloc_session{}; - instrumented_allocator_new(&ack_rx_alloc_frag); - instrumented_allocator_new(&ack_rx_alloc_session); - const udpard_rx_mem_resources_t ack_rx_mem{ .session = instrumented_allocator_make_resource(&ack_rx_alloc_session), - .slot = instrumented_allocator_make_resource(&ack_rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&ack_rx_alloc_frag) }; - udpard_rx_t ack_rx{}; - udpard_rx_port_t ack_port{}; - udpard_rx_new(&ack_rx, &tx); - - // Test parameters. - constexpr std::array extents{ 1000, 5000, SIZE_MAX }; - - // Configure ports with varied extents and reordering windows to cover truncation and different RX modes. - std::array ports{}; - for (size_t i = 0; i < ports.size(); i++) { - TEST_ASSERT_TRUE(udpard_rx_port_new(&ports[i], extents[i], rx_mem, &callbacks)); - } - - // Setup the context. 
- Context ctx{}; - ctx.remote_uid = tx.local_uid; - for (size_t i = 0; i < ports.size(); i++) { - ctx.remote_endpoints[i] = { .ip = static_cast(0x0A000001U + i), - .port = static_cast(7400U + i) }; - } + instrumented_allocator_new(&rx_alloc_fragment); + const auto rx_mem = make_rx_mem(rx_alloc_session, rx_alloc_fragment); + const udpard_deleter_t del = instrumented_allocator_make_deleter(&rx_alloc_fragment); + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxContext ctx{}; + udpard_rx_new(&rx); rx.user = &ctx; - constexpr udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - // Ack path wiring. - std::vector frames; - tx.user = &frames; - std::vector ack_frames; - ack_tx.user = &ack_frames; - TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&ack_port, 16, ack_rx_mem, &ack_callbacks)); - std::array ack_sources{}; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - ack_sources[i] = { .ip = static_cast(0x0A000020U + i), .port = static_cast(7700U + i) }; - } - - // Main test loop: generate transfers, push into TX, drain and shuffle frames, push into RX. - uint64_t next_transfer_id = (static_cast(rand()) << 32U) ^ static_cast(rand()); - size_t reliable_total = 0; - udpard_us_t now = 0; - for (size_t transfer_index = 0; transfer_index < 1000; transfer_index++) { - now += static_cast(random_range(1000, 5000)); - frames.clear(); - - // Pick a port, build a random payload, and remember what to expect on that topic. - const size_t port_index = random_range(0, ports.size() - 1U); - const uint64_t transfer_id = next_transfer_id++; - const size_t payload_size = random_range(0, 10000); - std::vector payload(payload_size); - fill_random(payload); - const bool reliable = (random_range(0, 3) == 0); // About a quarter reliable. - if (reliable) { - reliable_total++; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 2048U, rx_mem, &rx_vtable)); + + // Push many transfers and keep the expected payload map. 
+ std::unordered_map> expected; + constexpr std::size_t transfer_count = 80U; + for (std::size_t i = 0; i < transfer_count; i++) { + const auto len = static_cast(payload_len(prng)); + std::vector payload(len); + for (std::size_t j = 0; j < len; j++) { + payload[j] = static_cast(prng() & 0xFFU); } - - // Each transfer is sent on all redundant interfaces with different MTUs to exercise fragmentation variety. - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - const auto priority = static_cast(random_range(0, UDPARD_PRIORITY_COUNT - 1U)); - const TransferKey key{ .transfer_id = transfer_id }; - const bool inserted = - ctx.expected.emplace(key, ExpectedPayload{ .payload = payload, .payload_size_wire = payload.size() }).second; - TEST_ASSERT_TRUE(inserted); - - // Generate MTUs per redundant interface. - std::array mtu_values{}; - for (auto& x : mtu_values) { - x = random_range(UDPARD_MTU_MIN, 3000U); - } - for (size_t iface = 0; iface < UDPARD_IFACE_COUNT_MAX; iface++) { - tx.mtu[iface] = mtu_values[iface]; - } - // Enqueue one transfer spanning all interfaces. - const udpard_us_t deadline = now + 1000000; + const std::uint64_t transfer_id = 1000U + i; + expected[transfer_id] = payload; TEST_ASSERT_TRUE(udpard_tx_push(&tx, - now, - deadline, - UDPARD_IFACE_BITMAP_ALL, - priority, + 1000 + static_cast(i), + 1000000, + (1U << 0U) | (1U << 1U), + udpard_prio_nominal, transfer_id, - payload_view, - reliable ? &record_feedback : nullptr, - reliable ? make_user_context(&ctx) : UDPARD_USER_CONTEXT_NULL)); - udpard_tx_poll(&tx, now, UDPARD_IFACE_BITMAP_ALL); + udpard_make_subject_endpoint(77U), + make_scattered(payload.data(), payload.size()), + nullptr)); + udpard_tx_poll(&tx, 2000 + static_cast(i), UDPARD_IFACE_BITMAP_ALL); + } - // Shuffle and push frames into the RX pipeline, simulating out-of-order redundant arrival. 
- std::vector arrivals; - arrivals.reserve(frames.size()); - for (const auto& [datagram, iface_index] : frames) { - arrivals.push_back(Arrival{ .datagram = datagram, .iface_index = iface_index }); - } - shuffle_frames(arrivals); - const size_t keep_iface = reliable ? random_range(0, UDPARD_IFACE_COUNT_MAX - 1U) : 0U; - const size_t loss_iface = reliable ? ((keep_iface + 1U) % UDPARD_IFACE_COUNT_MAX) : UDPARD_IFACE_COUNT_MAX; - const size_t ack_loss_iface = loss_iface; - for (const auto& [datagram, iface_index] : arrivals) { - const bool drop = reliable && (iface_index == loss_iface) && ((rand() % 3) == 0); - if (drop) { - udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); - } else { - TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, - &ports[port_index], - now, - ctx.remote_endpoints[iface_index], - datagram, - tx_payload_deleter, - iface_index)); - } - now += 1; + // Randomize arrival order and inject all captured frames. + std::shuffle(frames.begin(), frames.end(), prng); + udpard_us_t ts = 5000; + for (const auto& frame : frames) { + deliver(frame, rx_mem.fragment, del, &rx, &port, ts++); + } + udpard_rx_poll(&rx, ts + 10); + + // Payloads must match; one transfer may be skipped due RX history initialization policy. + TEST_ASSERT_LESS_OR_EQUAL_size_t(expected.size(), ctx.received.size()); + TEST_ASSERT_GREATER_OR_EQUAL_size_t(expected.size() - 1U, ctx.received.size()); + for (const auto& [transfer_id, payload] : ctx.received) { + auto it = expected.find(transfer_id); + TEST_ASSERT_TRUE(it != expected.end()); + TEST_ASSERT_GREATER_OR_EQUAL_size_t(1, payload.count); + TEST_ASSERT_EQUAL_size_t(it->second.size(), payload.payload.size()); + if (!it->second.empty()) { + TEST_ASSERT_EQUAL_MEMORY(it->second.data(), payload.payload.data(), it->second.size()); } - - // Let the RX pipeline purge timeouts and deliver ready transfers. 
- udpard_rx_poll(&rx, now); - ack_frames.clear(); - udpard_tx_poll(&ack_tx, now, UDPARD_IFACE_BITMAP_ALL); - bool ack_delivered = false; - for (const auto& [datagram, iface_index] : ack_frames) { - const bool drop_ack = reliable && (iface_index == ack_loss_iface); - if (drop_ack) { - udpard_tx_refcount_dec(udpard_bytes_t{ .size = datagram.size, .data = datagram.data }); - continue; - } - ack_delivered = true; - TEST_ASSERT_TRUE(udpard_rx_port_push( - &ack_rx, &ack_port, now, ack_sources[iface_index], datagram, tx_payload_deleter, iface_index)); + } + size_t missing = 0U; + for (const auto& [transfer_id, payload] : expected) { + auto it = ctx.received.find(transfer_id); + if (it == ctx.received.end()) { + missing++; + continue; } - if (reliable && !ack_delivered && !ack_frames.empty()) { - const auto& [datagram, iface_index] = ack_frames.front(); - TEST_ASSERT_TRUE(udpard_rx_port_push( - &ack_rx, &ack_port, now, ack_sources[iface_index], datagram, tx_payload_deleter, iface_index)); + TEST_ASSERT_GREATER_OR_EQUAL_size_t(1, it->second.count); + TEST_ASSERT_EQUAL_size_t(payload.size(), it->second.payload.size()); + if (!payload.empty()) { + TEST_ASSERT_EQUAL_MEMORY(payload.data(), it->second.payload.data(), payload.size()); } - udpard_rx_poll(&ack_rx, now); } + TEST_ASSERT_LESS_OR_EQUAL_size_t(1, missing); - // Final poll/validation and cleanup. - udpard_rx_poll(&rx, now + 1000000); - udpard_rx_poll(&ack_rx, now + 1000000); - TEST_ASSERT_TRUE(ctx.expected.empty()); - TEST_ASSERT_EQUAL_size_t(1000, ctx.received); - TEST_ASSERT_TRUE(ctx.truncated > 0); - TEST_ASSERT_EQUAL_size_t(reliable_total, ctx.reliable_feedback_success); - TEST_ASSERT_EQUAL_size_t(0, ctx.reliable_feedback_failure); - for (auto& port : ports) { - udpard_rx_port_free(&rx, &port); - } - udpard_rx_port_free(&ack_rx, &ack_port); + // Release resources. 
+ udpard_rx_port_free(&rx, &port); udpard_tx_free(&tx); - udpard_tx_free(&ack_tx); - TEST_ASSERT_EQUAL_size_t(0, rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, ack_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, ack_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, ack_rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, ack_rx_alloc_session.allocated_fragments); - instrumented_allocator_reset(&rx_alloc_frag); - instrumented_allocator_reset(&rx_alloc_session); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_fragment.allocated_fragments); instrumented_allocator_reset(&tx_alloc_transfer); instrumented_allocator_reset(&tx_alloc_payload); - instrumented_allocator_reset(&ack_alloc_transfer); - instrumented_allocator_reset(&ack_alloc_payload); - instrumented_allocator_reset(&ack_rx_alloc_frag); - instrumented_allocator_reset(&ack_rx_alloc_session); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&rx_alloc_fragment); } } // namespace -extern "C" void setUp() {} - -extern "C" void tearDown() {} +void setUp() {} +void tearDown() {} int main() { UNITY_BEGIN(); - RUN_TEST(test_udpard_tx_rx_end_to_end); + RUN_TEST(test_randomized_deduplication); return UNITY_END(); } diff --git a/tests/src/test_e2e_responses.cpp b/tests/src/test_e2e_responses.cpp index 8ec7e9f..bbc76aa 100644 --- a/tests/src/test_e2e_responses.cpp +++ b/tests/src/test_e2e_responses.cpp @@ -6,767 +6,175 @@ #include #include "helpers.h" #include -#include -#include +#include #include +#include namespace { -// -------------------------------------------------------------------------------------------------------------------- -// COMMON INFRASTRUCTURE -// 
-------------------------------------------------------------------------------------------------------------------- - struct CapturedFrame { - udpard_bytes_mut_t datagram; - uint_fast8_t iface_index; + std::vector bytes; + uint_fast8_t iface_index = 0; + udpard_udpip_ep_t destination{}; }; -void tx_refcount_free(void* const user, const size_t size, void* const payload) +struct RxState { - (void)user; - udpard_tx_refcount_dec(udpard_bytes_t{ .size = size, .data = payload }); -} + std::size_t count; + uint64_t transfer_id; + std::vector payload; + udpard_remote_t remote; +}; -bool capture_tx_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +// Captures TX ejections for manual RX delivery. +bool capture_tx(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { - auto* frames = static_cast*>(tx->user); - if (frames == nullptr) { + auto* out = static_cast*>(tx->user); + if (out == nullptr) { return false; } - udpard_tx_refcount_inc(ejection->datagram); - void* const data = const_cast(ejection->datagram.data); // NOLINT - frames->push_back(CapturedFrame{ .datagram = { .size = ejection->datagram.size, .data = data }, - .iface_index = ejection->iface_index }); + CapturedFrame frame{}; + frame.bytes.assign(static_cast(ejection->datagram.data), + static_cast(ejection->datagram.data) + ejection->datagram.size); + frame.iface_index = ejection->iface_index; + frame.destination = ejection->destination; + out->push_back(frame); return true; } -bool capture_tx_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - return capture_tx_frame_impl(tx, ejection); -} - -bool capture_tx_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) -{ - return capture_tx_frame_impl(tx, ejection); -} - -void drop_frame(const CapturedFrame& frame) -{ - udpard_tx_refcount_dec(udpard_bytes_t{ .size = frame.datagram.size, .data = frame.datagram.data }); -} - -constexpr udpard_tx_vtable_t tx_vtable{ 
.eject_subject = &capture_tx_frame_subject, - .eject_p2p = &capture_tx_frame_p2p }; -// Shared deleter for captured TX frames. -constexpr udpard_deleter_vtable_t tx_refcount_deleter_vt{ .free = &tx_refcount_free }; -constexpr udpard_deleter_t tx_payload_deleter{ .vtable = &tx_refcount_deleter_vt, .context = nullptr }; - -// Check the ACK flag in the Cyphal/UDP header. -constexpr size_t HeaderSizeBytes = 40U; -bool is_ack_frame(const udpard_bytes_mut_t& datagram) -{ - if (datagram.size < HeaderSizeBytes) { - return false; - } - const auto* p = static_cast(datagram.data); - return p[1] == 2U; -} - -// -------------------------------------------------------------------------------------------------------------------- -// FEEDBACK AND CONTEXT STRUCTURES -// -------------------------------------------------------------------------------------------------------------------- - -struct FeedbackState -{ - size_t count = 0; - uint16_t acknowledgements = 0; -}; - -void record_feedback(udpard_tx_t*, const udpard_tx_feedback_t fb) -{ - auto* st = static_cast(fb.user.ptr[0]); - if (st != nullptr) { - st->count++; - st->acknowledgements = fb.acknowledgements; - } -} - -struct NodeBTopicContext -{ - std::vector received_payload; - std::array sender_sources{}; - uint64_t sender_uid = 0; - uint64_t received_tid = 0; - size_t message_count = 0; -}; - -struct NodeAResponseContext -{ - std::vector received_response; - uint64_t transfer_id = 0; - size_t response_count = 0; -}; - -// Combined context for a node's RX instance -struct NodeContext -{ - NodeBTopicContext* topic_ctx = nullptr; - NodeAResponseContext* response_ctx = nullptr; -}; - -// -------------------------------------------------------------------------------------------------------------------- -// CALLBACK IMPLEMENTATIONS -// -------------------------------------------------------------------------------------------------------------------- +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx }; -// Node B's 
message reception callback - receives the topic message from A -void node_b_on_topic_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +// Receives one transfer and frees its fragment tree. +void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { - auto* node_ctx = static_cast(rx->user); - auto* ctx = node_ctx->topic_ctx; - if (ctx == nullptr) { - udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); - return; - } - ctx->message_count++; - ctx->sender_uid = transfer.remote.uid; - ctx->sender_sources = {}; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - ctx->sender_sources[i] = transfer.remote.endpoints[i]; - } - ctx->received_tid = transfer.transfer_id; - - ctx->received_payload.resize(transfer.payload_size_stored); + auto* st = static_cast(rx->user); + TEST_ASSERT_NOT_NULL(st); + st->count++; + st->transfer_id = transfer.transfer_id; + st->remote = transfer.remote; + st->payload.resize(transfer.payload_size_stored); const udpard_fragment_t* cursor = transfer.payload; - (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, ctx->received_payload.data()); - + (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, st->payload.data()); udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -constexpr udpard_rx_port_vtable_t topic_callbacks{ .on_message = &node_b_on_topic_message }; +constexpr udpard_rx_port_vtable_t rx_vtable{ .on_message = &on_message }; -// Node A's P2P response reception callback - receives the response from B -void node_a_on_p2p_response(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) +// Builds TX memory resources. 
+udpard_tx_mem_resources_t make_tx_mem(instrumented_allocator_t& transfer, instrumented_allocator_t& payload) { - auto* node_ctx = static_cast(rx->user); - auto* ctx = node_ctx->response_ctx; - if (ctx == nullptr) { - udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); - return; + udpard_tx_mem_resources_t out{}; + out.transfer = instrumented_allocator_make_resource(&transfer); + for (auto& res : out.payload) { + res = instrumented_allocator_make_resource(&payload); } - ctx->response_count++; - ctx->transfer_id = transfer.transfer_id; - - ctx->received_response.resize(transfer.payload_size_stored); - const udpard_fragment_t* cursor = transfer.payload; - (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, ctx->received_response.data()); - - udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); + return out; } -constexpr udpard_rx_port_vtable_t p2p_response_callbacks{ .on_message = &node_a_on_p2p_response }; - -// ACK-only P2P port callback (for receiving ACKs, which have no user payload) -void on_ack_only(udpard_rx_t*, udpard_rx_port_t* port, const udpard_rx_transfer_t tr) +// Builds RX memory resources. +udpard_rx_mem_resources_t make_rx_mem(instrumented_allocator_t& session, instrumented_allocator_t& fragment) { - udpard_fragment_free_all(tr.payload, udpard_make_deleter(port->memory.fragment)); -} - -constexpr udpard_rx_port_vtable_t ack_only_callbacks{ .on_message = &on_ack_only }; - -// -------------------------------------------------------------------------------------------------------------------- -// TEST: Basic topic message with P2P response flow -// -------------------------------------------------------------------------------------------------------------------- - -/// Node A publishes a reliable topic message, Node B receives it and sends a reliable P2P response. -/// Both nodes verify that their delivery callbacks are correctly invoked. 
-/// Each node uses exactly one TX and one RX instance. -void test_topic_with_p2p_response() -{ - seed_prng(); - - // ================================================================================================================ - // ALLOCATORS - One TX and one RX per node - // ================================================================================================================ - instrumented_allocator_t a_tx_alloc_transfer{}; - instrumented_allocator_t a_tx_alloc_payload{}; - instrumented_allocator_t a_rx_alloc_frag{}; - instrumented_allocator_t a_rx_alloc_session{}; - instrumented_allocator_new(&a_tx_alloc_transfer); - instrumented_allocator_new(&a_tx_alloc_payload); - instrumented_allocator_new(&a_rx_alloc_frag); - instrumented_allocator_new(&a_rx_alloc_session); - - instrumented_allocator_t b_tx_alloc_transfer{}; - instrumented_allocator_t b_tx_alloc_payload{}; - instrumented_allocator_t b_rx_alloc_frag{}; - instrumented_allocator_t b_rx_alloc_session{}; - instrumented_allocator_new(&b_tx_alloc_transfer); - instrumented_allocator_new(&b_tx_alloc_payload); - instrumented_allocator_new(&b_rx_alloc_frag); - instrumented_allocator_new(&b_rx_alloc_session); - - // ================================================================================================================ - // MEMORY RESOURCES - // ================================================================================================================ - udpard_tx_mem_resources_t a_tx_mem{}; - a_tx_mem.transfer = instrumented_allocator_make_resource(&a_tx_alloc_transfer); - for (auto& res : a_tx_mem.payload) { - res = instrumented_allocator_make_resource(&a_tx_alloc_payload); - } - const udpard_rx_mem_resources_t a_rx_mem{ .session = instrumented_allocator_make_resource(&a_rx_alloc_session), - .slot = instrumented_allocator_make_resource(&a_rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&a_rx_alloc_frag) }; - - udpard_tx_mem_resources_t b_tx_mem{}; - b_tx_mem.transfer = 
instrumented_allocator_make_resource(&b_tx_alloc_transfer); - for (auto& res : b_tx_mem.payload) { - res = instrumented_allocator_make_resource(&b_tx_alloc_payload); - } - const udpard_rx_mem_resources_t b_rx_mem{ .session = instrumented_allocator_make_resource(&b_rx_alloc_session), - .slot = instrumented_allocator_make_resource(&b_rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&b_rx_alloc_frag) }; - - // ================================================================================================================ - // NODE UIDs AND ENDPOINTS - // ================================================================================================================ - constexpr uint64_t node_a_uid = 0xAAAA1111BBBB2222ULL; - constexpr uint64_t node_b_uid = 0xCCCC3333DDDD4444ULL; - - const std::array node_a_sources{ - udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 7400U }, - udpard_udpip_ep_t{ .ip = 0x0A000002U, .port = 7401U }, - udpard_udpip_ep_t{ .ip = 0x0A000003U, .port = 7402U }, - }; - const std::array node_b_sources{ - udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 7500U }, - udpard_udpip_ep_t{ .ip = 0x0A000012U, .port = 7501U }, - udpard_udpip_ep_t{ .ip = 0x0A000013U, .port = 7502U }, + return udpard_rx_mem_resources_t{ + .session = instrumented_allocator_make_resource(&session), + .slot = instrumented_allocator_make_resource(&session), + .fragment = instrumented_allocator_make_resource(&fragment), }; - - constexpr uint64_t transfer_id = 42; - - // ================================================================================================================ - // TX/RX PIPELINES - One TX and one RX per node - // ================================================================================================================ - // Node A: single TX, single RX (linked to TX for ACK processing) - udpard_tx_t a_tx{}; - std::vector a_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&a_tx, node_a_uid, 100, 64, a_tx_mem, &tx_vtable)); - a_tx.user = &a_frames; 
- a_tx.ack_baseline_timeout = 10000; - - udpard_rx_t a_rx{}; - udpard_rx_new(&a_rx, &a_tx); - NodeAResponseContext a_response_ctx{}; - NodeContext a_node_ctx{ .topic_ctx = nullptr, .response_ctx = &a_response_ctx }; - a_rx.user = &a_node_ctx; - - // A's P2P port for receiving responses and ACKs - udpard_rx_port_t a_p2p_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&a_p2p_port, 4096, a_rx_mem, &p2p_response_callbacks)); - - // Node B: single TX, single RX (linked to TX for ACK processing) - udpard_tx_t b_tx{}; - std::vector b_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&b_tx, node_b_uid, 200, 64, b_tx_mem, &tx_vtable)); - b_tx.user = &b_frames; - b_tx.ack_baseline_timeout = 10000; - - udpard_rx_t b_rx{}; - udpard_rx_new(&b_rx, &b_tx); - NodeBTopicContext b_topic_ctx{}; - NodeContext b_node_ctx{ .topic_ctx = &b_topic_ctx, .response_ctx = nullptr }; - b_rx.user = &b_node_ctx; - - // B's topic subscription port - udpard_rx_port_t b_topic_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new(&b_topic_port, 4096, b_rx_mem, &topic_callbacks)); - - // B's P2P port for receiving response ACKs - udpard_rx_port_t b_p2p_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&b_p2p_port, 16, b_rx_mem, &ack_only_callbacks)); - - // ================================================================================================================ - // PAYLOADS AND FEEDBACK STATES - // ================================================================================================================ - const std::vector topic_payload = { 0x01, 0x02, 0x03, 0x04, 0x05 }; - const std::vector response_payload = { 0xAA, 0xBB, 0xCC, 0xDD }; - const udpard_bytes_scattered_t topic_payload_scat = make_scattered(topic_payload.data(), topic_payload.size()); - - FeedbackState a_topic_fb{}; - FeedbackState b_response_fb{}; - - // ================================================================================================================ - // STEP 1: Node A publishes a reliable topic message - // 
================================================================================================================ - udpard_us_t now = 0; - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - TEST_ASSERT_TRUE(udpard_tx_push(&a_tx, - now, - now + 1000000, - iface_bitmap_1, - udpard_prio_nominal, - transfer_id, - topic_payload_scat, - &record_feedback, - make_user_context(&a_topic_fb))); - a_frames.clear(); - udpard_tx_poll(&a_tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_FALSE(a_frames.empty()); - - // ================================================================================================================ - // STEP 2: Deliver topic message to Node B - // ================================================================================================================ - for (const auto& frame : a_frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&b_rx, - &b_topic_port, - now, - node_a_sources[frame.iface_index], - frame.datagram, - tx_payload_deleter, - frame.iface_index)); - } - udpard_rx_poll(&b_rx, now); - a_frames.clear(); - - // Verify B received the message - TEST_ASSERT_EQUAL_size_t(1, b_topic_ctx.message_count); - TEST_ASSERT_EQUAL_UINT64(node_a_uid, b_topic_ctx.sender_uid); - TEST_ASSERT_EQUAL_size_t(topic_payload.size(), b_topic_ctx.received_payload.size()); - TEST_ASSERT_EQUAL_MEMORY(topic_payload.data(), b_topic_ctx.received_payload.data(), topic_payload.size()); - - // ================================================================================================================ - // STEP 3: Node B sends ACK back to A (for the topic message) - via b_tx since b_rx is linked to it - // ================================================================================================================ - b_frames.clear(); - udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL); - - // Deliver ACK frames to A - for (const auto& frame : b_frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&a_rx, - &a_p2p_port, - now, - node_b_sources[frame.iface_index], - 
frame.datagram, - tx_payload_deleter, - frame.iface_index)); - } - udpard_rx_poll(&a_rx, now); - b_frames.clear(); - - // Now A should have received the ACK - poll to process feedback - now += 100; - udpard_tx_poll(&a_tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_EQUAL_size_t(1, a_topic_fb.count); - TEST_ASSERT_EQUAL_UINT32(1, a_topic_fb.acknowledgements); - - // ================================================================================================================ - // STEP 4: Node B sends a reliable P2P response to A - // ================================================================================================================ - udpard_remote_t remote_a{}; - remote_a.uid = b_topic_ctx.sender_uid; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - remote_a.endpoints[i] = node_a_sources[i]; - } - - const udpard_bytes_scattered_t response_scat = make_scattered(response_payload.data(), response_payload.size()); - uint64_t b_response_tid = 0; - TEST_ASSERT_TRUE(udpard_tx_push_p2p(&b_tx, - now, - now + 1000000, - udpard_prio_nominal, - remote_a, - response_scat, - &record_feedback, - make_user_context(&b_response_fb), - &b_response_tid)); - - b_frames.clear(); - udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_FALSE(b_frames.empty()); - - // Deliver response frames to A - for (const auto& frame : b_frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&a_rx, - &a_p2p_port, - now, - node_b_sources[frame.iface_index], - frame.datagram, - tx_payload_deleter, - frame.iface_index)); - } - udpard_rx_poll(&a_rx, now); - b_frames.clear(); - - // Verify A received the response - TEST_ASSERT_EQUAL_size_t(1, a_response_ctx.response_count); - TEST_ASSERT_EQUAL_UINT64(b_response_tid, a_response_ctx.transfer_id); - TEST_ASSERT_EQUAL_size_t(response_payload.size(), a_response_ctx.received_response.size()); - TEST_ASSERT_EQUAL_MEMORY(response_payload.data(), a_response_ctx.received_response.data(), response_payload.size()); - - // 
================================================================================================================ - // STEP 5: A sends ACK for the response back to B - via a_tx since a_rx is linked to it - // ================================================================================================================ - a_frames.clear(); - udpard_tx_poll(&a_tx, now, UDPARD_IFACE_BITMAP_ALL); - - // Deliver ACK frames to B - for (const auto& frame : a_frames) { - TEST_ASSERT_TRUE(udpard_rx_port_push(&b_rx, - &b_p2p_port, - now, - node_a_sources[frame.iface_index], - frame.datagram, - tx_payload_deleter, - frame.iface_index)); - } - udpard_rx_poll(&b_rx, now); - a_frames.clear(); - - // Now B should have received the ACK for the response - now += 100; - udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_EQUAL_size_t(1, b_response_fb.count); - TEST_ASSERT_EQUAL_UINT32(1, b_response_fb.acknowledgements); - - // ================================================================================================================ - // CLEANUP - // ================================================================================================================ - udpard_rx_port_free(&b_rx, &b_topic_port); - udpard_rx_port_free(&b_rx, &b_p2p_port); - udpard_rx_port_free(&a_rx, &a_p2p_port); - udpard_tx_free(&a_tx); - udpard_tx_free(&b_tx); - - TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_session.allocated_fragments); - - instrumented_allocator_reset(&a_tx_alloc_transfer); - 
instrumented_allocator_reset(&a_tx_alloc_payload); - instrumented_allocator_reset(&a_rx_alloc_frag); - instrumented_allocator_reset(&a_rx_alloc_session); - instrumented_allocator_reset(&b_tx_alloc_transfer); - instrumented_allocator_reset(&b_tx_alloc_payload); - instrumented_allocator_reset(&b_rx_alloc_frag); - instrumented_allocator_reset(&b_rx_alloc_session); } -// -------------------------------------------------------------------------------------------------------------------- -// TEST: Topic message and response with simulated losses -// -------------------------------------------------------------------------------------------------------------------- +// Delivers a captured frame into RX. +void deliver(const CapturedFrame& frame, + const udpard_mem_t mem, + const udpard_deleter_t del, + udpard_rx_t* const rx, + udpard_rx_port_t* const port, + const udpard_udpip_ep_t src) +{ + void* const dgram = mem_res_alloc(mem, frame.bytes.size()); + TEST_ASSERT_NOT_NULL(dgram); + (void)std::memcpy(dgram, frame.bytes.data(), frame.bytes.size()); + TEST_ASSERT_TRUE(udpard_rx_port_push( + rx, port, 5000, src, udpard_bytes_mut_t{ .size = frame.bytes.size(), .data = dgram }, del, frame.iface_index)); +} -/// Same as above, but with simulated packet loss on both the response and the response ACK. -/// Tests that reliable delivery works correctly with retransmissions. -/// Each node uses exactly one TX and one RX instance. 
-void test_topic_with_p2p_response_under_loss() +void test_p2p_response_roundtrip() { seed_prng(); - // ================================================================================================================ - // ALLOCATORS - One TX and one RX per node - // ================================================================================================================ - instrumented_allocator_t a_tx_alloc_transfer{}; - instrumented_allocator_t a_tx_alloc_payload{}; - instrumented_allocator_t a_rx_alloc_frag{}; - instrumented_allocator_t a_rx_alloc_session{}; - instrumented_allocator_new(&a_tx_alloc_transfer); - instrumented_allocator_new(&a_tx_alloc_payload); - instrumented_allocator_new(&a_rx_alloc_frag); - instrumented_allocator_new(&a_rx_alloc_session); - - instrumented_allocator_t b_tx_alloc_transfer{}; - instrumented_allocator_t b_tx_alloc_payload{}; - instrumented_allocator_t b_rx_alloc_frag{}; - instrumented_allocator_t b_rx_alloc_session{}; - instrumented_allocator_new(&b_tx_alloc_transfer); - instrumented_allocator_new(&b_tx_alloc_payload); - instrumented_allocator_new(&b_rx_alloc_frag); - instrumented_allocator_new(&b_rx_alloc_session); - - // ================================================================================================================ - // MEMORY RESOURCES - // ================================================================================================================ - udpard_tx_mem_resources_t a_tx_mem{}; - a_tx_mem.transfer = instrumented_allocator_make_resource(&a_tx_alloc_transfer); - for (auto& res : a_tx_mem.payload) { - res = instrumented_allocator_make_resource(&a_tx_alloc_payload); - } - const udpard_rx_mem_resources_t a_rx_mem{ .session = instrumented_allocator_make_resource(&a_rx_alloc_session), - .slot = instrumented_allocator_make_resource(&a_rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&a_rx_alloc_frag) }; - - udpard_tx_mem_resources_t b_tx_mem{}; - b_tx_mem.transfer = 
instrumented_allocator_make_resource(&b_tx_alloc_transfer); - for (auto& res : b_tx_mem.payload) { - res = instrumented_allocator_make_resource(&b_tx_alloc_payload); - } - const udpard_rx_mem_resources_t b_rx_mem{ .session = instrumented_allocator_make_resource(&b_rx_alloc_session), - .slot = instrumented_allocator_make_resource(&b_rx_alloc_session), - .fragment = instrumented_allocator_make_resource(&b_rx_alloc_frag) }; - - // ================================================================================================================ - // NODE UIDs AND ENDPOINTS - // ================================================================================================================ - constexpr uint64_t node_a_uid = 0x1111AAAA2222BBBBULL; - constexpr uint64_t node_b_uid = 0x3333CCCC4444DDDDULL; - - const std::array node_a_sources{ - udpard_udpip_ep_t{ .ip = 0x0A000021U, .port = 8400U }, - udpard_udpip_ep_t{}, - udpard_udpip_ep_t{}, - }; - const std::array node_b_sources{ - udpard_udpip_ep_t{ .ip = 0x0A000031U, .port = 8500U }, - udpard_udpip_ep_t{}, - udpard_udpip_ep_t{}, - }; - - constexpr uint64_t transfer_id = 99; - - // ================================================================================================================ - // TX/RX PIPELINES - One TX and one RX per node - // ================================================================================================================ - udpard_tx_t a_tx{}; - std::vector a_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&a_tx, node_a_uid, 100, 64, a_tx_mem, &tx_vtable)); - a_tx.user = &a_frames; - a_tx.ack_baseline_timeout = 8000; - - udpard_rx_t a_rx{}; - udpard_rx_new(&a_rx, &a_tx); - NodeAResponseContext a_response_ctx{}; - NodeContext a_node_ctx{ .topic_ctx = nullptr, .response_ctx = &a_response_ctx }; - a_rx.user = &a_node_ctx; - - udpard_rx_port_t a_p2p_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&a_p2p_port, 4096, a_rx_mem, &p2p_response_callbacks)); - + // Configure B (sender) TX. 
+ instrumented_allocator_t b_tx_transfer{}; + instrumented_allocator_t b_tx_payload{}; + instrumented_allocator_new(&b_tx_transfer); + instrumented_allocator_new(&b_tx_payload); udpard_tx_t b_tx{}; std::vector b_frames; - TEST_ASSERT_TRUE(udpard_tx_new(&b_tx, node_b_uid, 200, 64, b_tx_mem, &tx_vtable)); - b_tx.user = &b_frames; - b_tx.ack_baseline_timeout = 8000; - - udpard_rx_t b_rx{}; - udpard_rx_new(&b_rx, &b_tx); - NodeBTopicContext b_topic_ctx{}; - NodeContext b_node_ctx{ .topic_ctx = &b_topic_ctx, .response_ctx = nullptr }; - b_rx.user = &b_node_ctx; - - udpard_rx_port_t b_topic_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new(&b_topic_port, 4096, b_rx_mem, &topic_callbacks)); - - udpard_rx_port_t b_p2p_port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&b_p2p_port, 16, b_rx_mem, &ack_only_callbacks)); - - // ================================================================================================================ - // PAYLOADS AND FEEDBACK STATES - // ================================================================================================================ - const std::vector topic_payload = { 0x10, 0x20, 0x30 }; - const std::vector response_payload = { 0xDE, 0xAD, 0xBE, 0xEF }; - const udpard_bytes_scattered_t topic_payload_scat = make_scattered(topic_payload.data(), topic_payload.size()); - - FeedbackState a_topic_fb{}; - FeedbackState b_response_fb{}; - - // ================================================================================================================ - // STEP 1: Node A publishes a reliable topic message - // ================================================================================================================ - udpard_us_t now = 0; - constexpr uint16_t iface_bitmap_1 = (1U << 0U); - TEST_ASSERT_TRUE(udpard_tx_push(&a_tx, - now, - now + 500000, - iface_bitmap_1, - udpard_prio_fast, - transfer_id, - topic_payload_scat, - &record_feedback, - make_user_context(&a_topic_fb))); - - // 
================================================================================================================ - // SIMULATION LOOP WITH LOSSES - // ================================================================================================================ - size_t iterations = 0; - constexpr size_t max_iterations = 30; - bool first_response_dropped = false; - bool first_resp_ack_dropped = false; - bool response_sent = false; - uint64_t b_response_tid = 0; - - while (iterations < max_iterations) { - iterations++; - - // --- Node A transmits (topic message, topic ACKs, or response ACKs) --- - a_frames.clear(); - udpard_tx_poll(&a_tx, now, UDPARD_IFACE_BITMAP_ALL); - - for (const auto& frame : a_frames) { - if (b_topic_ctx.message_count == 0) { - // Topic message frames go to B's topic port - (void)udpard_rx_port_push(&b_rx, - &b_topic_port, - now, - node_a_sources[frame.iface_index], - frame.datagram, - tx_payload_deleter, - frame.iface_index); - } else { - // Response ACK frames go to B's P2P port - if (!first_resp_ack_dropped && (a_response_ctx.response_count > 0) && (b_response_fb.count == 0)) { - first_resp_ack_dropped = true; - drop_frame(frame); - continue; - } - - (void)udpard_rx_port_push(&b_rx, - &b_p2p_port, - now, - node_a_sources[frame.iface_index], - frame.datagram, - tx_payload_deleter, - frame.iface_index); - } - } - a_frames.clear(); - udpard_rx_poll(&b_rx, now); - - // --- Node B transmits (topic ACKs first, before pushing response) --- - b_frames.clear(); - udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL); - - // Deliver B's frames (topic ACKs) to A before pushing response - for (const auto& frame : b_frames) { - (void)udpard_rx_port_push(&a_rx, - &a_p2p_port, - now, - node_b_sources[frame.iface_index], - frame.datagram, - tx_payload_deleter, - frame.iface_index); - } - b_frames.clear(); - udpard_rx_poll(&a_rx, now); - - // --- If B received topic, send response --- - if ((b_topic_ctx.message_count > 0) && !response_sent) { - 
response_sent = true; - - udpard_remote_t remote_a{}; - remote_a.uid = b_topic_ctx.sender_uid; - remote_a.endpoints[0] = node_a_sources[0]; - - const udpard_bytes_scattered_t response_scat = - make_scattered(response_payload.data(), response_payload.size()); - TEST_ASSERT_TRUE(udpard_tx_push_p2p(&b_tx, - now, - now + 500000, - udpard_prio_fast, - remote_a, - response_scat, - &record_feedback, - make_user_context(&b_response_fb), - &b_response_tid)); - } - - // --- Node B transmits (responses) --- - b_frames.clear(); - udpard_tx_poll(&b_tx, now, UDPARD_IFACE_BITMAP_ALL); - - for (const auto& frame : b_frames) { - // Check if this frame is an ACK vs response. - const bool is_ack = is_ack_frame(frame.datagram); - - // Drop first response (non-ACK) to test retransmission. - if (!first_response_dropped && response_sent && !is_ack) { - first_response_dropped = true; - drop_frame(frame); - continue; - } - - (void)udpard_rx_port_push(&a_rx, - &a_p2p_port, - now, - node_b_sources[frame.iface_index], - frame.datagram, - tx_payload_deleter, - frame.iface_index); - } - b_frames.clear(); - udpard_rx_poll(&a_rx, now); - - // Check if both feedbacks have fired - if ((a_topic_fb.count > 0) && (b_response_fb.count > 0)) { - break; - } - - now += a_tx.ack_baseline_timeout + 5000; - } - - // ================================================================================================================ - // VERIFY - // ================================================================================================================ - TEST_ASSERT_LESS_THAN_size_t(max_iterations, iterations); - TEST_ASSERT_TRUE(first_response_dropped); - TEST_ASSERT_TRUE(first_resp_ack_dropped); - - TEST_ASSERT_EQUAL_size_t(1, a_topic_fb.count); - TEST_ASSERT_EQUAL_UINT32(1, a_topic_fb.acknowledgements); - - TEST_ASSERT_EQUAL_size_t(1, b_response_fb.count); - TEST_ASSERT_EQUAL_UINT32(1, b_response_fb.acknowledgements); - - TEST_ASSERT_GREATER_OR_EQUAL_size_t(1, b_topic_ctx.message_count); - 
TEST_ASSERT_EQUAL_size_t(1, a_response_ctx.response_count); - TEST_ASSERT_EQUAL_UINT64(b_response_tid, a_response_ctx.transfer_id); - TEST_ASSERT_EQUAL_size_t(response_payload.size(), a_response_ctx.received_response.size()); - TEST_ASSERT_EQUAL_MEMORY(response_payload.data(), a_response_ctx.received_response.data(), response_payload.size()); - - // ================================================================================================================ - // CLEANUP - // ================================================================================================================ - udpard_rx_port_free(&b_rx, &b_topic_port); - udpard_rx_port_free(&b_rx, &b_p2p_port); - udpard_rx_port_free(&a_rx, &a_p2p_port); - udpard_tx_free(&a_tx); + TEST_ASSERT_TRUE( + udpard_tx_new(&b_tx, 0xBBBBBBBBBBBBBBBBULL, 10U, 16U, make_tx_mem(b_tx_transfer, b_tx_payload), &tx_vtable)); + b_tx.mtu[0] = 256U; + b_tx.mtu[1] = 256U; + b_tx.mtu[2] = 256U; + b_tx.user = &b_frames; + + // Configure A (receiver) RX P2P port. + instrumented_allocator_t a_rx_session{}; + instrumented_allocator_t a_rx_fragment{}; + instrumented_allocator_new(&a_rx_session); + instrumented_allocator_new(&a_rx_fragment); + const auto rx_mem = make_rx_mem(a_rx_session, a_rx_fragment); + const udpard_deleter_t del = instrumented_allocator_make_deleter(&a_rx_fragment); + udpard_rx_t a_rx{}; + udpard_rx_port_t a_p2p{}; + RxState a_state{}; + udpard_rx_new(&a_rx); + a_rx.user = &a_state; + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&a_p2p, 1024U, rx_mem, &rx_vtable)); + + // Emit one P2P response from B to A on iface 0. 
+ const udpard_udpip_ep_t a_endpoint = { .ip = 0x0A0000A1U, .port = 9382U }; + udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX] = {}; + endpoints[0] = a_endpoint; + const std::vector response_payload{ 0xDE, 0xAD, 0xBE, 0xEF }; + TEST_ASSERT_TRUE(udpard_tx_push_p2p(&b_tx, + 1000, + 100000, + udpard_prio_high, + endpoints, + make_scattered(response_payload.data(), response_payload.size()), + nullptr)); + udpard_tx_poll(&b_tx, 1001, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1, b_frames.size()); + TEST_ASSERT_EQUAL_UINT32(a_endpoint.ip, b_frames[0].destination.ip); + TEST_ASSERT_EQUAL_UINT16(a_endpoint.port, b_frames[0].destination.port); + + // Deliver and verify A has received the response. + deliver(b_frames[0], rx_mem.fragment, del, &a_rx, &a_p2p, udpard_udpip_ep_t{ .ip = 0x0A0000B2U, .port = 9382U }); + udpard_rx_poll(&a_rx, 6000); + TEST_ASSERT_EQUAL_size_t(1, a_state.count); + TEST_ASSERT_EQUAL_size_t(response_payload.size(), a_state.payload.size()); + TEST_ASSERT_EQUAL_MEMORY(response_payload.data(), a_state.payload.data(), response_payload.size()); + TEST_ASSERT_EQUAL_UINT64(0xBBBBBBBBBBBBBBBBULL, a_state.remote.uid); + + // Release all resources. 
+ udpard_rx_port_free(&a_rx, &a_p2p); udpard_tx_free(&b_tx); - - TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, a_tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, a_rx_alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, b_tx_alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, b_rx_alloc_session.allocated_fragments); - - instrumented_allocator_reset(&a_tx_alloc_transfer); - instrumented_allocator_reset(&a_tx_alloc_payload); - instrumented_allocator_reset(&a_rx_alloc_frag); - instrumented_allocator_reset(&a_rx_alloc_session); - instrumented_allocator_reset(&b_tx_alloc_transfer); - instrumented_allocator_reset(&b_tx_alloc_payload); - instrumented_allocator_reset(&b_rx_alloc_frag); - instrumented_allocator_reset(&b_rx_alloc_session); + TEST_ASSERT_EQUAL_size_t(0, b_tx_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_tx_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_rx_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_rx_fragment.allocated_fragments); + instrumented_allocator_reset(&b_tx_transfer); + instrumented_allocator_reset(&b_tx_payload); + instrumented_allocator_reset(&a_rx_session); + instrumented_allocator_reset(&a_rx_fragment); } } // namespace -extern "C" void setUp() {} - -extern "C" void tearDown() {} +void setUp() {} +void tearDown() {} int main() { UNITY_BEGIN(); - RUN_TEST(test_topic_with_p2p_response); - RUN_TEST(test_topic_with_p2p_response_under_loss); + RUN_TEST(test_p2p_response_roundtrip); return UNITY_END(); } diff --git a/tests/src/test_integration_sockets.cpp b/tests/src/test_integration_sockets.cpp index 4a15c2a..a617b12 100644 --- a/tests/src/test_integration_sockets.cpp +++ 
b/tests/src/test_integration_sockets.cpp @@ -2,670 +2,288 @@ /// Copyright (C) OpenCyphal Development Team /// Copyright Amazon.com Inc. or its affiliates. /// SPDX-License-Identifier: MIT -/// -/// Integration test that verifies end-to-end behavior with frame capture/injection, -/// random packet loss, and reordering simulation. #include #include "helpers.h" #include - #include -#include -#include +#include #include #include #include namespace { -// Brief network simulator with loss/reorder support. -class NetworkSimulator +struct CapturedFrame { - public: - NetworkSimulator(const double loss_rate, const bool enable_reorder, const uint32_t seed = 1U) - : loss_rate_(std::clamp(loss_rate, 0.0, 1.0)) - , enable_reorder_(enable_reorder) - , rng_(seed) - , drop_(loss_rate_) - { - } - - // Shuffle frames to simulate reordering. - template - void shuffle(std::vector& items) - { - if (enable_reorder_ && (items.size() > 1U)) { - std::shuffle(items.begin(), items.end(), rng_); - reordered_ = true; - } - } - - // Decide whether to drop; guarantee at least one drop if loss is enabled. 
- bool drop_next(const size_t frames_left) - { - bool drop = (loss_rate_ > 0.0) && drop_(rng_); - if ((!drop) && (loss_rate_ > 0.0) && (frames_left == 1U) && (dropped_ == 0U)) { - drop = true; - } - if (drop) { - dropped_++; - } - return drop; - } - - [[nodiscard]] size_t dropped() const { return dropped_; } - [[nodiscard]] bool reordered() const { return reordered_; } - - private: - double loss_rate_; - bool enable_reorder_; - std::mt19937 rng_; - std::bernoulli_distribution drop_; - size_t dropped_ = 0; - bool reordered_ = false; + std::vector bytes; + std::uint_fast8_t iface_index = 0; }; -// ===================================================================================================================== -// Test context for tracking received transfers -// ===================================================================================================================== - struct ReceivedTransfer { - std::vector payload; - uint64_t transfer_id; - uint64_t remote_uid; - size_t payload_size_wire; + std::uint64_t transfer_id = 0; + std::uint64_t remote_uid = 0; + std::vector payload; }; -struct TestContext +struct RxContext { - std::vector received_transfers; + std::vector transfers; }; -// ===================================================================================================================== -// Captured frame for TX ejection -// ===================================================================================================================== - -struct CapturedFrame +// Captures TX frames into a test-owned vector. 
+bool capture_tx(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { - std::vector data; - uint_fast8_t iface_index; -}; - -// ===================================================================================================================== -// Callbacks -// ===================================================================================================================== - -bool capture_frame_impl(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - auto* frames = static_cast*>(tx->user); - if (frames == nullptr) { + auto* out = static_cast*>(tx->user); + if (out == nullptr) { return false; } - CapturedFrame frame{}; - frame.data.assign(static_cast(ejection->datagram.data), - static_cast(ejection->datagram.data) + ejection->datagram.size); + frame.bytes.assign(static_cast(ejection->datagram.data), + static_cast(ejection->datagram.data) + ejection->datagram.size); frame.iface_index = ejection->iface_index; - frames->push_back(frame); - + out->push_back(std::move(frame)); return true; } -bool capture_frame_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - return capture_frame_impl(tx, ejection); -} -bool capture_frame_p2p(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t /*dest*/) -{ - return capture_frame_impl(tx, ejection); -} -constexpr udpard_tx_vtable_t tx_vtable{ .eject_subject = &capture_frame_subject, .eject_p2p = &capture_frame_p2p }; +constexpr udpard_tx_vtable_t tx_vtable{ .eject = &capture_tx }; +// Stores each received transfer and frees the payload. 
void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { - auto* ctx = static_cast(rx->user); - if (ctx != nullptr) { - ReceivedTransfer rt{}; - rt.transfer_id = transfer.transfer_id; - rt.remote_uid = transfer.remote.uid; - rt.payload_size_wire = transfer.payload_size_wire; - - rt.payload.resize(transfer.payload_size_stored); - const udpard_fragment_t* cursor = transfer.payload; - (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, rt.payload.data()); - - ctx->received_transfers.push_back(std::move(rt)); - } - + auto* ctx = static_cast(rx->user); + TEST_ASSERT_NOT_NULL(ctx); + ReceivedTransfer out{}; + out.transfer_id = transfer.transfer_id; + out.remote_uid = transfer.remote.uid; + out.payload.resize(transfer.payload_size_stored); + const udpard_fragment_t* cursor = transfer.payload; + (void)udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, out.payload.data()); + ctx->transfers.push_back(std::move(out)); udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -constexpr udpard_rx_port_vtable_t rx_port_vtable{ .on_message = &on_message }; - -// ===================================================================================================================== -// Fixtures and helpers -// ===================================================================================================================== +constexpr udpard_rx_port_vtable_t rx_vtable{ .on_message = &on_message }; -// Build a random payload of requested size. -std::vector make_payload(const size_t size) +// Builds TX memory resources. 
+udpard_tx_mem_resources_t make_tx_mem(instrumented_allocator_t& transfer, instrumented_allocator_t& payload) { - std::vector payload(size); - for (auto& byte : payload) { - byte = static_cast(rand() % 256); + udpard_tx_mem_resources_t out{}; + out.transfer = instrumented_allocator_make_resource(&transfer); + for (auto& r : out.payload) { + r = instrumented_allocator_make_resource(&payload); } - return payload; + return out; } -// Simple TX owner that captures frames. -struct TxFixture -{ - instrumented_allocator_t transfer{}; - instrumented_allocator_t payload{}; - udpard_tx_mem_resources_t mem{}; - udpard_tx_t tx{}; - std::vector frames; - - void init(const uint64_t uid, const uint64_t timeout, const uint16_t mtu) - { - instrumented_allocator_new(&transfer); - instrumented_allocator_new(&payload); - - mem.transfer = instrumented_allocator_make_resource(&transfer); - for (auto& res : mem.payload) { - res = instrumented_allocator_make_resource(&payload); - } - - TEST_ASSERT_TRUE(udpard_tx_new(&tx, uid, timeout, mtu, mem, &tx_vtable)); - tx.user = &frames; - } - - void fini() - { - udpard_tx_free(&tx); - TEST_ASSERT_EQUAL_size_t(0, transfer.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, payload.allocated_fragments); - } -}; - -// Simple RX owner with context. -struct RxFixture +// Builds RX memory resources. 
+udpard_rx_mem_resources_t make_rx_mem(instrumented_allocator_t& session, instrumented_allocator_t& fragment) { - instrumented_allocator_t session{}; - instrumented_allocator_t fragment{}; - udpard_rx_mem_resources_t mem{}; - udpard_rx_t rx{}; - TestContext ctx{}; - - void init() - { - instrumented_allocator_new(&session); - instrumented_allocator_new(&fragment); - mem.session = instrumented_allocator_make_resource(&session); - mem.slot = instrumented_allocator_make_resource(&session); - mem.fragment = instrumented_allocator_make_resource(&fragment); - udpard_rx_new(&rx, nullptr); - rx.user = &ctx; - } - - void fini() const - { - TEST_ASSERT_EQUAL_size_t(0, session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, fragment.allocated_fragments); - } -}; - -// Create a subject port. -udpard_rx_port_t make_subject_port(const size_t extent, RxFixture& rx) -{ - udpard_rx_port_t port{}; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, extent, rx.mem, &rx_port_vtable)); - return port; -} - -// ===================================================================================================================== -// Helper to deliver frames with optional loss/reorder. 
-void deliver_frames(std::vector frames, - udpard_rx_t* rx, - udpard_rx_port_t* port, - const udpard_rx_mem_resources_t& rx_mem, - const udpard_udpip_ep_t& src_ep, - udpard_us_t now, - NetworkSimulator* sim = nullptr) -{ - if (sim != nullptr) { - sim->shuffle(frames); - } - const size_t total = frames.size(); - for (size_t i = 0; i < total; i++) { - if ((sim != nullptr) && sim->drop_next(total - i)) { - now++; - continue; - } - - const auto& frame = frames[i]; - const udpard_deleter_t deleter{ .vtable = &rx_mem.fragment.vtable->base, .context = rx_mem.fragment.context }; - void* dgram = mem_res_alloc(rx_mem.fragment, frame.data.size()); - TEST_ASSERT_NOT_NULL(dgram); - std::memcpy(dgram, frame.data.data(), frame.data.size()); - - const udpard_bytes_mut_t dgram_view{ frame.data.size(), dgram }; - - TEST_ASSERT_TRUE(udpard_rx_port_push(rx, port, now, src_ep, dgram_view, deleter, frame.iface_index)); - now++; - } - udpard_rx_poll(rx, now); -} - -// ===================================================================================================================== -// Tests -// ===================================================================================================================== - -/// Basic single-frame transfer end-to-end -void test_single_frame_transfer() -{ - seed_prng(); - - constexpr uint64_t publisher_uid = 0x1111222233334444ULL; - constexpr uint64_t transfer_id = 42U; - - // Set up publisher. - TxFixture pub{}; - pub.init(publisher_uid, 100U, 256); - - // Set up subscriber. - RxFixture sub{}; - sub.init(); - udpard_rx_port_t sub_port = make_subject_port(4096, sub); - - // Send a small payload. 
- const std::vector payload = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 }; - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - - const udpard_us_t now = 1000000; - const udpard_us_t deadline = now + 1000000; - - TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx, - now, - deadline, - 1U, // iface_bitmap: interface 0 only - udpard_prio_nominal, - transfer_id, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - - udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_EQUAL_size_t(1, pub.frames.size()); - - // Deliver frames to subscriber. - const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 }; - deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now); - - // Verify transfer. - TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size()); - TEST_ASSERT_EQUAL_UINT64(transfer_id, sub.ctx.received_transfers[0].transfer_id); - TEST_ASSERT_EQUAL_UINT64(publisher_uid, sub.ctx.received_transfers[0].remote_uid); - TEST_ASSERT_EQUAL_size_t(payload.size(), sub.ctx.received_transfers[0].payload.size()); - TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload.size()); - - // Cleanup. - udpard_rx_port_free(&sub.rx, &sub_port); - pub.fini(); - sub.fini(); -} - -/// Large multi-frame transfer end-to-end -void test_multi_frame_transfer() -{ - seed_prng(); - - constexpr uint64_t publisher_uid = 0x5555666677778888ULL; - constexpr size_t payload_size = 50000; // Large enough to require many frames - - // Set up publisher. - TxFixture pub{}; - pub.init(publisher_uid, 200U, 512); - - // Set up subscriber. - RxFixture sub{}; - sub.init(); - udpard_rx_port_t sub_port = make_subject_port(payload_size + 1024, sub); - - // Generate random payload. 
- const std::vector payload = make_payload(payload_size); - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - - const udpard_us_t now = 1000000; - const udpard_us_t deadline = now + 5000000; - - TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx, - now, - deadline, - 1U, // iface_bitmap - udpard_prio_nominal, - 100, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - - udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_TRUE(pub.frames.size() > 1U); - - // Deliver frames to subscriber. - const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 }; - deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now); - - // Verify full transfer. - TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size()); - TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size()); - TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size); - - // Cleanup. - udpard_rx_port_free(&sub.rx, &sub_port); - pub.fini(); - sub.fini(); + return udpard_rx_mem_resources_t{ + .session = instrumented_allocator_make_resource(&session), + .slot = instrumented_allocator_make_resource(&session), + .fragment = instrumented_allocator_make_resource(&fragment), + }; } -/// Multi-frame transfer with random reordering -void test_multi_frame_with_reordering() +// Delivers one captured frame into RX. +void deliver(const CapturedFrame& frame, + const udpard_mem_t mem, + const udpard_deleter_t del, + udpard_rx_t* const rx, + udpard_rx_port_t* const port, + const udpard_udpip_ep_t src, + const udpard_us_t ts) { - seed_prng(); - - constexpr uint64_t publisher_uid = 0xABCDEF0123456789ULL; - constexpr size_t payload_size = 20000; - - NetworkSimulator sim(0.0, true, static_cast(rand())); // No loss, deterministic shuffle - - // Set up publisher. - TxFixture pub{}; - pub.init(publisher_uid, 300U, 256); - - // Set up subscriber. 
- RxFixture sub{}; - sub.init(); - udpard_rx_port_t sub_port = make_subject_port(payload_size + 1024, sub); - - // Generate random payload and send. - const std::vector payload = make_payload(payload_size); - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - - const udpard_us_t now = 1000000; - TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx, - now, - now + 5000000, - 1U, // iface_bitmap - udpard_prio_nominal, - 50, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - - udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL); - - // Deliver reordered frames. - const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 }; - deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now, &sim); - - // Verify reordering recovery. - TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size()); - TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size()); - TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size); - TEST_ASSERT_TRUE((pub.frames.size() < 2U) || sim.reordered()); - - // Cleanup. - udpard_rx_port_free(&sub.rx, &sub_port); - pub.fini(); - sub.fini(); + void* const dgram = mem_res_alloc(mem, frame.bytes.size()); + TEST_ASSERT_NOT_NULL(dgram); + (void)std::memcpy(dgram, frame.bytes.data(), frame.bytes.size()); + TEST_ASSERT_TRUE(udpard_rx_port_push( + rx, port, ts, src, udpard_bytes_mut_t{ .size = frame.bytes.size(), .data = dgram }, del, frame.iface_index)); } -/// Multiple publishers sending to single subscriber -void test_multiple_publishers() +void test_reordered_multiframe_delivery() { seed_prng(); - constexpr size_t num_publishers = 3; - constexpr size_t num_transfers_per_pub = 5; - constexpr size_t payload_size = 100; - - // Set up subscriber. - RxFixture sub{}; - sub.init(); - udpard_rx_port_t sub_port = make_subject_port(1024, sub); - - // Set up publishers and send. 
- std::array publishers{}; - std::array>, num_publishers> expected_payloads{}; - - for (size_t i = 0; i < num_publishers; i++) { - const uint64_t uid = 0x1000000000000000ULL + i; - publishers[i].init(uid, static_cast(rand()), 256); - - for (size_t tid = 0; tid < num_transfers_per_pub; tid++) { - std::vector payload = make_payload(payload_size); - payload[0] = static_cast(i); - payload[1] = static_cast(tid); - expected_payloads[i].push_back(payload); - - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - const udpard_us_t now = - 1000000LL + (static_cast(i) * 10000LL) + (static_cast(tid) * 100LL); - const uint64_t transfer_id = (static_cast(i) * 1000ULL) + static_cast(tid); - - TEST_ASSERT_TRUE(udpard_tx_push(&publishers[i].tx, - now, - now + 1000000, - 1U, // iface_bitmap - udpard_prio_nominal, - transfer_id, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - - udpard_tx_poll(&publishers[i].tx, now, UDPARD_IFACE_BITMAP_ALL); - } - } - - // Deliver all frames in publisher order. - udpard_us_t now = 2000000; - for (size_t pub = 0; pub < num_publishers; pub++) { - const udpard_udpip_ep_t src_ep{ static_cast(0x7F000001U + pub), static_cast(12345U + pub) }; - deliver_frames(publishers[pub].frames, &sub.rx, &sub_port, sub.mem, src_ep, now); - now += publishers[pub].frames.size(); - } - - // Verify every transfer and payload. 
- const size_t expected_transfers = num_publishers * num_transfers_per_pub; - TEST_ASSERT_EQUAL_size_t(expected_transfers, sub.ctx.received_transfers.size()); - for (size_t i = 0; i < num_publishers; i++) { - const uint64_t uid = 0x1000000000000000ULL + i; - for (size_t tid = 0; tid < num_transfers_per_pub; tid++) { - const uint64_t transfer_id = (static_cast(i) * 1000ULL) + static_cast(tid); - const auto it = std::find_if( - sub.ctx.received_transfers.begin(), sub.ctx.received_transfers.end(), [=](const ReceivedTransfer& rt) { - return (rt.remote_uid == uid) && (rt.transfer_id == transfer_id); - }); - TEST_ASSERT_TRUE(it != sub.ctx.received_transfers.end()); - TEST_ASSERT_EQUAL_size_t(payload_size, it->payload.size()); - TEST_ASSERT_EQUAL_MEMORY(expected_payloads[i][tid].data(), it->payload.data(), payload_size); - } + // Configure one TX node. + instrumented_allocator_t tx_alloc_transfer{}; + instrumented_allocator_t tx_alloc_payload{}; + instrumented_allocator_new(&tx_alloc_transfer); + instrumented_allocator_new(&tx_alloc_payload); + udpard_tx_t tx{}; + std::vector frames; + TEST_ASSERT_TRUE( + udpard_tx_new(&tx, 0xAAAAAAAABBBBBBBBULL, 1U, 32U, make_tx_mem(tx_alloc_transfer, tx_alloc_payload), &tx_vtable)); + tx.mtu[0] = 96U; + tx.mtu[1] = 96U; + tx.mtu[2] = 96U; + tx.user = &frames; + + // Configure one RX node. + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t rx_alloc_fragment{}; + instrumented_allocator_new(&rx_alloc_session); + instrumented_allocator_new(&rx_alloc_fragment); + const auto rx_mem = make_rx_mem(rx_alloc_session, rx_alloc_fragment); + const udpard_deleter_t del = instrumented_allocator_make_deleter(&rx_alloc_fragment); + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxContext ctx{}; + udpard_rx_new(&rx); + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 4096U, rx_mem, &rx_vtable)); + + // Emit one large transfer over two interfaces. 
+ std::vector payload(260U); + for (std::size_t i = 0; i < payload.size(); i++) { + payload[i] = static_cast(i); } - - // Cleanup. - udpard_rx_port_free(&sub.rx, &sub_port); - for (auto& pub : publishers) { - pub.fini(); + TEST_ASSERT_TRUE(udpard_tx_push(&tx, + 1000, + 100000, + (1U << 0U) | (1U << 1U), + udpard_prio_fast, + 44U, + udpard_make_subject_endpoint(123U), + make_scattered(payload.data(), payload.size()), + nullptr)); + udpard_tx_poll(&tx, 1001, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_TRUE(!frames.empty()); + + // Reorder arrivals and deliver all frames. + std::mt19937 prng{ static_cast(rand()) }; + std::shuffle(frames.begin(), frames.end(), prng); + udpard_us_t ts = 2000; + for (const auto& frame : frames) { + deliver(frame, rx_mem.fragment, del, &rx, &port, udpard_udpip_ep_t{ .ip = 0x0A000001U, .port = 9382U }, ts++); } - sub.fini(); + udpard_rx_poll(&rx, ts + 1); + + // Deduplication must keep exactly one delivered transfer. + TEST_ASSERT_EQUAL_size_t(1, ctx.transfers.size()); + TEST_ASSERT_EQUAL_UINT64(44U, ctx.transfers[0].transfer_id); + TEST_ASSERT_EQUAL_UINT64(0xAAAAAAAABBBBBBBBULL, ctx.transfers[0].remote_uid); + TEST_ASSERT_EQUAL_size_t(payload.size(), ctx.transfers[0].payload.size()); + TEST_ASSERT_EQUAL_MEMORY(payload.data(), ctx.transfers[0].payload.data(), payload.size()); + + // Release resources. 
+ udpard_rx_port_free(&rx, &port); + udpard_tx_free(&tx); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, tx_alloc_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_fragment.allocated_fragments); + instrumented_allocator_reset(&tx_alloc_transfer); + instrumented_allocator_reset(&tx_alloc_payload); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&rx_alloc_fragment); } -/// Multi-frame transfer with simulated packet loss (all frames except one lost = incomplete transfer) -void test_partial_frame_loss() +void test_two_publishers() { seed_prng(); - constexpr uint64_t publisher_uid = 0xDEADBEEFCAFEBABEULL; - constexpr size_t payload_size = 5000; // Multi-frame transfer - - NetworkSimulator sim(0.35, false, static_cast(rand())); // Ensure some loss - - // Set up publisher. - TxFixture pub{}; - pub.init(publisher_uid, 300U, 256); - - // Set up subscriber. - RxFixture sub{}; - sub.init(); - udpard_rx_port_t sub_port = make_subject_port(payload_size + 1024, sub); - - // Generate payload and send. - const std::vector payload = make_payload(payload_size); - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - - const udpard_us_t now = 1000000; - TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx, - now, - now + 5000000, - 1U, // iface_bitmap - udpard_prio_nominal, - 50, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - - udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_TRUE(pub.frames.size() > 1U); - - // Deliver with packet loss. - const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 }; - deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now, &sim); - - // Verify incomplete transfer is dropped. - TEST_ASSERT_TRUE(sim.dropped() > 0U); - TEST_ASSERT_EQUAL_size_t(0, sub.ctx.received_transfers.size()); - - // Cleanup. 
- udpard_rx_port_free(&sub.rx, &sub_port); - pub.fini(); - sub.fini(); -} - -/// Test with all frames delivered - no loss (baseline for loss tests) -void test_no_loss_baseline() -{ - seed_prng(); - - constexpr uint64_t publisher_uid = 0xAAAABBBBCCCCDDDDULL; - constexpr size_t payload_size = 10000; - - // Set up publisher. - TxFixture pub{}; - pub.init(publisher_uid, 400U, 256); - - // Set up subscriber. - RxFixture sub{}; - sub.init(); - udpard_rx_port_t sub_port = make_subject_port(payload_size + 1024, sub); - - // Generate payload and send. - const std::vector payload = make_payload(payload_size); - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - - const udpard_us_t now = 1000000; - TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx, - now, - now + 5000000, - 1U, // iface_bitmap - udpard_prio_nominal, - 75, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - - udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL); - - // Deliver all frames. - const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 }; - deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now); - - // Verify success path. - TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size()); - TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload.size()); - TEST_ASSERT_EQUAL_MEMORY(payload.data(), sub.ctx.received_transfers[0].payload.data(), payload_size); - - // Cleanup. - udpard_rx_port_free(&sub.rx, &sub_port); - pub.fini(); - sub.fini(); -} - -/// Test with extent-based truncation -void test_extent_truncation() -{ - seed_prng(); - - constexpr uint64_t publisher_uid = 0x1234567890ABCDEFULL; - constexpr size_t payload_size = 5000; - constexpr size_t extent = 1000; // Less than payload_size - - // Set up publisher. - TxFixture pub{}; - pub.init(publisher_uid, 500U, 256); - - // Set up subscriber with limited extent. 
- RxFixture sub{}; - sub.init(); - udpard_rx_port_t sub_port = make_subject_port(extent, sub); - - // Generate payload and send. - const std::vector payload = make_payload(payload_size); - const udpard_bytes_scattered_t payload_view = make_scattered(payload.data(), payload.size()); - - const udpard_us_t now = 1000000; - TEST_ASSERT_TRUE(udpard_tx_push(&pub.tx, - now, - now + 5000000, - 1U, // iface_bitmap + // Configure two TX nodes. + instrumented_allocator_t a_tx_transfer{}; + instrumented_allocator_t a_tx_payload{}; + instrumented_allocator_t b_tx_transfer{}; + instrumented_allocator_t b_tx_payload{}; + instrumented_allocator_new(&a_tx_transfer); + instrumented_allocator_new(&a_tx_payload); + instrumented_allocator_new(&b_tx_transfer); + instrumented_allocator_new(&b_tx_payload); + udpard_tx_t a_tx{}; + udpard_tx_t b_tx{}; + std::vector a_frames; + std::vector b_frames; + TEST_ASSERT_TRUE( + udpard_tx_new(&a_tx, 0x1111111111111111ULL, 2U, 16U, make_tx_mem(a_tx_transfer, a_tx_payload), &tx_vtable)); + TEST_ASSERT_TRUE( + udpard_tx_new(&b_tx, 0x2222222222222222ULL, 3U, 16U, make_tx_mem(b_tx_transfer, b_tx_payload), &tx_vtable)); + a_tx.mtu[0] = 128U; + a_tx.mtu[1] = 128U; + a_tx.mtu[2] = 128U; + b_tx.mtu[0] = 128U; + b_tx.mtu[1] = 128U; + b_tx.mtu[2] = 128U; + a_tx.user = &a_frames; + b_tx.user = &b_frames; + + // Configure shared RX node. + instrumented_allocator_t rx_alloc_session{}; + instrumented_allocator_t rx_alloc_fragment{}; + instrumented_allocator_new(&rx_alloc_session); + instrumented_allocator_new(&rx_alloc_fragment); + const auto rx_mem = make_rx_mem(rx_alloc_session, rx_alloc_fragment); + const udpard_deleter_t del = instrumented_allocator_make_deleter(&rx_alloc_fragment); + udpard_rx_t rx{}; + udpard_rx_port_t port{}; + RxContext ctx{}; + udpard_rx_new(&rx); + rx.user = &ctx; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, rx_mem, &rx_vtable)); + + // Emit one transfer per publisher. 
+ static const std::uint8_t a_payload[] = { 1, 3, 5 }; + static const std::uint8_t b_payload[] = { 2, 4, 6, 8 }; + TEST_ASSERT_TRUE(udpard_tx_push(&a_tx, + 100, + 10000, + 1U, udpard_prio_nominal, + 10U, + udpard_make_subject_endpoint(5U), + make_scattered(a_payload, sizeof(a_payload)), + nullptr)); + TEST_ASSERT_TRUE(udpard_tx_push(&b_tx, 100, - payload_view, - nullptr, - UDPARD_USER_CONTEXT_NULL)); - - udpard_tx_poll(&pub.tx, now, UDPARD_IFACE_BITMAP_ALL); - - // Deliver all frames. - const udpard_udpip_ep_t src_ep{ .ip = 0x7F000001, .port = 12345 }; - deliver_frames(pub.frames, &sub.rx, &sub_port, sub.mem, src_ep, now); - - // Verify truncation. - TEST_ASSERT_EQUAL_size_t(1, sub.ctx.received_transfers.size()); - TEST_ASSERT_TRUE(sub.ctx.received_transfers[0].payload.size() <= extent + UDPARD_MTU_DEFAULT); - TEST_ASSERT_EQUAL_size_t(payload_size, sub.ctx.received_transfers[0].payload_size_wire); - TEST_ASSERT_EQUAL_MEMORY( - payload.data(), sub.ctx.received_transfers[0].payload.data(), sub.ctx.received_transfers[0].payload.size()); - - // Cleanup. - udpard_rx_port_free(&sub.rx, &sub_port); - pub.fini(); - sub.fini(); + 10000, + 1U, + udpard_prio_nominal, + 20U, + udpard_make_subject_endpoint(5U), + make_scattered(b_payload, sizeof(b_payload)), + nullptr)); + udpard_tx_poll(&a_tx, 101, UDPARD_IFACE_BITMAP_ALL); + udpard_tx_poll(&b_tx, 101, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1, a_frames.size()); + TEST_ASSERT_EQUAL_size_t(1, b_frames.size()); + + // Deliver frames and verify both senders are represented. + deliver(a_frames[0], rx_mem.fragment, del, &rx, &port, udpard_udpip_ep_t{ .ip = 0x0A000011U, .port = 9382U }, 200); + deliver(b_frames[0], rx_mem.fragment, del, &rx, &port, udpard_udpip_ep_t{ .ip = 0x0A000022U, .port = 9382U }, 201); + udpard_rx_poll(&rx, 300); + TEST_ASSERT_EQUAL_size_t(2, ctx.transfers.size()); + + // Release resources. 
+ udpard_rx_port_free(&rx, &port); + udpard_tx_free(&a_tx); + udpard_tx_free(&b_tx); + TEST_ASSERT_EQUAL_size_t(0, a_tx_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, a_tx_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_tx_transfer.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, b_tx_payload.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_session.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, rx_alloc_fragment.allocated_fragments); + instrumented_allocator_reset(&a_tx_transfer); + instrumented_allocator_reset(&a_tx_payload); + instrumented_allocator_reset(&b_tx_transfer); + instrumented_allocator_reset(&b_tx_payload); + instrumented_allocator_reset(&rx_alloc_session); + instrumented_allocator_reset(&rx_alloc_fragment); } } // namespace -extern "C" void setUp() {} -extern "C" void tearDown() {} +void setUp() {} +void tearDown() {} int main() { UNITY_BEGIN(); - RUN_TEST(test_single_frame_transfer); - RUN_TEST(test_multi_frame_transfer); - RUN_TEST(test_multi_frame_with_reordering); - RUN_TEST(test_multiple_publishers); - RUN_TEST(test_partial_frame_loss); - RUN_TEST(test_no_loss_baseline); - RUN_TEST(test_extent_truncation); + RUN_TEST(test_reordered_multiframe_delivery); + RUN_TEST(test_two_publishers); return UNITY_END(); } diff --git a/tests/src/test_intrusive_guards.c b/tests/src/test_intrusive_guards.c index 1a48fc6..f067e60 100644 --- a/tests/src/test_intrusive_guards.c +++ b/tests/src/test_intrusive_guards.c @@ -4,398 +4,192 @@ /// SPDX-License-Identifier: MIT #include // NOLINT(bugprone-suspicious-include) +#include "helpers.h" #include +#include -// Minimal helpers to avoid heap use in guard paths. -static void free_noop(void* const user, const size_t size, void* const pointer) +// Heap-backed free helper for guard-path allocations. 
+static void free_heap(void* const user, const size_t size, void* const pointer) { (void)user; (void)size; - (void)pointer; + free(pointer); } -static void* alloc_stub(void* const user, const size_t size) +// Heap-backed allocator for guard-path allocations. +static void* alloc_heap(void* const user, const size_t size) { - (void)size; - return (size > 0U) ? user : NULL; -} - -static void* alloc_alt(void* const user, const size_t size) -{ - (void)size; - return (byte_t*)user + 1; + (void)user; + return (size > 0U) ? malloc(size) : NULL; } -// Minimal vtables for guard-path allocators. -static const udpard_mem_vtable_t mem_vtable_stub = { .base = { .free = free_noop }, .alloc = alloc_stub }; -static const udpard_mem_vtable_t mem_vtable_alt = { .base = { .free = free_noop }, .alloc = alloc_alt }; -static const udpard_deleter_vtable_t deleter_vtable = { .free = free_noop }; +// Shared vtables for guard-path checks. +static const udpard_mem_vtable_t mem_vtable = { .base = { .free = free_heap }, .alloc = alloc_heap }; +static const udpard_deleter_vtable_t del_vtable = { .free = free_heap }; static udpard_mem_t make_mem(void* const tag) { - const udpard_mem_t out = { .vtable = &mem_vtable_stub, .context = tag }; - return out; + (void)tag; + return (udpard_mem_t){ .vtable = &mem_vtable, .context = NULL }; } -static bool eject_subject_stub(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) +// TX eject stub used for constructor checks. +static bool eject_stub(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { (void)tx; (void)ejection; return true; } -static bool eject_p2p_stub(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t dest) -{ - (void)tx; - (void)ejection; - (void)dest; - return true; -} +static const udpard_tx_vtable_t tx_vtable = { .eject = eject_stub }; -static void on_message_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, udpard_rx_transfer_t transfer) +// RX callback stub used for constructor checks. 
+static void on_message_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { (void)rx; (void)port; (void)transfer; } -static void test_mem_endpoint_list_guards(void) -{ - // mem_same covers identical and divergent resources. - static char tag_a; - static char tag_b; - const udpard_mem_t mem_a = make_mem(&tag_a); - const udpard_mem_t mem_b = make_mem(&tag_b); - const udpard_mem_t mem_c = { .vtable = &mem_vtable_alt, .context = &tag_a }; - TEST_ASSERT_TRUE(mem_same(mem_a, mem_a)); - TEST_ASSERT_FALSE(mem_same(mem_a, mem_b)); - TEST_ASSERT_FALSE(mem_same(mem_a, mem_c)); +static const udpard_rx_port_vtable_t rx_vtable = { .on_message = on_message_stub }; - // Endpoint validation handles invalid inputs. +static void test_misc_guards(void) +{ + // Endpoint validity. TEST_ASSERT_TRUE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 1U, .port = UDP_PORT })); TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 0U, .port = UDP_PORT })); TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = UINT32_MAX, .port = UDP_PORT })); TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 1U, .port = 0U })); - // is_listed covers empty and populated state. - udpard_list_t list = { 0 }; - udpard_listed_t member = { 0 }; - TEST_ASSERT_FALSE(is_listed(&list, &member)); - enlist_head(&list, &member); - TEST_ASSERT_TRUE(is_listed(&list, &member)); - // is_listed returns true for non-head members too. - udpard_listed_t tail = { 0 }; - enlist_head(&list, &tail); - TEST_ASSERT_TRUE(is_listed(&list, &member)); - // is_listed returns true when next is populated. - TEST_ASSERT_TRUE(is_listed(&list, &tail)); - - // NULL endpoint list yields empty bitmap. - TEST_ASSERT_EQUAL_UINT16(0U, valid_ep_bitmap(NULL)); -} - -static void test_fragment_guards(void) -{ - // Null fragments return NULL paths cleanly. 
- TEST_ASSERT_NULL(udpard_fragment_seek(NULL, 0)); - TEST_ASSERT_NULL(udpard_fragment_next(NULL)); - - // Offsets past the end yield no data. - static const byte_t payload[] = { 1U, 2U }; - udpard_fragment_t frag = { .index_offset = { NULL, { NULL, NULL }, 0 }, - .offset = 4U, - .view = { .size = sizeof(payload), .data = payload }, - .origin = { .size = 0U, .data = NULL }, - .payload_deleter = { 0 } }; - const udpard_fragment_t* cursor = &frag; - byte_t out[2] = { 0 }; - TEST_ASSERT_NULL(udpard_fragment_seek(&frag, frag.offset + frag.view.size)); - TEST_ASSERT_EQUAL_UINT(0, udpard_fragment_gather(NULL, 0, 1, out)); - TEST_ASSERT_EQUAL_UINT(0, udpard_fragment_gather(&cursor, frag.offset + frag.view.size, 1, out)); - // Offsets inside yield the fragment. - TEST_ASSERT_EQUAL_PTR(&frag, udpard_fragment_seek(&frag, frag.offset)); + // Subject endpoint masking. + const udpard_udpip_ep_t ep = udpard_make_subject_endpoint(0xFFFFFFFFUL); + TEST_ASSERT_EQUAL_UINT16(UDP_PORT, ep.port); + TEST_ASSERT_EQUAL_UINT32(IPv4_MCAST_PREFIX | UDPARD_IPv4_SUBJECT_ID_MAX, ep.ip); } -static void test_header_guard(void) +static void test_tx_new_guards(void) { - // Deserializer rejects missing payload pointers. - meta_t meta = { 0 }; - udpard_bytes_t payload; - uint32_t frame_index = 0; - uint32_t frame_offset = 0; - uint32_t prefix_crc = 0; - TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = HEADER_SIZE_BYTES, .data = NULL }, - &meta, - &frame_index, - &frame_offset, - &prefix_crc, - &payload)); + // Prepare valid memory resources. + static byte_t transfer_pool[1024]; + static byte_t payload_pool[1024]; + const udpard_tx_mem_resources_t mem_ok = { + .transfer = make_mem(transfer_pool), + .payload = { make_mem(payload_pool), make_mem(payload_pool), make_mem(payload_pool) }, + }; + + // Validate constructor argument checks. 
+ udpard_tx_t tx = { 0 }; + TEST_ASSERT_FALSE(udpard_tx_new(NULL, 1U, 1U, 1U, mem_ok, &tx_vtable)); + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 0U, 1U, 1U, mem_ok, &tx_vtable)); + TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 1U, 1U, mem_ok, NULL)); + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4U, mem_ok, &tx_vtable)); + udpard_tx_free(&tx); } -static void test_tx_guards(void) +static void test_tx_push_guards(void) { - // Prepare reusable TX resources. - static char tx_tag; - static char payload_tags[UDPARD_IFACE_COUNT_MAX]; - udpard_tx_mem_resources_t mem = { .transfer = make_mem(&tx_tag) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = make_mem(&payload_tags[i]); - } - const udpard_tx_vtable_t vt_ok = { .eject_subject = eject_subject_stub, .eject_p2p = eject_p2p_stub }; - - // Reject bad initialization inputs. + // Prepare a valid TX instance. + static byte_t transfer_pool[1024]; + static byte_t payload_pool[1024]; + const udpard_tx_mem_resources_t mem_ok = { + .transfer = make_mem(transfer_pool), + .payload = { make_mem(payload_pool), make_mem(payload_pool), make_mem(payload_pool) }, + }; udpard_tx_t tx = { 0 }; - TEST_ASSERT_FALSE(udpard_tx_new(NULL, 1U, 0U, 1U, mem, &vt_ok)); - TEST_ASSERT_FALSE(udpard_tx_new(&tx, 0U, 0U, 1U, mem, &vt_ok)); - TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem, NULL)); - udpard_tx_mem_resources_t mem_bad = mem; - mem_bad.payload[0].vtable = NULL; - TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem_bad, &vt_ok)); - const udpard_tx_vtable_t vt_bad_subject = { .eject_subject = NULL, .eject_p2p = eject_p2p_stub }; - TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem, &vt_bad_subject)); - const udpard_tx_vtable_t vt_bad_p2p = { .eject_subject = eject_subject_stub, .eject_p2p = NULL }; - TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem, &vt_bad_p2p)); - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 0U, 2U, mem, &vt_ok)); + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 1U, 4U, mem_ok, &tx_vtable)); - // Push helpers 
reject invalid timing and null handles. - const uint16_t iface_bitmap_1 = (1U << 0U); - const udpard_bytes_scattered_t empty_payload = { .bytes = { .size = 0U, .data = NULL }, .next = NULL }; - const udpard_remote_t remote_ok = { .uid = 1, .endpoints = { { .ip = 1U, .port = UDP_PORT } } }; - TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 10, 5, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_FALSE( - udpard_tx_push(NULL, 0, 0, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_FALSE( - udpard_tx_push_p2p(NULL, 0, 0, udpard_prio_fast, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); - // P2P pushes reject expired deadlines. - TEST_ASSERT_FALSE( - udpard_tx_push_p2p(&tx, 2, 1, udpard_prio_fast, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); - // P2P pushes reject negative timestamps. + // Validate argument checks for subject push. + const udpard_bytes_scattered_t empty_payload = make_scattered("", 0U); TEST_ASSERT_FALSE( - udpard_tx_push_p2p(&tx, -1, 0, udpard_prio_fast, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); - // Reject invalid payload pointer and empty interface bitmap. 
- const udpard_bytes_scattered_t bad_payload = { .bytes = { .size = 1U, .data = NULL }, .next = NULL }; + udpard_tx_push(NULL, 0, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 0, 1, iface_bitmap_1, udpard_prio_fast, 1U, bad_payload, NULL, UDPARD_USER_CONTEXT_NULL)); + udpard_tx_push(&tx, 2, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 0, 1, 0U, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - const udpard_remote_t remote_bad = { .uid = 1, .endpoints = { { 0 } } }; - TEST_ASSERT_FALSE( - udpard_tx_push_p2p(&tx, 0, 1, udpard_prio_fast, remote_bad, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, NULL)); - - // Reject invalid timestamps and priority. - TEST_ASSERT_FALSE( - udpard_tx_push(&tx, -1, 0, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - // Use an out-of-range priority without a constant enum cast. - udpard_prio_t bad_prio = udpard_prio_optional; - const unsigned bad_prio_raw = UDPARD_PRIORITY_COUNT; - memcpy(&bad_prio, &bad_prio_raw, sizeof(bad_prio)); - TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 0, 1, iface_bitmap_1, bad_prio, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - - // Reject zero local UID. - const uint64_t saved_uid = tx.local_uid; - tx.local_uid = 0U; - TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 0, 1, iface_bitmap_1, udpard_prio_fast, 1U, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - tx.local_uid = saved_uid; - - // P2P guard paths cover local UID, priority, and payload pointer. 
- uint64_t out_tid = 0; - tx.local_uid = 0U; - TEST_ASSERT_FALSE(udpard_tx_push_p2p( - &tx, 0, 1, udpard_prio_fast, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, &out_tid)); - tx.local_uid = saved_uid; - TEST_ASSERT_FALSE( - udpard_tx_push_p2p(&tx, 0, 1, bad_prio, remote_ok, empty_payload, NULL, UDPARD_USER_CONTEXT_NULL, &out_tid)); - TEST_ASSERT_FALSE(udpard_tx_push_p2p( - &tx, 0, 1, udpard_prio_fast, remote_ok, bad_payload, NULL, UDPARD_USER_CONTEXT_NULL, &out_tid)); - - // Poll and refcount no-ops on null data. - udpard_tx_poll(NULL, 0, 0); - udpard_tx_poll(&tx, (udpard_us_t)-1, 0); - // Pending ifaces are zero for NULL. - TEST_ASSERT_EQUAL_UINT16(0U, udpard_tx_pending_ifaces(NULL)); - udpard_tx_refcount_inc((udpard_bytes_t){ .size = 0U, .data = NULL }); - udpard_tx_refcount_dec((udpard_bytes_t){ .size = 0U, .data = NULL }); - udpard_tx_free(NULL); + udpard_tx_push(&tx, 0, 1, 0U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); + TEST_ASSERT_FALSE(udpard_tx_push( + &tx, 0, 1, 1U, udpard_prio_fast, 1U, (udpard_udpip_ep_t){ .ip = 0U, .port = 0U }, empty_payload, NULL)); + TEST_ASSERT_TRUE( + udpard_tx_push(&tx, 0, 1, 1U, udpard_prio_fast, 1U, udpard_make_subject_endpoint(1U), empty_payload, NULL)); udpard_tx_free(&tx); } -static void test_tx_predictor_sharing(void) +static void test_tx_push_p2p_guards(void) { - // Shared spool suppresses duplicate frame counts. - static char shared_tag[2]; - const udpard_mem_t mem_shared = make_mem(&shared_tag[0]); - const udpard_mem_t mem_arr[UDPARD_IFACE_COUNT_MAX] = { mem_shared, mem_shared, make_mem(&shared_tag[1]) }; - const size_t mtu[UDPARD_IFACE_COUNT_MAX] = { 64U, 64U, 128U }; - const uint16_t iface_bitmap_12 = (1U << 0U) | (1U << 1U); - TEST_ASSERT_EQUAL_size_t(1U, tx_predict_frame_count(mtu, mem_arr, iface_bitmap_12, 16U)); - // Non-shared spool counts each interface. 
- const udpard_mem_t mem_arr_split[UDPARD_IFACE_COUNT_MAX] = { make_mem(&shared_tag[0]), - make_mem(&shared_tag[1]), - make_mem(&shared_tag[1]) }; - TEST_ASSERT_EQUAL_size_t(2U, tx_predict_frame_count(mtu, mem_arr_split, iface_bitmap_12, 16U)); - - // Shared spool when payload fits smaller MTU despite mismatch. - const size_t mtu_mixed[UDPARD_IFACE_COUNT_MAX] = { 64U, 128U, 128U }; - const uint16_t iface_bitmap_01 = (1U << 0U) | (1U << 1U); - TEST_ASSERT_EQUAL_size_t(1U, tx_predict_frame_count(mtu_mixed, mem_arr, iface_bitmap_01, 32U)); - - // Gapped bitmap exercises the unset-bit branch. - static char gap_tag[3]; - const udpard_mem_t mem_gap[UDPARD_IFACE_COUNT_MAX] = { make_mem(&gap_tag[0]), - make_mem(&gap_tag[1]), - make_mem(&gap_tag[2]) }; - const size_t mtu_gap[UDPARD_IFACE_COUNT_MAX] = { 64U, 64U, 64U }; - const uint16_t iface_bitmap_02 = (1U << 0U) | (1U << 2U); - TEST_ASSERT_EQUAL_size_t(2U, tx_predict_frame_count(mtu_gap, mem_gap, iface_bitmap_02, 16U)); + // Prepare a valid TX instance. + static byte_t transfer_pool[1024]; + static byte_t payload_pool[1024]; + const udpard_tx_mem_resources_t mem_ok = { + .transfer = make_mem(transfer_pool), + .payload = { make_mem(payload_pool), make_mem(payload_pool), make_mem(payload_pool) }, + }; + udpard_tx_t tx = { 0 }; + TEST_ASSERT_TRUE(udpard_tx_new(&tx, 2U, 2U, 4U, mem_ok, &tx_vtable)); + + // Validate argument checks for P2P push. 
+ const udpard_bytes_scattered_t empty_payload = make_scattered("", 0U); + udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX] = { 0 }; + endpoints[0] = (udpard_udpip_ep_t){ .ip = 0x0A000001U, .port = 9000U }; + TEST_ASSERT_FALSE(udpard_tx_push_p2p(NULL, 0, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); + TEST_ASSERT_FALSE(udpard_tx_push_p2p(&tx, 2, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); + TEST_ASSERT_TRUE(udpard_tx_push_p2p(&tx, 0, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); + endpoints[0] = (udpard_udpip_ep_t){ .ip = 0U, .port = 0U }; + TEST_ASSERT_FALSE(udpard_tx_push_p2p(&tx, 0, 1, udpard_prio_nominal, endpoints, empty_payload, NULL)); + udpard_tx_free(&tx); } -static void test_rx_guards(void) +static void test_rx_port_push_guards(void) { - // RX port creation guards reject invalid parameters. - static char rx_tag_a; - static char rx_tag_b; - const udpard_rx_mem_resources_t rx_mem = { .session = make_mem(&rx_tag_a), - .slot = make_mem(&rx_tag_a), - .fragment = make_mem(&rx_tag_b) }; - const udpard_rx_port_vtable_t rx_vtb = { .on_message = on_message_stub }; - udpard_rx_port_t port; - TEST_ASSERT_FALSE(udpard_rx_port_new(NULL, 0, rx_mem, &rx_vtb)); - TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, rx_mem, NULL)); - const udpard_rx_port_vtable_t rx_vtb_no_msg = { .on_message = NULL }; - TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, rx_mem, &rx_vtb_no_msg)); - udpard_rx_mem_resources_t bad_rx_mem = rx_mem; - bad_rx_mem.session.vtable = NULL; - TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, bad_rx_mem, &rx_vtb)); - // rx_validate_mem_resources rejects missing hooks. 
- const udpard_mem_vtable_t vtable_no_free = { .base = { .free = NULL }, .alloc = alloc_stub }; - const udpard_mem_vtable_t vtable_no_alloc = { .base = { .free = free_noop }, .alloc = NULL }; - udpard_rx_mem_resources_t bad_session = rx_mem; - bad_session.session.vtable = &vtable_no_free; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_session)); - bad_session.session.vtable = &vtable_no_alloc; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_session)); - udpard_rx_mem_resources_t bad_slot = rx_mem; - bad_slot.slot.vtable = &vtable_no_free; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_slot)); - bad_slot.slot.vtable = &vtable_no_alloc; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_slot)); - udpard_rx_mem_resources_t bad_fragment = rx_mem; - bad_fragment.fragment.vtable = &vtable_no_free; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); - bad_fragment.fragment.vtable = &vtable_no_alloc; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); - bad_fragment.fragment.vtable = NULL; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_fragment)); - TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port, 8U, rx_mem, &rx_vtb)); - TEST_ASSERT_FALSE(udpard_rx_port_new_stateless(&port, 8U, bad_fragment, &rx_vtb)); - TEST_ASSERT_FALSE(udpard_rx_port_new_p2p(&port, 8U, bad_fragment, &rx_vtb)); - - // Invalid datagram inputs are rejected without processing. - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); + // Prepare RX and port. + static byte_t session_pool[1024]; + static byte_t fragment_pool[1024]; + const udpard_rx_mem_resources_t rx_mem = { + .session = make_mem(session_pool), + .slot = make_mem(session_pool), + .fragment = make_mem(fragment_pool), + }; + udpard_rx_t rx = { 0 }; + udpard_rx_port_t port = { 0 }; + udpard_rx_new(&rx); + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 256U, rx_mem, &rx_vtable)); + + // Build one valid datagram then check argument validation. 
+ byte_t datagram[HEADER_SIZE_BYTES] = { 0 }; + const meta_t meta = { + .priority = udpard_prio_nominal, + .transfer_payload_size = 0U, + .transfer_id = 1U, + .sender_uid = 3U, + }; + (void)header_serialize(datagram, meta, 0U, crc_full(0U, NULL)); + const udpard_bytes_mut_t payload = { .size = sizeof(datagram), .data = datagram }; + const udpard_deleter_t del = { .vtable = &del_vtable, .context = NULL }; + TEST_ASSERT_FALSE( + udpard_rx_port_push(NULL, &port, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 0U)); + TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, NULL, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 0U)); + TEST_ASSERT_FALSE( + udpard_rx_port_push(&rx, &port, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 99U)); TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, &port, 0, - (udpard_udpip_ep_t){ 0U, 0U }, - (udpard_bytes_mut_t){ .size = 0U, .data = NULL }, - (udpard_deleter_t){ .vtable = NULL, .context = NULL }, - UDPARD_IFACE_COUNT_MAX)); - const udpard_bytes_mut_t small_payload = { .size = 1U, .data = (void*)1 }; - TEST_ASSERT_FALSE( - udpard_rx_port_push(&rx, - &port, - 0, - (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, - small_payload, - (udpard_deleter_t){ .vtable = &(udpard_deleter_vtable_t){ .free = NULL }, .context = NULL }, - 0)); - // Cover each guard term with a valid baseline payload. 
- const udpard_deleter_t deleter_ok = { .vtable = &deleter_vtable, .context = NULL }; - byte_t dgram[HEADER_SIZE_BYTES]; - const meta_t meta = { .priority = udpard_prio_nominal, - .kind = frame_msg_best, - .transfer_payload_size = 0, - .transfer_id = 1, - .sender_uid = 2 }; - header_serialize(dgram, meta, 0, 0, crc_full(0, NULL)); - const udpard_bytes_mut_t dgram_view = { .size = sizeof(dgram), .data = dgram }; - const udpard_udpip_ep_t ep_ok = { .ip = 1U, .port = UDP_PORT }; - TEST_ASSERT_FALSE(udpard_rx_port_push(NULL, &port, 0, ep_ok, dgram_view, deleter_ok, 0)); - TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, NULL, 0, ep_ok, dgram_view, deleter_ok, 0)); - TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, &port, -1, ep_ok, dgram_view, deleter_ok, 0)); - TEST_ASSERT_FALSE( - udpard_rx_port_push(&rx, &port, 0, (udpard_udpip_ep_t){ .ip = 0U, .port = UDP_PORT }, dgram_view, deleter_ok, 0)); - TEST_ASSERT_FALSE( - udpard_rx_port_push(&rx, &port, 0, ep_ok, (udpard_bytes_mut_t){ .size = 1U, .data = NULL }, deleter_ok, 0)); - TEST_ASSERT_FALSE(udpard_rx_port_push(&rx, &port, 0, ep_ok, dgram_view, deleter_ok, UDPARD_IFACE_COUNT_MAX)); - TEST_ASSERT_FALSE( - udpard_rx_port_push(&rx, &port, 0, ep_ok, dgram_view, (udpard_deleter_t){ .vtable = NULL, .context = NULL }, 0)); - TEST_ASSERT_FALSE( - udpard_rx_port_push(&rx, - &port, - 0, - ep_ok, - dgram_view, - (udpard_deleter_t){ .vtable = &(udpard_deleter_vtable_t){ .free = NULL }, .context = NULL }, - 0)); - - // ACK frames are accepted on P2P ports. - udpard_rx_port_t port_p2p; - TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&port_p2p, 8U, rx_mem, &rx_vtb)); - const meta_t ack_meta = { .priority = udpard_prio_nominal, - .kind = frame_ack, - .transfer_payload_size = 0, - .transfer_id = 2, - .sender_uid = 3 }; - header_serialize(dgram, ack_meta, 0, 0, crc_full(0, NULL)); - TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port_p2p, 0, ep_ok, dgram_view, deleter_ok, 0)); - - // ACK frames are rejected on non-P2P ports. 
- const uint64_t errors_before_ack = rx.errors_frame_malformed; - header_serialize(dgram, ack_meta, 0, 0, crc_full(0, NULL)); - TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, 0, ep_ok, dgram_view, deleter_ok, 0)); - TEST_ASSERT_EQUAL_UINT64(errors_before_ack + 1U, rx.errors_frame_malformed); + (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, + (udpard_bytes_mut_t){ .size = 1U, .data = NULL }, + del, + 0U)); + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, 0, (udpard_udpip_ep_t){ .ip = 1U, .port = 1U }, payload, del, 0U)); - // Malformed frames are rejected after parsing. - const uint64_t errors_before_bad = rx.errors_frame_malformed; - header_serialize(dgram, meta, 0, 0, crc_full(0, NULL)); - dgram[HEADER_SIZE_BYTES - 1] ^= 0xFFU; - TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, 0, ep_ok, dgram_view, deleter_ok, 0)); - TEST_ASSERT_EQUAL_UINT64(errors_before_bad + 1U, rx.errors_frame_malformed); - - // Port freeing should tolerate null rx. - udpard_rx_port_free(NULL, &port); - udpard_rx_port_free(&rx, NULL); - - // Fragments past extent are discarded early. 
- udpard_tree_t* root = NULL; - byte_t buf[1] = { 0 }; - size_t covered = 0; - const rx_frame_base_t frame = { .offset = 1U, - .payload = { .size = sizeof(buf), .data = buf }, - .origin = { .size = sizeof(buf), .data = buf } }; - static char frag_tag; - const udpard_mem_t frag_mem = make_mem(&frag_tag); - const udpard_deleter_t deleter = { .vtable = &deleter_vtable, .context = NULL }; - TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, - rx_fragment_tree_update(&root, frag_mem, deleter, frame, 0U, 0U, &covered)); + udpard_rx_port_free(&rx, &port); } void setUp(void) {} - void tearDown(void) {} int main(void) { UNITY_BEGIN(); - RUN_TEST(test_mem_endpoint_list_guards); - RUN_TEST(test_fragment_guards); - RUN_TEST(test_header_guard); - RUN_TEST(test_tx_guards); - RUN_TEST(test_tx_predictor_sharing); - RUN_TEST(test_rx_guards); + RUN_TEST(test_misc_guards); + RUN_TEST(test_tx_new_guards); + RUN_TEST(test_tx_push_guards); + RUN_TEST(test_tx_push_p2p_guards); + RUN_TEST(test_rx_port_push_guards); return UNITY_END(); } diff --git a/tests/src/test_intrusive_header.c b/tests/src/test_intrusive_header.c index bbc367e..43f5cf5 100644 --- a/tests/src/test_intrusive_header.c +++ b/tests/src/test_intrusive_header.c @@ -6,194 +6,116 @@ #include // NOLINT(bugprone-suspicious-include) #include -static void test_header_v2(void) +// Recomputes and stores the header CRC after local edits. +static void rewrite_header_crc(byte_t* const datagram) { - byte_t buffer[64]; - meta_t meta_in = { - .priority = udpard_prio_high, - .kind = frame_msg_best, - .transfer_payload_size = 0xDEADBEEF, + const uint32_t crc = crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, datagram); + (void)serialize_u32(&datagram[HEADER_SIZE_BYTES - CRC_SIZE_BYTES], crc); +} + +static void test_header_roundtrip(void) +{ + byte_t dgram[64] = { 0 }; + for (size_t i = HEADER_SIZE_BYTES; i < sizeof(dgram); i++) { + dgram[i] = (byte_t)i; + } + + // Build and serialize a valid first frame. 
+ const meta_t meta_in = { + .priority = udpard_prio_fast, + .transfer_payload_size = (uint32_t)(sizeof(dgram) - HEADER_SIZE_BYTES), .transfer_id = 0xAABBCCDDEEFF0011ULL, .sender_uid = 0x1122334455667788ULL, }; - // For a first frame (frame_payload_offset=0), frame_index must also be 0 - // Compute the correct prefix_crc from the payload - memset(&buffer[HEADER_SIZE_BYTES], 0, sizeof(buffer) - HEADER_SIZE_BYTES); // Initialize payload - const uint32_t payload_crc = crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]); - header_serialize(buffer, meta_in, 0, 0, payload_crc); // frame_index=0, frame_payload_offset=0 for first frame - memset(&buffer[HEADER_SIZE_BYTES], 0, sizeof(buffer) - HEADER_SIZE_BYTES); // Re-initialize payload to match - - // We don't validate the exact byte layout anymore since we compute prefix_crc dynamically - // Just verify deserialization works correctly - - meta_t meta_out; - udpard_bytes_t payload_out; - uint32_t frame_index = 0; - uint32_t frame_payload_offset = 0; - uint32_t prefix_crc = 0; - TEST_ASSERT(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - TEST_ASSERT_EQUAL(sizeof(buffer) - HEADER_SIZE_BYTES, payload_out.size); - TEST_ASSERT_EQUAL(&buffer[HEADER_SIZE_BYTES], payload_out.data); - - TEST_ASSERT_EQUAL_UINT8(meta_in.priority, meta_out.priority); - TEST_ASSERT_EQUAL_UINT32(meta_in.kind, meta_out.kind); - TEST_ASSERT_EQUAL_UINT32(0, frame_index); // First frame has index 0 - TEST_ASSERT_EQUAL_UINT32(0, frame_payload_offset); // First frame has offset 0 - TEST_ASSERT_EQUAL_UINT32(payload_crc, prefix_crc); // For first frame, prefix_crc equals payload CRC + const uint32_t payload_crc = crc_full(sizeof(dgram) - HEADER_SIZE_BYTES, &dgram[HEADER_SIZE_BYTES]); + (void)header_serialize(dgram, meta_in, 0, payload_crc); + + // Deserialize and verify all fields. 
+ meta_t meta_out = { 0 }; + uint32_t offset = 0; + uint32_t prefix = 0; + udpard_bytes_t payload = { 0 }; + TEST_ASSERT_TRUE(header_deserialize( + (udpard_bytes_mut_t){ .size = sizeof(dgram), .data = dgram }, &meta_out, &offset, &prefix, &payload)); + TEST_ASSERT_EQUAL_UINT32(0, offset); + TEST_ASSERT_EQUAL_UINT32(payload_crc, prefix); + TEST_ASSERT_EQUAL_UINT32(sizeof(dgram) - HEADER_SIZE_BYTES, payload.size); + TEST_ASSERT_EQUAL_PTR(&dgram[HEADER_SIZE_BYTES], payload.data); + TEST_ASSERT_EQUAL_UINT32(meta_in.priority, meta_out.priority); TEST_ASSERT_EQUAL_UINT32(meta_in.transfer_payload_size, meta_out.transfer_payload_size); - TEST_ASSERT_EQUAL_UINT64(meta_in.transfer_id, meta_out.transfer_id); + TEST_ASSERT_EQUAL_UINT64(meta_in.transfer_id & UDPARD_TRANSFER_ID_MASK, meta_out.transfer_id); TEST_ASSERT_EQUAL_UINT64(meta_in.sender_uid, meta_out.sender_uid); - - TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = 23, .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - - TEST_ASSERT(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - buffer[HEADER_SIZE_BYTES - 1] ^= 0xFFU; // Corrupt the CRC. 
- TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); } -static void test_header_deserialize_edge_cases(void) +static void test_header_validation(void) { - byte_t buffer[64]; - meta_t meta_in = { + byte_t dgram[64] = { 0 }; + for (size_t i = HEADER_SIZE_BYTES; i < sizeof(dgram); i++) { + dgram[i] = (byte_t)(0x55U ^ (byte_t)i); + } + const meta_t meta = { .priority = udpard_prio_nominal, - .kind = frame_msg_reliable, - .transfer_payload_size = 1000, - .transfer_id = 0x1234567890ABCDEFULL, - .sender_uid = 0xFEDCBA9876543210ULL, + .transfer_payload_size = (uint32_t)(sizeof(dgram) - HEADER_SIZE_BYTES), + .transfer_id = 123, + .sender_uid = 456, }; - - meta_t meta_out; - udpard_bytes_t payload_out; - uint32_t frame_index = 0; - uint32_t frame_payload_offset = 0; - uint32_t prefix_crc = 0; - - // Test invalid version (version != 2) - memset(&buffer[HEADER_SIZE_BYTES], 0, sizeof(buffer) - HEADER_SIZE_BYTES); // Initialize payload - const uint32_t payload_crc_v1 = crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]); - header_serialize(buffer, meta_in, 0, 0, payload_crc_v1); - buffer[0] = (buffer[0] & 0xE0U) | 3U; // Set version to 3 instead of 2 - // Recalculate CRC for the corrupted header - const uint32_t new_crc = crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer); - buffer[HEADER_SIZE_BYTES - 4] = (byte_t)(new_crc & 0xFFU); - buffer[HEADER_SIZE_BYTES - 3] = (byte_t)((new_crc >> 8U) & 0xFFU); - buffer[HEADER_SIZE_BYTES - 2] = (byte_t)((new_crc >> 16U) & 0xFFU); - buffer[HEADER_SIZE_BYTES - 1] = (byte_t)((new_crc >> 24U) & 0xFFU); - TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - - // Test frame_payload_offset validation: offset + payload > transfer_payload_size - // For 
non-first frames, prefix_crc can be any value (not validated) - header_serialize(buffer, meta_in, 5, 900, 0x12345678); // frame_index=5, offset=900 - // Payload size in buffer after header is 64-48=16 bytes - // So offset(900) + payload(16) = 916 > transfer_payload_size(1000) is OK - // But offset(995) + payload(16) = 1011 > transfer_payload_size(1000) should fail - buffer[8] = 0xE3; // Change offset to 995 (0x03E3) little-endian - buffer[9] = 0x03; - buffer[10] = 0x00; - buffer[11] = 0x00; - const uint32_t new_crc2 = crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer); - buffer[HEADER_SIZE_BYTES - 4] = (byte_t)(new_crc2 & 0xFFU); - buffer[HEADER_SIZE_BYTES - 3] = (byte_t)((new_crc2 >> 8U) & 0xFFU); - buffer[HEADER_SIZE_BYTES - 2] = (byte_t)((new_crc2 >> 16U) & 0xFFU); - buffer[HEADER_SIZE_BYTES - 1] = (byte_t)((new_crc2 >> 24U) & 0xFFU); - TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - - // Test frame_index != 0 but frame_payload_offset == 0 (invalid) - const uint32_t payload_crc_v3 = crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]); - header_serialize(buffer, meta_in, 1, 0, payload_crc_v3); // frame_index=1, offset=0 is invalid - TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - - // Test invalid prefix_crc on first frame (offset=0, prefix_crc must match payload CRC) - header_serialize(buffer, meta_in, 0, 0, 0xDEADBEEF); // Wrong CRC for first frame - TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - - // Test valid case with reliable kind (first frame, so prefix_crc must match payload) - const uint32_t payload_crc_v4 = 
crc_full(sizeof(buffer) - HEADER_SIZE_BYTES, &buffer[HEADER_SIZE_BYTES]); - header_serialize(buffer, meta_in, 0, 0, payload_crc_v4); - TEST_ASSERT(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - TEST_ASSERT_EQUAL_UINT32(frame_msg_reliable, meta_out.kind); - TEST_ASSERT_EQUAL_UINT32(payload_crc_v4, prefix_crc); - - // Reject ACK frames with nonzero offset. - meta_in.kind = frame_ack; - header_serialize(buffer, meta_in, 1, 1, 0U); - TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); - - // Reject invalid kind. - meta_in.kind = frame_msg_best; - header_serialize(buffer, meta_in, 0, 0, payload_crc_v4); - buffer[1] = 0xFFU; - const uint32_t new_crc3 = crc_full(HEADER_SIZE_BYTES - CRC_SIZE_BYTES, buffer); - buffer[HEADER_SIZE_BYTES - 4] = (byte_t)(new_crc3 & 0xFFU); - buffer[HEADER_SIZE_BYTES - 3] = (byte_t)((new_crc3 >> 8U) & 0xFFU); - buffer[HEADER_SIZE_BYTES - 2] = (byte_t)((new_crc3 >> 16U) & 0xFFU); - buffer[HEADER_SIZE_BYTES - 1] = (byte_t)((new_crc3 >> 24U) & 0xFFU); - TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = sizeof(buffer), .data = buffer }, - &meta_out, - &frame_index, - &frame_payload_offset, - &prefix_crc, - &payload_out)); + const uint32_t payload_crc = crc_full(sizeof(dgram) - HEADER_SIZE_BYTES, &dgram[HEADER_SIZE_BYTES]); + (void)header_serialize(dgram, meta, 0, payload_crc); + + // Baseline validity. + meta_t meta_out = { 0 }; + uint32_t offset = 0; + uint32_t prefix = 0; + udpard_bytes_t payload = { 0 }; + TEST_ASSERT_TRUE(header_deserialize( + (udpard_bytes_mut_t){ .size = sizeof(dgram), .data = dgram }, &meta_out, &offset, &prefix, &payload)); + + // Reject malformed datagram length. 
+ TEST_ASSERT_FALSE(header_deserialize( + (udpard_bytes_mut_t){ .size = HEADER_SIZE_BYTES - 1U, .data = dgram }, &meta_out, &offset, &prefix, &payload)); + + // Reject bad CRC. + dgram[HEADER_SIZE_BYTES - 1U] ^= 0xA5U; + TEST_ASSERT_FALSE(header_deserialize( + (udpard_bytes_mut_t){ .size = sizeof(dgram), .data = dgram }, &meta_out, &offset, &prefix, &payload)); + dgram[HEADER_SIZE_BYTES - 1U] ^= 0xA5U; + rewrite_header_crc(dgram); + + // Reject unsupported version. + dgram[0] = (byte_t)((dgram[0] & 0xE0U) | 3U); + rewrite_header_crc(dgram); + TEST_ASSERT_FALSE(header_deserialize( + (udpard_bytes_mut_t){ .size = sizeof(dgram), .data = dgram }, &meta_out, &offset, &prefix, &payload)); + dgram[0] = (byte_t)((dgram[0] & 0xE0U) | HEADER_VERSION); + rewrite_header_crc(dgram); + + // Reject unsupported incompatibility flags. + dgram[1] = 0x20U; + rewrite_header_crc(dgram); + TEST_ASSERT_FALSE(header_deserialize( + (udpard_bytes_mut_t){ .size = sizeof(dgram), .data = dgram }, &meta_out, &offset, &prefix, &payload)); + dgram[1] = 0x00U; + rewrite_header_crc(dgram); + + // Reject offset that would exceed the declared transfer payload size. + (void)serialize_u32(&dgram[16], 0xFFFFFFF0U); + rewrite_header_crc(dgram); + TEST_ASSERT_FALSE(header_deserialize( + (udpard_bytes_mut_t){ .size = sizeof(dgram), .data = dgram }, &meta_out, &offset, &prefix, &payload)); + + // Reject bad first-frame prefix CRC. 
+ (void)header_serialize(dgram, meta, 0, payload_crc ^ 0xFFFFFFFFU); + TEST_ASSERT_FALSE(header_deserialize( + (udpard_bytes_mut_t){ .size = sizeof(dgram), .data = dgram }, &meta_out, &offset, &prefix, &payload)); } void setUp(void) {} - void tearDown(void) {} int main(void) { UNITY_BEGIN(); - RUN_TEST(test_header_v2); - RUN_TEST(test_header_deserialize_edge_cases); + RUN_TEST(test_header_roundtrip); + RUN_TEST(test_header_validation); return UNITY_END(); } diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index cdeaffa..b02a03b 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -3,2699 +3,277 @@ /// Copyright Amazon.com Inc. or its affiliates. /// SPDX-License-Identifier: MIT -// ReSharper disable CppDFATimeOver - #include // NOLINT(bugprone-suspicious-include) #include "helpers.h" #include +#include -static size_t tree_count(udpard_tree_t* const root) // how many make a forest? -{ - size_t count = 0; - for (udpard_tree_t* p = cavl2_min(root); p != NULL; p = cavl2_next_greater(p)) { - count++; - } - return count; -} - -/// Allocates the payload on the heap, emulating normal frame reception. -static rx_frame_base_t make_frame_base(const udpard_mem_t mem_payload, - const size_t offset, - const size_t size, - const void* const payload) -{ - void* data = mem_res_alloc(mem_payload, size); - if (size > 0) { - memcpy(data, payload, size); - } - return (rx_frame_base_t){ .offset = offset, - .payload = { .data = data, .size = size }, - .origin = { .data = data, .size = size } }; -} -/// The payload string cannot contain NUL characters. -static rx_frame_base_t make_frame_base_str(const udpard_mem_t mem_payload, - const size_t offset, - const char* const payload) -{ - return make_frame_base(mem_payload, offset, (payload != NULL) ? (strlen(payload) + 1) : 0U, payload); -} - -/// The created frame will copy the given full transfer payload at the specified offset, of the specified size. 
-/// The full transfer payload can be invalidated after this call. It is needed here so that we could compute the -/// CRC prefix correctly, which covers the transfer payload bytes in [0,(offset+size)]. -static rx_frame_t make_frame(const meta_t meta, - const udpard_mem_t mem_payload, - const void* const full_transfer_payload, - const size_t frame_payload_offset, - const size_t frame_payload_size) -{ - rx_frame_base_t base = make_frame_base(mem_payload, - frame_payload_offset, - frame_payload_size, - (const uint8_t*)full_transfer_payload + frame_payload_offset); - base.crc = crc_full(frame_payload_offset + frame_payload_size, (const uint8_t*)full_transfer_payload); - return (rx_frame_t){ .base = base, .meta = meta }; -} -/// A helper that creates a frame in static storage and returns a reference to it. This is a testing aid. -static rx_frame_t* make_frame_ptr(const meta_t meta, - const udpard_mem_t mem_payload, - const void* const full_transfer_payload, - const size_t frame_payload_offset, - const size_t frame_payload_size) -{ - static rx_frame_t frame; - frame = make_frame(meta, mem_payload, full_transfer_payload, frame_payload_offset, frame_payload_size); - return &frame; -} - -/// Scans the transfer payload ensuring that its payload exactly matches the reference. -/// The node can be any node in the tree. 
-static bool transfer_payload_verify(udpard_rx_transfer_t* const transfer, - const size_t payload_size_stored, - const void* const payload, - const size_t payload_size_wire) -{ - const udpard_fragment_t* frag = udpard_fragment_seek(transfer->payload, 0); - size_t offset = 0; - while (frag != NULL) { - if (frag->offset != offset) { - return false; - } - if ((offset + frag->view.size) > payload_size_stored) { - return false; - } - if (memcmp(frag->view.data, (const uint8_t*)payload + offset, frag->view.size) != 0) { - return false; - } - offset += frag->view.size; - frag = udpard_fragment_next(frag); - } - return (transfer->payload_size_wire == payload_size_wire) && (offset == payload_size_stored); -} - -// --------------------------------------------- RX FRAGMENT TREE --------------------------------------------- - -static udpard_fragment_t* fragment_at(udpard_tree_t* const root, uint32_t index) -{ - for (udpard_fragment_t* it = (udpard_fragment_t*)cavl2_min(root); it != NULL; - it = (udpard_fragment_t*)cavl2_next_greater(&it->index_offset)) { - if (index-- == 0U) { - return it; - } - } - return NULL; -} - -static bool fragment_equals(udpard_fragment_t* const frag, - const size_t offset, - const size_t size, - const void* const payload) -{ - if ((frag == NULL) || (frag->offset != offset) || (frag->view.size != size)) { - return false; - } - return (size == 0U) || (memcmp(frag->view.data, payload, size) == 0); -} - -/// Scans the fragment tree ensuring that its payload exactly matches the reference. -/// The node can be any node in the tree. -static bool fragment_tree_verify(udpard_tree_t* const root, - const size_t payload_size, - const void* const payload, - const uint32_t crc) -{ - // Remove redundancies from the payload tree and check the CRC. - if (!rx_fragment_tree_finalize(root, crc)) { - return false; - } - // Scan the payload tree. 
- size_t offset = 0; - for (udpard_fragment_t* it = (udpard_fragment_t*)cavl2_min(root); it != NULL; - it = (udpard_fragment_t*)cavl2_next_greater(&it->index_offset)) { - if (it->offset != offset) { - return false; - } - if ((offset + it->view.size) > payload_size) { - return false; - } - if ((it->view.size > 0) && (memcmp(it->view.data, (const uint8_t*)payload + offset, it->view.size) != 0)) { - return false; - } - offset += it->view.size; - } - return offset == payload_size; -} - -/// Reference CRC calculation: -/// >>> from pycyphal.transport.commons.crc import CRC32C -/// >>> hex(CRC32C.new(b"abc\0").value) + "UL" -static void test_rx_fragment_tree_update_a(void) -{ - instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - // Empty payload test - { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - // - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 0, NULL), - 0, - 0, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - TEST_ASSERT_EQUAL_size_t(0, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(1, tree_count(root)); - // Check the retained payload. - TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->view.size); - TEST_ASSERT_NULL(fragment_at(root, 1)); - // Check the heap. 
- TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); // bc payload empty - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); - // Verify the payload and free the tree. - TEST_ASSERT(fragment_tree_verify(root, 0, "", 0)); - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - // Check the heap. - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); // bc payload empty - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); - - // Redundant fragment removal when a larger fragment bridges neighbors. 
- { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - const char payload[] = "abcdefghij"; - - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 0, 2, payload), - 10, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 2, 2, payload + 2), - 10, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 6, 2, payload + 6), - 10, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(3, tree_count(root)); - - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 1, 6, payload + 1), - 10, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(3, tree_count(root)); - TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(1, fragment_at(root, 1)->offset); - TEST_ASSERT_EQUAL_size_t(6, fragment_at(root, 2)->offset); - - // Cleanup. - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); - - // Non-empty payload test with zero extent. - { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - // Add fragment. 
- res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 0, "abc"), - 4, - 0, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - TEST_ASSERT_EQUAL_size_t(4, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(1, tree_count(root)); - // Check the retained payload. - TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 0)->view.size); - TEST_ASSERT_NULL(fragment_at(root, 1)); - // Check the heap. - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); - // Verify and free the tree (as in freedom). - TEST_ASSERT(fragment_tree_verify(root, 4, "abc", 0x34940e4cUL)); - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - // Check the heap. - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); - - // Non-empty payload with non-zero extent. - { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - // Add fragment beyond the extent, dropped early. 
- res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 3, "abcdef"), - 8, - 3, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); - TEST_ASSERT_EQUAL_size_t(0, cov); - TEST_ASSERT_NULL(root); - TEST_ASSERT_EQUAL(0, tree_count(root)); - // Add fragment. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 0, "abcdef"), - 7, - 3, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - TEST_ASSERT_EQUAL_size_t(7, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(1, tree_count(root)); - // Check the retained payload. - TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(7, fragment_at(root, 0)->view.size); - TEST_ASSERT_EQUAL_STRING("abcdef", fragment_at(root, 0)->view.data); - TEST_ASSERT_NULL(fragment_at(root, 1)); - // Check the heap. - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); - // Free the tree (as in freedom). - TEST_ASSERT(fragment_tree_verify(root, 7, "abcdef", 0x532b03c8UL)); - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - // Check the heap. 
- TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_free); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); - - // Multi-frame reassembly test: "abc def xyz "; the last nul is beyond the extent. - { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - // Add fragment. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 0, "abc"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(4, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(1, tree_count(root)); - // Add fragment. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 8, "xyz"), - 100, - 11, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(4, cov); // not extended due to the gap in the middle. - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(2, tree_count(root)); - // Add fragment. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 4, "def"), - 100, - 11, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - TEST_ASSERT_EQUAL_size_t(12, cov); // extended to cover the two remaining frames. - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(3, tree_count(root)); - // Check the retained payload. 
- TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 0)->view.size); - TEST_ASSERT_EQUAL_STRING("abc", fragment_at(root, 0)->view.data); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 1)->offset); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 1)->view.size); - TEST_ASSERT_EQUAL_STRING("def", fragment_at(root, 1)->view.data); - TEST_ASSERT_EQUAL_size_t(8, fragment_at(root, 2)->offset); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->view.size); - TEST_ASSERT_EQUAL_STRING("xyz", fragment_at(root, 2)->view.data); - TEST_ASSERT_NULL(fragment_at(root, 3)); - // Check the heap. - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); - // Free the tree (as in freedom). - TEST_ASSERT(fragment_tree_verify(root, 12, "abc\0def\0xyz", 0x2758cbe6UL)); - udpard_fragment_free_all(udpard_fragment_seek((udpard_fragment_t*)root, 0), udpard_make_deleter(mem_frag)); - // Check the heap. - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.count_free); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); - - // Multi-frame reassembly test with defragmentation: "0123456789". - { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - // Add fragment. 
- res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 0, 2, "01"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(2, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(1, tree_count(root)); - // Add fragment. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 4, 2, "45"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(2, cov); // not extended - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(2, tree_count(root)); - // Add fragment. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 3, 2, "34"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(2, cov); // not extended - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(3, tree_count(root)); - // Intermediate check on the current state of the tree so far. 
- TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 0)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("01", fragment_at(root, 0)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(3, fragment_at(root, 1)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("34", fragment_at(root, 1)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 2)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("45", fragment_at(root, 2)->view.data, 2); - TEST_ASSERT_NULL(fragment_at(root, 3)); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.count_free); - // Add fragment. BRIDGE THE LEFT GAP, EVICT `34` FRAGMENT AS REDUNDANT. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 2, 2, "23"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(6, cov); // extended! - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(3, tree_count(root)); - // Check the updated tree state after the eviction. Fragment `34` should be gone. 
- TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 0)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("01", fragment_at(root, 0)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("23", fragment_at(root, 1)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 2)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("45", fragment_at(root, 2)->view.data, 2); - TEST_ASSERT_NULL(fragment_at(root, 3)); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(4, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(4, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); - // Add a fully-contained (redundant) fragment. Should be discarded. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 1, 1, "z"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); - TEST_ASSERT_EQUAL_size_t(6, cov); // no new information is added - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(3, tree_count(root)); // no new frames added - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); // no new allocations - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(4, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(5, alloc_payload.count_alloc); // the payload was briefly allocated and discarded - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_free); // yeah, discarded - // Add fragment. Slight overlap on the right, candidate for eviction in the future. 
- res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 5, 2, "56"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(7, cov); // extended by 1 byte - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(4, tree_count(root)); - // Check the updated tree state. - TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 0)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("01", fragment_at(root, 0)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("23", fragment_at(root, 1)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 2)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("45", fragment_at(root, 2)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(5, fragment_at(root, 3)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 3)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("56", fragment_at(root, 3)->view.data, 2); - TEST_ASSERT_NULL(fragment_at(root, 4)); - TEST_ASSERT_EQUAL_size_t(4, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(4, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(5, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(6, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_free); - // Add fragment. Completes the transfer and evicts redundant `45` and `56` fragments. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 4, 8, "456789--"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - TEST_ASSERT_EQUAL_size_t(12, cov); // extended all the way, beyond the extent. 
- TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(3, tree_count(root)); // the tree shrunk due to evictions - // Check the updated tree state. - TEST_ASSERT_EQUAL_size_t(0, fragment_at(root, 0)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 0)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("01", fragment_at(root, 0)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->offset); - TEST_ASSERT_EQUAL_size_t(2, fragment_at(root, 1)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("23", fragment_at(root, 1)->view.data, 2); - TEST_ASSERT_EQUAL_size_t(4, fragment_at(root, 2)->offset); - TEST_ASSERT_EQUAL_size_t(8, fragment_at(root, 2)->view.size); - TEST_ASSERT_EQUAL_STRING_LEN("456789--", fragment_at(root, 2)->view.data, 8); - TEST_ASSERT_NULL(fragment_at(root, 3)); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(6, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(7, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(4, alloc_payload.count_free); - // Free the tree (as in freedom). The free tree is free to manifest its own destiny. - TEST_ASSERT(fragment_tree_verify(root, 12, "0123456789--", 0xc73f3ad8UL)); - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - // Check the heap. - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(6, alloc_frag.count_alloc); - TEST_ASSERT_EQUAL_size_t(7, alloc_payload.count_alloc); - TEST_ASSERT_EQUAL_size_t(6, alloc_frag.count_free); - TEST_ASSERT_EQUAL_size_t(7, alloc_payload.count_free); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); - - // Multi-frame reassembly test with defragmentation: "abcdefghijklmnopqrst". 
Split with various MTU: - // - // MTU 4: abcd efgh ijkl mnop qrst - // 0 4 8 12 16 - // - // MTU 5: abcde fghij klmno pqrst - // 0 5 10 15 - // - // MTU 11: abcdefghijk lmnopqrst - // 0 11 - // - // Offset helper: - // abcdefghijklmnopqrst - // 01234567890123456789 - // 00000000001111111111 - { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - - // Add fragment. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 0, 5, "abcde"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(5, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); - TEST_ASSERT_NULL(fragment_at(root, 1)); - - // Add fragment. Rejected because contained by existing. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 0, 4, "abcd"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); - TEST_ASSERT_EQUAL_size_t(5, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); - TEST_ASSERT_NULL(fragment_at(root, 1)); - - // Add 2 fragments. They cover new ground with a gap but they are small, to be replaced later. 
- // Resulting state: - // 0 |abcde | - // 1 | ijkl | - // 2 | mnop | - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 8, 4, "ijkl"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 12, 4, "mnop"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(5, cov); // not extended due to a gap - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 8, 4, "ijkl")); - TEST_ASSERT(fragment_equals(fragment_at(root, 2), 12, 4, "mnop")); - TEST_ASSERT_NULL(fragment_at(root, 3)); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); - - // Add another fragment that doesn't add any new information but is accepted anyway because it is larger. - // This may enable defragmentation in the future. - // Resulting state: - // 0 |abcde | - // 1 | ijkl | - // 2 | klmno | - // 3 | mnop | - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 10, 5, "klmno"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(5, cov); // not extended due to a gap - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 8, 4, "ijkl")); - TEST_ASSERT(fragment_equals(fragment_at(root, 2), 10, 5, "klmno")); - TEST_ASSERT(fragment_equals(fragment_at(root, 3), 12, 4, "mnop")); - TEST_ASSERT_NULL(fragment_at(root, 4)); - TEST_ASSERT_EQUAL_size_t(4, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(4, alloc_payload.allocated_fragments); - - // Add another fragment that bridges the gap and allows removing ijkl. 
- // Resulting state: - // 0 |abcde | - // 1 | fghij | replaces the old 1 - // 2 | klmno | - // 3 | mnop | kept because it has 'p' - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 5, 5, "fghij"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(16, cov); // jumps to the end because the gap is covered - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 5, 5, "fghij")); - TEST_ASSERT(fragment_equals(fragment_at(root, 2), 10, 5, "klmno")); - TEST_ASSERT(fragment_equals(fragment_at(root, 3), 12, 4, "mnop")); - TEST_ASSERT_NULL(fragment_at(root, 4)); - TEST_ASSERT_EQUAL_size_t(4, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(4, alloc_payload.allocated_fragments); - - // Add the last smallest fragment. The transfer is not detected as complete because it is set to 21 bytes. - // Resulting state: - // 0 |abcde | - // 1 | fghij | replaces the old 1 - // 2 | klmno | - // 3 | mnop | kept because it has 'p' - // 4 | qrst| - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 16, 4, "qrst"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(20, cov); // updated - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 5, 5, "fghij")); - TEST_ASSERT(fragment_equals(fragment_at(root, 2), 10, 5, "klmno")); - TEST_ASSERT(fragment_equals(fragment_at(root, 3), 12, 4, "mnop")); - TEST_ASSERT(fragment_equals(fragment_at(root, 4), 16, 4, "qrst")); - TEST_ASSERT_NULL(fragment_at(root, 5)); - TEST_ASSERT_EQUAL_size_t(5, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(5, alloc_payload.allocated_fragments); - - // Send redundant fragments. State unchanged. 
- res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 4, 4, "efgh"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 5, 5, "fghij"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 0, 5, "abcde"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_rejected, res); - TEST_ASSERT_EQUAL_size_t(20, cov); // no change - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 5, "abcde")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 5, 5, "fghij")); - TEST_ASSERT(fragment_equals(fragment_at(root, 2), 10, 5, "klmno")); - TEST_ASSERT(fragment_equals(fragment_at(root, 3), 12, 4, "mnop")); - TEST_ASSERT(fragment_equals(fragment_at(root, 4), 16, 4, "qrst")); - TEST_ASSERT_NULL(fragment_at(root, 5)); - TEST_ASSERT_EQUAL_size_t(5, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(5, alloc_payload.allocated_fragments); - - // Add the first max-MTU fragment. Replaces the smaller initial fragments. 
- // Resulting state: - // 0 |abcdefghijk | replaces 0 and 1 - // 1 | klmno | kept because it has 'lmno' - // 2 | mnop | kept because it has 'p' - // 3 | qrst| - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 0, 11, "abcdefghijk"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(20, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 11, "abcdefghijk")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 10, 5, "klmno")); - TEST_ASSERT(fragment_equals(fragment_at(root, 2), 12, 4, "mnop")); - TEST_ASSERT(fragment_equals(fragment_at(root, 3), 16, 4, "qrst")); - TEST_ASSERT_NULL(fragment_at(root, 4)); - TEST_ASSERT_EQUAL_size_t(4, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(4, alloc_payload.allocated_fragments); - - // Add the last MTU 5 fragment. Replaces the last two MTU 4 fragments. - // Resulting state: - // 0 |abcdefghijk | - // 1 | klmno | kept because it has 'lmno' - // 2 | pqrst| - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 15, 5, "pqrst"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(20, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 11, "abcdefghijk")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 10, 5, "klmno")); - TEST_ASSERT(fragment_equals(fragment_at(root, 2), 15, 5, "pqrst")); - TEST_ASSERT_NULL(fragment_at(root, 3)); - TEST_ASSERT_EQUAL_size_t(3, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(3, alloc_payload.allocated_fragments); - - // Add the last max-MTU fragment. Replaces the last two fragments. 
- // Resulting state: - // 0 |abcdefghijk | - // 1 | lmnopqrst| - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 11, 9, "lmnopqrst"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(20, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 11, "abcdefghijk")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 11, 9, "lmnopqrst")); - TEST_ASSERT_NULL(fragment_at(root, 2)); - TEST_ASSERT_EQUAL_size_t(2, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.allocated_fragments); - - // Replace everything with a single huge fragment. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 0, 20, "abcdefghijklmnopqrst"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(20, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 20, "abcdefghijklmnopqrst")); - TEST_ASSERT_NULL(fragment_at(root, 1)); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); - - // One tiny boi will complete the transfer. - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base(mem_payload, 19, 2, "t-"), - 21, - 21, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - TEST_ASSERT_EQUAL_size_t(21, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT(fragment_equals(fragment_at(root, 0), 0, 20, "abcdefghijklmnopqrst")); - TEST_ASSERT(fragment_equals(fragment_at(root, 1), 19, 2, "t-")); - TEST_ASSERT_NULL(fragment_at(root, 2)); - TEST_ASSERT_EQUAL_size_t(2, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.allocated_fragments); - - // Verify the final state. - TEST_ASSERT(fragment_tree_verify(root, 21, "abcdefghijklmnopqrst-", 0xe7a60f1eUL)); - - // Cleanup. 
- udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); -} - -/// Exhaustive test for rx_fragment_tree_update with random fragmentation patterns. -/// Tests a fixed payload split into every possible non-empty substring, -/// fed in random order with possible duplicates, and verifies correct completion detection. -static void test_rx_fragment_tree_update_exhaustive(void) -{ - instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - const char payload[] = "0123456789"; - const size_t payload_length = strlen(payload); - - // Generate all possible non-empty substrings (offset, length pairs). - // For a string of length N, there are N*(N+1)/2 possible substrings. - typedef struct - { - size_t offset; - size_t length; - } substring_t; - - const size_t max_substrings = (payload_length * (payload_length + 1)) / 2; - substring_t substrings[max_substrings]; - size_t substring_count = 0; - - for (size_t offset = 0; offset < payload_length; offset++) { - for (size_t length = 1; length <= (payload_length - offset); length++) { - substrings[substring_count].offset = offset; - substrings[substring_count].length = length; - substring_count++; - } - } - TEST_ASSERT_EQUAL_size_t(max_substrings, substring_count); - - // Run multiple randomized test iterations to explore different orderings. 
- // We use fewer iterations to keep test time reasonable. - const size_t num_iterations = 10000; - - for (size_t iteration = 0; iteration < num_iterations; iteration++) { - udpard_tree_t* root = NULL; - size_t cov = 0; - - // Create a randomized schedule of fragments to feed. - // We'll randomly select which substrings to use and in what order. - // Some may be duplicated, some may be omitted initially. - - // Track which bytes have been covered by submitted fragments. - bool byte_covered[10] = { false }; - bool transfer_complete = false; - - // Shuffle the substring indices to get a random order. - size_t schedule[substring_count]; - for (size_t i = 0; i < substring_count; i++) { - schedule[i] = i; - } - - // Fisher-Yates shuffle - for (size_t i = substring_count - 1; i > 0; i--) { - const size_t j = (size_t)(rand() % (int)(i + 1)); - const size_t tmp = schedule[i]; - schedule[i] = schedule[j]; - schedule[j] = tmp; - } - - // Feed fragments in the shuffled order. - // We stop after we've seen every byte at least once. - for (size_t sched_idx = 0; sched_idx < substring_count; sched_idx++) { - const substring_t sub = substrings[schedule[sched_idx]]; - - // Allocate and copy the substring payload. - char* const frag_data = mem_res_alloc(mem_payload, sub.length); - memcpy(frag_data, payload + sub.offset, sub.length); - - const rx_frame_base_t frame = { .offset = sub.offset, - .payload = { .data = frag_data, .size = sub.length }, - .origin = { .data = frag_data, .size = sub.length } }; - - const rx_fragment_tree_update_result_t res = - rx_fragment_tree_update(&root, mem_frag, del_payload, frame, payload_length, payload_length, &cov); - - // Update our tracking of covered bytes. - for (size_t i = 0; i < sub.length; i++) { - byte_covered[sub.offset + i] = true; - } - - // Check if all bytes are covered. 
- bool all_covered = true; - for (size_t i = 0; i < payload_length; i++) { - if (!byte_covered[i]) { - all_covered = false; - break; - } - } - if (all_covered) { - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - transfer_complete = true; - break; - } - TEST_ASSERT((res == rx_fragment_tree_accepted) || (res == rx_fragment_tree_rejected)); - } - TEST_ASSERT_TRUE(transfer_complete); - TEST_ASSERT_EQUAL_size_t(payload_length, cov); - - // Verify the final state. - TEST_ASSERT(fragment_tree_verify(root, 10, "0123456789", 0x280c069eUL)); - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - } - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - - // Test with duplicates: feed the same fragments multiple times. - for (size_t iteration = 0; iteration < num_iterations; iteration++) { - udpard_tree_t* root = NULL; - size_t cov = 0; - - bool byte_covered[10] = { false }; - bool transfer_complete = false; - - // Create a schedule with duplicates. - const size_t schedule_length = substring_count * 3; // 3x duplication factor - size_t schedule[schedule_length]; - for (size_t i = 0; i < schedule_length; i++) { - schedule[i] = (size_t)(rand() % (int)substring_count); - } - - // Feed fragments with duplicates. 
- for (size_t sched_idx = 0; sched_idx < schedule_length; sched_idx++) { - const substring_t sub = substrings[schedule[sched_idx]]; - - char* const frag_data = mem_res_alloc(mem_payload, sub.length); - memcpy(frag_data, payload + sub.offset, sub.length); - - const rx_frame_base_t frame = { .offset = sub.offset, - .payload = { .data = frag_data, .size = sub.length }, - .origin = { .data = frag_data, .size = sub.length } }; - - const rx_fragment_tree_update_result_t res = - rx_fragment_tree_update(&root, mem_frag, del_payload, frame, payload_length, payload_length, &cov); - - // Update tracking. - for (size_t i = 0; i < sub.length; i++) { - byte_covered[sub.offset + i] = true; - } - - // Check completion. - bool all_covered = true; - for (size_t i = 0; i < payload_length; i++) { - if (!byte_covered[i]) { - all_covered = false; - break; - } - } - if (all_covered) { - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - transfer_complete = true; - break; - } - TEST_ASSERT((res == rx_fragment_tree_accepted) || (res == rx_fragment_tree_rejected)); - } - TEST_ASSERT_TRUE(transfer_complete); - TEST_ASSERT_EQUAL_size_t(payload_length, cov); - - // Verify the final state. 
- TEST_ASSERT(fragment_tree_verify(root, 10, "0123456789", 0x280c069eUL)); - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - } - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); -} - -static void test_rx_fragment_tree_oom(void) -{ - instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - // Test OOM during fragment allocation - { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - - // Set fragment allocation limit to zero - fragment allocation will fail - alloc_frag.limit_fragments = 0; - - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 0, "abc"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_oom, res); - TEST_ASSERT_EQUAL_size_t(0, cov); - TEST_ASSERT_NULL(root); - // Payload should have been freed - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_alloc); // payload was allocated by make_frame_base_str - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); // but freed due to OOM - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); - - // Test OOM during multi-fragment reassembly - { - udpard_tree_t* root = NULL; - size_t cov = 0; 
- rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - - // First fragment succeeds - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 0, "abc"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(4, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(1, tree_count(root)); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); - - // Second fragment fails due to OOM - alloc_frag.limit_fragments = 1; // Already used the limit - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 4, "def"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_oom, res); - TEST_ASSERT_EQUAL_size_t(4, cov); // Coverage unchanged - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(1, tree_count(root)); // Still only one fragment - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.count_alloc); // second payload was allocated - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.count_free); // but freed due to OOM - - // Reset limit and add the second fragment successfully - alloc_frag.limit_fragments = SIZE_MAX; - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 4, "def"), - 100, - 10, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_accepted, res); - TEST_ASSERT_EQUAL_size_t(8, cov); - TEST_ASSERT_EQUAL(2, tree_count(root)); - TEST_ASSERT_EQUAL_size_t(2, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(2, alloc_payload.allocated_fragments); - - // Cleanup - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - } 
- instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); - - // Test OOM recovery: fragment allocation fails, then succeeds on retry - { - udpard_tree_t* root = NULL; - size_t cov = 0; - rx_fragment_tree_update_result_t res = rx_fragment_tree_rejected; - - // First attempt fails - alloc_frag.limit_fragments = 0; - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 0, "abcdef"), - 7, - 3, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_oom, res); - TEST_ASSERT_EQUAL_size_t(0, cov); - TEST_ASSERT_NULL(root); - - // Second attempt succeeds - alloc_frag.limit_fragments = SIZE_MAX; - res = rx_fragment_tree_update(&root, // - mem_frag, - del_payload, - make_frame_base_str(mem_payload, 0, "abcdef"), - 7, - 3, - &cov); - TEST_ASSERT_EQUAL(rx_fragment_tree_done, res); - TEST_ASSERT_EQUAL_size_t(7, cov); - TEST_ASSERT_NOT_NULL(root); - TEST_ASSERT_EQUAL(1, tree_count(root)); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_payload.allocated_fragments); - - // Cleanup - udpard_fragment_free_all((udpard_fragment_t*)root, udpard_make_deleter(mem_frag)); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_payload); -} - -// --------------------------------------------- RX SLOT --------------------------------------------- - -static void test_rx_slot_update(void) -{ - instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - - instrumented_allocator_t alloc_slot = { 0 }; - instrumented_allocator_new(&alloc_slot); - const udpard_mem_t mem_slot = instrumented_allocator_make_resource(&alloc_slot); - - instrumented_allocator_t alloc_payload = { 0 }; - 
instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - uint64_t errors_oom = 0; - - // Test 1: Initialize slot from idle state. - { - rx_slot_t* slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - - rx_frame_t frame = { 0 }; - frame.base = make_frame_base(mem_payload, 0, 5, "hello"); - frame.base.crc = 0x9a71bb4cUL; // CRC32C for "hello" - frame.meta.transfer_id = 123; - frame.meta.transfer_payload_size = 5; - - const udpard_us_t ts = 1000; - - const rx_slot_update_result_t res = rx_slot_update(slot, ts, mem_frag, del_payload, &frame, 5, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_complete, res); - TEST_ASSERT_EQUAL(123, slot->transfer_id); - TEST_ASSERT_EQUAL(ts, slot->ts_min); - TEST_ASSERT_EQUAL(ts, slot->ts_max); - TEST_ASSERT_EQUAL_size_t(5, slot->covered_prefix); - TEST_ASSERT_EQUAL(0, errors_oom); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload); - - // Test 2: Multi-frame transfer with timestamp updates. 
- { - rx_slot_t* slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - - rx_frame_t frame1 = { 0 }; - frame1.base = make_frame_base(mem_payload, 0, 3, "abc"); - frame1.base.crc = 0x12345678; - frame1.meta.transfer_id = 456; - frame1.meta.transfer_payload_size = 10; - - const udpard_us_t ts1 = 2000; - const rx_slot_update_result_t res1 = rx_slot_update(slot, ts1, mem_frag, del_payload, &frame1, 10, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_incomplete, res1); - TEST_ASSERT_EQUAL(ts1, slot->ts_min); - TEST_ASSERT_EQUAL(ts1, slot->ts_max); - TEST_ASSERT_EQUAL_size_t(3, slot->covered_prefix); - TEST_ASSERT_EQUAL(3, slot->crc_end); - TEST_ASSERT_EQUAL(0x12345678, slot->crc); - - rx_frame_t frame2 = { 0 }; - frame2.base = make_frame_base(mem_payload, 5, 3, "def"); - frame2.base.crc = 0x87654321; - frame2.meta.transfer_id = 456; - frame2.meta.transfer_payload_size = 10; - - const udpard_us_t ts2 = 3000; - const rx_slot_update_result_t res2 = rx_slot_update(slot, ts2, mem_frag, del_payload, &frame2, 10, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_incomplete, res2); - TEST_ASSERT_EQUAL(ts1, slot->ts_min); - TEST_ASSERT_EQUAL(ts2, slot->ts_max); - TEST_ASSERT_EQUAL_size_t(3, slot->covered_prefix); - TEST_ASSERT_EQUAL(8, slot->crc_end); - TEST_ASSERT_EQUAL(0x87654321, slot->crc); - - rx_frame_t frame3 = { 0 }; - frame3.base = make_frame_base(mem_payload, 3, 2, "XX"); - frame3.base.crc = 0xAABBCCDD; - frame3.meta.transfer_id = 456; - frame3.meta.transfer_payload_size = 10; - - const udpard_us_t ts3 = 1500; - const rx_slot_update_result_t res3 = rx_slot_update(slot, ts3, mem_frag, del_payload, &frame3, 10, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_incomplete, res3); - TEST_ASSERT_EQUAL(ts3, slot->ts_min); - TEST_ASSERT_EQUAL(ts2, slot->ts_max); - TEST_ASSERT_EQUAL_size_t(8, slot->covered_prefix); - TEST_ASSERT_EQUAL(8, slot->crc_end); - TEST_ASSERT_EQUAL(0x87654321, slot->crc); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - } - 
instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload); - - // Test 3: OOM handling. - { - rx_slot_t* slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - errors_oom = 0; - - alloc_frag.limit_fragments = 0; - - rx_frame_t frame = { 0 }; - frame.base = make_frame_base(mem_payload, 0, 5, "hello"); - frame.base.crc = 0x9a71bb4cUL; // CRC32C for "hello" - frame.meta.transfer_id = 789; - frame.meta.transfer_payload_size = 5; - - const rx_slot_update_result_t res = rx_slot_update(slot, 5000, mem_frag, del_payload, &frame, 5, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_incomplete, res); - TEST_ASSERT_EQUAL(1, errors_oom); - TEST_ASSERT_EQUAL_size_t(0, slot->covered_prefix); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload); - - // Test 4: Malformed transfer handling (CRC failure). - { - rx_slot_t* slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - errors_oom = 0; - - rx_frame_t frame = { 0 }; - frame.base = make_frame_base(mem_payload, 0, 4, "test"); - frame.base.crc = 0xDEADBEEF; // Incorrect CRC - frame.meta.transfer_id = 999; - frame.meta.transfer_payload_size = 4; - - const rx_slot_update_result_t res = rx_slot_update(slot, 6000, mem_frag, del_payload, &frame, 4, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_failure, res); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload); - - // Test 5: Successful completion with correct CRC. - { - rx_slot_t* slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - errors_oom = 0; - - // CRC value computed from "test". 
- const uint32_t correct_crc = 0x86a072c0UL; - - rx_frame_t frame = { 0 }; - frame.base = make_frame_base(mem_payload, 0, 4, "test"); - frame.base.crc = correct_crc; - frame.meta.transfer_id = 1111; - frame.meta.transfer_payload_size = 4; - - const rx_slot_update_result_t res = rx_slot_update(slot, 7000, mem_frag, del_payload, &frame, 4, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_complete, res); - TEST_ASSERT_EQUAL(0, errors_oom); - TEST_ASSERT_EQUAL_size_t(4, slot->covered_prefix); - TEST_ASSERT_NOT_NULL(slot->fragments); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload); - - // Test 6: CRC end update rules. - { - rx_slot_t* slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - errors_oom = 0; - - rx_frame_t frame1 = { 0 }; - frame1.base = make_frame_base(mem_payload, 5, 5, "world"); - frame1.base.crc = 0xAAAAAAAA; - frame1.meta.transfer_id = 2222; - frame1.meta.transfer_payload_size = 20; - - const rx_slot_update_result_t res1 = - rx_slot_update(slot, 8000, mem_frag, del_payload, &frame1, 20, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_incomplete, res1); - TEST_ASSERT_EQUAL(10, slot->crc_end); - TEST_ASSERT_EQUAL(0xAAAAAAAA, slot->crc); - - rx_frame_t frame2 = { 0 }; - frame2.base = make_frame_base(mem_payload, 0, 3, "abc"); - frame2.base.crc = 0xBBBBBBBB; - frame2.meta.transfer_id = 2222; - frame2.meta.transfer_payload_size = 20; - - const rx_slot_update_result_t res2 = - rx_slot_update(slot, 8100, mem_frag, del_payload, &frame2, 20, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_incomplete, res2); - TEST_ASSERT_EQUAL(10, slot->crc_end); - TEST_ASSERT_EQUAL(0xAAAAAAAA, slot->crc); - - rx_frame_t frame3 = { 0 }; - frame3.base = make_frame_base(mem_payload, 10, 5, "hello"); - frame3.base.crc = 0xCCCCCCCC; - frame3.meta.transfer_id = 2222; - frame3.meta.transfer_payload_size = 20; - - const 
rx_slot_update_result_t res3 = - rx_slot_update(slot, 8200, mem_frag, del_payload, &frame3, 20, &errors_oom); - - TEST_ASSERT_EQUAL(rx_slot_incomplete, res3); - TEST_ASSERT_EQUAL(15, slot->crc_end); - TEST_ASSERT_EQUAL(0xCCCCCCCC, slot->crc); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - } - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload); - - // Test 7: Inconsistent frame fields. - { - errors_oom = 0; - - // Total size mismatch. - rx_slot_t* slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - - rx_frame_t frame1 = { 0 }; - frame1.base = make_frame_base(mem_payload, 0, 5, "hello"); - frame1.base.crc = 0x12345678; - frame1.meta.transfer_id = 3333; - frame1.meta.transfer_payload_size = 20; - frame1.meta.priority = udpard_prio_high; - - const rx_slot_update_result_t res1 = - rx_slot_update(slot, 9000, mem_frag, del_payload, &frame1, 20, &errors_oom); - TEST_ASSERT_EQUAL(rx_slot_incomplete, res1); - - rx_frame_t frame2 = { 0 }; - frame2.base = make_frame_base(mem_payload, 5, 5, "world"); - frame2.base.crc = 0xABCDEF00; - frame2.meta.transfer_id = 3333; - frame2.meta.transfer_payload_size = 25; - frame2.meta.priority = udpard_prio_high; - - const rx_slot_update_result_t res2 = - rx_slot_update(slot, 9100, mem_frag, del_payload, &frame2, 25, &errors_oom); - TEST_ASSERT_EQUAL(rx_slot_failure, res2); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - - // Priority mismatch. 
- slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - - rx_frame_t frame3 = { 0 }; - frame3.base = make_frame_base(mem_payload, 0, 5, "test1"); - frame3.base.crc = 0x11111111; - frame3.meta.transfer_id = 4444; - frame3.meta.transfer_payload_size = 30; - frame3.meta.priority = udpard_prio_low; - - const rx_slot_update_result_t res3 = - rx_slot_update(slot, 9200, mem_frag, del_payload, &frame3, 30, &errors_oom); - TEST_ASSERT_EQUAL(rx_slot_incomplete, res3); - - rx_frame_t frame4 = { 0 }; - frame4.base = make_frame_base(mem_payload, 5, 5, "test2"); - frame4.base.crc = 0x22222222; - frame4.meta.transfer_id = 4444; - frame4.meta.transfer_payload_size = 30; - frame4.meta.priority = udpard_prio_high; - - const rx_slot_update_result_t res4 = - rx_slot_update(slot, 9300, mem_frag, del_payload, &frame4, 30, &errors_oom); - TEST_ASSERT_EQUAL(rx_slot_failure, res4); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - - // Total size and priority mismatch. - slot = rx_slot_new(mem_slot); - TEST_ASSERT_NOT_NULL(slot); - - rx_frame_t frame5 = { 0 }; - frame5.base = make_frame_base(mem_payload, 0, 5, "test3"); - frame5.base.crc = 0x33333333; - frame5.meta.transfer_id = 5555; - frame5.meta.transfer_payload_size = 40; - frame5.meta.priority = udpard_prio_nominal; - - const rx_slot_update_result_t res5 = - rx_slot_update(slot, 9400, mem_frag, del_payload, &frame5, 40, &errors_oom); - TEST_ASSERT_EQUAL(rx_slot_incomplete, res5); - - rx_frame_t frame6 = { 0 }; - frame6.base = make_frame_base(mem_payload, 5, 5, "test4"); - frame6.base.crc = 0x44444444; - frame6.meta.transfer_id = 5555; - frame6.meta.transfer_payload_size = 50; - frame6.meta.priority = udpard_prio_fast; - - const rx_slot_update_result_t res6 = - rx_slot_update(slot, 9500, mem_frag, del_payload, &frame6, 50, &errors_oom); - TEST_ASSERT_EQUAL(rx_slot_failure, res6); - - rx_slot_destroy(&slot, mem_frag, mem_slot); - TEST_ASSERT_NULL(slot); - } - instrumented_allocator_reset(&alloc_frag); - 
instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload); - - // Verify no memory leaks. - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_slot.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); -} - -// --------------------------------------------- RX SESSION --------------------------------------------- - -// Captures ack transfers emitted into the TX pipelines. typedef struct { - udpard_prio_t priority; - uint64_t transfer_id; - udpard_udpip_ep_t destination; -} ack_tx_info_t; - -typedef struct -{ - instrumented_allocator_t alloc_transfer; - instrumented_allocator_t alloc_payload; - udpard_tx_t tx; - ack_tx_info_t captured[16]; - size_t captured_count; -} tx_fixture_t; - -static bool tx_capture_ack_subject(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - (void)tx; - (void)ejection; - return true; // ACKs are P2P, subject eject should not be called for them -} - -static bool tx_capture_ack_p2p(udpard_tx_t* const tx, - udpard_tx_ejection_t* const ejection, - const udpard_udpip_ep_t destination) -{ - tx_fixture_t* const self = (tx_fixture_t*)tx->user; - if ((self == NULL) || (self->captured_count >= (sizeof(self->captured) / sizeof(self->captured[0])))) { - return false; - } - udpard_tx_refcount_inc(ejection->datagram); - meta_t meta = { 0 }; - uint32_t frame_index = 0; - uint32_t frame_offset = 0; - uint32_t prefix_crc = 0; - udpard_bytes_t payload = { 0 }; - const bool ok = header_deserialize( - (udpard_bytes_mut_t){ .size = ejection->datagram.size, .data = (void*)ejection->datagram.data }, - &meta, - &frame_index, - &frame_offset, - &prefix_crc, - &payload); - if (ok && (frame_index == 0U) && (frame_offset == 0U) && (meta.kind == frame_ack) && (payload.size == 0U)) { - ack_tx_info_t* const info = &self->captured[self->captured_count++]; - info->priority = meta.priority; - info->transfer_id = meta.transfer_id; - info->destination 
= destination; - } - udpard_tx_refcount_dec(ejection->datagram); - return true; -} - -static void tx_fixture_init(tx_fixture_t* const self, const uint64_t uid, const size_t capacity) -{ - instrumented_allocator_new(&self->alloc_transfer); - instrumented_allocator_new(&self->alloc_payload); - self->captured_count = 0; - udpard_tx_mem_resources_t mem = { 0 }; - mem.transfer = instrumented_allocator_make_resource(&self->alloc_transfer); - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&self->alloc_payload); - } - static const udpard_tx_vtable_t vtb = { .eject_subject = &tx_capture_ack_subject, - .eject_p2p = &tx_capture_ack_p2p }; - TEST_ASSERT(udpard_tx_new(&self->tx, uid, 1U, capacity, mem, &vtb)); - self->tx.user = self; -} - -static void tx_fixture_free(tx_fixture_t* const self) -{ - udpard_tx_free(&self->tx); - TEST_ASSERT_EQUAL(0, self->alloc_transfer.allocated_fragments); - TEST_ASSERT_EQUAL(0, self->alloc_payload.allocated_fragments); - instrumented_allocator_reset(&self->alloc_transfer); - instrumented_allocator_reset(&self->alloc_payload); -} - -typedef struct -{ - udpard_rx_t* rx; - udpard_rx_port_t* port; - struct - { - /// The most recently received transfer is at index #0; older transfers follow. - /// The history is needed to allow batch ejection when multiple interned transfers are released. - /// There cannot be more than RX_SLOT_COUNT transfers in the history because that is the maximum - /// number of concurrent transfers that can be in-flight for a given session. - udpard_rx_transfer_t history[RX_SLOT_COUNT]; - uint64_t count; - } message; - struct - { - ack_tx_info_t last; - uint64_t count; - } ack; -} callback_result_t; + size_t count; + uint64_t transfer_id; + size_t payload_size; + byte_t payload[256]; + udpard_remote_t remote; +} capture_t; +// Captures one transfer and frees its payload tree immediately. 
static void on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) { - printf("on_message: ts=%lld transfer_id=%llu payload_size_stored=%zu\n", - (long long)transfer.timestamp, - (unsigned long long)transfer.transfer_id, - transfer.payload_size_stored); - callback_result_t* const cb_result = (callback_result_t* const)rx->user; - cb_result->rx = rx; - cb_result->port = port; - for (size_t i = RX_SLOT_COUNT - 1; i > 0; i--) { - cb_result->message.history[i] = cb_result->message.history[i - 1]; + capture_t* const cap = (capture_t*)rx->user; + TEST_ASSERT_NOT_NULL(cap); + cap->count++; + cap->transfer_id = transfer.transfer_id; + cap->payload_size = transfer.payload_size_stored; + cap->remote = transfer.remote; + if (transfer.payload_size_stored > 0U) { + const udpard_fragment_t* cursor = transfer.payload; + TEST_ASSERT_EQUAL_size_t(transfer.payload_size_stored, + udpard_fragment_gather(&cursor, 0, transfer.payload_size_stored, cap->payload)); } - cb_result->message.history[0] = transfer; - cb_result->message.count++; + udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); } -static const udpard_rx_port_vtable_t callbacks = { .on_message = &on_message }; - -/// Checks that ack transfers are emitted into the TX queues. 
-static void test_rx_ack_enqueued(void) -{ - instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session }; - - tx_fixture_t tx_fix = { 0 }; - tx_fixture_init(&tx_fix, 0xBADC0FFEE0DDF00DULL, 8); - - udpard_rx_t rx; - udpard_rx_new(&rx, &tx_fix.tx); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; - - udpard_rx_port_t port; - const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; - const size_t extent = 1000; - TEST_ASSERT(udpard_rx_port_new(&port, extent, rx_mem, &callbacks)); - rx_session_factory_args_t fac_args = { - .owner = &port, - .sessions_by_animation = &rx.list_session_by_animation, - .remote_uid = remote_uid, - .now = 0, +static const udpard_rx_port_vtable_t callbacks = { .on_message = on_message }; + +// Builds a valid datagram in allocator-backed memory. 
+static udpard_bytes_mut_t make_datagram(const udpard_mem_t mem, + const udpard_prio_t prio, + const uint64_t transfer_id, + const uint64_t sender_uid, + const size_t offset, + const void* const payload, + const size_t payload_size) +{ + const size_t total_size = HEADER_SIZE_BYTES + payload_size; + byte_t* const data = mem_res_alloc(mem, total_size); + TEST_ASSERT_NOT_NULL(data); + if (payload_size > 0U) { + (void)memcpy(&data[HEADER_SIZE_BYTES], payload, payload_size); + } + const meta_t meta = { + .priority = prio, + .transfer_payload_size = (uint32_t)(offset + payload_size), + .transfer_id = transfer_id, + .sender_uid = sender_uid, }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - - meta_t meta = { .priority = udpard_prio_high, - .kind = frame_msg_reliable, - .transfer_payload_size = 5, - .transfer_id = 77, - .sender_uid = remote_uid }; - udpard_us_t now = 0; - const udpard_udpip_ep_t ep0 = { .ip = 0x0A000001, .port = 0x1234 }; - now += 100; - rx_session_update(ses, &rx, now, ep0, make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - udpard_tx_poll(&tx_fix.tx, now, (uint_fast8_t)(1U << 0U)); - cb_result.ack.count = tx_fix.captured_count; - if (tx_fix.captured_count > 0) { - cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; - } - TEST_ASSERT(cb_result.ack.count >= 1); - TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.transfer_id); - TEST_ASSERT_EQUAL_UINT32(ep0.ip, cb_result.ack.last.destination.ip); - TEST_ASSERT_EQUAL_UINT16(ep0.port, cb_result.ack.last.destination.port); - - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - cb_result.message.history[0].payload = NULL; - 
cb_result.message.history[0].payload = NULL; - - const udpard_udpip_ep_t ep1 = { .ip = 0x0A000002, .port = 0x5678 }; - now += 100; - rx_session_update(ses, &rx, now, ep1, make_frame_ptr(meta, mem_payload, "hello", 0, 5), del_payload, 1); - udpard_tx_poll(&tx_fix.tx, now, (uint_fast8_t)(1U << 1U)); - cb_result.ack.count = tx_fix.captured_count; - if (tx_fix.captured_count > 0) { - cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; - } - TEST_ASSERT(cb_result.ack.count >= 2); // acks on interfaces 0 and 1 - TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.transfer_id); - - udpard_rx_port_free(&rx, &port); - tx_fixture_free(&tx_fix); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); + const uint32_t prefix_crc = crc_full(offset + payload_size, &data[HEADER_SIZE_BYTES - offset]); + (void)header_serialize(data, meta, (uint32_t)offset, prefix_crc); + return (udpard_bytes_mut_t){ .size = total_size, .data = data }; } -static void test_rx_session_unordered(void) -{ - // Memory and rx for P2P unordered session. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; - - udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, SIZE_MAX, rx_mem, &callbacks)); - - udpard_us_t now = 0; - const uint64_t remote_uid = 0xA1B2C3D4E5F60718ULL; - rx_session_factory_args_t fac_args = { - .owner = &port, - .sessions_by_animation = &rx.list_session_by_animation, - .remote_uid = remote_uid, - .now = now, +static void test_rx_single_frame(void) +{ + // Prepare RX and allocators. 
+ instrumented_allocator_t alloc_rx_frag = { 0 }; + instrumented_allocator_t alloc_rx_ses = { 0 }; + instrumented_allocator_t alloc_dgram = { 0 }; + instrumented_allocator_new(&alloc_rx_frag); + instrumented_allocator_new(&alloc_rx_ses); + instrumented_allocator_new(&alloc_dgram); + const udpard_rx_mem_resources_t rx_mem = { + .session = instrumented_allocator_make_resource(&alloc_rx_ses), + .slot = instrumented_allocator_make_resource(&alloc_rx_ses), + .fragment = instrumented_allocator_make_resource(&alloc_rx_frag), }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - - // Single-frame transfer is ejected immediately. - meta_t meta = { .priority = udpard_prio_high, - .kind = frame_msg_best, - .transfer_payload_size = 5, - .transfer_id = 100, - .sender_uid = remote_uid }; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "hello", 0, 5), - del_payload, - 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(100, cb_result.message.history[0].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 5, "hello", 5)); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - - // Out-of-order arrivals are accepted. 
- meta.transfer_id = 103; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, - make_frame_ptr(meta, mem_payload, "tid103", 0, 6), - del_payload, - 1); - TEST_ASSERT_EQUAL(2, cb_result.message.count); - TEST_ASSERT_EQUAL(103, cb_result.message.history[0].transfer_id); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - - meta.transfer_id = 102; - now += 500; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x9999 }, - make_frame_ptr(meta, mem_payload, "tid102", 0, 6), - del_payload, - 0); - TEST_ASSERT_EQUAL(3, cb_result.message.count); - TEST_ASSERT_EQUAL(102, cb_result.message.history[0].transfer_id); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - - // Duplicate is ignored. - meta.transfer_id = 103; - now += 100; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, - make_frame_ptr(meta, mem_payload, "dup103", 0, 6), - del_payload, - 1); - TEST_ASSERT_EQUAL(3, cb_result.message.count); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - - // Multi-frame transfer completes once all pieces arrive. 
- meta.transfer_id = 200; - meta.transfer_payload_size = 10; - meta.priority = udpard_prio_fast; - meta.kind = frame_msg_reliable; - now += 500; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, - make_frame_ptr(meta, mem_payload, "0123456789", 5, 5), - del_payload, - 1); - TEST_ASSERT_EQUAL(3, cb_result.message.count); - TEST_ASSERT_EQUAL(1, alloc_frag.allocated_fragments); - now += 200; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - make_frame_ptr(meta, mem_payload, "0123456789", 0, 5), - del_payload, - 0); - TEST_ASSERT(cb_result.message.count >= 1); - TEST_ASSERT_EQUAL(200, cb_result.message.history[0].transfer_id); - TEST_ASSERT(transfer_payload_verify(&cb_result.message.history[0], 10, "0123456789", 10)); - TEST_ASSERT_EQUAL(0x0A000001, cb_result.message.history[0].remote.endpoints[0].ip); - TEST_ASSERT_EQUAL(0x0A000002, cb_result.message.history[0].remote.endpoints[1].ip); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - - udpard_rx_port_free(&rx, &port); - TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); -} - -static void test_rx_session_unordered_reject_old(void) -{ - // Memory and rx with TX for ack replay. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session }; - - tx_fixture_t tx_fix = { 0 }; - tx_fixture_init(&tx_fix, 0xF00DCAFEF00DCAFEULL, 4); - udpard_rx_t rx; - udpard_rx_new(&rx, &tx_fix.tx); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; + const udpard_mem_t dgram_mem = instrumented_allocator_make_resource(&alloc_dgram); + const udpard_deleter_t dgram_del = instrumented_allocator_make_deleter(&alloc_dgram); + // Create RX and one normal port. 
+ capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, SIZE_MAX, rx_mem, &callbacks)); - - udpard_us_t now = 0; - const uint64_t remote_uid = 0x0123456789ABCDEFULL; - rx_session_factory_args_t fac_args = { - .owner = &port, - .sessions_by_animation = &rx.list_session_by_animation, - .remote_uid = remote_uid, - .now = now, - }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - - meta_t meta = { .priority = udpard_prio_fast, - .kind = frame_msg_best, - .transfer_payload_size = 3, - .transfer_id = 10, - .sender_uid = remote_uid }; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A00000A, .port = 0x0A00 }, - make_frame_ptr(meta, mem_payload, "old", 0, 3), - del_payload, - 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(10, cb_result.message.history[0].transfer_id); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - - // Jump far ahead then report the old transfer again. 
- meta.transfer_id = 2050; - meta.transfer_payload_size = 4; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A00000B, .port = 0x0B00 }, - make_frame_ptr(meta, mem_payload, "jump", 0, 4), - del_payload, - 1); - TEST_ASSERT_EQUAL(2, cb_result.message.count); - TEST_ASSERT_EQUAL(2050, cb_result.message.history[0].transfer_id); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - - meta.transfer_id = 10; - meta.transfer_payload_size = 3; - meta.kind = frame_msg_reliable; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A00000A, .port = 0x0A00 }, - make_frame_ptr(meta, mem_payload, "dup", 0, 3), - del_payload, - 0); - TEST_ASSERT_EQUAL(2, cb_result.message.count); - udpard_tx_poll(&tx_fix.tx, now, UDPARD_IFACE_BITMAP_ALL); - cb_result.ack.count = tx_fix.captured_count; - if (tx_fix.captured_count > 0) { - cb_result.ack.last = tx_fix.captured[tx_fix.captured_count - 1U]; - } - TEST_ASSERT_GREATER_OR_EQUAL_UINT64(1, cb_result.ack.count); - TEST_ASSERT_EQUAL_UINT64(10, cb_result.ack.last.transfer_id); - TEST_ASSERT_EQUAL_UINT64(meta.transfer_id, cb_result.ack.last.transfer_id); - + udpard_rx_new(&rx); + rx.user = ∩ + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, rx_mem, &callbacks)); + + // Push one valid single-frame transfer. + static const byte_t payload[] = { 1, 2, 3, 4, 5 }; + const udpard_bytes_mut_t dgram = + make_datagram(dgram_mem, udpard_prio_high, 42U, 0x1122334455667788ULL, 0U, payload, sizeof(payload)); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 1000, (udpard_udpip_ep_t){ .ip = 0x0A000001U, .port = 7000U }, dgram, dgram_del, 0U)); + udpard_rx_poll(&rx, 1001); + + // Verify callback output and no memory leaks. 
+ TEST_ASSERT_EQUAL_size_t(1, cap.count); + TEST_ASSERT_EQUAL_UINT64(42U, cap.transfer_id); + TEST_ASSERT_EQUAL_size_t(sizeof(payload), cap.payload_size); + TEST_ASSERT_EQUAL_MEMORY(payload, cap.payload, sizeof(payload)); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); + + // Tear down and validate allocator state. udpard_rx_port_free(&rx, &port); - tx_fixture_free(&tx_fix); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); + TEST_ASSERT_EQUAL_size_t(0, alloc_rx_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_rx_ses.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); + instrumented_allocator_reset(&alloc_rx_frag); + instrumented_allocator_reset(&alloc_rx_ses); + instrumented_allocator_reset(&alloc_dgram); } -static void test_rx_session_unordered_duplicates(void) -{ - // Unordered session accepts earlier arrivals but rejects duplicates. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; - - udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, SIZE_MAX, rx_mem, &callbacks)); - - udpard_us_t now = 0; - const uint64_t remote_uid = 0xAABBCCDDEEFF0011ULL; - rx_session_factory_args_t fac_args = { - .owner = &port, - .sessions_by_animation = &rx.list_session_by_animation, - .remote_uid = remote_uid, - .now = now, +static void test_rx_duplicate_rejected_and_freed(void) +{ + // Prepare RX and allocators. 
+ instrumented_allocator_t alloc_rx_frag = { 0 }; + instrumented_allocator_t alloc_rx_ses = { 0 }; + instrumented_allocator_t alloc_dgram = { 0 }; + instrumented_allocator_new(&alloc_rx_frag); + instrumented_allocator_new(&alloc_rx_ses); + instrumented_allocator_new(&alloc_dgram); + const udpard_rx_mem_resources_t rx_mem = { + .session = instrumented_allocator_make_resource(&alloc_rx_ses), + .slot = instrumented_allocator_make_resource(&alloc_rx_ses), + .fragment = instrumented_allocator_make_resource(&alloc_rx_frag), }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - - meta_t meta = { .priority = udpard_prio_nominal, - .kind = frame_msg_best, - .transfer_payload_size = 2, - .transfer_id = 5, - .sender_uid = remote_uid }; - now += 1000; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x11223344, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "aa", 0, 2), - del_payload, - 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(5, cb_result.message.history[0].transfer_id); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - - // Duplicate dropped. - now += 10; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x11223344, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "bb", 0, 2), - del_payload, - 0); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - - udpard_rx_port_free(&rx, &port); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); -} - -static void test_rx_session_malformed(void) -{ - // Malformed transfer increments error counter and drops slot. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - instrumented_allocator_t alloc_slot = { 0 }; - instrumented_allocator_new(&alloc_slot); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_slot = instrumented_allocator_make_resource(&alloc_slot); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_slot }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; + const udpard_mem_t dgram_mem = instrumented_allocator_make_resource(&alloc_dgram); + const udpard_deleter_t dgram_del = instrumented_allocator_make_deleter(&alloc_dgram); + // Create RX and one normal port. 
+ capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, 64, rx_mem, &callbacks)); - - const uint64_t remote_uid = 0xABCDEF1234567890ULL; - rx_session_factory_args_t fac_args = { - .owner = &port, - .sessions_by_animation = &rx.list_session_by_animation, - .remote_uid = remote_uid, - .now = 0, - }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - - meta_t meta = { .priority = udpard_prio_nominal, - .kind = frame_msg_best, - .transfer_payload_size = 8, - .transfer_id = 1, - .sender_uid = remote_uid }; - udpard_us_t now = 0; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "ABCDEFGH", 0, 4), - del_payload, - 0); - TEST_ASSERT_EQUAL_UINT64(0, rx.errors_transfer_malformed); - TEST_ASSERT_EQUAL(0, cb_result.message.count); - TEST_ASSERT_EQUAL_size_t(1, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(1, alloc_slot.allocated_fragments); - - meta.priority = udpard_prio_high; - now += 10; - rx_session_update(ses, - &rx, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1111 }, - make_frame_ptr(meta, mem_payload, "ABCDEFGH", 4, 4), - del_payload, - 0); - TEST_ASSERT_EQUAL_UINT64(1, rx.errors_transfer_malformed); - TEST_ASSERT_EQUAL(0, cb_result.message.count); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_slot.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - + udpard_rx_new(&rx); + rx.user = ∩ + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, rx_mem, &callbacks)); + + // Deliver the first transfer. 
+ static const byte_t payload_a[] = { 9, 8, 7 }; + const udpard_bytes_mut_t first = + make_datagram(dgram_mem, udpard_prio_nominal, 7U, 0xAABBCCDDEEFF0011ULL, 0U, payload_a, sizeof(payload_a)); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 2000, (udpard_udpip_ep_t){ .ip = 0x0A000002U, .port = 7100U }, first, dgram_del, 0U)); + udpard_rx_poll(&rx, 2001); + TEST_ASSERT_EQUAL_size_t(1, cap.count); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); + + // Deliver the same transfer-ID again; it must be dropped and freed. + static const byte_t payload_b[] = { 1, 1, 1 }; + const udpard_bytes_mut_t duplicate = + make_datagram(dgram_mem, udpard_prio_nominal, 7U, 0xAABBCCDDEEFF0011ULL, 0U, payload_b, sizeof(payload_b)); + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 2010, (udpard_udpip_ep_t){ .ip = 0x0A000002U, .port = 7100U }, duplicate, dgram_del, 0U)); + udpard_rx_poll(&rx, 2011); + TEST_ASSERT_EQUAL_size_t(1, cap.count); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); + + // Tear down and validate allocator state. udpard_rx_port_free(&rx, &port); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload); + TEST_ASSERT_EQUAL_size_t(0, alloc_rx_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_rx_ses.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); + instrumented_allocator_reset(&alloc_rx_frag); + instrumented_allocator_reset(&alloc_rx_ses); + instrumented_allocator_reset(&alloc_dgram); } -static void test_rx_port(void) -{ - // P2P ports behave like ordinary ports for payload delivery. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; - - udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new_p2p(&port, 64, rx_mem, &callbacks)); - - // Compose a P2P response datagram without a P2P header. 
- const uint64_t resp_tid = 55; - const uint8_t payload[3] = { 'a', 'b', 'c' }; - - meta_t meta = { .priority = udpard_prio_fast, - .kind = frame_msg_best, - .transfer_payload_size = sizeof(payload), - .transfer_id = resp_tid, - .sender_uid = 0x0BADF00D0BADF00DULL }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, sizeof(payload)); - byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload)]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload, sizeof(payload)); - mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); - void* push_payload = mem_res_alloc(mem_payload, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - - udpard_us_t now = 0; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 0)); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - TEST_ASSERT_EQUAL(resp_tid, cb_result.message.history[0].transfer_id); - udpard_fragment_t* const frag = udpard_fragment_seek(cb_result.message.history[0].payload, 0); - TEST_ASSERT_NOT_NULL(frag); - TEST_ASSERT_EQUAL_size_t(3, frag->view.size); - TEST_ASSERT_EQUAL_MEMORY("abc", frag->view.data, 3); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; - - udpard_rx_port_free(&rx, &port); - TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); -} - -static void test_rx_port_timeouts(void) -{ - // Sessions are retired after SESSION_LIFETIME. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; +static void test_rx_malformed_frame(void) +{ + // Prepare RX and allocators. + instrumented_allocator_t alloc_rx_frag = { 0 }; + instrumented_allocator_t alloc_rx_ses = { 0 }; + instrumented_allocator_t alloc_dgram = { 0 }; + instrumented_allocator_new(&alloc_rx_frag); + instrumented_allocator_new(&alloc_rx_ses); + instrumented_allocator_new(&alloc_dgram); + const udpard_rx_mem_resources_t rx_mem = { + .session = instrumented_allocator_make_resource(&alloc_rx_ses), + .slot = instrumented_allocator_make_resource(&alloc_rx_ses), + .fragment = instrumented_allocator_make_resource(&alloc_rx_frag), + }; + const udpard_mem_t dgram_mem = instrumented_allocator_make_resource(&alloc_dgram); + const udpard_deleter_t dgram_del = instrumented_allocator_make_deleter(&alloc_dgram); + // Create RX and one normal port. 
+ capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, 128, rx_mem, &callbacks)); - - meta_t meta = { .priority = udpard_prio_nominal, - .kind = frame_msg_best, - .transfer_payload_size = 4, - .transfer_id = 1, - .sender_uid = 0x1111222233334444ULL }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "ping", 0, 4); - const byte_t payload_bytes[] = { 'p', 'i', 'n', 'g' }; - byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload_bytes)]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload_bytes, sizeof(payload_bytes)); - mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); - void* payload_buf = mem_res_alloc(mem_payload, sizeof(dgram)); - memcpy(payload_buf, dgram, sizeof(dgram)); - - udpard_us_t now = 0; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(dgram) }, - del_payload, - 0)); - TEST_ASSERT_GREATER_THAN_UINT32(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(1, cb_result.message.count); - udpard_fragment_free_all(cb_result.message.history[0].payload, udpard_make_deleter(mem_frag)); - cb_result.message.history[0].payload = NULL; + udpard_rx_new(&rx); + rx.user = &cap; + TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 1024U, rx_mem, &callbacks)); + + // Corrupt the header CRC and ensure the frame is rejected.
+ static const byte_t payload[] = { 0xAA, 0xBB }; + udpard_bytes_mut_t dgram = + make_datagram(dgram_mem, udpard_prio_low, 99U, 0x123456789ABCDEF0ULL, 0U, payload, sizeof(payload)); + ((byte_t*)dgram.data)[HEADER_SIZE_BYTES - 1U] ^= 0x5AU; + const uint64_t malformed_before = rx.errors_frame_malformed; + TEST_ASSERT_TRUE(udpard_rx_port_push( + &rx, &port, 3000, (udpard_udpip_ep_t){ .ip = 0x0A000003U, .port = 7200U }, dgram, dgram_del, 0U)); + TEST_ASSERT_EQUAL_UINT64(malformed_before + 1U, rx.errors_frame_malformed); + TEST_ASSERT_EQUAL_size_t(0, cap.count); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); - now += SESSION_LIFETIME + 1; - udpard_rx_poll(&rx, now); - TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + // Tear down and validate allocator state. udpard_rx_port_free(&rx, &port); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); + TEST_ASSERT_EQUAL_size_t(0, alloc_rx_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_rx_ses.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); + instrumented_allocator_reset(&alloc_rx_frag); + instrumented_allocator_reset(&alloc_rx_ses); + instrumented_allocator_reset(&alloc_dgram); } -static void test_rx_port_oom(void) -{ - // Session allocation failure should be reported gracefully. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - alloc_session.limit_fragments = 0; // force allocation failure - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; +static void test_rx_p2p_remote_endpoint_tracking(void) +{ + // Prepare RX and allocators. + instrumented_allocator_t alloc_rx_frag = { 0 }; + instrumented_allocator_t alloc_rx_ses = { 0 }; + instrumented_allocator_t alloc_dgram = { 0 }; + instrumented_allocator_new(&alloc_rx_frag); + instrumented_allocator_new(&alloc_rx_ses); + instrumented_allocator_new(&alloc_dgram); + const udpard_rx_mem_resources_t rx_mem = { + .session = instrumented_allocator_make_resource(&alloc_rx_ses), + .slot = instrumented_allocator_make_resource(&alloc_rx_ses), + .fragment = instrumented_allocator_make_resource(&alloc_rx_frag), + }; + const udpard_mem_t dgram_mem = instrumented_allocator_make_resource(&alloc_dgram); + const udpard_deleter_t dgram_del = instrumented_allocator_make_deleter(&alloc_dgram); + // Create RX and one P2P port. 
+ capture_t cap = { 0 }; + udpard_rx_t rx = { 0 }; udpard_rx_port_t port = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port, 64, rx_mem, &callbacks)); - - meta_t meta = { .priority = udpard_prio_nominal, - .kind = frame_msg_best, - .transfer_payload_size = 4, - .transfer_id = 1, - .sender_uid = 0x0101010101010101ULL }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, "oom!", 0, 4); - const byte_t payload_bytes[] = { 'o', 'o', 'm', '!' }; - byte_t dgram[HEADER_SIZE_BYTES + sizeof(payload_bytes)]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload_bytes, sizeof(payload_bytes)); - mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); - void* payload_buf = mem_res_alloc(mem_payload, sizeof(dgram)); - memcpy(payload_buf, dgram, sizeof(dgram)); - - udpard_us_t now = 0; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(dgram) }, - del_payload, - 0)); - TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_oom); - TEST_ASSERT_EQUAL(0, alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL(0, cb_result.message.count); - TEST_ASSERT_EQUAL(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload.allocated_fragments); + udpard_rx_new(&rx); + rx.user = &cap; + TEST_ASSERT_TRUE(udpard_rx_port_new_p2p(&port, 1024U, rx_mem, &callbacks)); + + // Push a frame from iface 1 and verify endpoint discovery. + static const byte_t payload[] = { 0x10, 0x20, 0x30 }; + const uint64_t remote_uid = 0xCAFEBABE12345678ULL; + const udpard_bytes_mut_t dgram = + make_datagram(dgram_mem, udpard_prio_nominal, 501U, remote_uid, 0U, payload, sizeof(payload)); + const udpard_udpip_ep_t src = { .ip = 0x0A00000AU, .port = 7300U }; + TEST_ASSERT_TRUE(udpard_rx_port_push(&rx, &port, 4000, src, dgram, dgram_del, 1U)); + udpard_rx_poll(&rx, 4001); + + // Validate transfer metadata and endpoint tracking.
+ TEST_ASSERT_EQUAL_size_t(1, cap.count); + TEST_ASSERT_EQUAL_UINT64(remote_uid, cap.remote.uid); + TEST_ASSERT_EQUAL_UINT32(src.ip, cap.remote.endpoints[1].ip); + TEST_ASSERT_EQUAL_UINT16(src.port, cap.remote.endpoints[1].port); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); + + // Tear down and validate allocator state. udpard_rx_port_free(&rx, &port); - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); - - // Slot allocation failure should be reported gracefully. - instrumented_allocator_t alloc_frag_slot = { 0 }; - instrumented_allocator_new(&alloc_frag_slot); - instrumented_allocator_t alloc_session_slot = { 0 }; - instrumented_allocator_new(&alloc_session_slot); - instrumented_allocator_t alloc_slot = { 0 }; - instrumented_allocator_new(&alloc_slot); - alloc_slot.limit_fragments = 0; // force slot allocation failure - instrumented_allocator_t alloc_payload_slot = { 0 }; - instrumented_allocator_new(&alloc_payload_slot); - const udpard_mem_t mem_frag_slot = instrumented_allocator_make_resource(&alloc_frag_slot); - const udpard_mem_t mem_session_slot = instrumented_allocator_make_resource(&alloc_session_slot); - const udpard_mem_t mem_slot = instrumented_allocator_make_resource(&alloc_slot); - const udpard_mem_t mem_payload_slot = instrumented_allocator_make_resource(&alloc_payload_slot); - const udpard_deleter_t del_payload_slot = instrumented_allocator_make_deleter(&alloc_payload_slot); - const udpard_rx_mem_resources_t rx_mem_slot = { .fragment = mem_frag_slot, - .session = mem_session_slot, - .slot = mem_slot }; - - udpard_rx_t rx_slot; - udpard_rx_new(&rx_slot, NULL); - callback_result_t cb_result_slot = { 0 }; - rx_slot.user = &cb_result_slot; - - udpard_rx_port_t port_slot = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port_slot, 64, rx_mem_slot, &callbacks)); - - meta_t meta_slot = { .priority = udpard_prio_nominal, - .kind = frame_msg_best, - 
.transfer_payload_size = 4, - .transfer_id = 1, - .sender_uid = 0x0202020202020202ULL }; - rx_frame_t* frame_slot = make_frame_ptr(meta_slot, mem_payload_slot, "oom!", 0, 4); - const byte_t payload_slot[] = { 'o', 'o', 'm', '!' }; - byte_t dgram_slot[HEADER_SIZE_BYTES + sizeof(payload_slot)]; - header_serialize(dgram_slot, meta_slot, 0, 0, frame_slot->base.crc); - memcpy(dgram_slot + HEADER_SIZE_BYTES, payload_slot, sizeof(payload_slot)); - mem_free(mem_payload_slot, frame_slot->base.origin.size, frame_slot->base.origin.data); - void* payload_buf_slot = mem_res_alloc(mem_payload_slot, sizeof(dgram_slot)); - memcpy(payload_buf_slot, dgram_slot, sizeof(dgram_slot)); - - now = 0; - TEST_ASSERT(udpard_rx_port_push(&rx_slot, - &port_slot, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = payload_buf_slot, .size = sizeof(dgram_slot) }, - del_payload_slot, - 0)); - TEST_ASSERT_GREATER_THAN_UINT64(0, rx_slot.errors_oom); - TEST_ASSERT_EQUAL(1, alloc_session_slot.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_slot.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_frag_slot.allocated_fragments); - TEST_ASSERT_EQUAL(0, alloc_payload_slot.allocated_fragments); - udpard_rx_port_free(&rx_slot, &port_slot); - TEST_ASSERT_EQUAL(0, alloc_session_slot.allocated_fragments); - instrumented_allocator_reset(&alloc_frag_slot); - instrumented_allocator_reset(&alloc_session_slot); - instrumented_allocator_reset(&alloc_slot); - instrumented_allocator_reset(&alloc_payload_slot); -} - -static void test_rx_port_free_loop(void) -{ - // Freeing ports with in-flight transfers releases all allocations. 
- instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_t alloc_session = { 0 }; - instrumented_allocator_new(&alloc_session); - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - const udpard_mem_t mem_frag = instrumented_allocator_make_resource(&alloc_frag); - const udpard_mem_t mem_session = instrumented_allocator_make_resource(&alloc_session); - const udpard_mem_t mem_payload = instrumented_allocator_make_resource(&alloc_payload); - const udpard_deleter_t del_payload = instrumented_allocator_make_deleter(&alloc_payload); - const udpard_rx_mem_resources_t rx_mem = { .fragment = mem_frag, .session = mem_session, .slot = mem_session }; - - udpard_rx_t rx; - udpard_rx_new(&rx, NULL); - callback_result_t cb_result = { 0 }; - rx.user = &cb_result; - - udpard_rx_port_t port_p2p = { 0 }; - TEST_ASSERT(udpard_rx_port_new_p2p(&port_p2p, SIZE_MAX, rx_mem, &callbacks)); - udpard_rx_port_t port_extra = { 0 }; - TEST_ASSERT(udpard_rx_port_new(&port_extra, 1000, rx_mem, &callbacks)); - - udpard_us_t now = 0; - - // Incomplete transfer on the p2p port. 
- { - const char* payload = "INCOMPLETE"; - meta_t meta = { .priority = udpard_prio_slow, - .kind = frame_msg_best, - .transfer_payload_size = (uint32_t)strlen(payload), - .transfer_id = 10, - .sender_uid = 0xAAAAULL }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, 4); - byte_t dgram[HEADER_SIZE_BYTES + 4]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload, 4); - mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); - void* push_payload = mem_res_alloc(mem_payload, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_p2p, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000001, .port = 0x1234 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 0)); - } - - // Incomplete transfer on the extra port. - { - const char* payload = "FRAGMENTS"; - meta_t meta = { .priority = udpard_prio_fast, - .kind = frame_msg_best, - .transfer_payload_size = (uint32_t)strlen(payload), - .transfer_id = 20, - .sender_uid = 0xBBBBULL }; - rx_frame_t* frame = make_frame_ptr(meta, mem_payload, payload, 0, 3); - byte_t dgram[HEADER_SIZE_BYTES + 3]; - header_serialize(dgram, meta, 0, 0, frame->base.crc); - memcpy(dgram + HEADER_SIZE_BYTES, payload, 3); - mem_free(mem_payload, frame->base.origin.size, frame->base.origin.data); - void* push_payload = mem_res_alloc(mem_payload, sizeof(dgram)); - memcpy(push_payload, dgram, sizeof(dgram)); - now += 1000; - TEST_ASSERT(udpard_rx_port_push(&rx, - &port_extra, - now, - (udpard_udpip_ep_t){ .ip = 0x0A000002, .port = 0x5678 }, - (udpard_bytes_mut_t){ .data = push_payload, .size = sizeof(dgram) }, - del_payload, - 1)); - } - - TEST_ASSERT(alloc_session.allocated_fragments >= 2); - TEST_ASSERT(alloc_frag.allocated_fragments >= 2); - udpard_rx_port_free(&rx, &port_p2p); - udpard_rx_port_free(&rx, &port_extra); - TEST_ASSERT_EQUAL_size_t(0, 
alloc_session.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments); - - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_session); - instrumented_allocator_reset(&alloc_payload); -} - -static void stub_on_message(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_rx_transfer_t transfer) -{ - (void)rx; - udpard_fragment_free_all(transfer.payload, udpard_make_deleter(port->memory.fragment)); -} - -static udpard_udpip_ep_t make_ep(const uint32_t ip) { return (udpard_udpip_ep_t){ .ip = ip, .port = 1U }; } - -static void test_rx_additional_coverage(void) -{ - instrumented_allocator_t alloc_frag = { 0 }; - instrumented_allocator_t alloc_ses = { 0 }; - instrumented_allocator_new(&alloc_frag); - instrumented_allocator_new(&alloc_ses); - const udpard_rx_mem_resources_t mem = { .session = instrumented_allocator_make_resource(&alloc_ses), - .slot = instrumented_allocator_make_resource(&alloc_ses), - .fragment = instrumented_allocator_make_resource(&alloc_frag) }; - // Memory validation rejects missing hooks. 
- const udpard_mem_vtable_t vtable_no_free = { .base = { .free = NULL }, .alloc = dummy_alloc }; - const udpard_mem_vtable_t vtable_no_alloc = { .base = { .free = dummy_free }, .alloc = NULL }; - udpard_rx_mem_resources_t bad_mem = mem; - bad_mem.session.vtable = &vtable_no_free; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); - bad_mem.session.vtable = &vtable_no_alloc; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); - bad_mem = mem; - bad_mem.fragment.vtable = &vtable_no_free; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); - bad_mem.fragment.vtable = &vtable_no_alloc; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); - bad_mem = mem; - bad_mem.slot.vtable = &vtable_no_free; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); - bad_mem.slot.vtable = &vtable_no_alloc; - TEST_ASSERT_FALSE(rx_validate_mem_resources(bad_mem)); - - // Session helpers and free paths. - const udpard_rx_port_vtable_t vtb = { .on_message = stub_on_message }; - udpard_rx_port_t port = { 0 }; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 8, mem, &vtb)); - udpard_list_t anim_list = { 0 }; - rx_session_factory_args_t fac_args = { - .owner = &port, .sessions_by_animation = &anim_list, .remote_uid = 77, .now = 0 - }; - rx_session_t* const ses = (rx_session_t*)cavl2_find_or_insert(&port.index_session_by_remote_uid, - &fac_args.remote_uid, - &cavl_compare_rx_session_by_remote_uid, - &fac_args, - &cavl_factory_rx_session_by_remote_uid); - TEST_ASSERT_NOT_NULL(ses); - for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { - ses->history[i] = 1; - } - ses->history[0] = 5; - TEST_ASSERT_TRUE(rx_session_is_transfer_ejected(ses, 5)); - TEST_ASSERT_FALSE(rx_session_is_transfer_ejected(ses, 6)); - TEST_ASSERT_EQUAL(-1, cavl_compare_rx_session_by_remote_uid(&(uint64_t){ 10 }, &ses->index_remote_uid)); - TEST_ASSERT_EQUAL(1, cavl_compare_rx_session_by_remote_uid(&(uint64_t){ 100 }, &ses->index_remote_uid)); - rx_session_free(ses, &anim_list); - - // Slot acquisition 
covers stale cleanup and eviction. - udpard_rx_t rx = { 0 }; - rx_session_t ses_slots; - mem_zero(sizeof(ses_slots), &ses_slots); - ses_slots.port = &port; - ses_slots.history_current = 0; - for (size_t i = 0; i < RX_TRANSFER_HISTORY_COUNT; i++) { - ses_slots.history[i] = 1; - } - // Allocate one slot to simulate a stale in-progress transfer. - ses_slots.slots[0] = rx_slot_new(mem.slot); - TEST_ASSERT_NOT_NULL(ses_slots.slots[0]); - ses_slots.slots[0]->ts_max = 0; - ses_slots.slots[0]->transfer_id = 1; - rx_slot_t** slot_ref = rx_session_get_slot(&ses_slots, SESSION_LIFETIME + 1, 99); - TEST_ASSERT_NOT_NULL(slot_ref); - TEST_ASSERT_NOT_NULL(*slot_ref); - // Fill all slots to exercise eviction. - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (ses_slots.slots[i] == NULL) { - ses_slots.slots[i] = rx_slot_new(mem.slot); - } - TEST_ASSERT_NOT_NULL(ses_slots.slots[i]); - ses_slots.slots[i]->ts_max = 10 + (udpard_us_t)i; - } - slot_ref = rx_session_get_slot(&ses_slots, 50, 2); - TEST_ASSERT_NOT_NULL(slot_ref); - TEST_ASSERT_NOT_NULL(*slot_ref); - // Release slot allocations from the helper session. - for (size_t i = 0; i < RX_SLOT_COUNT; i++) { - if (ses_slots.slots[i] != NULL) { - rx_slot_destroy(&ses_slots.slots[i], mem.fragment, mem.slot); - } - } - - // Stateless accept success, OOM, malformed. 
- udpard_rx_port_t port_stateless = { 0 }; - TEST_ASSERT_TRUE(udpard_rx_port_new_stateless(&port_stateless, 8, mem, &vtb)); - rx_frame_t frame; - byte_t payload[4] = { 1, 2, 3, 4 }; - mem_zero(sizeof(frame), &frame); - void* payload_buf = mem_res_alloc(mem.fragment, sizeof(payload)); - memcpy(payload_buf, payload, sizeof(payload)); - frame.base.payload = (udpard_bytes_t){ .data = payload_buf, .size = sizeof(payload) }; - frame.base.origin = (udpard_bytes_mut_t){ .data = payload_buf, .size = sizeof(payload) }; - frame.base.crc = crc_full(frame.base.payload.size, frame.base.payload.data); - frame.meta.priority = udpard_prio_nominal; - frame.meta.transfer_payload_size = (uint32_t)frame.base.payload.size; - frame.meta.sender_uid = 9; - frame.meta.transfer_id = 11; - rx_port_accept_stateless( - &rx, &port_stateless, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); - alloc_frag.limit_fragments = 0; - frame.base.payload.data = payload; - frame.base.payload.size = sizeof(payload); - frame.base.origin = (udpard_bytes_mut_t){ 0 }; - frame.base.crc = crc_full(frame.base.payload.size, frame.base.payload.data); - rx_port_accept_stateless( - &rx, &port_stateless, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); - frame.base.payload.size = 0; - frame.meta.transfer_payload_size = 8; - rx_port_accept_stateless( - &rx, &port_stateless, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); - // Stateless accept rejects nonzero offsets. 
- alloc_frag.limit_fragments = SIZE_MAX; - void* payload_buf2 = mem_res_alloc(mem.fragment, sizeof(payload)); - TEST_ASSERT_NOT_NULL(payload_buf2); - memcpy(payload_buf2, payload, sizeof(payload)); - frame.base.payload = (udpard_bytes_t){ .data = payload_buf2, .size = sizeof(payload) }; - frame.base.origin = (udpard_bytes_mut_t){ .data = payload_buf2, .size = sizeof(payload) }; - frame.base.offset = 1U; - frame.meta.transfer_payload_size = (uint32_t)sizeof(payload); - rx_port_accept_stateless( - &rx, &port_stateless, 0, make_ep(1), &frame, instrumented_allocator_make_deleter(&alloc_frag), 0); - frame.base.offset = 0; - udpard_rx_port_free(&rx, &port_stateless); - - // ACK frames are rejected on non-P2P ports. - udpard_rx_port_t port_normal = { 0 }; - TEST_ASSERT_TRUE(udpard_rx_port_new(&port_normal, 8, mem, &vtb)); - byte_t ack_dgram[HEADER_SIZE_BYTES] = { 0 }; - meta_t ack_meta = { .priority = udpard_prio_nominal, - .kind = frame_ack, - .transfer_payload_size = 0, - .transfer_id = 1, - .sender_uid = 2 }; - header_serialize(ack_dgram, ack_meta, 0, 0, crc_full(0, NULL)); - udpard_bytes_mut_t ack_payload = { .data = mem_res_alloc(mem.fragment, sizeof(ack_dgram)), - .size = sizeof(ack_dgram) }; - memcpy(ack_payload.data, ack_dgram, sizeof(ack_dgram)); - const uint64_t malformed_before = rx.errors_frame_malformed; - TEST_ASSERT(udpard_rx_port_push( - &rx, &port_normal, 0, make_ep(3), ack_payload, instrumented_allocator_make_deleter(&alloc_frag), 0)); - TEST_ASSERT_EQUAL_UINT64(malformed_before + 1U, rx.errors_frame_malformed); - udpard_rx_port_free(&rx, &port_normal); - - instrumented_allocator_reset(&alloc_frag); - instrumented_allocator_reset(&alloc_ses); + TEST_ASSERT_EQUAL_size_t(0, alloc_rx_frag.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_rx_ses.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, alloc_dgram.allocated_fragments); + instrumented_allocator_reset(&alloc_rx_frag); + instrumented_allocator_reset(&alloc_rx_ses); + 
instrumented_allocator_reset(&alloc_dgram); } void setUp(void) {} - void tearDown(void) {} int main(void) { UNITY_BEGIN(); - - RUN_TEST(test_rx_fragment_tree_update_a); - RUN_TEST(test_rx_fragment_tree_update_exhaustive); - RUN_TEST(test_rx_fragment_tree_oom); - - RUN_TEST(test_rx_slot_update); - - RUN_TEST(test_rx_ack_enqueued); - - RUN_TEST(test_rx_session_unordered); - RUN_TEST(test_rx_session_unordered_reject_old); - RUN_TEST(test_rx_session_unordered_duplicates); - RUN_TEST(test_rx_session_malformed); - - RUN_TEST(test_rx_port); - RUN_TEST(test_rx_port_timeouts); - RUN_TEST(test_rx_port_oom); - RUN_TEST(test_rx_port_free_loop); - RUN_TEST(test_rx_additional_coverage); - + RUN_TEST(test_rx_single_frame); + RUN_TEST(test_rx_duplicate_rejected_and_freed); + RUN_TEST(test_rx_malformed_frame); + RUN_TEST(test_rx_p2p_remote_endpoint_tracking); return UNITY_END(); } diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index a92410e..bb914e4 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -9,1340 +9,191 @@ typedef struct { - size_t count; bool allow; + size_t count; + struct + { + uint64_t transfer_id; + udpard_udpip_ep_t destination; + uint_fast8_t iface_index; + } items[16]; } eject_state_t; typedef struct { - size_t count; - udpard_tx_feedback_t last; -} feedback_state_t; - -typedef struct -{ - size_t count; - udpard_us_t when[8]; -} eject_log_t; - -static void noop_free(void* const user, const size_t size, void* const pointer) -{ - (void)user; - (void)size; - (void)pointer; -} - -// No-op memory vtable for guard checks. -static const udpard_mem_vtable_t mem_vtable_noop_alloc = { .base = { .free = noop_free }, .alloc = dummy_alloc }; - -// Ejects with a configurable outcome (subject variant). 
-static bool eject_subject_with_flag(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - (void)ejection; - eject_state_t* const st = (eject_state_t*)tx->user; - if (st != NULL) { - st->count++; - return st->allow; - } - return true; -} + instrumented_allocator_t transfer_alloc; + instrumented_allocator_t payload_alloc; + udpard_tx_mem_resources_t mem; + udpard_tx_t tx; + eject_state_t eject; +} tx_fixture_t; -// Ejects with a configurable outcome (P2P variant). -static bool eject_p2p_with_flag(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t dest) +// Captures metadata from each ejected frame. +static bool eject_capture(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) { - (void)ejection; - (void)dest; eject_state_t* const st = (eject_state_t*)tx->user; - if (st != NULL) { - st->count++; - return st->allow; - } + TEST_ASSERT_NOT_NULL(st); + if (!st->allow) { + return false; + } + if (st->count < (sizeof(st->items) / sizeof(st->items[0]))) { + meta_t meta = { 0 }; + uint32_t offset = 0; + uint32_t prefix = 0; + udpard_bytes_t payload = { 0 }; + TEST_ASSERT_TRUE(header_deserialize( + (udpard_bytes_mut_t){ .size = ejection->datagram.size, .data = (void*)ejection->datagram.data }, // NOLINT + &meta, + &offset, + &prefix, + &payload)); + st->items[st->count].transfer_id = meta.transfer_id; + st->items[st->count].destination = ejection->destination; + st->items[st->count].iface_index = ejection->iface_index; + } + st->count++; return true; } -// Records ejection timestamps for later inspection (subject variant). -static bool eject_subject_with_log(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection) -{ - eject_log_t* const st = (eject_log_t*)tx->user; - if ((st != NULL) && (st->count < (sizeof(st->when) / sizeof(st->when[0])))) { - st->when[st->count++] = ejection->now; - } - return true; -} - -// Records ejection timestamps for later inspection (P2P variant). 
-static bool eject_p2p_with_log(udpard_tx_t* const tx, udpard_tx_ejection_t* const ejection, udpard_udpip_ep_t dest) -{ - (void)dest; - eject_log_t* const st = (eject_log_t*)tx->user; - if ((st != NULL) && (st->count < (sizeof(st->when) / sizeof(st->when[0])))) { - st->when[st->count++] = ejection->now; - } - return true; -} - -// Records feedback into the provided state via user context. -static void record_feedback(udpard_tx_t* const tx, const udpard_tx_feedback_t fb) -{ - (void)tx; - feedback_state_t* const st = (feedback_state_t*)fb.user.ptr[0]; - if (st != NULL) { - st->count++; - st->last = fb; - } -} - -// Minimal endpoint helper. -static udpard_udpip_ep_t make_ep(const uint32_t ip) { return (udpard_udpip_ep_t){ .ip = ip, .port = 1U }; } - -// Small helpers for intrusive checks. -static size_t frames_for(const size_t mtu, const size_t payload) { return larger(1, (payload + mtu - 1U) / mtu); } -static tx_transfer_t* latest_transfer(udpard_tx_t* const tx) -{ - return LIST_MEMBER(tx->agewise.head, tx_transfer_t, agewise); -} - -// Looks up a transfer by transfer-ID. -static tx_transfer_t* find_transfer_by_id(udpard_tx_t* const tx, const uint64_t transfer_id) -{ - if (tx == NULL) { - return NULL; - } - const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; - tx_transfer_t* const tr = CAVL2_TO_OWNER( - cavl2_lower_bound(tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id), tx_transfer_t, index_transfer_id); - return ((tr != NULL) && (tr->transfer_id == transfer_id)) ? tr : NULL; -} - -// Counts transfers by transfer-ID and kind. 
-static size_t count_transfers_by_id_and_kind(udpard_tx_t* const tx, const uint64_t transfer_id, const frame_kind_t kind) -{ - if (tx == NULL) { - return 0; - } - size_t count = 0; - const tx_key_transfer_id_t key = { .transfer_id = transfer_id, .seq_no = 0 }; - for (tx_transfer_t* tr = - CAVL2_TO_OWNER(cavl2_lower_bound(tx->index_transfer_id, &key, &tx_cavl_compare_transfer_id), - tx_transfer_t, - index_transfer_id); - (tr != NULL) && (tr->transfer_id == transfer_id); - tr = CAVL2_TO_OWNER(cavl2_next_greater(&tr->index_transfer_id), tx_transfer_t, index_transfer_id)) { - if (tr->kind == kind) { - count++; - } - } - return count; -} - -static void test_bytes_scattered_read(void) -{ - // Skips empty fragments and spans boundaries. - { - const byte_t frag_a[] = { 1U, 2U, 3U }; - const byte_t frag_c[] = { 4U, 5U, 6U, 7U, 8U }; - const udpard_bytes_scattered_t frag3 = { .bytes = { .size = sizeof(frag_c), .data = frag_c }, .next = NULL }; - const udpard_bytes_scattered_t frag2 = { .bytes = { .size = 0U, .data = NULL }, .next = &frag3 }; - const udpard_bytes_scattered_t frag1 = { .bytes = { .size = sizeof(frag_a), .data = frag_a }, .next = &frag2 }; - const udpard_bytes_scattered_t frag0 = { .bytes = { .size = 0U, .data = NULL }, .next = &frag1 }; - bytes_scattered_reader_t reader = { .cursor = &frag0, .position = 0U }; - byte_t out[7] = { 0 }; - bytes_scattered_read(&reader, sizeof(out), out); - const byte_t expected[] = { 1U, 2U, 3U, 4U, 5U, 6U, 7U }; - TEST_ASSERT_EQUAL_UINT8_ARRAY(expected, out, sizeof(expected)); - TEST_ASSERT_EQUAL_PTR(&frag3, reader.cursor); - TEST_ASSERT_EQUAL_size_t(4U, reader.position); - } - - // Resumes mid-fragment when data remains. 
- { - const byte_t frag_tail[] = { 9U, 10U, 11U }; - const udpard_bytes_scattered_t frag = { .bytes = { .size = sizeof(frag_tail), .data = frag_tail }, - .next = NULL }; - bytes_scattered_reader_t reader = { .cursor = &frag, .position = 1U }; - byte_t out[2] = { 0 }; - bytes_scattered_read(&reader, sizeof(out), out); - const byte_t expected[] = { 10U, 11U }; - TEST_ASSERT_EQUAL_UINT8_ARRAY(expected, out, sizeof(out)); - TEST_ASSERT_EQUAL_PTR(&frag, reader.cursor); - TEST_ASSERT_EQUAL_size_t(frag.bytes.size, reader.position); - } - - // Size accounts for chained fragments. - { - const byte_t frag_a[] = { 1U, 2U }; - const byte_t frag_b[] = { 3U, 4U, 5U }; - const udpard_bytes_scattered_t tail = { .bytes = { .size = sizeof(frag_b), .data = frag_b }, .next = NULL }; - const udpard_bytes_scattered_t head = { .bytes = { .size = sizeof(frag_a), .data = frag_a }, .next = &tail }; - TEST_ASSERT_EQUAL_size_t(sizeof(frag_a) + sizeof(frag_b), bytes_scattered_size(head)); - } -} +static const udpard_tx_vtable_t tx_vtable = { .eject = eject_capture }; -static void test_tx_serialize_header(void) +// Initializes a TX fixture with instrumented allocators. 
+static void fixture_init(tx_fixture_t* const self, const size_t queue_limit, const size_t mtu, const bool allow_eject) { - typedef struct - { - byte_t data[HEADER_SIZE_BYTES]; - } header_buffer_t; - - // Test case 1: Basic header serialization - { - header_buffer_t buffer; - const meta_t meta = { - .priority = udpard_prio_fast, - .kind = frame_msg_best, - .transfer_payload_size = 12345, - .transfer_id = 0xBADC0FFEE0DDF00DULL, - .sender_uid = 0x0123456789ABCDEFULL, - }; - (void)header_serialize(buffer.data, meta, 12345, 0, 0); - TEST_ASSERT_EQUAL(HEADER_SIZE_BYTES, sizeof(buffer.data)); - // Verify version and priority in first byte - TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_fast << 5U)), buffer.data[0]); - TEST_ASSERT_EQUAL_UINT8(frame_msg_best, buffer.data[1]); - } - // Test case 2: Reliable flag - { - header_buffer_t buffer; - const meta_t meta = { - .priority = udpard_prio_nominal, - .kind = frame_msg_reliable, - .transfer_payload_size = 5000, - .transfer_id = 0xAAAAAAAAAAAAAAAAULL, - .sender_uid = 0xBBBBBBBBBBBBBBBBULL, - }; - (void)header_serialize(buffer.data, meta, 100, 200, 0); - TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_nominal << 5U)), buffer.data[0]); - TEST_ASSERT_EQUAL_UINT8(frame_msg_reliable, buffer.data[1]); - } - // Test case 3: ACK flag - { - header_buffer_t buffer; - const meta_t meta = { - .priority = udpard_prio_nominal, - .kind = frame_ack, - .transfer_payload_size = 0, - .transfer_id = 0x1111111111111111ULL, - .sender_uid = 0x2222222222222222ULL, - }; - (void)header_serialize(buffer.data, meta, 0, 0, 0); - TEST_ASSERT_EQUAL((HEADER_VERSION | ((unsigned)udpard_prio_nominal << 5U)), buffer.data[0]); - TEST_ASSERT_EQUAL_UINT8(frame_ack, buffer.data[1]); - } -} - -static void test_tx_validation_and_free(void) -{ - // Invalid memory config fails fast. - udpard_tx_mem_resources_t bad = { 0 }; - TEST_ASSERT_FALSE(tx_validate_mem_resources(bad)); - // Reject payload vtables with missing hooks. 
- const udpard_mem_vtable_t vtable_no_free = { .base = { .free = NULL }, .alloc = dummy_alloc }; - const udpard_mem_vtable_t vtable_no_alloc = { .base = { .free = noop_free }, .alloc = NULL }; - const udpard_mem_vtable_t vtable_ok = { .base = { .free = noop_free }, .alloc = dummy_alloc }; - udpard_tx_mem_resources_t bad_payload = { .transfer = { .vtable = &vtable_ok, .context = NULL } }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - bad_payload.payload[i] = (udpard_mem_t){ .vtable = &vtable_no_free, .context = NULL }; - } - TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_payload)); + instrumented_allocator_new(&self->transfer_alloc); + instrumented_allocator_new(&self->payload_alloc); + self->mem.transfer = instrumented_allocator_make_resource(&self->transfer_alloc); for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - bad_payload.payload[i] = (udpard_mem_t){ .vtable = &vtable_no_alloc, .context = NULL }; + self->mem.payload[i] = instrumented_allocator_make_resource(&self->payload_alloc); } - TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_payload)); - // Reject transfer vtables with missing hooks. - udpard_tx_mem_resources_t bad_transfer = bad_payload; + self->eject = (eject_state_t){ .allow = allow_eject, .count = 0U }; + TEST_ASSERT_TRUE(udpard_tx_new(&self->tx, 0x1122334455667788ULL, 123U, queue_limit, self->mem, &tx_vtable)); for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - bad_transfer.payload[i] = (udpard_mem_t){ .vtable = &vtable_ok, .context = NULL }; + self->tx.mtu[i] = mtu; } - bad_transfer.transfer = (udpard_mem_t){ .vtable = &vtable_no_free, .context = NULL }; - TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_transfer)); - bad_transfer.transfer = (udpard_mem_t){ .vtable = &vtable_no_alloc, .context = NULL }; - TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_transfer)); - // Reject null transfer vtable. 
- bad_transfer.transfer = (udpard_mem_t){ .vtable = NULL, .context = NULL }; - TEST_ASSERT_FALSE(tx_validate_mem_resources(bad_transfer)); - - instrumented_allocator_t alloc_transfer = { 0 }; - instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_transfer); - instrumented_allocator_new(&alloc_payload); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc_transfer) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc_payload); - } - - // Populate indexes then free to hit all removal paths. - udpard_tx_t tx = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 1U, - 1U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx_transfer_t* const tr = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); - mem_zero(sizeof(*tr), tr); - tr->priority = udpard_prio_fast; - tr->deadline = 10; - tr->staged_until = 1; - tr->seq_no = 1; - tr->transfer_id = 7; - tr->kind = frame_msg_best; - // Insert with stable ordering keys. 
- (void)cavl2_find_or_insert(&tx.index_staged, tr, tx_cavl_compare_staged, &tr->index_staged, cavl2_trivial_factory); - (void)cavl2_find_or_insert( - &tx.index_deadline, tr, tx_cavl_compare_deadline, &tr->index_deadline, cavl2_trivial_factory); - const tx_key_transfer_id_t key_id = { .transfer_id = tr->transfer_id, .seq_no = tr->seq_no }; - (void)cavl2_find_or_insert( - &tx.index_transfer_id, &key_id, tx_cavl_compare_transfer_id, &tr->index_transfer_id, cavl2_trivial_factory); - enlist_head(&tx.agewise, &tr->agewise); - tx_transfer_retire(&tx, tr, true); - TEST_ASSERT_NULL(tx.index_staged); - TEST_ASSERT_NULL(tx.index_transfer_id); - TEST_ASSERT_NULL(tx.index_deadline); - instrumented_allocator_reset(&alloc_transfer); - instrumented_allocator_reset(&alloc_payload); + self->tx.user = &self->eject; } -static void test_tx_comparators_and_feedback(void) +// Frees TX fixture and checks allocator state. +static void fixture_fini(tx_fixture_t* const self) { - tx_transfer_t tr; - mem_zero(sizeof(tr), &tr); - tr.staged_until = 5; - tr.deadline = 7; - tr.transfer_id = 20; - tr.seq_no = 9; - - // Staged/deadline comparisons both ways. - tx_transfer_t key = tr; - key.staged_until = 6; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&key, &tr.index_staged)); - key.staged_until = 4; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&key, &tr.index_staged)); - key.deadline = 8; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); - key.deadline = 6; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); - - // Equality returns zero for staged and deadline comparators. - key.staged_until = tr.staged_until; - key.seq_no = tr.seq_no; - TEST_ASSERT_EQUAL(0, tx_cavl_compare_staged(&key, &tr.index_staged)); - key.deadline = tr.deadline; - key.seq_no = tr.seq_no; - TEST_ASSERT_EQUAL(0, tx_cavl_compare_deadline(&key, &tr.index_deadline)); - // Staged comparator covers seq_no branches. 
- key.staged_until = tr.staged_until; - key.seq_no = tr.seq_no - 1; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_staged(&key, &tr.index_staged)); - key.seq_no = tr.seq_no + 1; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_staged(&key, &tr.index_staged)); - // Deadline comparator covers seq_no branches. - key.deadline = tr.deadline; - key.seq_no = tr.seq_no - 1; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); - key.seq_no = tr.seq_no + 1; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_deadline(&key, &tr.index_deadline)); - - // Transfer-ID comparator covers all branches. - tx_key_transfer_id_t key_id = { .transfer_id = 10, .seq_no = tr.seq_no }; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); - key_id.transfer_id = 30; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); - key_id.transfer_id = tr.transfer_id; - key_id.seq_no = tr.seq_no - 1; - TEST_ASSERT_EQUAL(-1, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); - key_id.seq_no = tr.seq_no + 1; - TEST_ASSERT_EQUAL(1, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); - key_id.seq_no = tr.seq_no; - TEST_ASSERT_EQUAL(0, tx_cavl_compare_transfer_id(&key_id, &tr.index_transfer_id)); -} - -static void test_tx_spool_and_queue_errors(void) -{ - // OOM in spool after first frame. 
- instrumented_allocator_t alloc_payload = { 0 }; - instrumented_allocator_new(&alloc_payload); - alloc_payload.limit_fragments = 1; - udpard_tx_t tx = { .enqueued_frames_limit = 1, .enqueued_frames_count = 0 }; - tx.memory.payload[0] = instrumented_allocator_make_resource(&alloc_payload); - byte_t buffer[64] = { 0 }; - const udpard_bytes_scattered_t payload = make_scattered(buffer, sizeof(buffer)); - const meta_t meta = { - .priority = udpard_prio_fast, - .kind = frame_msg_best, - .transfer_payload_size = (uint32_t)payload.bytes.size, - .transfer_id = 1, - .sender_uid = 1, - }; - TEST_ASSERT_NULL(tx_spool(&tx, tx.memory.payload[0], 32, meta, payload)); - TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); - TEST_ASSERT_EQUAL_UINT64(80, tx_ack_timeout(5, udpard_prio_high, 1)); - instrumented_allocator_reset(&alloc_payload); - - // Capacity exhaustion. - instrumented_allocator_new(&alloc_payload); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc_payload) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc_payload); - } - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 2U, - 2U, - 1U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - byte_t big_buf[2000] = { 0 }; - const udpard_bytes_scattered_t big_payload = make_scattered(big_buf, sizeof(big_buf)); - const uint16_t iface_bitmap_01 = (1U << 0U); - TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 11, big_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_EQUAL_size_t(1, tx.errors_capacity); - - // Immediate rejection when the request exceeds limits. 
- udpard_tx_t tx_limit; - mem_zero(sizeof(tx_limit), &tx_limit); - tx_limit.enqueued_frames_limit = 1; - tx_limit.enqueued_frames_count = 0; - tx_limit.memory.transfer = (udpard_mem_t){ .vtable = &mem_vtable_noop_alloc, .context = NULL }; - TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_limit, 3)); - - // Sacrifice clears space when the queue is full. - udpard_tx_t tx_sac; - mem_zero(sizeof(tx_sac), &tx_sac); - tx_sac.enqueued_frames_limit = 1; - tx_sac.enqueued_frames_count = 1; - tx_sac.errors_sacrifice = 0; - tx_sac.memory.transfer = (udpard_mem_t){ .vtable = &mem_vtable_noop_alloc, .context = NULL }; - tx_transfer_t victim; - mem_zero(sizeof(victim), &victim); - victim.priority = udpard_prio_fast; - victim.deadline = 1; - victim.transfer_id = 9; - victim.seq_no = 1; - victim.kind = frame_msg_best; - // Insert into deadline index with stable key. - (void)cavl2_find_or_insert( - &tx_sac.index_deadline, &victim, tx_cavl_compare_deadline, &victim.index_deadline, cavl2_trivial_factory); - (void)cavl2_find_or_insert(&tx_sac.index_transfer_id, - &(tx_key_transfer_id_t){ .transfer_id = victim.transfer_id, .seq_no = victim.seq_no }, - tx_cavl_compare_transfer_id, - &victim.index_transfer_id, - cavl2_trivial_factory); - enlist_head(&tx_sac.agewise, &victim.agewise); - TEST_ASSERT_FALSE(tx_ensure_queue_space(&tx_sac, 1)); - TEST_ASSERT_EQUAL_size_t(1, tx_sac.errors_sacrifice); - - // Transfer allocation OOM. - alloc_payload.limit_fragments = 0; - tx.errors_capacity = 0; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 3U, - 3U, - 2U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - TEST_ASSERT_FALSE(udpard_tx_push( - &tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 12, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom); - - // Spool OOM inside tx_push. 
- alloc_payload.limit_fragments = 1; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 4U, - 4U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - TEST_ASSERT_FALSE( - udpard_tx_push(&tx, 0, 1000, iface_bitmap_01, udpard_prio_fast, 13, big_payload, NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_EQUAL_size_t(1, tx.errors_oom); - - // Reliable transfer gets staged. - alloc_payload.limit_fragments = SIZE_MAX; - feedback_state_t fstate = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 5U, - 5U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx.ack_baseline_timeout = 1; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 100000, - iface_bitmap_01, - udpard_prio_nominal, - 14, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_NOT_NULL(tx.index_staged); - udpard_tx_free(&tx); - instrumented_allocator_reset(&alloc_payload); + udpard_tx_free(&self->tx); + TEST_ASSERT_EQUAL_size_t(0, self->transfer_alloc.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(0, self->payload_alloc.allocated_fragments); + instrumented_allocator_reset(&self->transfer_alloc); + instrumented_allocator_reset(&self->payload_alloc); } -static void test_tx_ack_and_scheduler(void) +static void test_tx_subject_ejection(void) { - instrumented_allocator_t alloc = { 0 }; - instrumented_allocator_new(&alloc); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc); - } - const uint16_t iface_bitmap_01 = (1U << 0U); - - // Ack reception triggers feedback. 
- feedback_state_t fstate = { 0 }; - udpard_tx_t tx1 = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx1, - 10U, - 1U, - 8U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - TEST_ASSERT_TRUE(udpard_tx_push(&tx1, - 0, - 1000, - iface_bitmap_01, - udpard_prio_fast, - 42, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_EQUAL_UINT32(1U << 0U, udpard_tx_pending_ifaces(&tx1)); - udpard_rx_t rx = { .tx = &tx1 }; - tx_receive_ack(&rx, 21, 42); - TEST_ASSERT_EQUAL_size_t(1, fstate.count); - TEST_ASSERT_EQUAL_UINT32(0U, udpard_tx_pending_ifaces(&tx1)); - // Ignore ACKs when RX has no TX. - rx.tx = NULL; - tx_receive_ack(&rx, 21, 42); - udpard_tx_free(&tx1); - - // Best-effort transfers ignore ACKs. - udpard_tx_t tx_be = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx_be, - 10U, - 1U, - 8U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); + // Push one subject transfer on two interfaces and verify ejections. + tx_fixture_t fx = { 0 }; + fixture_init(&fx, 8U, 128U, true); + const byte_t data[] = { 1, 2, 3, 4, 5, 6 }; + const udpard_udpip_ep_t subject = udpard_make_subject_endpoint(321U); + const udpard_bytes_scattered_t payload = make_scattered(data, sizeof(data)); TEST_ASSERT_TRUE(udpard_tx_push( - &tx_be, 0, 1000, iface_bitmap_01, udpard_prio_fast, 43, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); - udpard_rx_t rx_be = { .tx = &tx_be }; - tx_receive_ack(&rx_be, 22, 43); - TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx_be, 43)); - udpard_tx_free(&tx_be); - - // Ack lookup misses when the lower bound has a different transfer-ID. 
- udpard_tx_t tx_miss = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx_miss, - 10U, - 1U, - 8U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx_transfer_t* miss = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); - mem_zero(sizeof(*miss), miss); - miss->kind = frame_msg_best; - miss->transfer_id = 100; - miss->seq_no = 1; - miss->deadline = 50; - miss->priority = udpard_prio_fast; - cavl2_find_or_insert( - &tx_miss.index_deadline, miss, tx_cavl_compare_deadline, &miss->index_deadline, cavl2_trivial_factory); - cavl2_find_or_insert(&tx_miss.index_transfer_id, - &(tx_key_transfer_id_t){ .transfer_id = miss->transfer_id, .seq_no = miss->seq_no }, - tx_cavl_compare_transfer_id, - &miss->index_transfer_id, - cavl2_trivial_factory); - enlist_head(&tx_miss.agewise, &miss->agewise); - udpard_rx_t rx_miss = { .tx = &tx_miss }; - tx_receive_ack(&rx_miss, 21, 99); - TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx_miss, 100)); - udpard_tx_free(&tx_miss); - - // ACK acceptance skips colliding P2P transfers from other remotes. - udpard_tx_t tx_coll_rx = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx_coll_rx, - 10U, - 1U, - 8U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - udpard_rx_t rx_coll = { .tx = &tx_coll_rx }; - feedback_state_t fb_a = { 0 }; - feedback_state_t fb_b = { 0 }; - const uint64_t coll_id = 55; - // Insert first colliding transfer. 
- tx_transfer_t* tr_a = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); - mem_zero(sizeof(*tr_a), tr_a); - tr_a->kind = frame_msg_reliable; - tr_a->is_p2p = true; - tr_a->transfer_id = coll_id; - tr_a->seq_no = 1; - tr_a->deadline = 10; - tr_a->priority = udpard_prio_fast; - tr_a->p2p_remote.uid = 1001; - tr_a->user = make_user_context(&fb_a); - tr_a->feedback = record_feedback; - cavl2_find_or_insert( - &tx_coll_rx.index_deadline, tr_a, tx_cavl_compare_deadline, &tr_a->index_deadline, cavl2_trivial_factory); - cavl2_find_or_insert(&tx_coll_rx.index_transfer_id, - &(tx_key_transfer_id_t){ .transfer_id = tr_a->transfer_id, .seq_no = tr_a->seq_no }, - tx_cavl_compare_transfer_id, - &tr_a->index_transfer_id, - cavl2_trivial_factory); - enlist_head(&tx_coll_rx.agewise, &tr_a->agewise); - // Insert second colliding transfer with different remote UID. - tx_transfer_t* tr_b = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); - mem_zero(sizeof(*tr_b), tr_b); - tr_b->kind = frame_msg_reliable; - tr_b->is_p2p = true; - tr_b->transfer_id = coll_id; - tr_b->seq_no = 2; - tr_b->deadline = 10; - tr_b->priority = udpard_prio_fast; - tr_b->p2p_remote.uid = 1002; - tr_b->user = make_user_context(&fb_b); - tr_b->feedback = record_feedback; - cavl2_find_or_insert( - &tx_coll_rx.index_deadline, tr_b, tx_cavl_compare_deadline, &tr_b->index_deadline, cavl2_trivial_factory); - cavl2_find_or_insert(&tx_coll_rx.index_transfer_id, - &(tx_key_transfer_id_t){ .transfer_id = tr_b->transfer_id, .seq_no = tr_b->seq_no }, - tx_cavl_compare_transfer_id, - &tr_b->index_transfer_id, - cavl2_trivial_factory); - enlist_head(&tx_coll_rx.agewise, &tr_b->agewise); - // Accept ack for the second transfer only. - tx_receive_ack(&rx_coll, tr_b->p2p_remote.uid, coll_id); - TEST_ASSERT_EQUAL_size_t(0, fb_a.count); - TEST_ASSERT_EQUAL_size_t(1, fb_b.count); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_coll_rx, coll_id, frame_msg_reliable)); - // Accept ack for the first transfer. 
- tx_receive_ack(&rx_coll, tr_a->p2p_remote.uid, coll_id); - TEST_ASSERT_EQUAL_size_t(1, fb_a.count); - TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx_coll_rx, coll_id, frame_msg_reliable)); - udpard_tx_free(&tx_coll_rx); - - // Ack suppressed when coverage not improved. - udpard_tx_t tx2 = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx2, - 11U, - 2U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx_transfer_t* prior = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); - mem_zero(sizeof(*prior), prior); - prior->kind = frame_ack; - prior->is_p2p = true; - prior->transfer_id = 8; - prior->seq_no = 1; - prior->deadline = 100; - prior->priority = udpard_prio_fast; - prior->p2p_remote.uid = 9; - prior->p2p_remote.endpoints[0] = make_ep(3); - cavl2_find_or_insert( - &tx2.index_deadline, prior, tx_cavl_compare_deadline, &prior->index_deadline, cavl2_trivial_factory); - cavl2_find_or_insert(&tx2.index_transfer_id, - &(tx_key_transfer_id_t){ .transfer_id = prior->transfer_id, .seq_no = prior->seq_no }, - tx_cavl_compare_transfer_id, - &prior->index_transfer_id, - cavl2_trivial_factory); - enlist_head(&tx2.agewise, &prior->agewise); - rx.errors_ack_tx = 0; - rx.tx = &tx2; - tx_send_ack(&rx, 0, udpard_prio_fast, 8, (udpard_remote_t){ .uid = 9, .endpoints = { make_ep(3) } }); - TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); - TEST_ASSERT_EQUAL_UINT32(0U, udpard_tx_pending_ifaces(&tx2)); - tx_transfer_retire(&tx2, prior, false); - udpard_tx_free(&tx2); - - // Ack search skips prior with the same transfer-ID but different UID. 
- udpard_tx_t tx_uid = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx_uid, - 11U, - 2U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - rx.tx = &tx_uid; - rx.errors_ack_tx = 0; - tx_transfer_t* prior_uid = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); - mem_zero(sizeof(*prior_uid), prior_uid); - prior_uid->kind = frame_ack; - prior_uid->is_p2p = true; - prior_uid->transfer_id = 7; - prior_uid->seq_no = 1; - prior_uid->deadline = 100; - prior_uid->priority = udpard_prio_fast; - prior_uid->p2p_remote.uid = 1; - prior_uid->p2p_remote.endpoints[0] = make_ep(2); - cavl2_find_or_insert( - &tx_uid.index_deadline, prior_uid, tx_cavl_compare_deadline, &prior_uid->index_deadline, cavl2_trivial_factory); - cavl2_find_or_insert(&tx_uid.index_transfer_id, - &(tx_key_transfer_id_t){ .transfer_id = prior_uid->transfer_id, .seq_no = prior_uid->seq_no }, - tx_cavl_compare_transfer_id, - &prior_uid->index_transfer_id, - cavl2_trivial_factory); - enlist_head(&tx_uid.agewise, &prior_uid->agewise); - tx_send_ack(&rx, 0, udpard_prio_fast, 7, (udpard_remote_t){ .uid = 2, .endpoints = { make_ep(3) } }); - TEST_ASSERT_EQUAL_size_t(2, count_transfers_by_id_and_kind(&tx_uid, 7, frame_ack)); - udpard_tx_free(&tx_uid); - - // Ack replaced with broader coverage. - udpard_tx_t tx3 = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx3, - 12U, - 3U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - rx.tx = &tx3; - tx_send_ack(&rx, 0, udpard_prio_fast, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4) } }); - tx_send_ack(&rx, 0, udpard_prio_fast, 9, (udpard_remote_t){ .uid = 11, .endpoints = { make_ep(4), make_ep(5) } }); - TEST_ASSERT_NOT_EQUAL(0U, udpard_tx_pending_ifaces(&tx3)); - udpard_tx_free(&tx3); - - // Ack search ignores prior with different transfer-ID. 
- udpard_tx_t tx_mismatch = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx_mismatch, - 12U, - 3U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - rx.tx = &tx_mismatch; - rx.errors_ack_tx = 0; - tx_transfer_t* prior_ack = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); - mem_zero(sizeof(*prior_ack), prior_ack); - prior_ack->kind = frame_ack; - prior_ack->is_p2p = true; - prior_ack->transfer_id = 100; - prior_ack->seq_no = 1; - prior_ack->deadline = 100; - prior_ack->priority = udpard_prio_fast; - prior_ack->p2p_remote.uid = 9; - prior_ack->p2p_remote.endpoints[0] = make_ep(3); - cavl2_find_or_insert(&tx_mismatch.index_deadline, - prior_ack, - tx_cavl_compare_deadline, - &prior_ack->index_deadline, - cavl2_trivial_factory); - cavl2_find_or_insert(&tx_mismatch.index_transfer_id, - &(tx_key_transfer_id_t){ .transfer_id = prior_ack->transfer_id, .seq_no = prior_ack->seq_no }, - tx_cavl_compare_transfer_id, - &prior_ack->index_transfer_id, - cavl2_trivial_factory); - enlist_head(&tx_mismatch.agewise, &prior_ack->agewise); - tx_send_ack(&rx, 0, udpard_prio_fast, 99, (udpard_remote_t){ .uid = 9, .endpoints = { make_ep(4) } }); - TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_mismatch, 100, frame_ack)); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_mismatch, 99, frame_ack)); - udpard_tx_free(&tx_mismatch); - - // Ack emission ignores colliding non-ack transfers. 
- udpard_tx_t tx_coll_ack = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx_coll_ack, - 12U, - 3U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - rx.tx = &tx_coll_ack; - rx.errors_ack_tx = 0; - TEST_ASSERT_TRUE(udpard_tx_push(&tx_coll_ack, - 0, - 1000, - iface_bitmap_01, - udpard_prio_fast, - 60, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_coll_ack, 60, frame_msg_reliable)); - tx_send_ack(&rx, 0, udpard_prio_fast, 60, (udpard_remote_t){ .uid = 77, .endpoints = { make_ep(7) } }); - TEST_ASSERT_EQUAL_UINT64(0, rx.errors_ack_tx); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_coll_ack, 60, frame_msg_reliable)); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx_coll_ack, 60, frame_ack)); - udpard_tx_free(&tx_coll_ack); - - // Ack push failure with TX present. - udpard_tx_mem_resources_t fail_mem = { .transfer = { .vtable = &mem_vtable_noop_alloc, .context = NULL } }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - fail_mem.payload[i] = fail_mem.transfer; - } - udpard_tx_t tx6 = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx6, - 15U, - 6U, - 1U, - fail_mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - rx.errors_ack_tx = 0; - rx.tx = &tx6; - tx_send_ack(&rx, 0, udpard_prio_fast, 2, (udpard_remote_t){ .uid = 1, .endpoints = { make_ep(6) } }); - TEST_ASSERT_GREATER_THAN_UINT64(0, rx.errors_ack_tx); - udpard_tx_free(&tx6); - - // Ack push failure increments error. - udpard_rx_t rx_fail = { .tx = NULL }; - tx_send_ack(&rx_fail, 0, udpard_prio_fast, 1, (udpard_remote_t){ 0 }); - TEST_ASSERT_GREATER_THAN_UINT64(0, rx_fail.errors_ack_tx); - - // Expired transfer purge with feedback. 
- udpard_tx_t tx4 = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx4, - 13U, - 4U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx4.errors_expiration = 0; - tx_transfer_t* exp = mem_alloc(mem.transfer, sizeof(tx_transfer_t)); - mem_zero(sizeof(*exp), exp); - exp->deadline = 1; - exp->priority = udpard_prio_slow; - exp->transfer_id = 66; - exp->seq_no = 1; - exp->kind = frame_msg_reliable; - exp->user = make_user_context(&fstate); - exp->feedback = record_feedback; - // Insert into deadline index with stable key. - (void)cavl2_find_or_insert( - &tx4.index_deadline, exp, tx_cavl_compare_deadline, &exp->index_deadline, cavl2_trivial_factory); - (void)cavl2_find_or_insert(&tx4.index_transfer_id, - &(tx_key_transfer_id_t){ .transfer_id = exp->transfer_id, .seq_no = exp->seq_no }, - tx_cavl_compare_transfer_id, - &exp->index_transfer_id, - cavl2_trivial_factory); - tx_purge_expired_transfers(&tx4, 2); - TEST_ASSERT_GREATER_THAN_UINT64(0, tx4.errors_expiration); - udpard_tx_free(&tx4); - - // Staged promotion re-enqueues transfer. - udpard_tx_t tx5 = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx5, - 14U, - 5U, - 4U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx_transfer_t staged; - mem_zero(sizeof(staged), &staged); - staged.staged_until = 0; - staged.deadline = 100; - staged.priority = udpard_prio_fast; - staged.seq_no = 1; - staged.transfer_id = 7; - staged.kind = frame_msg_reliable; - tx_frame_t dummy_frame = { 0 }; - staged.head[0] = staged.cursor[0] = &dummy_frame; - // Insert into staged index with stable key. 
- cavl2_find_or_insert( - &tx5.index_staged, &staged, tx_cavl_compare_staged, &staged.index_staged, cavl2_trivial_factory); - tx5.ack_baseline_timeout = 1; - tx_promote_staged_transfers(&tx5, 1); - TEST_ASSERT_NOT_NULL(tx5.queue[0][staged.priority].head); - TEST_ASSERT_EQUAL_UINT32(1U << 0U, udpard_tx_pending_ifaces(&tx5)); - // Already-listed transfers stay in the queue. - tx_promote_staged_transfers(&tx5, 1000); - TEST_ASSERT_EQUAL_PTR(&staged.queue[0], tx5.queue[0][staged.priority].head); - - // Ejection stops when NIC refuses. - staged.cursor[0] = staged.head[0]; - staged.queue[0].next = NULL; - staged.queue[0].prev = NULL; - tx5.queue[0][staged.priority].head = &staged.queue[0]; - tx5.queue[0][staged.priority].tail = &staged.queue[0]; - eject_state_t eject_flag = { .count = 0, .allow = false }; - tx5.vtable = &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }; - tx5.user = &eject_flag; - tx_eject_pending_frames(&tx5, 5, 0); - TEST_ASSERT_EQUAL_size_t(1, eject_flag.count); - udpard_tx_free(&tx5); - - instrumented_allocator_reset(&alloc); -} - -static void test_tx_stage_if(void) -{ - // Exercises retransmission gating near deadline. 
- udpard_tx_t tx = { 0 }; - tx.ack_baseline_timeout = 10; - - tx_transfer_t tr; - mem_zero(sizeof(tr), &tr); - tr.priority = udpard_prio_nominal; - tr.deadline = 1000; - tr.staged_until = 100; - tr.kind = frame_msg_reliable; - - udpard_us_t expected = tr.staged_until; - - tx_stage_if(&tx, &tr); - expected += tx_ack_timeout(tx.ack_baseline_timeout, tr.priority, 0); - TEST_ASSERT_EQUAL_UINT8(1, tr.epoch); - TEST_ASSERT_EQUAL(expected, tr.staged_until); - TEST_ASSERT_NOT_NULL(tx.index_staged); - cavl2_remove(&tx.index_staged, &tr.index_staged); - - tx_stage_if(&tx, &tr); - expected += tx_ack_timeout(tx.ack_baseline_timeout, tr.priority, 1); - TEST_ASSERT_EQUAL_UINT8(2, tr.epoch); - TEST_ASSERT_EQUAL(expected, tr.staged_until); - TEST_ASSERT_NOT_NULL(tx.index_staged); - cavl2_remove(&tx.index_staged, &tr.index_staged); - - tx_stage_if(&tx, &tr); - expected += tx_ack_timeout(tx.ack_baseline_timeout, tr.priority, 2); - TEST_ASSERT_EQUAL_UINT8(3, tr.epoch); - TEST_ASSERT_EQUAL(expected, tr.staged_until); - TEST_ASSERT_NULL(tx.index_staged); -} - -static void test_tx_stage_if_via_tx_push(void) -{ - // Tracks retransmission times via the scheduler. 
- instrumented_allocator_t alloc = { 0 }; - instrumented_allocator_new(&alloc); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc); - } + &fx.tx, 0, 10000, (1U << 0U) | (1U << 2U), udpard_prio_fast, 0x0000AABBCCDDEEFFULL, subject, payload, NULL)); + TEST_ASSERT_EQUAL_UINT16((1U << 0U) | (1U << 2U), udpard_tx_pending_ifaces(&fx.tx)); - udpard_tx_t tx = { 0 }; - eject_log_t log = { 0 }; - feedback_state_t fb = { 0 }; - udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_log, .eject_p2p = eject_p2p_with_log }; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 30U, 1U, 4U, mem, &vt)); - tx.user = &log; - tx.ack_baseline_timeout = 10; - const uint16_t iface_bitmap_12 = (1U << 0U) | (1U << 1U); + udpard_tx_poll(&fx.tx, 1, (1U << 0U) | (1U << 2U)); + TEST_ASSERT_EQUAL_size_t(2, fx.eject.count); + TEST_ASSERT_EQUAL_UINT64(0x0000AABBCCDDEEFFULL, fx.eject.items[0].transfer_id); + TEST_ASSERT_EQUAL_UINT32(subject.ip, fx.eject.items[0].destination.ip); + TEST_ASSERT_EQUAL_UINT16(subject.port, fx.eject.items[0].destination.port); + TEST_ASSERT_EQUAL_UINT16(0U, udpard_tx_pending_ifaces(&fx.tx)); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 500, - iface_bitmap_12, - udpard_prio_nominal, - 77, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fb))); - TEST_ASSERT_EQUAL_UINT32(iface_bitmap_12, udpard_tx_pending_ifaces(&tx)); - - udpard_tx_poll(&tx, 0, UDPARD_IFACE_BITMAP_ALL); - udpard_tx_poll(&tx, 160, UDPARD_IFACE_BITMAP_ALL); - udpard_tx_poll(&tx, 400, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_EQUAL_UINT32(0U, udpard_tx_pending_ifaces(&tx)); - - TEST_ASSERT_EQUAL_size_t(4, log.count); - TEST_ASSERT_EQUAL(0, log.when[0]); - TEST_ASSERT_EQUAL(0, log.when[1]); - TEST_ASSERT_EQUAL(160, log.when[2]); - TEST_ASSERT_EQUAL(160, log.when[3]); - TEST_ASSERT_NULL(tx.index_staged); - udpard_tx_free(&tx); - 
instrumented_allocator_reset(&alloc); + fixture_fini(&fx); } -static void test_tx_stage_if_short_deadline(void) +static void test_tx_p2p_endpoints(void) { - // Ensures retransmission is skipped when deadline is too close. - instrumented_allocator_t alloc = { 0 }; - instrumented_allocator_new(&alloc); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc); - } - - udpard_tx_t tx = { 0 }; - eject_log_t log = { 0 }; - feedback_state_t fb = { 0 }; - udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_log, .eject_p2p = eject_p2p_with_log }; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 31U, 1U, 4U, mem, &vt)); - tx.user = &log; - tx.ack_baseline_timeout = 10; - const uint16_t iface_bitmap_1 = (1U << 0U); + // Push one P2P transfer and verify only valid endpoints are used. + tx_fixture_t fx = { 0 }; + fixture_init(&fx, 8U, 128U, true); + const byte_t data[] = { 9, 8, 7 }; + const udpard_bytes_scattered_t payload = make_scattered(data, sizeof(data)); + udpard_udpip_ep_t eps[UDPARD_IFACE_COUNT_MAX] = { 0 }; + eps[0] = (udpard_udpip_ep_t){ .ip = 0x0A000001U, .port = 8001U }; + eps[2] = (udpard_udpip_ep_t){ .ip = 0x0A000003U, .port = 8003U }; + TEST_ASSERT_TRUE(udpard_tx_push_p2p(&fx.tx, 0, 10000, udpard_prio_nominal, eps, payload, NULL)); + TEST_ASSERT_EQUAL_UINT16((1U << 0U) | (1U << 2U), udpard_tx_pending_ifaces(&fx.tx)); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 50, - iface_bitmap_1, - udpard_prio_nominal, - 78, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fb))); + udpard_tx_poll(&fx.tx, 1, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(2, fx.eject.count); + TEST_ASSERT_EQUAL_UINT32(eps[0].ip, fx.eject.items[0].destination.ip); + TEST_ASSERT_EQUAL_UINT16(eps[0].port, fx.eject.items[0].destination.port); + TEST_ASSERT_EQUAL_UINT32(eps[2].ip, fx.eject.items[1].destination.ip); + 
TEST_ASSERT_EQUAL_UINT16(eps[2].port, fx.eject.items[1].destination.port); - udpard_tx_poll(&tx, 0, UDPARD_IFACE_BITMAP_ALL); - udpard_tx_poll(&tx, 30, UDPARD_IFACE_BITMAP_ALL); - udpard_tx_poll(&tx, 60, UDPARD_IFACE_BITMAP_ALL); - - TEST_ASSERT_EQUAL_size_t(1, log.count); - TEST_ASSERT_EQUAL(0, log.when[0]); - udpard_tx_free(&tx); - instrumented_allocator_reset(&alloc); -} - -static void test_tx_push_p2p_success(void) -{ - // Successful P2P push uses valid endpoints and returns a transfer-ID. - instrumented_allocator_t alloc = { 0 }; - instrumented_allocator_new(&alloc); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc); - } - udpard_tx_t tx = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 1U, - 2U, - 8U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - const udpard_remote_t remote = { .uid = 42, .endpoints = { make_ep(11) } }; - uint64_t out_tid = 0; - TEST_ASSERT_TRUE(udpard_tx_push_p2p( - &tx, 0, 10, udpard_prio_fast, remote, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL, &out_tid)); - TEST_ASSERT_NOT_EQUAL(0U, out_tid); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, out_tid, false)); - udpard_tx_free(&tx); - instrumented_allocator_reset(&alloc); + fixture_fini(&fx); } -// Cancels transfers and reports outcome. 
-static void test_tx_cancel(void) +static void test_tx_expiration(void) { - TEST_ASSERT_FALSE(udpard_tx_cancel(NULL, 0, 0)); - - instrumented_allocator_t alloc = { 0 }; - instrumented_allocator_new(&alloc); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc); - } - - udpard_tx_t tx = { 0 }; - feedback_state_t fstate = { 0 }; - const uint16_t iface_bitmap_1 = (1U << 0U); - udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 20U, 1U, 8U, mem, &vt)); - - // Reliable transfer cancels with failure feedback. - TEST_ASSERT_GREATER_THAN_UINT32(0, - udpard_tx_push(&tx, - 0, - 100, - iface_bitmap_1, - udpard_prio_fast, - 200, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx, 200)); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 200, true)); - TEST_ASSERT_NULL(find_transfer_by_id(&tx, 200)); - TEST_ASSERT_EQUAL_size_t(1, fstate.count); - TEST_ASSERT_EQUAL_UINT32(0, fstate.last.acknowledgements); - TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); - TEST_ASSERT_FALSE(udpard_tx_cancel(&tx, 200, true)); - - // Best-effort transfer cancels quietly. - TEST_ASSERT_GREATER_THAN_UINT32( - 0, - udpard_tx_push( - &tx, 0, 100, iface_bitmap_1, udpard_prio_fast, 201, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 201, false)); - TEST_ASSERT_EQUAL_size_t(0, tx.enqueued_frames_count); - - // Collisions cancel all reliable transfers with the same ID. 
- fstate.count = 0; - const uint64_t coll_id = 300; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 100, - iface_bitmap_1, - udpard_prio_fast, - coll_id, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 100, - iface_bitmap_1, - udpard_prio_fast, - coll_id, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_EQUAL_size_t(2, count_transfers_by_id_and_kind(&tx, coll_id, frame_msg_reliable)); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, coll_id, true)); - TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx, coll_id, frame_msg_reliable)); - TEST_ASSERT_EQUAL_size_t(2, fstate.count); - - // Best-effort collisions do not cancel reliable transfers. - fstate.count = 0; - const uint64_t coll_id2 = 301; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 100, - iface_bitmap_1, - udpard_prio_fast, - coll_id2, - make_scattered(NULL, 0), - record_feedback, - make_user_context(&fstate))); - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 100, - iface_bitmap_1, - udpard_prio_fast, - coll_id2, - make_scattered(NULL, 0), - NULL, - UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_reliable)); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_best)); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, coll_id2, false)); - TEST_ASSERT_EQUAL_size_t(1, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_reliable)); - TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_best)); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, coll_id2, true)); - TEST_ASSERT_EQUAL_size_t(0, count_transfers_by_id_and_kind(&tx, coll_id2, frame_msg_reliable)); - - // Cancel misses when ID is not present but tree is non-empty. + // Keep ejection blocked and ensure expired transfers are purged. 
+ tx_fixture_t fx = { 0 }; + fixture_init(&fx, 8U, 128U, false); + const byte_t data[] = { 0xAA }; + const udpard_bytes_scattered_t payload = make_scattered(data, sizeof(data)); TEST_ASSERT_TRUE(udpard_tx_push( - &tx, 0, 100, iface_bitmap_1, udpard_prio_fast, 400, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_FALSE(udpard_tx_cancel(&tx, 399, false)); - TEST_ASSERT_NOT_NULL(find_transfer_by_id(&tx, 400)); - TEST_ASSERT_TRUE(udpard_tx_cancel(&tx, 400, false)); - TEST_ASSERT_NULL(find_transfer_by_id(&tx, 400)); + &fx.tx, 0, 10, (1U << 1U), udpard_prio_high, 5U, udpard_make_subject_endpoint(111U), payload, NULL)); + TEST_ASSERT_EQUAL_UINT16((1U << 1U), udpard_tx_pending_ifaces(&fx.tx)); - udpard_tx_free(&tx); - instrumented_allocator_reset(&alloc); + udpard_tx_poll(&fx.tx, 11, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_UINT16(0U, udpard_tx_pending_ifaces(&fx.tx)); + TEST_ASSERT_EQUAL_UINT64(1U, fx.tx.errors_expiration); + TEST_ASSERT_EQUAL_size_t(0, fx.eject.count); + + fixture_fini(&fx); } -static void test_tx_spool_deduplication(void) +static void test_tx_sacrifice_oldest(void) { - instrumented_allocator_t alloc_a = { 0 }; - instrumented_allocator_t alloc_b = { 0 }; - instrumented_allocator_new(&alloc_a); - instrumented_allocator_new(&alloc_b); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc_a) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc_a); - } - - // Dedup when MTU and allocator match (multi-frame). 
- udpard_tx_t tx = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 99U, - 1U, - 16U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx.mtu[0] = 600; - tx.mtu[1] = 600; - const uint16_t iface_bitmap_12 = (1U << 0U) | (1U << 1U); - byte_t payload_big[1300] = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_12, - udpard_prio_nominal, - 1, - make_scattered(payload_big, sizeof(payload_big)), - NULL, - UDPARD_USER_CONTEXT_NULL)); - tx_transfer_t* tr = latest_transfer(&tx); - TEST_ASSERT_EQUAL_size_t(frames_for(tx.mtu[0], sizeof(payload_big)), tx.enqueued_frames_count); - TEST_ASSERT_EQUAL_PTR(tr->head[0], tr->head[1]); - for (tx_frame_t* f = tr->head[0]; f != NULL; f = f->next) { - TEST_ASSERT_EQUAL_size_t(2, f->refcount); - } - udpard_tx_free(&tx); + // Force queue pressure and verify oldest transfer is sacrificed. + tx_fixture_t fx = { 0 }; + fixture_init(&fx, 1U, 128U, true); + const byte_t data[] = { 0x01, 0x02 }; + const udpard_bytes_scattered_t payload = make_scattered(data, sizeof(data)); + const udpard_udpip_ep_t ep = udpard_make_subject_endpoint(222U); + TEST_ASSERT_TRUE(udpard_tx_push(&fx.tx, 0, 10000, 1U, udpard_prio_nominal, 10U, ep, payload, NULL)); + TEST_ASSERT_TRUE(udpard_tx_push(&fx.tx, 1, 10000, 1U, udpard_prio_nominal, 20U, ep, payload, NULL)); + TEST_ASSERT_EQUAL_UINT64(1U, fx.tx.errors_sacrifice); - // Dedup when payload fits both MTU despite mismatch. 
- TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 99U, - 1U, - 8U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx.mtu[0] = 500; - tx.mtu[1] = 900; - byte_t payload_small[300] = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_12, - udpard_prio_nominal, - 2, - make_scattered(payload_small, sizeof(payload_small)), - NULL, - UDPARD_USER_CONTEXT_NULL)); - tr = latest_transfer(&tx); - TEST_ASSERT_EQUAL_size_t(1, tx.enqueued_frames_count); - TEST_ASSERT_EQUAL_PTR(tr->head[0], tr->head[1]); - TEST_ASSERT_EQUAL_size_t(2, tr->head[0]->refcount); - udpard_tx_free(&tx); + udpard_tx_poll(&fx.tx, 2, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1, fx.eject.count); + TEST_ASSERT_EQUAL_UINT64(20U, fx.eject.items[0].transfer_id); - // No dedup when MTU differs and payload exceeds the smaller MTU. - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 99U, - 1U, - 8U, - mem, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx.mtu[0] = 500; - tx.mtu[1] = 900; - byte_t payload_split[800] = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_12, - udpard_prio_nominal, - 3, - make_scattered(payload_split, sizeof(payload_split)), - NULL, - UDPARD_USER_CONTEXT_NULL)); - tr = latest_transfer(&tx); - TEST_ASSERT_EQUAL_size_t(frames_for(tx.mtu[0], sizeof(payload_split)) + - frames_for(tx.mtu[1], sizeof(payload_split)), - tx.enqueued_frames_count); - TEST_ASSERT_TRUE(tr->head[0] != tr->head[1]); - TEST_ASSERT_EQUAL_size_t(1, tr->head[0]->refcount); - TEST_ASSERT_EQUAL_size_t(1, tr->head[1]->refcount); - udpard_tx_free(&tx); - - // No dedup when allocators differ even with matching MTU and single frame. 
- udpard_tx_mem_resources_t mem_split = { .transfer = instrumented_allocator_make_resource(&alloc_a) }; - mem_split.payload[0] = instrumented_allocator_make_resource(&alloc_a); - mem_split.payload[1] = instrumented_allocator_make_resource(&alloc_b); - mem_split.payload[2] = mem_split.payload[0]; - TEST_ASSERT_TRUE(udpard_tx_new( - &tx, - 99U, - 1U, - 8U, - mem_split, - &(udpard_tx_vtable_t){ .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag })); - tx.mtu[0] = 600; - tx.mtu[1] = 600; - byte_t payload_one[400] = { 0 }; - TEST_ASSERT_TRUE(udpard_tx_push(&tx, - 0, - 1000, - iface_bitmap_12, - udpard_prio_nominal, - 4, - make_scattered(payload_one, sizeof(payload_one)), - NULL, - UDPARD_USER_CONTEXT_NULL)); - tr = latest_transfer(&tx); - TEST_ASSERT_EQUAL_size_t(2, tx.enqueued_frames_count); - TEST_ASSERT_TRUE(tr->head[0] != tr->head[1]); - udpard_tx_free(&tx); - - TEST_ASSERT_EQUAL_size_t(0, alloc_a.allocated_fragments); - TEST_ASSERT_EQUAL_size_t(0, alloc_b.allocated_fragments); + fixture_fini(&fx); } -// Verifies that eject callbacks are ONLY invoked from udpard_tx_poll(), never from push functions. -static void test_tx_eject_only_from_poll(void) +static void test_tx_transfer_id_masking(void) { - instrumented_allocator_t alloc = { 0 }; - instrumented_allocator_new(&alloc); - udpard_tx_mem_resources_t mem = { .transfer = instrumented_allocator_make_resource(&alloc) }; - for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) { - mem.payload[i] = instrumented_allocator_make_resource(&alloc); - } - - udpard_tx_t tx = { 0 }; - eject_state_t eject = { .count = 0, .allow = true }; - udpard_tx_vtable_t vt = { .eject_subject = eject_subject_with_flag, .eject_p2p = eject_p2p_with_flag }; - TEST_ASSERT_TRUE(udpard_tx_new(&tx, 60U, 1U, 16U, mem, &vt)); - tx.user = &eject; - - const uint16_t iface_bitmap_1 = (1U << 0U); - - // Push a subject transfer; eject must NOT be called. - eject.count = 0; + // Verify only low 48 bits of transfer-ID are serialized. 
+ tx_fixture_t fx = { 0 }; + fixture_init(&fx, 8U, 128U, true); + const byte_t data[] = { 0x55 }; + const udpard_bytes_scattered_t payload = make_scattered(data, sizeof(data)); + const uint64_t transfer_id = 0xABCDEF0123456789ULL; TEST_ASSERT_TRUE(udpard_tx_push( - &tx, 0, 1000, iface_bitmap_1, udpard_prio_fast, 100, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_EQUAL_size_t(0, eject.count); // eject NOT called from push - - // Push a P2P transfer; eject must NOT be called. - const udpard_remote_t remote = { .uid = 999, .endpoints = { make_ep(10) } }; - TEST_ASSERT_TRUE(udpard_tx_push_p2p( - &tx, 0, 1000, udpard_prio_fast, remote, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL, NULL)); - TEST_ASSERT_EQUAL_size_t(0, eject.count); // eject NOT called from push_p2p + &fx.tx, 0, 10000, 1U, udpard_prio_nominal, transfer_id, udpard_make_subject_endpoint(333U), payload, NULL)); + udpard_tx_poll(&fx.tx, 1, UDPARD_IFACE_BITMAP_ALL); + TEST_ASSERT_EQUAL_size_t(1, fx.eject.count); + TEST_ASSERT_EQUAL_UINT64(transfer_id & UDPARD_TRANSFER_ID_MASK, fx.eject.items[0].transfer_id); - // Now poll; eject MUST be called. - udpard_tx_poll(&tx, 0, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_GREATER_THAN_size_t(0, eject.count); // eject called from poll - - // Push more transfers while frames are pending; eject still must NOT be called. 
- const size_t eject_count_before = eject.count; - eject.allow = false; // block ejection to keep frames pending - TEST_ASSERT_TRUE(udpard_tx_push( - &tx, 0, 1000, iface_bitmap_1, udpard_prio_nominal, 200, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL)); - TEST_ASSERT_EQUAL_size_t(eject_count_before, eject.count); // eject NOT called from push - - TEST_ASSERT_TRUE(udpard_tx_push_p2p( - &tx, 0, 1000, udpard_prio_nominal, remote, make_scattered(NULL, 0), NULL, UDPARD_USER_CONTEXT_NULL, NULL)); - TEST_ASSERT_EQUAL_size_t(eject_count_before, eject.count); // eject NOT called from push_p2p - - // Poll again; eject called again (but rejected by callback). - udpard_tx_poll(&tx, 0, UDPARD_IFACE_BITMAP_ALL); - TEST_ASSERT_GREATER_THAN_size_t(eject_count_before, eject.count); // eject called from poll - - udpard_tx_free(&tx); - instrumented_allocator_reset(&alloc); + fixture_fini(&fx); } void setUp(void) {} - void tearDown(void) {} int main(void) { UNITY_BEGIN(); - RUN_TEST(test_bytes_scattered_read); - RUN_TEST(test_tx_serialize_header); - RUN_TEST(test_tx_validation_and_free); - RUN_TEST(test_tx_comparators_and_feedback); - RUN_TEST(test_tx_spool_and_queue_errors); - RUN_TEST(test_tx_stage_if); - RUN_TEST(test_tx_stage_if_via_tx_push); - RUN_TEST(test_tx_stage_if_short_deadline); - RUN_TEST(test_tx_push_p2p_success); - RUN_TEST(test_tx_cancel); - RUN_TEST(test_tx_spool_deduplication); - RUN_TEST(test_tx_eject_only_from_poll); - RUN_TEST(test_tx_ack_and_scheduler); + RUN_TEST(test_tx_subject_ejection); + RUN_TEST(test_tx_p2p_endpoints); + RUN_TEST(test_tx_expiration); + RUN_TEST(test_tx_sacrifice_oldest); + RUN_TEST(test_tx_transfer_id_masking); return UNITY_END(); }