From 1440f488015f9702ff81013aa9a82a5e69824392 Mon Sep 17 00:00:00 2001
From: Mathieu Carbou
Date: Wed, 17 Sep 2025 15:26:23 +0200
Subject: [PATCH] Switch from rand() to an XOR Shift to favor speed over random quality

---
 src/AsyncTCP.cpp | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp
index 09e1050..2b05594 100644
--- a/src/AsyncTCP.cpp
+++ b/src/AsyncTCP.cpp
@@ -190,6 +190,15 @@ class queue_mutex_guard {
 static SimpleIntrusiveList<lwip_tcp_event_packet_t> _async_queue;
 static TaskHandle_t _async_service_task_handle = NULL;
 
+static uint32_t _xor_shift_state = 31;  // any nonzero seed will do
+static uint32_t _xor_shift_next() {
+  uint32_t x = _xor_shift_state;
+  x ^= x << 13;
+  x ^= x >> 17;
+  x ^= x << 5;
+  return _xor_shift_state = x;
+}
+
 static void _free_event(lwip_tcp_event_packet_t *evpkt) {
   if ((evpkt->event == LWIP_TCP_RECV) && (evpkt->recv.pb != nullptr)) {
     pbuf_free(evpkt->recv.pb);
@@ -246,7 +255,7 @@ static inline lwip_tcp_event_packet_t *_get_async_event() {
     Let's discard poll events processing using linear-increasing probability curve when queue size grows over 3/4
     Poll events are periodic and connection could get another chance next time
   */
-  if (_async_queue.size() > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 4 + CONFIG_ASYNC_TCP_QUEUE_SIZE * 3 / 4)) {
+  if (_async_queue.size() > (_xor_shift_next() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 4 + CONFIG_ASYNC_TCP_QUEUE_SIZE * 3 / 4)) {
     _free_event(e);
     async_tcp_log_d("discarding poll due to queue congestion");
     continue;
@@ -422,7 +431,7 @@ int8_t AsyncTCP_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) {
   {
     queue_mutex_guard guard;
     // async_tcp_log_d("qs:%u", _async_queue.size());
-    if (_async_queue.size() > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4)) {
+    if (_async_queue.size() > (_xor_shift_next() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4)) {
       async_tcp_log_d("throttling");
       return ERR_OK;
     }
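
Note (commentary, not part of the patch): the generator added above is Marsaglia's
xorshift32 with the 13/17/5 shift triple, which cycles through all 2^32 - 1 nonzero
32-bit states using only three shifts and three XORs, with no libc call or locking
inside the generator itself. Statistical quality is irrelevant here; the value only
decides whether to shed a periodic poll event, which is why the commit trades quality
for speed. In _get_async_event() the random threshold is uniform over [3Q/4, Q), so
the discard probability ramps linearly from 0 at 3Q/4 to ~1 at Q; the tcp_poll hunk
uses the same idea with a wider band, a threshold uniform over [Q/4, 3Q/4), so
throttling starts earlier and saturates at 3Q/4. The standalone sketch below
reproduces the generator and empirically checks the first discard curve; kQueueSize
is an assumed stand-in for CONFIG_ASYNC_TCP_QUEUE_SIZE, taken as 64 for illustration.

#include <cstdint>
#include <cstdio>

static uint32_t state = 31;  // any nonzero seed avoids the all-zero fixed point

static uint32_t xor_shift_next() {
  uint32_t x = state;
  x ^= x << 13;  // Marsaglia's 13/17/5 xorshift32 triple
  x ^= x >> 17;
  x ^= x << 5;
  return state = x;
}

int main() {
  constexpr uint32_t kQueueSize = 64;  // assumed stand-in for CONFIG_ASYNC_TCP_QUEUE_SIZE
  // The patch discards a poll event when:
  //   queue_size > next() % Q / 4 + Q * 3 / 4
  // (next() % Q) / 4 is uniform over [0, Q/4), so the threshold is uniform
  // over [3Q/4, Q). With Q = 64, each queue slot above 48 adds 1/16 to the
  // discard probability, reaching ~1 when the queue is full.
  for (uint32_t qs = kQueueSize * 3 / 4; qs <= kQueueSize; ++qs) {
    uint32_t discarded = 0;
    const uint32_t trials = 100000;
    for (uint32_t i = 0; i < trials; ++i) {
      if (qs > xor_shift_next() % kQueueSize / 4 + kQueueSize * 3 / 4) {
        ++discarded;
      }
    }
    printf("queue=%2u  P(discard) ~ %.2f\n", (unsigned)qs, (double)discarded / trials);
  }
  return 0;
}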