From ef9d8f47e2118325c40c5e8e8f7f602bf2137828 Mon Sep 17 00:00:00 2001 From: Srikanth Patchava Date: Tue, 5 May 2026 21:51:39 -0700 Subject: [PATCH 1/2] fix(ipc): guard LOG_D against null thread pointer in rt_susp_list_dequeue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When rt_susp_list_dequeue() returns RT_NULL (empty suspended list), the LOG_D call dereferenced thread->parent.name without a null check, causing a crash in debug builds. Per @wdfk-prog review feedback, push the null check into the LOG_D argument using a ternary so the entire expression — including the null check — is compiled out when LOG_D is disabled. This avoids an unnecessary runtime branch in release builds. Also fix a typo in the change-log header: 'redesigen' -> 'redesign'. Signed-off-by: Srikanth Patchava --- src/ipc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ipc.c b/src/ipc.c index 7c52956b1c2..bbfa66c666b 100644 --- a/src/ipc.c +++ b/src/ipc.c @@ -44,7 +44,7 @@ * 2022-04-08 Stanley Correct descriptions * 2022-10-15 Bernard add nested mutex feature * 2022-10-16 Bernard add prioceiling feature in mutex - * 2023-04-16 Xin-zheqi redesigen queue recv and send function return real message size + * 2023-04-16 Xin-zheqi redesign queue recv and send function return real message size * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable */ @@ -140,7 +140,7 @@ struct rt_thread *rt_susp_list_dequeue(rt_list_t *susp_list, rt_err_t thread_err } rt_sched_unlock(slvl); - LOG_D("resume thread:%s\n", thread->parent.name); + LOG_D("resume thread:%s\n", (thread == RT_NULL) ? "NULL" : thread->parent.name); return thread; } From 626ca31da3de59676103f7a12285dcf26da85094 Mon Sep 17 00:00:00 2001 From: Srikanth Patchava Date: Tue, 5 May 2026 21:51:39 -0700 Subject: [PATCH 2/2] fix(ipc): check overflow before modifying IPC state in send functions _rt_mb_send_wait, _rt_mq_send_wait, and rt_mq_urgent modified mailbox and message queue data structures before checking overflow conditions. On overflow, they returned errors without rolling back changes, causing state corruption. Moved overflow checks before state modifications so the data structures stay consistent on the error path. Signed-off-by: Srikanth Patchava --- src/ipc.c | 61 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/src/ipc.c b/src/ipc.c index bbfa66c666b..2db1271f149 100644 --- a/src/ipc.c +++ b/src/ipc.c @@ -2661,6 +2661,12 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb, } } + if(mb->entry >= RT_MB_ENTRY_MAX) + { + rt_spin_unlock_irqrestore(&(mb->spinlock), level); + return -RT_EFULL; /* value overflowed */ + } + /* set ptr */ mb->msg_pool[mb->in_offset] = value; /* increase input offset */ mb->in_offset ++; if (mb->in_offset >= mb->size) mb->in_offset = 0; - if(mb->entry < RT_MB_ENTRY_MAX) - { - /* increase message entry */ - mb->entry ++; - } - else - { - rt_spin_unlock_irqrestore(&(mb->spinlock), level); - return -RT_EFULL; /* value overflowed */ - } + /* increase message entry */ + mb->entry ++; /* resume suspended thread */ if (!rt_list_isempty(&mb->parent.suspend_thread)) @@ -3503,6 +3501,16 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq, /* disable interrupt */ level = rt_spin_lock_irqsave(&(mq->spinlock)); + + if(mq->entry >= RT_MQ_ENTRY_MAX) + { + /* return message to free list */ + msg->next = (struct rt_mq_message *)mq->msg_queue_free; + mq->msg_queue_free = msg; + rt_spin_unlock_irqrestore(&(mq->spinlock), level); + return -RT_EFULL; /* value overflowed */ + } + #ifdef RT_USING_MESSAGEQUEUE_PRIORITY msg->prio = prio; if (mq->msg_queue_head == RT_NULL) @@ -3544,16 +3552,8 @@ mq->msg_queue_head = msg; #endif - if(mq->entry < RT_MQ_ENTRY_MAX) - { - /* increase message entry */ - mq->entry ++; - } - else - { - rt_spin_unlock_irqrestore(&(mq->spinlock), level); - return -RT_EFULL; /* value overflowed */ - } + /* increase message entry */ + mq->entry ++; /* resume suspended thread */ if (!rt_list_isempty(&mq->parent.suspend_thread)) @@ -3694,6 +3694,15 @@ rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size) level = rt_spin_lock_irqsave(&(mq->spinlock)); + if(mq->entry >= RT_MQ_ENTRY_MAX) + { + /* return message to free list */ + msg->next = (struct rt_mq_message *)mq->msg_queue_free; + mq->msg_queue_free = msg; + rt_spin_unlock_irqrestore(&(mq->spinlock), level); + return -RT_EFULL; /* value overflowed */ + } + /* link msg to the beginning of message queue */ msg->next = (struct rt_mq_message *)mq->msg_queue_head; mq->msg_queue_head = msg; @@ -3702,16 +3711,8 @@ if (mq->msg_queue_tail == RT_NULL) mq->msg_queue_tail = msg; - if(mq->entry < RT_MQ_ENTRY_MAX) - { - /* increase message entry */ - mq->entry ++; - } - else - { - rt_spin_unlock_irqrestore(&(mq->spinlock), level); - return -RT_EFULL; /* value overflowed */ - } + /* increase message entry */ + mq->entry ++; /* resume suspended thread */ if (!rt_list_isempty(&mq->parent.suspend_thread))