diff --git a/components/drivers/audio/audio_pipe.c b/components/drivers/audio/audio_pipe.c
index 35a3faf73f4..a87e504303c 100644
--- a/components/drivers/audio/audio_pipe.c
+++ b/components/drivers/audio/audio_pipe.c
@@ -26,9 +26,9 @@ static void _rt_pipe_resume_writer(struct rt_audio_pipe *pipe)
                                tlist);

         /* resume the write thread */
-        rt_thread_resume(thread);
+        if(rt_thread_resume(thread) == RT_EOK)
+            rt_schedule();

-        rt_schedule();
     }
 }
@@ -108,9 +108,8 @@ static void _rt_pipe_resume_reader(struct rt_audio_pipe *pipe)
                                tlist);

         /* resume the read thread */
-        rt_thread_resume(thread);
-
-        rt_schedule();
+        if(rt_thread_resume(thread) == RT_EOK)
+            rt_schedule();
     }
 }
diff --git a/components/drivers/ipc/completion.c b/components/drivers/ipc/completion.c
index 84dfb740b15..ca8b434610c 100644
--- a/components/drivers/ipc/completion.c
+++ b/components/drivers/ipc/completion.c
@@ -124,6 +124,7 @@ RTM_EXPORT(rt_completion_wait);
 void rt_completion_done(struct rt_completion *completion)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;

     RT_ASSERT(completion != RT_NULL);

     if (completion->flag == RT_COMPLETED)
@@ -143,11 +144,14 @@ void rt_completion_done(struct rt_completion *completion)
                                tlist);

         /* resume it */
-        rt_thread_resume(thread);
+        if(rt_thread_resume(thread) == RT_EOK)
+            need_schedule = RT_TRUE;
+
         rt_hw_interrupt_enable(level);

         /* perform a schedule */
-        rt_schedule();
+        if (need_schedule == RT_TRUE)
+            rt_schedule();
     }
     else
     {
diff --git a/components/drivers/ipc/dataqueue.c b/components/drivers/ipc/dataqueue.c
index f2befca64b3..cddde87776e 100644
--- a/components/drivers/ipc/dataqueue.c
+++ b/components/drivers/ipc/dataqueue.c
@@ -92,6 +92,7 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
                             rt_int32_t timeout)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     rt_thread_t thread;
     rt_err_t result;

@@ -165,11 +166,14 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
                                tlist);

         /* resume it */
-        rt_thread_resume(thread);
+        if(rt_thread_resume(thread) == RT_EOK)
+            need_schedule = RT_TRUE;
+
         rt_hw_interrupt_enable(level);

         /* perform a schedule */
-        rt_schedule();
+        if (need_schedule == RT_TRUE)
+            rt_schedule();

         return result;
     }
@@ -209,6 +213,7 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
                            rt_int32_t timeout)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     rt_thread_t thread;
     rt_err_t result;

@@ -286,11 +291,14 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
                                tlist);

         /* resume it */
-        rt_thread_resume(thread);
+        if(rt_thread_resume(thread) == RT_EOK)
+            need_schedule = RT_TRUE;
+
         rt_hw_interrupt_enable(level);

         /* perform a schedule */
-        rt_schedule();
+        if (need_schedule == RT_TRUE)
+            rt_schedule();
     }
     else
     {
@@ -362,6 +370,7 @@ RTM_EXPORT(rt_data_queue_peek);
 void rt_data_queue_reset(struct rt_data_queue *queue)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     struct rt_thread *thread;

     RT_ASSERT(queue != RT_NULL);
@@ -397,7 +406,8 @@ void rt_data_queue_reset(struct rt_data_queue *queue)
          * In rt_thread_resume function, it will remove current thread from
          * suspend list
          */
-        rt_thread_resume(thread);
+        if(rt_thread_resume(thread) == RT_EOK)
+            need_schedule = RT_TRUE;

         /* enable interrupt */
         rt_hw_interrupt_enable(level);
@@ -421,14 +431,16 @@ void rt_data_queue_reset(struct rt_data_queue *queue)
          * In rt_thread_resume function, it will remove current thread from
          * suspend list
          */
-        rt_thread_resume(thread);
+        if(rt_thread_resume(thread) == RT_EOK)
+            need_schedule = RT_TRUE;

         /* enable interrupt */
         rt_hw_interrupt_enable(level);
     }

     rt_exit_critical();

-    rt_schedule();
+    if (need_schedule == RT_TRUE)
+        rt_schedule();
 }
 RTM_EXPORT(rt_data_queue_reset);
diff --git a/components/drivers/ipc/waitqueue.c b/components/drivers/ipc/waitqueue.c
index 7bf40d3add5..9895b383510 100644
--- a/components/drivers/ipc/waitqueue.c
+++ b/components/drivers/ipc/waitqueue.c
@@ -75,7 +75,7 @@ int __wqueue_default_wake(struct rt_wqueue_node *wait, void *key)
 void rt_wqueue_wakeup(rt_wqueue_t *queue, void *key)
 {
     rt_base_t level;
-    int need_schedule = 0;
+    rt_bool_t need_schedule = RT_FALSE;

     rt_list_t *queue_list;
     struct rt_list_node *node;
@@ -94,8 +94,8 @@ void rt_wqueue_wakeup(rt_wqueue_t *queue, void *key)
         entry = rt_list_entry(node, struct rt_wqueue_node, list);
         if (entry->wakeup(entry, key) == 0)
         {
-            rt_thread_resume(entry->polling_thread);
-            need_schedule = 1;
+            if(rt_thread_resume(entry->polling_thread) == RT_EOK)
+                need_schedule = RT_TRUE;

             rt_wqueue_remove(entry);
             break;
@@ -104,7 +104,7 @@ void rt_wqueue_wakeup(rt_wqueue_t *queue, void *key)
     }
     rt_hw_interrupt_enable(level);

-    if (need_schedule)
+    if (need_schedule == RT_TRUE)
         rt_schedule();
 }
diff --git a/components/drivers/ipc/workqueue.c b/components/drivers/ipc/workqueue.c
index 58cb0ddf569..0c088a07a50 100644
--- a/components/drivers/ipc/workqueue.c
+++ b/components/drivers/ipc/workqueue.c
@@ -93,6 +93,7 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
                                        struct rt_work *work, rt_tick_t ticks)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     rt_err_t err;

     level = rt_hw_interrupt_disable();
@@ -119,9 +120,14 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
         ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
     {
         /* resume work thread */
-        rt_thread_resume(queue->work_thread);
+        if(rt_thread_resume(queue->work_thread) == RT_EOK)
+            need_schedule = RT_TRUE;
+
         rt_hw_interrupt_enable(level);
-        rt_schedule();
+
+        /* perform a schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();
     }
     else
     {
@@ -180,6 +186,7 @@ static void _delayed_work_timeout_handler(void *parameter)
     struct rt_work *work;
     struct rt_workqueue *queue;
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;

     work = (struct rt_work *)parameter;
     queue = work->workqueue;
@@ -201,9 +208,14 @@ static void _delayed_work_timeout_handler(void *parameter)
         ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
     {
         /* resume work thread */
-        rt_thread_resume(queue->work_thread);
+        if(rt_thread_resume(queue->work_thread) == RT_EOK)
+            need_schedule = RT_TRUE;
+
         rt_hw_interrupt_enable(level);
-        rt_schedule();
+
+        /* perform a schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();
     }
     else
     {
@@ -346,6 +358,7 @@ rt_err_t rt_workqueue_submit_work(struct rt_workqueue *queue, struct rt_work *wo
 rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *work)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;

     RT_ASSERT(queue != RT_NULL);
     RT_ASSERT(work != RT_NULL);
@@ -359,9 +372,14 @@ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *wo
         ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
     {
         /* resume work thread */
-        rt_thread_resume(queue->work_thread);
+        if(rt_thread_resume(queue->work_thread) == RT_EOK)
+            need_schedule = RT_TRUE;
+
         rt_hw_interrupt_enable(level);
-        rt_schedule();
+
+        /* perform a schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();
     }
     else
     {
diff --git a/components/vbus/prio_queue.c b/components/vbus/prio_queue.c
index dc915ae7d8a..96d1b2051b0 100644
--- a/components/vbus/prio_queue.c
+++ b/components/vbus/prio_queue.c
@@ -137,6 +137,7 @@ rt_err_t rt_prio_queue_push(struct rt_prio_queue *que,
                             rt_int32_t timeout)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     struct rt_prio_queue_item *item;

     RT_ASSERT(que);
@@ -164,11 +165,14 @@ rt_err_t rt_prio_queue_push(struct rt_prio_queue *que,
                                struct rt_thread,
                                tlist);

         /* resume it */
-        rt_thread_resume(thread);
+        if(rt_thread_resume(thread) == RT_EOK)
+            need_schedule = RT_TRUE;
+
         rt_hw_interrupt_enable(level);

         /* perform a schedule */
-        rt_schedule();
+        if (need_schedule == RT_TRUE)
+            rt_schedule();

         return RT_EOK;
     }
diff --git a/components/vbus/vbus.c b/components/vbus/vbus.c
index 665f56b9a46..c5f7f84b277 100644
--- a/components/vbus/vbus.c
+++ b/components/vbus/vbus.c
@@ -288,8 +288,8 @@ static void _bus_out_entry(void *param)
 void rt_vbus_resume_out_thread(void)
 {
-    rt_thread_resume(&_bus_out_thread);
-    rt_schedule();
+    if(rt_thread_resume(&_bus_out_thread) == RT_EOK)
+        rt_schedule();
 }

 rt_err_t rt_vbus_post(rt_uint8_t id,
diff --git a/components/vbus/watermark_queue.h b/components/vbus/watermark_queue.h
index c74e54fee32..1cccc3afcb7 100644
--- a/components/vbus/watermark_queue.h
+++ b/components/vbus/watermark_queue.h
@@ -100,7 +100,8 @@ rt_inline rt_err_t rt_wm_que_inc(struct rt_watermark_queue *wg,
  */
 rt_inline void rt_wm_que_dec(struct rt_watermark_queue *wg)
 {
-    int need_sched = 0;
+    rt_bool_t need_schedule = RT_FALSE;
+
     rt_base_t level;

     if (wg->level == 0)
@@ -119,12 +120,14 @@ rt_inline void rt_wm_que_dec(struct rt_watermark_queue *wg)
             thread = rt_list_entry(wg->suspended_threads.next,
                                    struct rt_thread,
                                    tlist);
-            rt_thread_resume(thread);
-            need_sched = 1;
+            /* resume it */
+            if(rt_thread_resume(thread) == RT_EOK)
+                need_schedule = RT_TRUE;
         }
     }
     rt_hw_interrupt_enable(level);

-    if (need_sched)
+    /* perform a schedule */
+    if (need_schedule == RT_TRUE)
         rt_schedule();
 }
diff --git a/src/ipc.c b/src/ipc.c
index 4a6244ac3b4..f976e9045d7 100644
--- a/src/ipc.c
+++ b/src/ipc.c
@@ -181,8 +181,9 @@ rt_inline rt_err_t _ipc_list_suspend(rt_list_t *list,
  *
  * @param list is a pointer to a suspended thread list of the IPC object.
  *
- * @return Return the operation status. When the return value is RT_EOK, the function is successfully executed.
- *         When the return value is any other values, it means this operation failed.
+ * @return Return the operation status.
+ *         When the return value is RT_EOK, the resumed thread has a higher priority than the current thread, so a schedule is needed immediately.
+ *         When the return value is RT_EBUSY, the thread has been readied, but its priority is not higher than the current thread's.
  *
  * @warning This function is generally called by the following functions:
  *          rt_sem_release(), rt_mutex_release(), rt_mb_send_wait(), rt_mq_send_wait(),
@@ -197,10 +198,11 @@ rt_inline rt_err_t _ipc_list_resume(rt_list_t *list)

     RT_DEBUG_LOG(RT_DEBUG_IPC, ("resume thread:%s\n", thread->name));

-    /* resume it */
-    rt_thread_resume(thread);
+    /* resume it and schedule if necessary */
+    if(rt_thread_resume(thread) == RT_EOK)
+        return RT_EOK;

-    return RT_EOK;
+    return RT_EBUSY;
 }

@@ -627,8 +629,8 @@ rt_err_t rt_sem_release(rt_sem_t sem)
     if (!rt_list_isempty(&sem->parent.suspend_thread))
     {
         /* resume the suspended thread */
-        _ipc_list_resume(&(sem->parent.suspend_thread));
-        need_schedule = RT_TRUE;
+        if(_ipc_list_resume(&(sem->parent.suspend_thread)) != RT_EBUSY)
+            need_schedule = RT_TRUE;
     }
     else
     {
@@ -1164,9 +1166,9 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)
         }

         /* resume thread */
-        _ipc_list_resume(&(mutex->parent.suspend_thread));
+        if(_ipc_list_resume(&(mutex->parent.suspend_thread)) != RT_EBUSY)
+            need_schedule = RT_TRUE;

-        need_schedule = RT_TRUE;
     }
     else
     {
@@ -1510,10 +1512,9 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
                     event->set &= ~thread->event_set;

                 /* resume thread, and thread list breaks out */
-                rt_thread_resume(thread);
-
-                /* need do a scheduling */
-                need_schedule = RT_TRUE;
+                if(rt_thread_resume(thread) == RT_EOK)
+                    /* need do a scheduling */
+                    need_schedule = RT_TRUE;
             }
         }
     }
@@ -1994,6 +1995,7 @@ rt_err_t rt_mb_send_wait(rt_mailbox_t mb,
 {
     struct rt_thread *thread;
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     rt_uint32_t tick_delta;

     /* parameter check */
@@ -2103,12 +2105,14 @@ rt_err_t rt_mb_send_wait(rt_mailbox_t mb,
     /* resume suspended thread */
     if (!rt_list_isempty(&mb->parent.suspend_thread))
     {
-        _ipc_list_resume(&(mb->parent.suspend_thread));
-
+        if(_ipc_list_resume(&(mb->parent.suspend_thread)) != RT_EBUSY)
+            need_schedule = RT_TRUE;
         /* enable interrupt */
         rt_hw_interrupt_enable(level);

-        rt_schedule();
+        /* resume a thread, re-schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();

         return RT_EOK;
     }
@@ -2164,6 +2168,7 @@ RTM_EXPORT(rt_mb_send);
 rt_err_t rt_mb_urgent(rt_mailbox_t mb, rt_ubase_t value)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;

     /* parameter check */
     RT_ASSERT(mb != RT_NULL);
@@ -2199,12 +2204,15 @@ rt_err_t rt_mb_urgent(rt_mailbox_t mb, rt_ubase_t value)
     /* resume suspended thread */
     if (!rt_list_isempty(&mb->parent.suspend_thread))
     {
-        _ipc_list_resume(&(mb->parent.suspend_thread));
+        if(_ipc_list_resume(&(mb->parent.suspend_thread)) != RT_EBUSY)
+            need_schedule = RT_TRUE;

         /* enable interrupt */
         rt_hw_interrupt_enable(level);

-        rt_schedule();
+        /* resume a thread, re-schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();

         return RT_EOK;
     }
@@ -2246,6 +2254,7 @@ rt_err_t rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout)
 {
     struct rt_thread *thread;
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     rt_uint32_t tick_delta;

     /* parameter check */
@@ -2354,14 +2363,17 @@ rt_err_t rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout)
     /* resume suspended thread */
     if (!rt_list_isempty(&(mb->suspend_sender_thread)))
     {
-        _ipc_list_resume(&(mb->suspend_sender_thread));
+        if(_ipc_list_resume(&(mb->suspend_sender_thread)) != RT_EBUSY)
+            need_schedule = RT_TRUE;

         /* enable interrupt */
         rt_hw_interrupt_enable(level);

         RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mb->parent.parent)));

-        rt_schedule();
+        /* resume a thread, re-schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();

         return RT_EOK;
     }
@@ -2752,6 +2764,7 @@ rt_err_t rt_mq_send_wait(rt_mq_t mq,
                          rt_int32_t timeout)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     struct rt_mq_message *msg;
     rt_uint32_t tick_delta;
     struct rt_thread *thread;
@@ -2892,12 +2905,15 @@ rt_err_t rt_mq_send_wait(rt_mq_t mq,
     /* resume suspended thread */
     if (!rt_list_isempty(&mq->parent.suspend_thread))
     {
-        _ipc_list_resume(&(mq->parent.suspend_thread));
+        if(_ipc_list_resume(&(mq->parent.suspend_thread)) != RT_EBUSY)
+            need_schedule = RT_TRUE;

         /* enable interrupt */
         rt_hw_interrupt_enable(level);

-        rt_schedule();
+        /* resume a thread, re-schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();

         return RT_EOK;
     }
@@ -2960,6 +2976,7 @@ RTM_EXPORT(rt_mq_send);
 rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     struct rt_mq_message *msg;

     /* parameter check */
@@ -3021,12 +3038,15 @@ rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size)
     /* resume suspended thread */
     if (!rt_list_isempty(&mq->parent.suspend_thread))
     {
-        _ipc_list_resume(&(mq->parent.suspend_thread));
+        if(_ipc_list_resume(&(mq->parent.suspend_thread)) != RT_EBUSY)
+            need_schedule = RT_TRUE;

         /* enable interrupt */
         rt_hw_interrupt_enable(level);

-        rt_schedule();
+        /* resume a thread, re-schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();

         return RT_EOK;
     }
@@ -3072,6 +3092,7 @@ rt_err_t rt_mq_recv(rt_mq_t mq,
 {
     struct rt_thread *thread;
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;
     struct rt_mq_message *msg;
     rt_uint32_t tick_delta;

@@ -3195,14 +3216,17 @@ rt_err_t rt_mq_recv(rt_mq_t mq,
     /* resume suspended thread */
     if (!rt_list_isempty(&(mq->suspend_sender_thread)))
     {
-        _ipc_list_resume(&(mq->suspend_sender_thread));
+        if(_ipc_list_resume(&(mq->suspend_sender_thread)) != RT_EBUSY)
+            need_schedule = RT_TRUE;

         /* enable interrupt */
         rt_hw_interrupt_enable(level);

         RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mq->parent.parent)));

-        rt_schedule();
+        /* resume a thread, re-schedule */
+        if (need_schedule == RT_TRUE)
+            rt_schedule();

         return RT_EOK;
     }
diff --git a/src/mempool.c b/src/mempool.c
index 197c8af488c..03790e9e066 100644
--- a/src/mempool.c
+++ b/src/mempool.c
@@ -418,6 +418,7 @@ void rt_mp_free(void *block)
     struct rt_mempool *mp;
     struct rt_thread *thread;
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;

     /* parameter check */
     if (block == RT_NULL) return;
@@ -449,13 +450,14 @@ void rt_mp_free(void *block)
         thread->error = RT_EOK;

         /* resume thread */
-        rt_thread_resume(thread);
-
+        if( rt_thread_resume(thread) == RT_EOK)
+            need_schedule = RT_TRUE;
         /* enable interrupt */
         rt_hw_interrupt_enable(level);

         /* do a schedule */
-        rt_schedule();
+        if(need_schedule == RT_TRUE)
+            rt_schedule();

         return;
     }
diff --git a/src/signal.c b/src/signal.c
index 60996a6e08d..2283e2089d4 100644
--- a/src/signal.c
+++ b/src/signal.c
@@ -92,6 +92,7 @@ static void _signal_entry(void *parameter)
 static void _signal_deliver(rt_thread_t tid)
 {
     rt_base_t level;
+    rt_bool_t need_schedule = RT_FALSE;

     level = rt_hw_interrupt_disable();

@@ -105,14 +106,16 @@ static void _signal_deliver(rt_thread_t tid)
     if ((tid->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND)
     {
         /* resume thread to handle signal */
-        rt_thread_resume(tid);
+        if(rt_thread_resume(tid) == RT_EOK)
+            need_schedule = RT_TRUE;

         /* add signal state */
         tid->stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);

         rt_hw_interrupt_enable(level);

         /* re-schedule */
-        rt_schedule();
+        if (need_schedule == RT_TRUE)
+            rt_schedule();
     }
     else
     {
diff --git a/src/thread.c b/src/thread.c
index eea42dab8e2..82b4e0ab764 100755
--- a/src/thread.c
+++ b/src/thread.c
@@ -875,13 +875,18 @@ RTM_EXPORT(rt_thread_suspend);
  *
  * @param thread is the thread to be resumed.
  *
- * @return Return the operation status. If the return value is RT_EOK, the function is successfully executed.
- *         If the return value is any other values, it means this operation failed.
+ * @return Return the operation status.
+ *         When the return value is RT_EOK, the resumed thread has a higher priority than the current thread, so a schedule is needed immediately.
+ *         When the return value is RT_EBUSY, the thread has been readied, but its priority is not higher than the current thread's.
+ *         If the return value is any other value, it means this operation failed.
  */
 rt_err_t rt_thread_resume(rt_thread_t thread)
 {
     rt_base_t level;
-
+    rt_err_t result = RT_EOK;
+#ifndef RT_USING_SMP
+    extern rt_uint8_t rt_current_priority;
+#endif
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
@@ -907,11 +912,23 @@ rt_err_t rt_thread_resume(rt_thread_t thread)

     /* insert to schedule ready list */
     rt_schedule_insert_thread(thread);
+#ifndef RT_USING_SMP
+    /* compare the priority with rt_current_priority */
+    if(thread->current_priority < rt_current_priority)
+    {
+        result = RT_EOK;
+    }
+    else
+    {
+        result = RT_EBUSY;
+    }
+#endif
+
     /* enable interrupt */
     rt_hw_interrupt_enable(level);

     RT_OBJECT_HOOK_CALL(rt_thread_resume_hook, (thread));

-    return RT_EOK;
+    return result;
 }
 RTM_EXPORT(rt_thread_resume);
diff --git a/src/timer.c b/src/timer.c
index 0034a53c711..772637702b6 100644
--- a/src/timer.c
+++ b/src/timer.c
@@ -505,8 +505,8 @@ rt_err_t rt_timer_start(rt_timer_t timer)
             ((_timer_thread.stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
         {
             /* resume timer thread to check soft timer */
-            rt_thread_resume(&_timer_thread);
-            need_schedule = RT_TRUE;
+            if(rt_thread_resume(&_timer_thread) == RT_EOK)
+                need_schedule = RT_TRUE;
         }
     }
 #endif /* RT_USING_TIMER_SOFT */
@@ -514,7 +514,7 @@ rt_err_t rt_timer_start(rt_timer_t timer)
     /* enable interrupt */
     rt_hw_interrupt_enable(level);

-    if (need_schedule)
+    if (need_schedule == RT_TRUE)
     {
         rt_schedule();
     }
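
For illustration only (not part of the patch), the following minimal sketch shows the wake-up pattern this series converges on, using a hypothetical my_obj container: resume the suspended thread first, then call rt_schedule() only when rt_thread_resume() returns RT_EOK, i.e. when the readied thread has a higher priority than the running one.

#include <rtthread.h>

/* Hypothetical object with its own suspend list; only the resume/schedule
 * sequence mirrors what the patch applies to the real IPC objects. */
struct my_obj
{
    rt_list_t suspend_list;
};

static void my_obj_wakeup_one(struct my_obj *obj)
{
    rt_bool_t need_schedule = RT_FALSE;
    rt_base_t level;
    struct rt_thread *thread;

    level = rt_hw_interrupt_disable();

    if (!rt_list_isempty(&obj->suspend_list))
    {
        /* take the first thread suspended on this object */
        thread = rt_list_entry(obj->suspend_list.next, struct rt_thread, tlist);

        /* RT_EOK now means the readied thread outranks the running one,
         * so a context switch is actually worthwhile */
        if (rt_thread_resume(thread) == RT_EOK)
            need_schedule = RT_TRUE;
    }

    rt_hw_interrupt_enable(level);

    /* reschedule only when the woken thread should preempt the caller */
    if (need_schedule == RT_TRUE)
        rt_schedule();
}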