@@ -414,6 +414,8 @@ void sched_wakeup_task(tcb_t *task)
  */
 uint16_t sched_select_next_task(void)
 {
+    uint32_t flag = 0;
+
     if (unlikely(!get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
@@ -482,6 +484,8 @@ void dispatcher(void)
 /* Top-level context-switch for preemptive scheduling. */
 void dispatch(void)
 {
+    uint32_t flag = 0;
+
     if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
@@ -515,6 +519,8 @@ void dispatch(void)
 /* Cooperative context switch */
 void yield(void)
 {
+    uint32_t flag = 0;
+
     if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return;
 
@@ -530,6 +536,8 @@ void yield(void)
 #endif
 
     /* In cooperative mode, delays are only processed on an explicit yield. */
+    spin_lock_irqsave(&kcb->kcb_lock, &flag);
+
     if (!kcb->preemptive)
         list_foreach(kcb->tasks, delay_update, NULL);
 
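These hunks all follow one caller-side pattern: declare a saved-interrupt-state word (`flag`) at the top of the function, then bracket each access to shared kcb state with spin_lock_irqsave()/spin_unlock_irqrestore(). Below is a minimal sketch of that shape. Only the two lock-function names come from this diff; the lock type, the stand-in lock bodies, and everything else are assumptions for illustration (a real port would disable interrupts, save the prior state into *flags, and spin on a hardware atomic).

/* Sketch only: stand-in irqsave spinlock helpers, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    volatile uint32_t locked;
} spinlock_t;

static void spin_lock_irqsave(spinlock_t *lock, uint32_t *flags)
{
    *flags = 0;       /* hypothetical: would hold the saved interrupt state */
    lock->locked = 1; /* hypothetical: would be an atomic test-and-set loop */
}

static void spin_unlock_irqrestore(spinlock_t *lock, uint32_t flags)
{
    lock->locked = 0;
    (void) flags;     /* hypothetical: would restore the interrupt state */
}

static spinlock_t kcb_lock;
static uint32_t shared_counter; /* stand-in for a kcb field */

/* The caller-side shape the diff adopts: flag declared up front,
 * the lock held only around the shared-state access. */
static void touch_shared(void)
{
    uint32_t flag = 0;

    spin_lock_irqsave(&kcb_lock, &flag);
    shared_counter++;
    spin_unlock_irqrestore(&kcb_lock, flag);
}

int main(void)
{
    touch_shared();
    printf("counter = %u\n", (unsigned) shared_counter);
    return 0;
}

The irqsave variant is the natural choice here because it records whether interrupts were already disabled and restores exactly that state, so the same helpers are safe whether the caller runs with interrupts on or off.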
@@ -824,10 +832,13 @@ uint16_t mo_task_id(void)
 
 int32_t mo_task_idref(void *task_entry)
 {
-    if (!task_entry || !kcb->tasks)
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
+
+    if (!task_entry || !kcb->tasks) {
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
+    }
 
-    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
     list_node_t *node = list_foreach(kcb->tasks, refcmp, task_entry);
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
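The mo_task_idref() change moves lock acquisition ahead of argument validation, so the kcb->tasks check itself happens under the lock rather than racing with concurrent task creation or teardown; the cost is that the early-return path must now release the lock explicitly. A compressed sketch of that unlock-on-every-exit shape, where everything except the spin_lock_irqsave()/spin_unlock_irqrestore() names is a hypothetical stand-in:

/* Sketch only: stand-in types and state for illustration. */
#include <stdint.h>

#define ERR_TASK_NOT_FOUND (-1)

typedef struct { volatile uint32_t locked; } spinlock_t;

static void spin_lock_irqsave(spinlock_t *l, uint32_t *f) { *f = 0; l->locked = 1; }
static void spin_unlock_irqrestore(spinlock_t *l, uint32_t f) { l->locked = 0; (void) f; }

static spinlock_t kcb_lock;
static uint32_t task_flags;
static void *task_list; /* hypothetical stand-in for kcb->tasks */

int32_t idref_sketch(void *task_entry)
{
    spin_lock_irqsave(&kcb_lock, &task_flags);

    if (!task_entry || !task_list) {
        /* Must unlock before the early return, or the lock leaks. */
        spin_unlock_irqrestore(&kcb_lock, task_flags);
        return ERR_TASK_NOT_FOUND;
    }

    /* ... search the list under the lock ... */
    spin_unlock_irqrestore(&kcb_lock, task_flags);
    return 0;
}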
@@ -838,23 +849,46 @@ void mo_task_wfi(void)
 {
     /* Process deferred timer work before waiting */
     process_deferred_timer_work();
+    uint32_t flag = 0;
 
     if (!kcb->preemptive)
         return;
 
+    spin_lock_irqsave(&kcb->kcb_lock, &flag);
     volatile uint32_t current_ticks = kcb->ticks;
-    while (current_ticks == kcb->ticks)
+    spin_unlock_irqrestore(&kcb->kcb_lock, flag);
+
+    while (1) {
+        spin_lock_irqsave(&kcb->kcb_lock, &flag);
+        if (current_ticks != kcb->ticks) {
+            spin_unlock_irqrestore(&kcb->kcb_lock, flag);
+            break;
+        }
+        spin_unlock_irqrestore(&kcb->kcb_lock, flag);
         hal_cpu_idle();
+    }
 }
 
 uint16_t mo_task_count(void)
 {
-    return kcb->task_count;
+    uint32_t task_count;
+    uint32_t flag;
+
+    spin_lock_irqsave(&kcb->kcb_lock, &flag);
+    task_count = kcb->task_count;
+    spin_unlock_irqrestore(&kcb->kcb_lock, flag);
+    return task_count;
 }
 
 uint32_t mo_ticks(void)
 {
-    return kcb->ticks;
+    uint32_t ticks;
+    uint32_t flag;
+
+    spin_lock_irqsave(&kcb->kcb_lock, &flag);
+    ticks = kcb->ticks;
+    spin_unlock_irqrestore(&kcb->kcb_lock, flag);
+    return ticks;
 }
 
 uint64_t mo_uptime(void)
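The mo_task_wfi() rewrite replaces the unlocked while (current_ticks == kcb->ticks) spin with a loop that re-reads the tick counter under the lock on each iteration and, crucially, drops the lock before hal_cpu_idle(), so the timer interrupt can still run and advance kcb->ticks while the core waits. A self-contained sketch of that shape, with assumed stand-ins for the lock helpers and the idle hook:

/* Sketch only: stand-in lock helpers and idle hook for illustration. */
#include <stdint.h>

typedef struct { volatile uint32_t locked; } spinlock_t;

static void spin_lock_irqsave(spinlock_t *l, uint32_t *f) { *f = 0; l->locked = 1; }
static void spin_unlock_irqrestore(spinlock_t *l, uint32_t f) { l->locked = 0; (void) f; }

static spinlock_t kcb_lock;
static volatile uint32_t ticks; /* advanced by the timer interrupt handler */

static void hal_cpu_idle(void) { /* hypothetical: wait-for-interrupt */ }

void wfi_sketch(void)
{
    uint32_t flag = 0;

    /* Take a consistent snapshot of the tick count under the lock. */
    spin_lock_irqsave(&kcb_lock, &flag);
    uint32_t current_ticks = ticks;
    spin_unlock_irqrestore(&kcb_lock, flag);

    while (1) {
        /* Re-check under the lock so the comparison is not torn. */
        spin_lock_irqsave(&kcb_lock, &flag);
        if (current_ticks != ticks) { /* at least one tick has elapsed */
            spin_unlock_irqrestore(&kcb_lock, flag);
            break;
        }
        spin_unlock_irqrestore(&kcb_lock, flag);

        hal_cpu_idle(); /* idle with the lock released, so the ISR can run */
    }
}

The mo_task_count() and mo_ticks() hunks apply the same idea to single-word getters: copy the field into a local under the lock, release, then return the local, so callers never observe the field mid-update.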