@@ -315,6 +315,8 @@ pub(crate) struct Task {
315315 pub thread_semaphore : Option < Semaphore > ,
316316 pub state : TaskState ,
317317 pub stack : * mut [ MaybeUninit < u32 > ] ,
318+ #[ cfg( any( hw_task_overflow_detection, sw_task_overflow_detection) ) ]
319+ pub stack_guard : * mut u32 ,
318320 pub priority : usize ,
319321 #[ cfg( multi_core) ]
320322 pub pinned_to : Option < Cpu > ,
@@ -342,6 +344,7 @@ pub(crate) struct Task {
342344 pub ( crate ) heap_allocated : bool ,
343345}
344346
347+ #[ cfg( sw_task_overflow_detection) ]
345348const STACK_CANARY : u32 =
346349 const { esp_config:: esp_config_int!( u32 , "ESP_HAL_CONFIG_STACK_GUARD_VALUE" ) } ;
347350
@@ -366,14 +369,17 @@ impl Task {
366369 name, task_fn, param, task_stack_size, priority, pinned_to
367370 ) ;
368371
369- let extra_stack = if cfg ! ( debug_build ) {
370- // This is a lot, but debug builds fail in different ways without.
371- 6 * 1024
372+ // Make sure the stack guard doesn't eat into the stack size.
373+ let extra_stack = if cfg ! ( any ( hw_task_overflow_detection , sw_task_overflow_detection ) ) {
374+ 4 + esp_config :: esp_config_int! ( usize , "ESP_HAL_CONFIG_STACK_GUARD_OFFSET" )
372375 } else {
373- // Make sure the stack guard doesn't eat into the stack size.
374- 4
376+ 0
375377 } ;
376378
379+ #[ cfg( debug_build) ]
380+ // This is a lot, but debug builds fail in different ways without.
381+ let extra_stack = extra_stack. max ( 6 * 1024 ) ;
382+
377383 let task_stack_size = task_stack_size + extra_stack;
378384
379385 // Make sure stack size is also aligned to 16 bytes.
@@ -393,17 +399,21 @@ impl Task {
393399
394400 let stack_bottom = stack. cast :: < MaybeUninit < u32 > > ( ) ;
395401 let stack_len_bytes = layout. size ( ) ;
396- unsafe { stack_bottom. write ( MaybeUninit :: new ( STACK_CANARY ) ) } ;
402+
403+ let stack_guard_offset =
404+ esp_config:: esp_config_int!( usize , "ESP_HAL_CONFIG_STACK_GUARD_OFFSET" ) ;
397405
398406 let stack_words = core:: ptr:: slice_from_raw_parts_mut ( stack_bottom, stack_len_bytes / 4 ) ;
399407 let stack_top = unsafe { stack_bottom. add ( stack_words. len ( ) ) . cast ( ) } ;
400408
401- Task {
409+ let mut task = Task {
402410 cpu_context : new_task_context ( task_fn, param, stack_top) ,
403411 #[ cfg( feature = "esp-radio" ) ]
404412 thread_semaphore : None ,
405413 state : TaskState :: Ready ,
406414 stack : stack_words,
415+ #[ cfg( any( hw_task_overflow_detection, sw_task_overflow_detection) ) ]
416+ stack_guard : stack_words. cast ( ) ,
407417 current_queue : None ,
408418 priority,
409419 #[ cfg( multi_core) ]
@@ -418,19 +428,49 @@ impl Task {
418428
419429 #[ cfg( feature = "alloc" ) ]
420430 heap_allocated : false ,
431+ } ;
432+
433+ task. set_up_stack_guard ( stack_guard_offset) ;
434+
435+ task
436+ }
437+
    /// Computes the address of this task's stack-guard word and records it in
    /// `self.stack_guard`, writing the software canary value there when
    /// software overflow detection is enabled.
    ///
    /// `offset` is the byte offset of the guard word from the bottom of the
    /// task's stack (configured via `ESP_HAL_CONFIG_STACK_GUARD_OFFSET` at the
    /// call sites visible in this file).
    fn set_up_stack_guard(&mut self, offset: usize) {
        let stack_bottom = self.stack.cast::<MaybeUninit<u32>>();
        // Guard word lives `offset` bytes above the stack bottom.
        // NOTE(review): assumes `offset` is in bounds of the stack allocation
        // and word-aligned — confirm against the config's allowed range.
        let stack_guard = unsafe { stack_bottom.byte_add(offset) };

        #[cfg(sw_task_overflow_detection)]
        unsafe {
            // avoid touching the main stack's canary on the first core
            // NOTE(review): for a freshly allocated task stack this reads an
            // uninitialized word via `assume_init()`; presumably acceptable
            // here because any value other than STACK_CANARY just triggers
            // the write below — confirm this is the intended contract.
            if stack_guard.read().assume_init() != STACK_CANARY {
                stack_guard.write(MaybeUninit::new(STACK_CANARY));
            }
        }

        #[cfg(any(hw_task_overflow_detection, sw_task_overflow_detection))]
        {
            // Remember where the guard word is so `ensure_no_stack_overflow`
            // / the hardware watchpoint can find it later.
            self.stack_guard = stack_guard.cast();
        }
    }
423455
424456 pub ( crate ) fn ensure_no_stack_overflow ( & self ) {
457+ #[ cfg( sw_task_overflow_detection) ]
425458 assert_eq ! (
426459 // This cast is safe to do from MaybeUninit<u32> because this is the word we've written
427460 // during initialization.
428- unsafe { self . stack . cast :: < u32 > ( ) . read( ) } ,
461+ unsafe { self . stack_guard . read( ) } ,
429462 STACK_CANARY ,
430463 "Stack overflow detected in {:?}" ,
431464 self as * const Task
432465 ) ;
433466 }
467+
468+ pub ( crate ) fn set_up_stack_watchpoint ( & self ) {
469+ #[ cfg( hw_task_overflow_detection) ]
470+ unsafe {
471+ esp_hal:: debugger:: set_stack_watchpoint ( self . stack_guard as usize ) ;
472+ }
473+ }
434474}
435475
436476impl Drop for Task {
@@ -451,18 +491,20 @@ impl Drop for Task {
451491 }
452492}
453493
454- pub ( super ) fn allocate_main_task ( scheduler : & mut SchedulerState , stack : * mut [ MaybeUninit < u32 > ] ) {
494+ pub ( super ) fn allocate_main_task (
495+ scheduler : & mut SchedulerState ,
496+ stack : * mut [ MaybeUninit < u32 > ] ,
497+ stack_guard_offset : usize ,
498+ ) {
455499 let cpu = Cpu :: current ( ) ;
456500 let current_cpu = cpu as usize ;
457501
458- unsafe {
459- // avoid touching the main stack's canary on the first core
460- if stack. cast :: < MaybeUninit < u32 > > ( ) . read ( ) . assume_init ( ) != STACK_CANARY {
461- stack
462- . cast :: < MaybeUninit < u32 > > ( )
463- . write ( MaybeUninit :: new ( STACK_CANARY ) ) ;
464- }
465- }
502+ debug_assert ! (
503+ !scheduler. per_cpu[ current_cpu] . initialized,
504+ "Tried to allocate main task multiple times"
505+ ) ;
506+
507+ scheduler. per_cpu [ current_cpu] . initialized = true ;
466508
467509 // Reset main task properties. The rest should be cleared when the task is deleted.
468510 scheduler. per_cpu [ current_cpu] . main_task . priority = 0 ;
@@ -473,12 +515,13 @@ pub(super) fn allocate_main_task(scheduler: &mut SchedulerState, stack: *mut [Ma
473515 scheduler. per_cpu [ current_cpu] . main_task . pinned_to = Some ( cpu) ;
474516 }
475517
476- debug_assert ! (
477- !scheduler. per_cpu[ current_cpu] . initialized,
478- "Tried to allocate main task multiple times"
479- ) ;
518+ scheduler. per_cpu [ current_cpu]
519+ . main_task
520+ . set_up_stack_guard ( stack_guard_offset) ;
480521
481- scheduler. per_cpu [ current_cpu] . initialized = true ;
522+ scheduler. per_cpu [ current_cpu]
523+ . main_task
524+ . set_up_stack_watchpoint ( ) ;
482525
483526 // This is slightly questionable as we don't ensure SchedulerState is pinned, but it's always
484527 // part of a static object so taking the pointer is fine.
0 commit comments