
Commit 779f7a8

esp-preempt: don't switch to sleeping tasks (#4081)
* Separate the alloc and run lists
* Replace circular task list with ready queue
* Remove separate SCHEDULER_STATE static
* Move scheduler to new file
* Reorganize, allow restarting scheduler
* Fix InternalMemory polyfill
* Use SingleShotTimer internally
* Implement a simple timer queue
* Extract run queue, wake tasks, store reason of scheduler event
* Add inherent function to get current task ptr
* Reimplement usleep with the timer queue
* Store current task in timer queue
* Sleep in timer queue task
* Remove ability to sleep arbitrary tasks
* More logging
* Clear timer interrupt in timer handler
* Even more logging
* Merge mutexes into semaphores
1 parent e888f57 commit 779f7a8
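
Note (conceptual sketch only, not the crate's actual types): the idea behind "don't switch to sleeping tasks" is that the scheduler only ever picks from a ready queue, while a sleeping task is parked in a timer queue and only returns to the ready queue once its deadline passes. The names and collections below are illustrative:

// Conceptual sketch: ready queue + timer queue keep sleepers out of the pick set.
use std::cmp::Reverse;
use std::collections::{BinaryHeap, VecDeque};

type TaskId = usize;

struct SchedulerSketch {
    ready: VecDeque<TaskId>,                      // run queue: only runnable tasks
    sleeping: BinaryHeap<Reverse<(u64, TaskId)>>, // timer queue keyed by wake-up time
}

impl SchedulerSketch {
    fn sleep_current(&mut self, task: TaskId, wake_at: u64) {
        // The task is *not* pushed back onto `ready`, so it cannot be switched to.
        self.sleeping.push(Reverse((wake_at, task)));
    }

    fn wake_due(&mut self, now: u64) {
        // Move every task whose deadline has passed back into the ready queue.
        while let Some(&Reverse((deadline, task))) = self.sleeping.peek() {
            if deadline > now {
                break;
            }
            self.sleeping.pop();
            self.ready.push_back(task);
        }
    }

    fn pick_next(&mut self) -> Option<TaskId> {
        // Only ready tasks are candidates; sleepers never show up here.
        self.ready.pop_front()
    }
}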

File tree: 20 files changed (+1056, -910 lines)

esp-preempt/Cargo.toml

Lines changed: 4 additions & 0 deletions
@@ -49,6 +49,10 @@ esp-hal = { version = "1.0.0-rc.0", path = "../esp-hal", features = ["unstable"]
 default = ["esp-alloc"]
 
 ## Enable the use of the `esp-alloc` crate for dynamic memory allocation.
+##
+## If you opt-out, you need to provide implementations for the following functions:
+## - `pub extern "C" fn malloc_internal(size: usize) -> *mut u8`
+## - `pub extern "C" fn free_internal(ptr: *mut u8)`
 esp-alloc = ["dep:esp-alloc"]
 
 #! ### Chip selection
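
Note (illustrative, not part of this commit): when the `esp-alloc` feature is disabled, the two hooks above could be provided, for example, by layering them on whatever `#[global_allocator]` the application registers. The size header, the fixed 8-byte alignment, and the edition-2024 `#[unsafe(no_mangle)]` syntax below are assumptions of this sketch, not a prescribed implementation:

// Hypothetical sketch: C-style allocation hooks built on the global allocator.
// A small header stores the allocation size so `free_internal` can rebuild the Layout.
extern crate alloc;

use core::alloc::Layout;

const HEADER: usize = 8; // assumed header size and alignment

#[unsafe(no_mangle)] // `#[no_mangle]` on editions before 2024
pub extern "C" fn malloc_internal(size: usize) -> *mut u8 {
    let Ok(layout) = Layout::from_size_align(size + HEADER, HEADER) else {
        return core::ptr::null_mut();
    };
    unsafe {
        let base = alloc::alloc::alloc(layout);
        if base.is_null() {
            return core::ptr::null_mut();
        }
        // Remember the requested size in the header.
        (base as *mut usize).write(size);
        base.add(HEADER)
    }
}

#[unsafe(no_mangle)]
pub extern "C" fn free_internal(ptr: *mut u8) {
    if ptr.is_null() {
        return;
    }
    unsafe {
        let base = ptr.sub(HEADER);
        let size = (base as *const usize).read();
        let layout = Layout::from_size_align_unchecked(size + HEADER, HEADER);
        alloc::alloc::dealloc(base, layout);
    }
}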

esp-preempt/src/lib.rs

Lines changed: 16 additions & 283 deletions
@@ -23,27 +23,24 @@ extern crate alloc;
 // MUST be the first module
 mod fmt;
 
-mod mutex;
 mod queue;
+mod run_queue;
+mod scheduler;
 mod semaphore;
 mod task;
 mod timer;
 mod timer_queue;
 
-use core::ffi::c_void;
-
-use allocator_api2::boxed::Box;
+pub(crate) use esp_alloc::InternalMemory;
 use esp_hal::{
     Blocking,
-    time::{Duration, Instant, Rate},
-    timer::{AnyTimer, PeriodicTimer},
+    timer::{AnyTimer, OneShotTimer},
 };
-use esp_radio_preempt_driver::semaphore::{SemaphoreImplementation, SemaphorePtr};
-use esp_sync::NonReentrantMutex;
+pub(crate) use scheduler::SCHEDULER;
 
-use crate::{semaphore::Semaphore, task::Context, timer::TIMER};
+use crate::timer::TimeDriver;
 
-type TimeBase = PeriodicTimer<'static, Blocking>;
+type TimeBase = OneShotTimer<'static, Blocking>;
 
 // Polyfill the InternalMemory allocator
 #[cfg(not(feature = "esp-alloc"))]
@@ -52,32 +49,28 @@ mod esp_alloc {
 
     use allocator_api2::alloc::{AllocError, Allocator};
 
+    unsafe extern "C" {
+        fn malloc_internal(size: usize) -> *mut u8;
+
+        fn free_internal(ptr: *mut u8);
+    }
+
     /// An allocator that uses internal memory only.
     pub struct InternalMemory;
 
     unsafe impl Allocator for InternalMemory {
         fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
-            unsafe extern "C" {
-                fn esp_radio_allocate_from_internal_ram(size: usize) -> *mut u8;
-            }
-            let raw_ptr = unsafe { esp_radio_allocate_from_internal_ram(layout.size()) };
+            let raw_ptr = unsafe { malloc_internal(layout.size()) };
             let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
             Ok(NonNull::slice_from_raw_parts(ptr, layout.size()))
         }
 
         unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) {
-            unsafe extern "C" {
-                fn esp_radio_deallocate_internal_ram(ptr: *mut u8);
-            }
-            unsafe {
-                esp_radio_deallocate_internal_ram(ptr.as_ptr());
-            }
+            unsafe { free_internal(ptr.as_ptr()) };
         }
     }
 }
 
-pub(crate) use esp_alloc::InternalMemory;
-
 /// A trait to allow better UX for initializing esp-preempt.
 ///
 /// This trait is meant to be used only for the `init` function.
@@ -107,174 +100,6 @@
     }
 }
 
-struct SchedulerState {
-    /// Pointer to the current task.
-    ///
-    /// Tasks are stored in a circular linked list. CTX_NOW points to the
-    /// current task.
-    current_task: *mut Context,
-
-    /// Pointer to the task that is scheduled for deletion.
-    to_delete: *mut Context,
-}
-
-unsafe impl Send for SchedulerState {}
-
-impl SchedulerState {
-    const fn new() -> Self {
-        Self {
-            current_task: core::ptr::null_mut(),
-            to_delete: core::ptr::null_mut(),
-        }
-    }
-
-    fn delete_task(&mut self, task: *mut Context) {
-        let mut current_task = self.current_task;
-        // Save the first pointer so we can prevent an accidental infinite loop.
-        let initial = current_task;
-        loop {
-            // We don't have the previous pointer, so we need to walk forward in the circle
-            // even if we need to delete the first task.
-
-            // If the next task is the one we want to delete, we need to remove it from the
-            // list, then drop it.
-            let next_task = unsafe { (*current_task).next };
-            if core::ptr::eq(next_task, task) {
-                unsafe {
-                    (*current_task).next = (*next_task).next;
-
-                    core::ptr::drop_in_place(task);
-                    break;
-                }
-            }
-
-            // If the next task is the first task, we can stop. If we needed to delete the
-            // first task, we have already handled it in the above case. If we needed to
-            // delete another task, it has already been deleted in a previous iteration.
-            if core::ptr::eq(next_task, initial) {
-                break;
-            }
-
-            // Move to the next task.
-            current_task = next_task;
-        }
-    }
-
-    fn delete_marked_tasks(&mut self) {
-        while !self.to_delete.is_null() {
-            let task_to_delete = core::mem::take(&mut self.to_delete);
-            self.to_delete = unsafe { (*task_to_delete).next_to_delete };
-            self.delete_task(task_to_delete);
-        }
-    }
-
-    fn select_next_task(&mut self) -> Option<*mut Context> {
-        let mut current = self.current_task;
-        loop {
-            let next_task = unsafe { (*current).next };
-
-            if next_task == self.current_task {
-                // We didn't find a new task to switch to.
-                // TODO: mark the current task as Running
-                // Once we have actual task states, yield should marked the current task as Ready,
-                // other stuff as Waiting.
-                return None;
-            }
-
-            if unsafe { (*next_task).state }.is_ready() {
-                // TODO: mark the selected task as Running
-                return Some(next_task);
-            }
-            current = next_task;
-        }
-    }
-
-    #[cfg(xtensa)]
-    fn switch_task(&mut self, trap_frame: &mut esp_hal::trapframe::TrapFrame) {
-        self.delete_marked_tasks();
-
-        let Some(next_task) = self.select_next_task() else {
-            return;
-        };
-
-        task::save_task_context(unsafe { &mut *self.current_task }, trap_frame);
-
-        self.current_task = next_task;
-
-        task::restore_task_context(unsafe { &mut *self.current_task }, trap_frame);
-    }
-
-    #[cfg(riscv)]
-    fn switch_task(&mut self) {
-        self.delete_marked_tasks();
-
-        let Some(next_task) = self.select_next_task() else {
-            return;
-        };
-
-        let old_ctx = unsafe { &raw mut (*self.current_task).trap_frame };
-        let new_ctx = unsafe { &raw mut (*next_task).trap_frame };
-
-        if crate::task::arch_specific::task_switch(old_ctx, new_ctx) {
-            unsafe { self.current_task = (*self.current_task).next };
-        }
-    }
-
-    fn schedule_task_deletion(&mut self, mut task_to_delete: *mut Context) -> bool {
-        if task_to_delete.is_null() {
-            task_to_delete = self.current_task;
-        }
-        let is_current = core::ptr::eq(task_to_delete, self.current_task);
-
-        unsafe { (*task_to_delete).next_to_delete = self.to_delete };
-        self.to_delete = task_to_delete;
-
-        is_current
-    }
-}
-
-fn usleep(us: u32) {
-    trace!("usleep");
-    unsafe extern "C" {
-        fn esp_rom_delay_us(us: u32);
-    }
-
-    const MIN_YIELD_TIME: u32 = 1_000_000 / TICK_RATE;
-    if us < MIN_YIELD_TIME {
-        // Short wait, just sleep
-        unsafe { esp_rom_delay_us(us) };
-    } else {
-        const MIN_YIELD_DURATION: Duration = Duration::from_micros(MIN_YIELD_TIME as u64);
-        let sleep_for = Duration::from_micros(us as u64);
-        let start = Instant::now();
-        loop {
-            // Yield to other tasks
-            timer::yield_task();
-
-            let elapsed = start.elapsed();
-            if elapsed.as_micros() > us as u64 {
-                break;
-            }
-
-            let remaining = sleep_for - elapsed;
-
-            if remaining < MIN_YIELD_DURATION {
-                // If the remaining time is less than the minimum yield time, we can just sleep
-                // for the remaining time.
-                unsafe { esp_rom_delay_us(remaining.as_micros() as u32) };
-                break;
-            }
-        }
-    }
-}
-
-static SCHEDULER_STATE: NonReentrantMutex<SchedulerState> =
-    NonReentrantMutex::new(SchedulerState::new());
-
-struct Scheduler {}
-
-esp_radio_preempt_driver::scheduler_impl!(static SCHEDULER: Scheduler = Scheduler {});
-
 /// Initializes the scheduler.
 ///
 /// # The `timer` argument
@@ -288,99 +113,7 @@ esp_radio_preempt_driver::scheduler_impl!(static SCHEDULER: Scheduler = Scheduler {});
 ///
 /// For an example, see the [crate-level documentation][self].
 pub fn init(timer: impl TimerSource) {
-    timer::setup_timebase(timer.timer());
+    SCHEDULER.with(move |scheduler| scheduler.set_time_driver(TimeDriver::new(timer.timer())))
 }
 
 const TICK_RATE: u32 = esp_config::esp_config_int!(u32, "ESP_PREEMPT_CONFIG_TICK_RATE_HZ");
-const TIMESLICE_FREQUENCY: Rate = Rate::from_hz(TICK_RATE);
-
-impl esp_radio_preempt_driver::Scheduler for Scheduler {
-    fn initialized(&self) -> bool {
-        timer::initialized()
-    }
-
-    fn enable(&self) {
-        // allocate the main task
-        task::allocate_main_task();
-        timer::setup_multitasking();
-        timer_queue::create_timer_task();
-
-        TIMER.with(|t| {
-            let t = unwrap!(t.as_mut());
-            unwrap!(t.start(TIMESLICE_FREQUENCY.as_duration()));
-        });
-    }
-
-    fn disable(&self) {
-        timer::disable_timebase();
-        timer::disable_multitasking();
-        task::delete_all_tasks();
-    }
-
-    fn yield_task(&self) {
-        timer::yield_task()
-    }
-
-    fn yield_task_from_isr(&self) {
-        self.yield_task();
-    }
-
-    fn max_task_priority(&self) -> u32 {
-        255
-    }
-
-    fn task_create(
-        &self,
-        task: extern "C" fn(*mut c_void),
-        param: *mut c_void,
-        _priority: u32,
-        _pin_to_core: Option<u32>,
-        task_stack_size: usize,
-    ) -> *mut c_void {
-        let task = Box::new_in(Context::new(task, param, task_stack_size), InternalMemory);
-        let task_ptr = Box::into_raw(task);
-
-        SCHEDULER_STATE.with(|state| unsafe {
-            let current_task = state.current_task;
-            debug_assert!(
-                !current_task.is_null(),
-                "Tried to allocate a task before allocating the main task"
-            );
-            // Insert the new task at the next position.
-            let next = (*current_task).next;
-            (*task_ptr).next = next;
-            (*current_task).next = task_ptr;
-        });
-
-        task_ptr as *mut c_void
-    }
-
-    fn current_task(&self) -> *mut c_void {
-        task::current_task() as *mut c_void
-    }
-
-    fn schedule_task_deletion(&self, task_handle: *mut c_void) {
-        task::schedule_task_deletion(task_handle as *mut Context)
-    }
-
-    fn current_task_thread_semaphore(&self) -> SemaphorePtr {
-        task::with_current_task(|task| {
-            if task.thread_semaphore.is_none() {
-                task.thread_semaphore = Some(Semaphore::create(1, 0));
-            }
-
-            unwrap!(task.thread_semaphore)
-        })
-    }
-
-    fn usleep(&self, us: u32) {
-        usleep(us)
-    }
-
-    fn now(&self) -> u64 {
-        // FIXME: this function needs to return the timestamp of the scheduler's timer
-        esp_hal::time::Instant::now()
-            .duration_since_epoch()
-            .as_micros()
-    }
-}
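
Note (usage illustration, not taken from this diff): after this change, `init` hands the timer to the scheduler's `TimeDriver`, but the call site stays the same. A minimal sketch, assuming the usual esp-hal peripheral names (`TIMG0`, `TimerGroup`, `#[esp_hal::main]`) and omitting the panic handler and other application boilerplate:

// Hypothetical caller sketch: hand a TIMG timer to esp-preempt.
#![no_std]
#![no_main]

use esp_hal::timer::timg::TimerGroup;

#[esp_hal::main]
fn main() -> ! {
    let peripherals = esp_hal::init(esp_hal::Config::default());

    let timg0 = TimerGroup::new(peripherals.TIMG0);
    // `timer0` satisfies the `TimerSource` bound (it converts into `AnyTimer`),
    // so the scheduler can build its one-shot time driver from it.
    esp_preempt::init(timg0.timer0);

    loop {}
}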
