 use atomic_ref::AtomicRef;
 use constance::{
     kernel::{
-        self, ClearInterruptLineError, EnableInterruptLineError, InterruptNum, InterruptPriority,
+        ClearInterruptLineError, EnableInterruptLineError, InterruptNum, InterruptPriority,
         PendInterruptLineError, Port, PortToKernel, QueryInterruptLineError,
         SetInterruptLinePriorityError, TaskCb, UTicks,
     },
@@ -16,11 +16,7 @@ use constance::{
 };
 use once_cell::sync::OnceCell;
 use parking_lot::{lock_api::RawMutex, Mutex};
-use std::{
-    cell::Cell,
-    collections::{BTreeSet, HashMap},
-    time::Instant,
-};
+use std::{cell::Cell, time::Instant};

 #[cfg(unix)]
 #[path = "threading_unix.rs"]
@@ -33,6 +29,7 @@ mod threading;
 #[cfg(test)]
 mod threading_test;

+mod sched;
 mod ums;
 mod utils;

@@ -74,7 +71,7 @@ pub unsafe trait PortInstance: Kernel + Port<PortTaskState = TaskState> {
 /// the corresponding trait methods of `Port*`.
 #[doc(hidden)]
 pub struct State {
-    thread_group: OnceCell<ums::ThreadGroup<SchedState>>,
+    thread_group: OnceCell<ums::ThreadGroup<sched::SchedState>>,
     join_handle: Mutex<Option<ums::ThreadGroupJoinHandle>>,
     origin: AtomicRef<'static, Instant>,
 }
@@ -84,6 +81,10 @@ pub struct TaskState {
     tsm: Mutex<Tsm>,
 }

+impl Init for TaskState {
+    const INIT: Self = Self::new();
+}
+
 /// Task state machine
 ///
 /// These don't exactly align with the task states defined in the kernel.
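
Note: this `impl Init for TaskState` is the same block that disappears from its old position in a later hunk; only its location in the file changes. For readers unfamiliar with the pattern, `Init` supplies a compile-time constant initial value so that per-task state can be placed in statics. A stand-in sketch follows; the trait definition and the static are assumptions for illustration, only `const INIT: Self = Self::new()` comes from the diff:

    // Stand-in definitions; the real `Init` trait lives in constance's utility code.
    trait Init {
        const INIT: Self;
    }

    struct TaskState {
        value: u32, // placeholder for the real `tsm: Mutex<Tsm>` field
    }

    impl TaskState {
        const fn new() -> Self {
            TaskState { value: 0 }
        }
    }

    impl Init for TaskState {
        const INIT: Self = Self::new();
    }

    // Because INIT is a constant, task control blocks can live in statics.
    static TASK_STATE: TaskState = TaskState::INIT;

    fn main() {
        assert_eq!(TASK_STATE.value, 0);
    }
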
@@ -98,45 +99,6 @@ enum Tsm {
     Running(ums::ThreadId),
 }

-/// The state of the simulated hardware-based scheduler.
-struct SchedState {
-    /// Interrupt lines.
-    int_lines: HashMap<InterruptNum, IntLine>,
-    /// `int_lines.iter().filter(|_,a| a.pended && a.enable)
-    /// .map(|i,a| (a.priority, i)).collect()`.
-    pended_lines: BTreeSet<(InterruptPriority, InterruptNum)>,
-    active_int_handlers: Vec<(InterruptPriority, ums::ThreadId)>,
-    cpu_lock: bool,
-
-    /// The currently-selected task thread.
-    task_thread: Option<ums::ThreadId>,
-
-    /// Garbage can
-    zombies: Vec<ums::ThreadId>,
-}
-
-/// The configuration of an interrupt line.
-#[derive(Debug)]
-struct IntLine {
-    priority: InterruptPriority,
-    start: Option<kernel::cfg::InterruptHandlerFn>,
-    enable: bool,
-    pended: bool,
-}
-
-impl Init for TaskState {
-    const INIT: Self = Self::new();
-}
-
-impl Init for IntLine {
-    const INIT: Self = IntLine {
-        priority: 0,
-        start: None,
-        enable: false,
-        pended: false,
-    };
-}
-
 /// The role of a thread.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 enum ThreadRole {
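
Note: the doc comment on the removed `pended_lines` field describes it as a derived index over `int_lines`. Because a `BTreeSet` iterates tuples in ascending lexicographic order, `pended_lines.iter().next()` always yields the pended-and-enabled line with the numerically smallest priority, which is what the preemption check (removed in a later hunk) relies on. A standalone illustration of that ordering, using plain integer types in place of `InterruptPriority`/`InterruptNum`:

    use std::collections::BTreeSet;

    fn main() {
        // (priority, interrupt line number); a smaller priority value is more
        // urgent, and negative priorities are the "unmanaged" ones that are not
        // masked by CPU Lock (cf. the removed `is_interrupt_priority_managed`).
        let mut pended_lines: BTreeSet<(i16, usize)> = BTreeSet::new();
        pended_lines.insert((4, 10));
        pended_lines.insert((-1, 3));
        pended_lines.insert((2, 7));

        // The first element is always the most urgent pended interrupt line.
        assert_eq!(pended_lines.iter().next(), Some(&(-1, 3)));
    }
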
@@ -186,7 +148,7 @@ impl TaskState {
         // the kernel will never choose this task again. However, the underlying
         // UMS thread is still alive. Thus, we need to temporarily override the
         // normal scheduling to ensure this thread will run to completion.
-        lock.scheduler().zombies.push(thread_id);
+        lock.scheduler().recycle_thread(thread_id);
         lock.scheduler().cpu_lock = false;
         drop(lock);

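
Note: `recycle_thread` appears to be a new `sched::SchedState` method wrapping the old direct `zombies.push(thread_id)`; the zombie list's role is visible in the `choose_next_thread`/`thread_exited` code removed in the next hunk. A minimal, self-contained sketch of what such a method might look like (field and type names are assumptions carried over from the removed code):

    type ThreadId = usize; // stand-in for ums::ThreadId

    struct SchedState {
        /// Threads that must keep running until they exit ("garbage can").
        zombies: Vec<ThreadId>,
    }

    impl SchedState {
        // Presumed shape of the new method: it simply encapsulates the old
        // `zombies.push(..)`, keeping the zombie list private to the module.
        fn recycle_thread(&mut self, thread_id: ThreadId) {
            self.zombies.push(thread_id);
        }
    }

    fn main() {
        let mut sched = SchedState { zombies: Vec::new() };
        sched.recycle_thread(42);
        assert_eq!(sched.zombies, vec![42]);
    }
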
@@ -198,192 +160,6 @@ impl TaskState {
     }
 }

-struct BadIntLineError;
-
-impl SchedState {
-    fn new<System: Kernel>() -> Self {
-        let mut this = Self {
-            int_lines: HashMap::new(),
-            pended_lines: BTreeSet::new(),
-            active_int_handlers: Vec::new(),
-            cpu_lock: true,
-            task_thread: None,
-            zombies: Vec::new(),
-        };
-
-        for i in 0..NUM_INTERRUPT_LINES {
-            if let Some(handler) = System::INTERRUPT_HANDLERS.get(i) {
-                this.int_lines.insert(
-                    i as InterruptNum,
-                    IntLine {
-                        start: Some(handler),
-                        ..IntLine::INIT
-                    },
-                );
-            }
-        }
-
-        this
-    }
-
-    fn update_line(
-        &mut self,
-        i: InterruptNum,
-        f: impl FnOnce(&mut IntLine),
-    ) -> Result<(), BadIntLineError> {
-        if i >= NUM_INTERRUPT_LINES {
-            return Err(BadIntLineError);
-        }
-        let line = self.int_lines.entry(i).or_insert_with(|| IntLine::INIT);
-        self.pended_lines.remove(&(line.priority, i));
-        f(line);
-        if line.enable && line.pended {
-            self.pended_lines.insert((line.priority, i));
-        }
-        Ok(())
-    }
-
-    fn is_line_pended(&self, i: InterruptNum) -> Result<bool, BadIntLineError> {
-        if i >= NUM_INTERRUPT_LINES {
-            return Err(BadIntLineError);
-        }
-
-        if let Some(line) = self.int_lines.get(&i) {
-            Ok(line.pended)
-        } else {
-            Ok(false)
-        }
-    }
-}
-
-impl ums::Scheduler for SchedState {
-    fn choose_next_thread(&mut self) -> Option<ums::ThreadId> {
-        if let Some(&thread_id) = self.zombies.first() {
-            // Clean up zombie threads as soon as possible
-            Some(thread_id)
-        } else if let Some(&(_, thread_id)) = self.active_int_handlers.last() {
-            Some(thread_id)
-        } else if self.cpu_lock {
-            // CPU Lock owned by a task thread
-            Some(self.task_thread.unwrap())
-        } else {
-            self.task_thread
-        }
-    }
-
-    fn thread_exited(&mut self, thread_id: ums::ThreadId) {
-        if let Some(i) = self.zombies.iter().position(|id| *id == thread_id) {
-            log::trace!("removing the zombie thread {:?}", thread_id);
-            self.zombies.swap_remove(i);
-            return;
-        }
-
-        log::warn!("thread_exited: unexpected thread {:?}", thread_id);
-    }
-}
-
-/// Check for any pending interrupts that can be activated under the current
-/// condition. If there are one or more of them, activate them and return
-/// `true`, in which case the caller should call
-/// [`ums::ThreadGroupLockGuard::preempt`] or [`ums::yield_now`].
-#[must_use]
-fn check_preemption_by_interrupt(
-    thread_group: &'static ums::ThreadGroup<SchedState>,
-    lock: &mut ums::ThreadGroupLockGuard<SchedState>,
-) -> bool {
-    let mut activated_any = false;
-
-    // Check pending interrupts
-    loop {
-        let sched_state = lock.scheduler();
-
-        // Find the highest pended priority
-        let (pri, num) = if let Some(&x) = sched_state.pended_lines.iter().next() {
-            x
-        } else {
-            // No interrupt is pended
-            break;
-        };
-
-        // Masking by CPU Lock
-        if sched_state.cpu_lock && is_interrupt_priority_managed(pri) {
-            log::trace!(
-                "not handling an interrupt with priority {} because of CPU Lock",
-                pri
-            );
-            break;
-        }
-
-        // Masking by an already active interrupt
-        if let Some(&(existing_pri, _)) = sched_state.active_int_handlers.last() {
-            if existing_pri < pri {
-                log::trace!(
-                    "not handling an interrupt with priority {} because of \
-                     an active interrupt handler with priority {}",
-                    pri,
-                    existing_pri,
-                );
-                break;
-            }
-        }
-
-        // Take the interrupt
-        sched_state.pended_lines.remove(&(pri, num));
-
-        // Find the interrupt handler for `num`. Return
-        // `default_interrupt_handler` if there's none.
-        let start = sched_state
-            .int_lines
-            .get(&num)
-            .and_then(|line| line.start)
-            .unwrap_or(default_interrupt_handler);
-
-        let thread_id = lock.spawn(move |thread_id| {
-            THREAD_ROLE.with(|role| role.set(ThreadRole::Interrupt));
-
-            // Safety: The port can call an interrupt handler
-            unsafe { start() }
-
-            let mut lock = thread_group.lock();
-
-            // Make this interrupt handler inactive
-            let (_, popped_thread_id) = lock.scheduler().active_int_handlers.pop().unwrap();
-            assert_eq!(thread_id, popped_thread_id);
-            log::trace!(
-                "an interrupt handler for an interrupt {} (priority = {}) exited",
-                num,
-                pri
-            );
-
-            // Make sure this thread will run to completion
-            lock.scheduler().zombies.push(thread_id);
-
-            let _ = check_preemption_by_interrupt(thread_group, &mut lock);
-        });
-
-        log::trace!(
-            "handling an interrupt {} (priority = {}) with thread {:?}",
-            num,
-            pri,
-            thread_id
-        );
-
-        lock.scheduler().active_int_handlers.push((pri, thread_id));
-
-        activated_any = true;
-    }
-
-    activated_any
-}
-
-fn is_interrupt_priority_managed(p: InterruptPriority) -> bool {
-    p >= 0
-}
-
-extern "C" fn default_interrupt_handler() {
-    panic!("Unhandled interrupt");
-}
-
 #[allow(clippy::missing_safety_doc)]
 impl State {
     pub const fn new() -> Self {
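
Note: the removed `choose_next_thread` above is the heart of the simulated scheduler: an exiting ("zombie") thread is always chosen first so it can run to completion, then the innermost active interrupt handler, and only then the task thread (which must exist whenever CPU Lock is held). A self-contained toy model of that selection order, with stand-in types replacing `ums::ThreadId` and `InterruptPriority`:

    type ThreadId = usize; // stand-in for ums::ThreadId

    struct SchedState {
        active_int_handlers: Vec<(i16, ThreadId)>, // (priority, handler thread)
        cpu_lock: bool,
        task_thread: Option<ThreadId>,
        zombies: Vec<ThreadId>,
    }

    impl SchedState {
        // Mirrors the selection logic of the removed `ums::Scheduler` impl.
        fn choose_next_thread(&mut self) -> Option<ThreadId> {
            if let Some(&thread_id) = self.zombies.first() {
                // Clean up exiting threads as soon as possible.
                Some(thread_id)
            } else if let Some(&(_, thread_id)) = self.active_int_handlers.last() {
                // The innermost (most recently activated) interrupt handler.
                Some(thread_id)
            } else if self.cpu_lock {
                // CPU Lock is owned by a task thread, which therefore must exist.
                Some(self.task_thread.unwrap())
            } else {
                self.task_thread
            }
        }
    }

    fn main() {
        let mut s = SchedState {
            active_int_handlers: vec![(2, 20), (1, 21)],
            cpu_lock: false,
            task_thread: Some(10),
            zombies: vec![30],
        };
        assert_eq!(s.choose_next_thread(), Some(30)); // zombie thread wins
        s.zombies.clear();
        assert_eq!(s.choose_next_thread(), Some(21)); // innermost handler next
        s.active_int_handlers.clear();
        assert_eq!(s.choose_next_thread(), Some(10)); // finally the task thread
    }
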
@@ -396,7 +172,7 @@ impl State {

     pub fn init<System: Kernel>(&self) {
         // Create a UMS thread group.
-        let (thread_group, join_handle) = ums::ThreadGroup::new(SchedState::new::<System>());
+        let (thread_group, join_handle) = ums::ThreadGroup::new(sched::SchedState::new::<System>());

         self.thread_group.set(thread_group).ok().unwrap();
         *self.join_handle.lock() = Some(join_handle);
@@ -427,7 +203,7 @@ impl State {
         lock.scheduler().cpu_lock = false;

         // Start scheduling
-        assert!(check_preemption_by_interrupt(
+        assert!(sched::check_preemption_by_interrupt(
             self.thread_group.get().unwrap(),
             &mut lock
         ));
@@ -551,7 +327,7 @@ impl State {
         assert!(lock.scheduler().cpu_lock);
         lock.scheduler().cpu_lock = false;

-        if check_preemption_by_interrupt(self.thread_group.get().unwrap(), &mut lock) {
+        if sched::check_preemption_by_interrupt(self.thread_group.get().unwrap(), &mut lock) {
             drop(lock);
             ums::yield_now();
         }
@@ -600,9 +376,9 @@ impl State {
         let mut lock = self.thread_group.get().unwrap().lock();
         lock.scheduler()
             .update_line(num, |line| line.priority = priority)
-            .map_err(|BadIntLineError| SetInterruptLinePriorityError::BadParam)?;
+            .map_err(|sched::BadIntLineError| SetInterruptLinePriorityError::BadParam)?;

-        if check_preemption_by_interrupt(self.thread_group.get().unwrap(), &mut lock) {
+        if sched::check_preemption_by_interrupt(self.thread_group.get().unwrap(), &mut lock) {
             drop(lock);
             ums::yield_now();
         }
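
Note on these `map_err` calls: `|sched::BadIntLineError| ...` is not a parameter name but a unit-struct pattern, so once `BadIntLineError` moves into the `sched` module the pattern itself has to be path-qualified. A standalone illustration (the error enum here is a simplified stand-in for the real constance type):

    mod sched {
        pub struct BadIntLineError;
    }

    #[derive(Debug, PartialEq)]
    enum SetInterruptLinePriorityError {
        BadParam,
    }

    fn main() {
        let result: Result<(), sched::BadIntLineError> = Err(sched::BadIntLineError);

        // The closure argument is a pattern that destructures the unit struct;
        // no binding is introduced, the closure just swaps the error type.
        let mapped =
            result.map_err(|sched::BadIntLineError| SetInterruptLinePriorityError::BadParam);

        assert_eq!(mapped.unwrap_err(), SetInterruptLinePriorityError::BadParam);
    }
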
@@ -619,9 +395,9 @@ impl State {
         let mut lock = self.thread_group.get().unwrap().lock();
         lock.scheduler()
             .update_line(num, |line| line.enable = true)
-            .map_err(|BadIntLineError| EnableInterruptLineError::BadParam)?;
+            .map_err(|sched::BadIntLineError| EnableInterruptLineError::BadParam)?;

-        if check_preemption_by_interrupt(self.thread_group.get().unwrap(), &mut lock) {
+        if sched::check_preemption_by_interrupt(self.thread_group.get().unwrap(), &mut lock) {
             drop(lock);
             ums::yield_now();
         }
@@ -638,7 +414,7 @@ impl State {
         (self.thread_group.get().unwrap().lock())
             .scheduler()
             .update_line(num, |line| line.enable = false)
-            .map_err(|BadIntLineError| EnableInterruptLineError::BadParam)
+            .map_err(|sched::BadIntLineError| EnableInterruptLineError::BadParam)
     }

     pub fn pend_interrupt_line(
@@ -650,9 +426,9 @@ impl State {
         let mut lock = self.thread_group.get().unwrap().lock();
         lock.scheduler()
             .update_line(num, |line| line.pended = true)
-            .map_err(|BadIntLineError| PendInterruptLineError::BadParam)?;
+            .map_err(|sched::BadIntLineError| PendInterruptLineError::BadParam)?;

-        if check_preemption_by_interrupt(self.thread_group.get().unwrap(), &mut lock) {
+        if sched::check_preemption_by_interrupt(self.thread_group.get().unwrap(), &mut lock) {
             drop(lock);
             ums::yield_now();
         }
@@ -666,7 +442,7 @@ impl State {
         (self.thread_group.get().unwrap().lock())
             .scheduler()
             .update_line(num, |line| line.pended = false)
-            .map_err(|BadIntLineError| ClearInterruptLineError::BadParam)
+            .map_err(|sched::BadIntLineError| ClearInterruptLineError::BadParam)
     }

     pub fn is_interrupt_line_pending(
@@ -676,7 +452,7 @@ impl State {
         (self.thread_group.get().unwrap().lock())
             .scheduler()
             .is_line_pended(num)
-            .map_err(|BadIntLineError| QueryInterruptLineError::BadParam)
+            .map_err(|sched::BadIntLineError| QueryInterruptLineError::BadParam)
     }

     // TODO: Make these customizable to test the kernel under multiple conditions