@@ -414,30 +414,45 @@ static void flag_ipi(void)
 
 static int slice_ticks;
 static int slice_max_prio;
-struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
-bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
+static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
+static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
 
-static inline int slice_time(struct k_thread *curr)
+#ifdef CONFIG_SWAP_NONATOMIC
+/* If z_swap() isn't atomic, then it's possible for a timer interrupt
+ * to try to timeslice away _current after it has already pended
+ * itself but before the corresponding context switch.  Treat that as
+ * a noop condition in z_time_slice().
+ */
+static struct k_thread *pending_current;
+#endif
+
+static inline int slice_time(struct k_thread *thread)
 {
         int ret = slice_ticks;
 
 #ifdef CONFIG_TIMESLICE_PER_THREAD
-        if (curr->base.slice_ticks != 0) {
-                ret = curr->base.slice_ticks;
+        if (thread->base.slice_ticks != 0) {
+                ret = thread->base.slice_ticks;
         }
 #endif
         return ret;
 }
 
-#ifdef CONFIG_SWAP_NONATOMIC
-/* If z_swap() isn't atomic, then it's possible for a timer interrupt
- * to try to timeslice away _current after it has already pended
- * itself but before the corresponding context switch.  Treat that as
- * a noop condition in z_time_slice().
- */
-static struct k_thread *pending_current;
+static inline bool sliceable(struct k_thread *thread)
+{
+        bool ret = is_preempt(thread)
+                && slice_time(thread) != 0
+                && !z_is_prio_higher(thread->base.prio, slice_max_prio)
+                && !z_is_thread_prevented_from_running(thread)
+                && !z_is_idle_thread_object(thread);
+
+#ifdef CONFIG_TIMESLICE_PER_THREAD
+        ret |= thread->base.slice_ticks != 0;
 #endif
 
+        return ret;
+}
+
 static void slice_timeout(struct _timeout *t)
 {
         int cpu = ARRAY_INDEX(slice_timeouts, t);
@@ -458,8 +473,8 @@ void z_reset_time_slice(struct k_thread *curr)
         int cpu = _current_cpu->id;
 
         z_abort_timeout(&slice_timeouts[cpu]);
-        if (slice_time(curr) != 0) {
-                slice_expired[cpu] = false;
+        slice_expired[cpu] = false;
+        if (sliceable(curr)) {
                 z_add_timeout(&slice_timeouts[cpu], slice_timeout,
                               K_TICKS(slice_time(curr) - 1));
         }
@@ -492,63 +507,33 @@ void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
 }
 #endif
 
-static inline bool sliceable(struct k_thread *thread)
-{
-        bool ret = is_preempt(thread)
-                && !z_is_thread_prevented_from_running(thread)
-                && !z_is_prio_higher(thread->base.prio, slice_max_prio)
-                && !z_is_idle_thread_object(thread);
-
-#ifdef CONFIG_TIMESLICE_PER_THREAD
-        ret |= thread->base.slice_ticks != 0;
-#endif
-
-        return ret;
-}
-
-static k_spinlock_key_t slice_expired_locked(k_spinlock_key_t sched_lock_key)
-{
-        struct k_thread *curr = _current;
-
-#ifdef CONFIG_TIMESLICE_PER_THREAD
-        if (curr->base.slice_expired) {
-                k_spin_unlock(&sched_spinlock, sched_lock_key);
-                curr->base.slice_expired(curr, curr->base.slice_data);
-                sched_lock_key = k_spin_lock(&sched_spinlock);
-        }
-#endif
-        if (!z_is_thread_prevented_from_running(curr)) {
-                move_thread_to_end_of_prio_q(curr);
-        }
-        z_reset_time_slice(curr);
-
-        return sched_lock_key;
-}
-
 /* Called out of each timer interrupt */
 void z_time_slice(void)
 {
         k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
+        struct k_thread *curr = _current;
 
 #ifdef CONFIG_SWAP_NONATOMIC
-        if (pending_current == _current) {
-                z_reset_time_slice(_current);
+        if (pending_current == curr) {
+                z_reset_time_slice(curr);
                 k_spin_unlock(&sched_spinlock, key);
                 return;
         }
         pending_current = NULL;
 #endif
 
-        if (slice_time(_current) && sliceable(_current)) {
-                if (slice_expired[_current_cpu->id]) {
-                        /* Note: this will (if so enabled) internally
-                         * drop and reacquire the scheduler lock
-                         * around the callback!  Don't put anything
-                         * after this line that requires
-                         * synchronization.
-                         */
-                        key = slice_expired_locked(key);
+        if (slice_expired[_current_cpu->id] && sliceable(curr)) {
+#ifdef CONFIG_TIMESLICE_PER_THREAD
+                if (curr->base.slice_expired) {
+                        k_spin_unlock(&sched_spinlock, key);
+                        curr->base.slice_expired(curr, curr->base.slice_data);
+                        key = k_spin_lock(&sched_spinlock);
+                }
+#endif
+                if (!z_is_thread_prevented_from_running(curr)) {
+                        move_thread_to_end_of_prio_q(curr);
                 }
+                z_reset_time_slice(curr);
         }
         k_spin_unlock(&sched_spinlock, key);
 }
@@ -1582,7 +1567,7 @@ void z_sched_ipi(void)
 #endif
 
 #ifdef CONFIG_TIMESLICING
-        if (slice_time(_current) && sliceable(_current)) {
+        if (sliceable(_current)) {
                 z_time_slice();
         }
 #endif
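
As background for the CONFIG_TIMESLICE_PER_THREAD path reworked above, here is a minimal sketch of how application code might opt a thread into per-thread slicing with an expiry callback. It assumes CONFIG_TIMESLICING=y and CONFIG_TIMESLICE_PER_THREAD=y; the callback, the counter, and the configure_slicing() helper are illustrative and not part of this patch.

```c
#include <zephyr/kernel.h>

static atomic_t preemption_count;

/* Hypothetical expiry hook: with this patch it is invoked straight from
 * z_time_slice() (i.e. timer-interrupt context) with sched_spinlock
 * dropped around the call, so it must stay short and ISR-safe.
 */
static void slice_expired_cb(struct k_thread *thread, void *data)
{
	ARG_UNUSED(thread);
	atomic_inc((atomic_t *)data);
}

void configure_slicing(struct k_thread *worker)
{
	/* Global round-robin: 2 ms slices for priority 0 and lower. */
	k_sched_time_slice_set(2, 0);

	/* Per-thread override: a 10-tick slice plus a callback.  Note that
	 * a nonzero per-thread slice also makes the thread pass sliceable()
	 * via the "ret |= thread->base.slice_ticks != 0" branch above.
	 */
	k_thread_time_slice_set(worker, 10, slice_expired_cb, &preemption_count);
}
```

The expiry-callback and data arguments correspond to the base.slice_expired and base.slice_data fields that z_time_slice() consults in the hunk above.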