@@ -495,7 +495,7 @@ static bool tmigr_check_lonely(struct tmigr_group *group)
  *			outcome is a CPU which might wake up a little early.
  * @evt:		Pointer to tmigr_event which needs to be queued (of idle
  *			child group)
- * @childmask:		childmask of child group
+ * @childmask:		groupmask of child group
  * @remote:		Is set, when the new timer path is executed in
  *			tmigr_handle_remote_cpu()
  * @basej:		timer base in jiffies
@@ -535,7 +535,7 @@ static void __walk_groups(up_f up, struct tmigr_walk *data,
 
 		child = group;
 		group = group->parent;
-		data->childmask = child->childmask;
+		data->childmask = child->groupmask;
 	} while (group);
 }
 
@@ -669,7 +669,7 @@ static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
 {
 	struct tmigr_walk data;
 
-	data.childmask = tmc->childmask;
+	data.childmask = tmc->groupmask;
 
 	trace_tmigr_cpu_active(tmc);
 
@@ -1049,15 +1049,15 @@ void tmigr_handle_remote(void)
 	if (tmigr_is_not_available(tmc))
 		return;
 
-	data.childmask = tmc->childmask;
+	data.childmask = tmc->groupmask;
 	data.firstexp = KTIME_MAX;
 
 	/*
 	 * NOTE: This is a doubled check because the migrator test will be done
 	 * in tmigr_handle_remote_up() anyway. Keep this check to speed up the
 	 * return when nothing has to be done.
 	 */
-	if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) {
+	if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
 		/*
 		 * If this CPU was an idle migrator, make sure to clear its wakeup
 		 * value so it won't chase timers that have already expired elsewhere.
@@ -1150,7 +1150,7 @@ bool tmigr_requires_handle_remote(void)
 		return ret;
 
 	data.now = get_jiffies_update(&jif);
-	data.childmask = tmc->childmask;
+	data.childmask = tmc->groupmask;
 	data.firstexp = KTIME_MAX;
 	data.tmc_active = !tmc->idle;
 	data.check = false;
@@ -1310,7 +1310,7 @@ static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
 	struct tmigr_walk data = { .nextexp = nextexp,
 				   .firstexp = KTIME_MAX,
 				   .evt = &tmc->cpuevt,
-				   .childmask = tmc->childmask };
+				   .childmask = tmc->groupmask };
 
 	/*
 	 * If nextexp is KTIME_MAX, the CPU event will be ignored because the
@@ -1388,7 +1388,7 @@ u64 tmigr_quick_check(u64 nextevt)
 	if (WARN_ON_ONCE(tmc->idle))
 		return nextevt;
 
-	if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask))
+	if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask))
 		return KTIME_MAX;
 
 	do {
@@ -1552,7 +1552,7 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
 	raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
 
 	child->parent = parent;
-	child->childmask = BIT(parent->num_children++);
+	child->groupmask = BIT(parent->num_children++);
 
 	raw_spin_unlock(&parent->lock);
 	raw_spin_unlock_irq(&child->lock);
@@ -1586,7 +1586,7 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
 	 * the new childmask and parent to subsequent walkers through this
 	 * @child. Therefore propagate active state unconditionally.
 	 */
-	data.childmask = child->childmask;
+	data.childmask = child->groupmask;
 
 	/*
 	 * There is only one new level per time (which is protected by
@@ -1652,7 +1652,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 			raw_spin_lock_irq(&group->lock);
 
 			tmc->tmgroup = group;
-			tmc->childmask = BIT(group->num_children++);
+			tmc->groupmask = BIT(group->num_children++);
 
 			raw_spin_unlock_irq(&group->lock);
 
@@ -1731,7 +1731,7 @@ static int tmigr_cpu_prepare(unsigned int cpu)
 	if (ret < 0)
 		return ret;
 
-	if (tmc->childmask == 0)
+	if (tmc->groupmask == 0)
 		return -EINVAL;
 
 	return ret;
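
For readers skimming the diff, here is a minimal, stand-alone C sketch (not the kernel implementation; all demo_* names are hypothetical) of the bit-per-child scheme the rename describes: each child is handed BIT(num_children++) as its groupmask within its parent, and group-level checks mask against that bit, in the spirit of the tmigr_check_migrator() calls above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

struct demo_group {
	uint32_t active;		/* one bit per active child */
	unsigned int num_children;
};

struct demo_child {
	struct demo_group *parent;
	uint32_t groupmask;		/* this child's bit inside the parent */
};

/* Mirror of the connect pattern in the hunks above: hand out the next free bit. */
static void demo_connect(struct demo_group *parent, struct demo_child *child)
{
	child->parent = parent;
	child->groupmask = BIT(parent->num_children++);
}

/*
 * A check loosely in the spirit of tmigr_check_migrator(): is any child
 * other than the one identified by @groupmask still active in @group?
 */
static bool demo_other_child_active(struct demo_group *group, uint32_t groupmask)
{
	return (group->active & ~groupmask) != 0;
}

int main(void)
{
	struct demo_group g = { 0 };
	struct demo_child a, b;

	demo_connect(&g, &a);	/* a.groupmask == 0x1 */
	demo_connect(&g, &b);	/* b.groupmask == 0x2 */

	g.active = a.groupmask | b.groupmask;
	printf("other child active: %d\n", demo_other_child_active(&g, a.groupmask)); /* 1 */

	g.active = a.groupmask;
	printf("other child active: %d\n", demo_other_child_active(&g, a.groupmask)); /* 0 */

	return 0;
}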