@@ -207,7 +207,7 @@ struct pool_workqueue {
 						/* L: nr of in_flight works */
 	int			nr_active;	/* L: nr of active works */
 	int			max_active;	/* L: max active works */
-	struct list_head	delayed_works;	/* L: delayed works */
+	struct list_head	inactive_works;	/* L: inactive works */
 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
 
@@ -1136,24 +1136,24 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
 	}
 }
 
-static void pwq_activate_delayed_work(struct work_struct *work)
+static void pwq_activate_inactive_work(struct work_struct *work)
 {
 	struct pool_workqueue *pwq = get_work_pwq(work);
 
 	trace_workqueue_activate_work(work);
 	if (list_empty(&pwq->pool->worklist))
 		pwq->pool->watchdog_ts = jiffies;
 	move_linked_works(work, &pwq->pool->worklist, NULL);
-	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
 	pwq->nr_active++;
 }
 
-static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
+static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
 {
-	struct work_struct *work = list_first_entry(&pwq->delayed_works,
+	struct work_struct *work = list_first_entry(&pwq->inactive_works,
 						    struct work_struct, entry);
 
-	pwq_activate_delayed_work(work);
+	pwq_activate_inactive_work(work);
 }
 
 /**
@@ -1176,10 +1176,10 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 	pwq->nr_in_flight[color]--;
 
 	pwq->nr_active--;
-	if (!list_empty(&pwq->delayed_works)) {
-		/* one down, submit a delayed one */
+	if (!list_empty(&pwq->inactive_works)) {
+		/* one down, submit an inactive one */
 		if (pwq->nr_active < pwq->max_active)
-			pwq_activate_first_delayed(pwq);
+			pwq_activate_first_inactive(pwq);
 	}
 
 	/* is flush in progress and are we at the flushing tip? */
@@ -1281,14 +1281,14 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 		debug_work_deactivate(work);
 
 		/*
-		 * A delayed work item cannot be grabbed directly because
+		 * An inactive work item cannot be grabbed directly because
 		 * it might have linked NO_COLOR work items which, if left
-		 * on the delayed_list, will confuse pwq->nr_active
+		 * on the inactive_works list, will confuse pwq->nr_active
 		 * management later on and cause stall.  Make sure the work
 		 * item is activated before grabbing.
 		 */
-		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-			pwq_activate_delayed_work(work);
+		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
+			pwq_activate_inactive_work(work);
 
 		list_del_init(&work->entry);
 		pwq_dec_nr_in_flight(pwq, get_work_color(work));
@@ -1490,8 +1490,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		if (list_empty(worklist))
 			pwq->pool->watchdog_ts = jiffies;
 	} else {
-		work_flags |= WORK_STRUCT_DELAYED;
-		worklist = &pwq->delayed_works;
+		work_flags |= WORK_STRUCT_INACTIVE;
+		worklist = &pwq->inactive_works;
 	}
 
 	debug_work_activate(work);
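
The __queue_work() and pwq_dec_nr_in_flight() hunks above are the two halves of the mechanism being renamed: a work item queued while nr_active >= max_active is not delayed on a timer the way a delayed_work is (hence the rename), it is simply parked on inactive_works with WORK_STRUCT_INACTIVE set, and each completion promotes at most one parked item. Below is a minimal standalone sketch of that gating under hypothetical toy_* names; it models the bookkeeping only, not the kernel's locking, work colors or list primitives.

/*
 * Toy model of the active/inactive gating above -- NOT kernel code.
 * All toy_* names are hypothetical. Initialize with nr_active = 0,
 * inactive_head = NULL and inactive_tail = &inactive_head.
 */
struct toy_work {
	struct toy_work *next;		/* FIFO link, newest at the tail */
	int inactive;			/* models WORK_STRUCT_INACTIVE */
};

struct toy_pwq {
	int nr_active;			/* works currently runnable */
	int max_active;			/* concurrency limit */
	struct toy_work *inactive_head;	/* models pwq->inactive_works */
	struct toy_work **inactive_tail;
};

/* Enqueue side; mirrors the __queue_work() hunk. */
static void toy_queue_work(struct toy_pwq *pwq, struct toy_work *work)
{
	if (pwq->nr_active < pwq->max_active) {
		pwq->nr_active++;	/* would go on pool->worklist */
	} else {
		work->inactive = 1;	/* work_flags |= WORK_STRUCT_INACTIVE */
		work->next = NULL;
		*pwq->inactive_tail = work;
		pwq->inactive_tail = &work->next;
	}
}

/* Promote the oldest parked item; mirrors pwq_activate_first_inactive(). */
static void toy_activate_first_inactive(struct toy_pwq *pwq)
{
	struct toy_work *work = pwq->inactive_head;

	pwq->inactive_head = work->next;
	if (!pwq->inactive_head)
		pwq->inactive_tail = &pwq->inactive_head;
	work->inactive = 0;		/* __clear_bit(WORK_STRUCT_INACTIVE_BIT) */
	pwq->nr_active++;
}

/* Completion side; mirrors the pwq_dec_nr_in_flight() hunk. */
static void toy_work_done(struct toy_pwq *pwq)
{
	pwq->nr_active--;
	if (pwq->inactive_head && pwq->nr_active < pwq->max_active)
		toy_activate_first_inactive(pwq);
}
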
@@ -2530,7 +2530,7 @@ static int rescuer_thread(void *__rescuer)
 			/*
 			 * The above execution of rescued work items could
 			 * have created more to rescue through
-			 * pwq_activate_first_delayed() or chained
+			 * pwq_activate_first_inactive() or chained
 			 * queueing.  Let's put @pwq back on mayday list so
 			 * that such back-to-back work items, which may be
 			 * being used to relieve memory pressure, don't
@@ -2956,7 +2956,7 @@ void drain_workqueue(struct workqueue_struct *wq)
 		bool drained;
 
 		raw_spin_lock_irq(&pwq->pool->lock);
-		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+		drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
 		raw_spin_unlock_irq(&pwq->pool->lock);
 
 		if (drained)
@@ -3712,7 +3712,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 * @pwq: target pool_workqueue
 *
 * If @pwq isn't freezing, set @pwq->max_active to the associated
- * workqueue's saved_max_active and activate delayed work items
+ * workqueue's saved_max_active and activate inactive work items
 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
 */
 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
@@ -3741,9 +3741,9 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 
 		pwq->max_active = wq->saved_max_active;
 
-		while (!list_empty(&pwq->delayed_works) &&
+		while (!list_empty(&pwq->inactive_works) &&
 		       pwq->nr_active < pwq->max_active) {
-			pwq_activate_first_delayed(pwq);
+			pwq_activate_first_inactive(pwq);
 			kick = true;
 		}
 
@@ -3774,7 +3774,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
 	pwq->wq = wq;
 	pwq->flush_color = -1;
 	pwq->refcnt = 1;
-	INIT_LIST_HEAD(&pwq->delayed_works);
+	INIT_LIST_HEAD(&pwq->inactive_works);
 	INIT_LIST_HEAD(&pwq->pwqs_node);
 	INIT_LIST_HEAD(&pwq->mayday_node);
 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
@@ -4361,7 +4361,7 @@ static bool pwq_busy(struct pool_workqueue *pwq)
 
 	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
 		return true;
-	if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+	if (pwq->nr_active || !list_empty(&pwq->inactive_works))
 		return true;
 
 	return false;
@@ -4557,7 +4557,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 	else
 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
 
-	ret = !list_empty(&pwq->delayed_works);
+	ret = !list_empty(&pwq->inactive_works);
 	preempt_enable();
 	rcu_read_unlock();
 
@@ -4753,11 +4753,11 @@ static void show_pwq(struct pool_workqueue *pwq)
 		pr_cont("\n");
 	}
 
-	if (!list_empty(&pwq->delayed_works)) {
+	if (!list_empty(&pwq->inactive_works)) {
 		bool comma = false;
 
-		pr_info("    delayed:");
-		list_for_each_entry(work, &pwq->delayed_works, entry) {
+		pr_info("    inactive:");
+		list_for_each_entry(work, &pwq->inactive_works, entry) {
 			pr_cont_work(comma, work);
 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
 		}
@@ -4787,7 +4787,7 @@ void show_workqueue_state(void)
 		bool idle = true;
 
 		for_each_pwq(pwq, wq) {
-			if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
+			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
 				idle = false;
 				break;
 			}
@@ -4799,7 +4799,7 @@ void show_workqueue_state(void)
 
 		for_each_pwq(pwq, wq) {
 			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+			if (pwq->nr_active || !list_empty(&pwq->inactive_works))
 				show_pwq(pwq);
 			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
 			/*
@@ -5182,7 +5182,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe);
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all freezable
- * workqueues will queue new works to their delayed_works list instead of
+ * workqueues will queue new works to their inactive_works list instead of
 * pool->worklist.
 *
 * CONTEXT:
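
The same promotion path doubles as the freeze machinery documented in the freeze_workqueues_begin() comment: pwq_adjust_max_active() clears max_active to zero while freezing, so every new item parks as inactive, and restores saved_max_active on thaw, draining the backlog through pwq_activate_first_inactive(). In the toy model sketched earlier, that adjustment might look like the following (toy_adjust_max_active is a hypothetical name, like the rest of the model).

/* Mirrors pwq_adjust_max_active(): freezing is simply max_active = 0. */
static void toy_adjust_max_active(struct toy_pwq *pwq, int frozen,
				  int saved_max_active)
{
	if (frozen) {
		pwq->max_active = 0;	/* all new work now parks as inactive */
		return;
	}
	pwq->max_active = saved_max_active;
	while (pwq->inactive_head && pwq->nr_active < pwq->max_active)
		toy_activate_first_inactive(pwq);	/* drain the backlog */
}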