@@ -3292,7 +3292,7 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
 
-	get_online_cpus();
+	cpus_read_lock();
 
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
@@ -3304,7 +3304,7 @@ int schedule_on_each_cpu(work_func_t func)
 	for_each_online_cpu(cpu)
 		flush_work(per_cpu_ptr(works, cpu));
 
-	put_online_cpus();
+	cpus_read_unlock();
 	free_percpu(works);
 	return 0;
 }
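For context on the first two hunks: schedule_on_each_cpu() queues a work item on every online CPU and then flushes each one, and the hotplug read lock keeps the online mask stable across both loops. A minimal sketch of that pattern as converted; the helper name run_on_each_online_cpu is hypothetical, though the body mirrors the function being patched:

	static int run_on_each_online_cpu(work_func_t func)
	{
		struct work_struct __percpu *works;
		int cpu;

		works = alloc_percpu(struct work_struct);
		if (!works)
			return -ENOMEM;

		cpus_read_lock();		/* was get_online_cpus() */
		for_each_online_cpu(cpu) {
			struct work_struct *work = per_cpu_ptr(works, cpu);

			INIT_WORK(work, func);
			schedule_work_on(cpu, work);
		}
		for_each_online_cpu(cpu)
			flush_work(per_cpu_ptr(works, cpu));
		cpus_read_unlock();		/* was put_online_cpus() */

		free_percpu(works);
		return 0;
	}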
@@ -4015,14 +4015,14 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
 static void apply_wqattrs_lock(void)
 {
 	/* CPUs should stay stable across pwq creations and installations */
-	get_online_cpus();
+	cpus_read_lock();
 	mutex_lock(&wq_pool_mutex);
 }
 
 static void apply_wqattrs_unlock(void)
 {
 	mutex_unlock(&wq_pool_mutex);
-	put_online_cpus();
+	cpus_read_unlock();
 }
 
 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
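Note the lock ordering these helpers encode: the hotplug read lock is taken before wq_pool_mutex and released after it, so the two strictly nest. A hedged sketch of how the pair is used around attribute application; this is how I recall the apply_workqueue_attrs() caller being structured at this point in the tree, so treat it as illustrative rather than verbatim:

	int apply_attrs_example(struct workqueue_struct *wq,
				const struct workqueue_attrs *attrs)
	{
		int ret;

		apply_wqattrs_lock();	/* cpus_read_lock(), then wq_pool_mutex */
		ret = apply_workqueue_attrs_locked(wq, attrs);
		apply_wqattrs_unlock();	/* reverse order: mutex, then read unlock */

		return ret;
	}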
@@ -4067,7 +4067,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
  *
  * Performs GFP_KERNEL allocations.
  *
- * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
+ * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
  *
  * Return: 0 on success and -errno on failure.
  */
@@ -4195,7 +4195,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 		return 0;
 	}
 
-	get_online_cpus();
+	cpus_read_lock();
 	if (wq->flags & __WQ_ORDERED) {
		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
 		/* there should only be single pwq for ordering guarantee */
@@ -4205,7 +4205,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 	} else {
 		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
-	put_online_cpus();
+	cpus_read_unlock();
 
 	return ret;
 }
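All of these hunks are mechanical renames rather than behavioral changes: by this point the old names were thin wrappers around the new API. From memory, the include/linux/cpu.h definitions were approximately:

	/* Approximate pre-removal wrappers, paraphrased from include/linux/cpu.h */
	static inline void get_online_cpus(void) { cpus_read_lock(); }
	static inline void put_online_cpus(void) { cpus_read_unlock(); }

so converting the callers lets the deprecated wrappers be removed later without changing locking semantics.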
@@ -5167,10 +5167,10 @@ long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
 {
 	long ret = -ENODEV;
 
-	get_online_cpus();
+	cpus_read_lock();
 	if (cpu_online(cpu))
 		ret = work_on_cpu(cpu, fn, arg);
-	put_online_cpus();
+	cpus_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu_safe);
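The read lock here closes the race between the cpu_online() check and the work actually running: the CPU cannot be unplugged in between. A hypothetical caller, with both functions below invented for illustration:

	static long read_cpu_state(void *arg)
	{
		/* Runs synchronously in workqueue context on the target CPU. */
		return 0;
	}

	static long probe_cpu(int cpu)
	{
		/* work_on_cpu_safe() returns -ENODEV if @cpu is offline. */
		return work_on_cpu_safe(cpu, read_cpu_state, NULL);
	}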
@@ -5442,7 +5442,7 @@ static ssize_t wq_pool_ids_show(struct device *dev,
 	const char *delim = "";
 	int node, written = 0;
 
-	get_online_cpus();
+	cpus_read_lock();
 	rcu_read_lock();
 	for_each_node(node) {
 		written += scnprintf(buf + written, PAGE_SIZE - written,
@@ -5452,7 +5452,7 @@ static ssize_t wq_pool_ids_show(struct device *dev,
 	}
 	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
 	rcu_read_unlock();
-	put_online_cpus();
+	cpus_read_unlock();
 
 	return written;
 }
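The sysfs path nests RCU inside the hotplug read lock. A condensed, hypothetical sketch of the resulting structure, with the real per-node pool-id formatting replaced by a trivial scnprintf():

	static ssize_t pool_ids_show_sketch(char *buf)
	{
		int node, written = 0;

		cpus_read_lock();	/* online mask stays stable while reporting */
		rcu_read_lock();	/* unbound pool_workqueues are RCU-protected */
		for_each_node(node)
			written += scnprintf(buf + written, PAGE_SIZE - written,
					     "%d ", node);
		rcu_read_unlock();
		cpus_read_unlock();

		return written;
	}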