@@ -63,7 +63,7 @@ static int max_id __read_mostly;
 /* Array of zone pointers */
 static struct zone_device **zones;
 /* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
 /* Protects zone operation in the work function against hotplug removal */
 static DEFINE_MUTEX(thermal_zone_mutex);
 
@@ -266,12 +266,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	u64 msr_val, wr_val;
 
 	mutex_lock(&thermal_zone_mutex);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	++pkg_work_cnt;
 
 	zonedev = pkg_temp_thermal_get_dev(cpu);
 	if (!zonedev) {
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		mutex_unlock(&thermal_zone_mutex);
 		return;
 	}
@@ -285,7 +285,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	}
 
 	enable_pkg_thres_interrupt();
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/*
 	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -310,7 +310,7 @@ static int pkg_thermal_notify(u64 msr_val)
 	struct zone_device *zonedev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pkg_temp_lock, flags);
+	raw_spin_lock_irqsave(&pkg_temp_lock, flags);
 	++pkg_interrupt_cnt;
 
 	disable_pkg_thres_interrupt();
@@ -322,7 +322,7 @@ static int pkg_thermal_notify(u64 msr_val)
 		pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
 	}
 
-	spin_unlock_irqrestore(&pkg_temp_lock, flags);
+	raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
 	return 0;
 }
 
@@ -368,9 +368,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
 		      zonedev->msr_pkg_therm_high);
 
 	cpumask_set_cpu(cpu, &zonedev->cpumask);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	zones[id] = zonedev;
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	return 0;
 }
 
@@ -407,7 +407,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 	}
 
 	/* Protect against work and interrupts */
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 
 	/*
 	 * Check whether this cpu was the current target and store the new
@@ -439,9 +439,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 		 * To cancel the work we need to drop the lock, otherwise
 		 * we might deadlock if the work needs to be flushed.
 		 */
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		cancel_delayed_work_sync(&zonedev->work);
-		spin_lock_irq(&pkg_temp_lock);
+		raw_spin_lock_irq(&pkg_temp_lock);
 		/*
 		 * If this is not the last cpu in the package and the work
 		 * did not run after we dropped the lock above, then we
@@ -452,7 +452,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 			pkg_thermal_schedule_work(target, &zonedev->work);
 	}
 
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/* Final cleanup if this is the last cpu */
 	if (lastcpu)
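
For context, the conversion matters because on PREEMPT_RT a spinlock_t becomes a sleeping lock, while a raw_spinlock_t keeps spinning and can therefore still be taken from the package thermal interrupt notification path. Below is a minimal, purely illustrative sketch of the resulting locking pattern; it is not part of this commit, and the example_* names are hypothetical rather than taken from the driver.

/*
 * Illustrative only: example_lock, example_count, example_notify()
 * and example_work() are made-up names. The pattern mirrors the
 * converted call sites: irqsave/irqrestore in interrupt/notifier
 * context, plain _irq variants where interrupts are known enabled.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical lock */
static int example_count;			/* hypothetical shared state */

/* Interrupt/notifier context: save and restore the IRQ flags. */
static void example_notify(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_count++;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

/* Work or hotplug context with IRQs enabled: the _irq variants suffice. */
static void example_work(void)
{
	raw_spin_lock_irq(&example_lock);
	example_count--;
	raw_spin_unlock_irq(&example_lock);
}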