@@ -256,16 +256,13 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp = RWSEM_UNLOCKED_VALUE;
-	bool ret = false;
 
-	preempt_disable();
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
-		ret = true;
+		return true;
 	}
 
-	preempt_enable();
-	return ret;
+	return false;
 }
 
 /*
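With the preempt_disable()/preempt_enable() pair moved out to the callers (see the __down_write*() hunks further down), the writer trylock collapses to a single acquire-ordered compare-and-swap. As a rough, stand-alone illustration of that pattern only (plain C11 atomics and toy names, not the kernel API):

```c
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_WRITER_LOCKED 1L	/* stand-in for RWSEM_WRITER_LOCKED */

struct toy_rwsem {
	atomic_long count;	/* 0 == unlocked, like RWSEM_UNLOCKED_VALUE */
};

/* One acquire-ordered CAS: either succeed and own the lock, or fail and return. */
static inline bool toy_write_trylock(struct toy_rwsem *sem)
{
	long expected = 0;

	return atomic_compare_exchange_strong_explicit(&sem->count, &expected,
						       TOY_WRITER_LOCKED,
						       memory_order_acquire,
						       memory_order_relaxed);
}
```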
@@ -624,18 +621,16 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 			 */
 			if (first->handoff_set && (waiter != first))
 				return false;
-
-			/*
-			 * First waiter can inherit a previously set handoff
-			 * bit and spin on rwsem if lock acquisition fails.
-			 */
-			if (waiter == first)
-				waiter->handoff_set = true;
 		}
 
 		new = count;
 
 		if (count & RWSEM_LOCK_MASK) {
+			/*
+			 * A waiter (first or not) can set the handoff bit
+			 * if it is an RT task or wait in the wait queue
+			 * for too long.
+			 */
 			if (has_handoff || (!rt_task(waiter->task) &&
 					    !time_after(jiffies, waiter->timeout)))
 				return false;
@@ -651,11 +646,12 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
 
 	/*
-	 * We have either acquired the lock with handoff bit cleared or
-	 * set the handoff bit.
+	 * We have either acquired the lock with handoff bit cleared or set
+	 * the handoff bit. Only the first waiter can have its handoff_set
+	 * set here to enable optimistic spinning in slowpath loop.
 	 */
 	if (new & RWSEM_FLAG_HANDOFF) {
-		waiter->handoff_set = true;
+		first->handoff_set = true;
 		lockevent_inc(rwsem_wlock_handoff);
 		return false;
 	}
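The new comment in the two rwsem_try_write_lock() hunks above spells out when a waiter may request a handoff: only while the lock is still held, and only if no handoff is pending yet and the waiter is either an RT task or has already waited past its timeout. A hypothetical predicate distilling just that condition (the names are illustrative, not kernel helpers):

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the per-waiter state consulted above. */
struct toy_waiter {
	bool	 is_rt_task;	/* rt_task(waiter->task) */
	uint64_t timeout;	/* waiter->timeout, in jiffies */
};

/*
 * Mirrors the condition in rwsem_try_write_lock(): with the lock still
 * held, a waiter may set the handoff bit only if no handoff is pending
 * and it is an RT task or its wait exceeded the timeout (ignoring the
 * jiffies wraparound that time_after() handles in the real code).
 */
static bool toy_may_set_handoff(const struct toy_waiter *w,
				bool has_handoff, uint64_t jiffies_now)
{
	if (has_handoff)
		return false;
	return w->is_rt_task || jiffies_now > w->timeout;
}
```

When the predicate holds, the CAS loop retries with RWSEM_FLAG_HANDOFF in the new count; per the second hunk, only the first waiter then gets handoff_set so it can keep optimistically spinning in the slowpath loop.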
@@ -717,7 +713,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 	}
 
-	preempt_disable();
 	/*
 	 * Disable preemption is equal to the RCU read-side crital section,
 	 * thus the task_strcut structure won't go away.
@@ -729,7 +724,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
 	return ret;
@@ -829,8 +823,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	int loop = 0;
 	u64 rspin_threshold = 0;
 
-	preempt_disable();
-
 	/* sem->wait_lock should not be held when doing optimistic spinning */
 	if (!osq_lock(&sem->osq))
 		goto done;
@@ -938,7 +930,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	}
 	osq_unlock(&sem->osq);
 done:
-	preempt_enable();
 	lockevent_cond_inc(rwsem_opt_fail, !taken);
 	return taken;
 }
@@ -1092,7 +1083,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
 			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
 			break;
 		}
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_reader);
 	}
 
@@ -1179,15 +1170,12 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 		if (waiter.handoff_set) {
 			enum owner_state owner_state;
 
-			preempt_disable();
 			owner_state = rwsem_spin_on_owner(sem);
-			preempt_enable();
-
 			if (owner_state == OWNER_NULL)
 				goto trylock_again;
 		}
 
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 trylock_again:
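Because the slowpaths are now entered with preemption already disabled by __down_read_common()/__down_write_common() (see the hunks below), the plain schedule() calls in the sleep loops are replaced with schedule_preempt_disabled(), which expects to be called with a preempt count of 1. Its rough shape in current kernels is sketched below for context; see kernel/sched/core.c for the authoritative version:

```c
/*
 * Rough shape of schedule_preempt_disabled(): briefly re-enable
 * preemption (without forcing an immediate reschedule), sleep, then
 * return with preemption disabled again, as the callers above expect.
 */
void schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}
```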
@@ -1254,14 +1242,20 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  */
 static inline int __down_read_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
 	long count;
 
+	preempt_disable();
 	if (!rwsem_read_trylock(sem, &count)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
-			return -EINTR;
+		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
+			ret = -EINTR;
+			goto out;
+		}
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}
-	return 0;
+out:
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_read(struct rw_semaphore *sem)
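For readability, here is __down_read_common() as it reads once the hunk above is applied (reassembled from the added and context lines only, nothing new):

```c
static inline int __down_read_common(struct rw_semaphore *sem, int state)
{
	int ret = 0;
	long count;

	preempt_disable();
	if (!rwsem_read_trylock(sem, &count)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
			ret = -EINTR;
			goto out;
		}
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
out:
	preempt_enable();
	return ret;
}
```

The whole acquisition, fast path and slowpath alike, now sits inside one preempt-disabled window, with the -EINTR error path routed through the same preempt_enable().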
@@ -1281,32 +1275,39 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
+	int ret = 0;
 	long tmp;
 
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 
+	preempt_disable();
 	tmp = atomic_long_read(&sem->count);
 	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
 						    tmp + RWSEM_READER_BIAS)) {
 			rwsem_set_reader_owned(sem);
-			return 1;
+			ret = 1;
+			break;
 		}
 	}
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 /*
  * lock for writing
  */
 static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
+
+	preempt_disable();
 	if (unlikely(!rwsem_write_trylock(sem))) {
 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
-			return -EINTR;
+			ret = -EINTR;
 	}
-
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
@@ -1321,8 +1322,14 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
+	int ret;
+
+	preempt_disable();
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-	return rwsem_write_trylock(sem);
+	ret = rwsem_write_trylock(sem);
+	preempt_enable();
+
+	return ret;
 }
 
 /*
@@ -1335,6 +1342,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 
+	preempt_disable();
 	rwsem_clear_reader_owned(sem);
 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@@ -1343,6 +1351,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 		clear_nonspinnable(sem);
 		rwsem_wake(sem);
 	}
+	preempt_enable();
 }
 
 /*
@@ -1363,9 +1372,9 @@ static inline void __up_write(struct rw_semaphore *sem)
 	preempt_disable();
 	rwsem_clear_owner(sem);
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
-	preempt_enable();
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
+	preempt_enable();
 }
 
 /*
@@ -1383,11 +1392,13 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	 * write side. As such, rely on RELEASE semantics.
 	 */
 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
+	preempt_disable();
 	tmp = atomic_long_fetch_add_release(
 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
 	rwsem_set_reader_owned(sem);
 	if (tmp & RWSEM_FLAG_WAITERS)
 		rwsem_downgrade_wake(sem);
+	preempt_enable();
 }
 
 #else /* !CONFIG_PREEMPT_RT */
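In the two release-side hunks above, preempt_enable() moves past rwsem_wake()/rwsem_downgrade_wake(), so the releasing task cannot be preempted between updating sem->count and waking the waiters. Reassembled from the __downgrade_write() hunk (earlier lines of the function are not shown in the diff and are omitted here):

```c
	/* ... declarations and earlier debug checks omitted ... */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	preempt_disable();
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
	preempt_enable();
}
```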
@@ -1662,6 +1673,12 @@ void down_read_non_owner(struct rw_semaphore *sem)
 {
 	might_sleep();
 	__down_read(sem);
+	/*
+	 * The owner value for a reader-owned lock is mostly for debugging
+	 * purpose only and is not critical to the correct functioning of
+	 * rwsem. So it is perfectly fine to set it in a preempt-enabled
+	 * context here.
+	 */
 	__rwsem_set_reader_owned(sem, NULL);
 }
 EXPORT_SYMBOL(down_read_non_owner);