@@ -4404,7 +4404,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4421,26 +4422,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
 	bool ok = spin_trylock(&txq->_xmit_lock);
-	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
+
+	if (likely(ok)) {
+		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+	}
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
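For context, the annotated stores above pair with a lockless read in __dev_queue_xmit(): the transmit path checks whether the current CPU already owns _xmit_lock before trying to take it, and that check runs without the lock held. Below is a minimal userspace sketch of the pattern, not the kernel source; the READ_ONCE()/WRITE_ONCE() stand-ins and the names netdev_queue_sketch, xmit_would_recurse, and xmit_lock_owner_clear are illustrative assumptions.

/* Illustrative stand-ins; the kernel's READ_ONCE()/WRITE_ONCE() live in
 * <linux/compiler.h> and handle more cases than these volatile casts.
 */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

struct netdev_queue_sketch {
	int xmit_lock_owner;	/* -1 when no CPU holds _xmit_lock */
};

/* Modeled on the recursion check in __dev_queue_xmit(): the owner field
 * is read without holding _xmit_lock, so the load is annotated with
 * READ_ONCE() to keep the compiler from tearing or re-reading it and to
 * mark the race as intentional for KCSAN.
 */
static int xmit_would_recurse(struct netdev_queue_sketch *txq, int cpu)
{
	return READ_ONCE(txq->xmit_lock_owner) == cpu;
}

/* Writer side, mirroring __netif_tx_unlock() in the diff above: the
 * store happens under _xmit_lock, but it is still annotated because it
 * races with the lockless read above.
 */
static void xmit_lock_owner_clear(struct netdev_queue_sketch *txq)
{
	WRITE_ONCE(txq->xmit_lock_owner, -1);
}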