@@ -4403,7 +4403,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4420,26 +4421,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
 	bool ok = spin_trylock(&txq->_xmit_lock);
-	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
+
+	if (likely(ok)) {
+		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+	}
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
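These stores pair with a lockless read of txq->xmit_lock_owner in __dev_queue_xmit() (net/core/dev.c), where the owner field is checked without holding _xmit_lock to detect recursion onto the same tx queue. The sketch below is a condensed paraphrase of that reader side, not the verbatim upstream code; the function name xmit_skb_sketch is made up for illustration, while HARD_TX_LOCK(), netif_xmit_stopped() and dev_hard_start_xmit() are real kernel APIs.

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/skbuff.h>

/* Condensed paraphrase of the reader side in __dev_queue_xmit();
 * upstream runs this under rcu_read_lock_bh(), which also pins the CPU.
 */
static int xmit_skb_sketch(struct net_device *dev, struct netdev_queue *txq,
			   struct sk_buff *skb)
{
	int cpu = smp_processor_id();
	int rc = NETDEV_TX_OK;

	/*
	 * Lockless peek at the owner field, done without holding
	 * _xmit_lock. READ_ONCE() pairs with the WRITE_ONCE() stores
	 * added by this patch.
	 */
	if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
		HARD_TX_LOCK(dev, txq, cpu);	/* __netif_tx_lock() unless LLTX */
		if (!netif_xmit_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &rc);
		HARD_TX_UNLOCK(dev, txq);
	} else {
		/* Recursion detected: this CPU already owns the tx lock. */
		net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
				     dev->name);
		kfree_skb(skb);
		rc = -ENETDOWN;
	}
	return rc;
}

Without the READ_ONCE()/WRITE_ONCE() pair, KCSAN would flag this unlocked read as racing with the writes made under _xmit_lock; the annotations document that the race is intentional and keep the compiler from tearing or caching the accesses.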