Skip to content

Commit b824c3e

Browse files
Sebastian Andrzej Siewior authored and Paolo Abeni committed
net: Provide a PREEMPT_RT specific check for netdev_queue::_xmit_lock
After acquiring netdev_queue::_xmit_lock the number of the CPU owning the lock is recorded in netdev_queue::xmit_lock_owner. This works as long as the BH context is not preemptible. On PREEMPT_RT the softirq context is preemptible and without the softirq lock it is possible to have multiple users in __dev_queue_xmit() submitting a skb on the same CPU. This is fine in general, but it also means that the current CPU is recorded as netdev_queue::xmit_lock_owner. This in turn leads to the recursion alert and the skb is dropped. Instead of checking for the CPU number that owns the lock, PREEMPT_RT can check if the lock owner matches the current task. Add netif_tx_owned() which returns true if the current context owns the lock by comparing the provided CPU number with the recorded number. This resembles the current check by negating the condition (the current check returns true if the lock is not owned). On PREEMPT_RT use rt_mutex_owner() to return the lock owner and compare the current task against it. Use the new helper in __dev_queue_xmit() and netif_local_xmit_active() which provides a similar check. Update comments regarding pairing READ_ONCE(). Reported-by: Bert Karwatzki <[email protected]> Closes: https://lore.kernel.org/all/[email protected] Fixes: 3253cb4 ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT") Signed-off-by: Sebastian Andrzej Siewior <[email protected]> Reported-by: Bert Karwatzki <[email protected]> Signed-off-by: Sebastian Andrzej Siewior <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Paolo Abeni <[email protected]>
1 parent ae779bc commit b824c3e

3 files changed

Lines changed: 24 additions & 10 deletions

File tree

include/linux/netdevice.h

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4711,7 +4711,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
47114711
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
47124712
{
47134713
spin_lock(&txq->_xmit_lock);
4714-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4714+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47154715
WRITE_ONCE(txq->xmit_lock_owner, cpu);
47164716
}
47174717

@@ -4729,7 +4729,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
47294729
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
47304730
{
47314731
spin_lock_bh(&txq->_xmit_lock);
4732-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4732+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47334733
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
47344734
}
47354735

@@ -4738,22 +4738,22 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
47384738
bool ok = spin_trylock(&txq->_xmit_lock);
47394739

47404740
if (likely(ok)) {
4741-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4741+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47424742
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
47434743
}
47444744
return ok;
47454745
}
47464746

47474747
static inline void __netif_tx_unlock(struct netdev_queue *txq)
47484748
{
4749-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4749+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47504750
WRITE_ONCE(txq->xmit_lock_owner, -1);
47514751
spin_unlock(&txq->_xmit_lock);
47524752
}
47534753

47544754
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
47554755
{
4756-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4756+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47574757
WRITE_ONCE(txq->xmit_lock_owner, -1);
47584758
spin_unlock_bh(&txq->_xmit_lock);
47594759
}
@@ -4846,6 +4846,23 @@ static inline void netif_tx_disable(struct net_device *dev)
48464846
local_bh_enable();
48474847
}
48484848

4849+
#ifndef CONFIG_PREEMPT_RT
4850+
static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4851+
{
4852+
/* Other cpus might concurrently change txq->xmit_lock_owner
4853+
* to -1 or to their cpu id, but not to our id.
4854+
*/
4855+
return READ_ONCE(txq->xmit_lock_owner) == cpu;
4856+
}
4857+
4858+
#else
4859+
static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4860+
{
4861+
return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
4862+
}
4863+
4864+
#endif
4865+
48494866
static inline void netif_addr_lock(struct net_device *dev)
48504867
{
48514868
unsigned char nest_level = 0;

net/core/dev.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4818,10 +4818,7 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
48184818
if (dev->flags & IFF_UP) {
48194819
int cpu = smp_processor_id(); /* ok because BHs are off */
48204820

4821-
/* Other cpus might concurrently change txq->xmit_lock_owner
4822-
* to -1 or to their cpu id, but not to our id.
4823-
*/
4824-
if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4821+
if (!netif_tx_owned(txq, cpu)) {
48254822
bool is_list = false;
48264823

48274824
if (dev_xmit_recursion())

net/core/netpoll.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ static int netif_local_xmit_active(struct net_device *dev)
132132
for (i = 0; i < dev->num_tx_queues; i++) {
133133
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
134134

135-
if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
135+
if (netif_tx_owned(txq, smp_processor_id()))
136136
return 1;
137137
}
138138

0 commit comments

Comments
 (0)