softirq: Check preemption after reenabling interrupts
The callers of raise_softirq_irqoff() disable interrupts and wake the
softirq daemon, but after reenabling interrupts there is no preemption
check, so the execution of the softirq thread might be delayed arbitrarily.

In principle we could add that check to local_irq_enable/restore, but
that's overkill as the raise_softirq_irqoff() sections are the only
ones which show this behaviour.

Reported-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner authored and Sebastian Andrzej Siewior committed Sep 13, 2021
1 parent 5d15c5e commit b9369c0
Showing 3 changed files with 15 additions and 0 deletions.
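
For context, raise_softirq_irqoff() must be called with interrupts
disabled; besides marking the softirq pending, it wakes ksoftirqd when
invoked outside interrupt context. A minimal sketch of the mainline
helper (kernel/softirq.c; the exact form varies by kernel version and
RT patch level):

/*
 * Sketch of raise_softirq_irqoff() as in mainline kernel/softirq.c.
 * Caller must have interrupts disabled.
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);	/* mark softirq nr pending */

	/*
	 * Outside interrupt context irq_exit() will not run the pending
	 * softirqs for us, so ksoftirqd has to be woken. On PREEMPT_RT
	 * the woken thread cannot preempt the current one until a
	 * preemption point is reached -- the point this commit adds
	 * right after interrupts are reenabled.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}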
3 changes: 3 additions & 0 deletions include/linux/preempt.h
@@ -190,8 +190,10 @@ do { \

#ifdef CONFIG_PREEMPT_RT
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt() preempt_check_resched()
#else
# define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() barrier();
#endif

#define preemptible() (preempt_count() == 0 && !irqs_disabled())
@@ -262,6 +264,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
+#define preempt_check_resched_rt() barrier()
#define preemptible() 0

#endif /* CONFIG_PREEMPT_COUNT */
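
Under PREEMPT_RT the new preempt_check_resched_rt() maps to
preempt_check_resched(); on non-RT kernels (and whenever
CONFIG_PREEMPT_COUNT is off) it is a plain compiler barrier, so the
call sites added below cost nothing there. For reference,
preempt_check_resched() expands roughly as follows with
CONFIG_PREEMPTION enabled (include/linux/preempt.h; exact form varies
by kernel version):

/*
 * Rough expansion of preempt_check_resched() with CONFIG_PREEMPTION
 * (include/linux/preempt.h): reschedule now if the wakeup above set
 * TIF_NEED_RESCHED on the current task.
 */
#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)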
5 changes: 5 additions & 0 deletions lib/irq_poll.c
@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop)
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
+preempt_check_resched_rt();
}
EXPORT_SYMBOL(irq_poll_sched);

@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop)
local_irq_save(flags);
__irq_poll_complete(iop);
local_irq_restore(flags);
+preempt_check_resched_rt();
}
EXPORT_SYMBOL(irq_poll_complete);

@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
}

local_irq_enable();
+preempt_check_resched_rt();

/* Even though interrupts have been re-enabled, this
* access is safe because interrupts can only add new
@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

local_irq_enable();
+preempt_check_resched_rt();
}

/**
@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
+preempt_check_resched_rt();

return 0;
}
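
The hunks in lib/irq_poll.c above and net/core/dev.c below all apply
the same pattern: a raise_softirq_irqoff() section gets a preemption
check once interrupts are back on. Schematically (example_queue_work()
is a hypothetical caller, not code from this patch):

/*
 * The recurring pattern this commit applies; example_queue_work() is
 * a hypothetical illustration, not a function from the patch.
 */
static void example_queue_work(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... queue per-CPU work ... */
	raise_softirq_irqoff(NET_TX_SOFTIRQ);	/* may wake ksoftirqd */
	local_irq_restore(flags);
	preempt_check_resched_rt();	/* new: on RT, let ksoftirqd run now */
}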
7 changes: 7 additions & 0 deletions net/core/dev.c
@@ -3040,6 +3040,7 @@ static void __netif_reschedule(struct Qdisc *q)
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
+preempt_check_resched_rt();
}

void __netif_schedule(struct Qdisc *q)
@@ -3102,6 +3103,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
+preempt_check_resched_rt();
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

@@ -4644,6 +4646,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
rps_unlock(sd);

local_irq_restore(flags);
+preempt_check_resched_rt();

atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
@@ -6387,12 +6390,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
sd->rps_ipi_list = NULL;

local_irq_enable();
+preempt_check_resched_rt();

/* Send pending IPI's to kick RPS processing on remote cpus. */
net_rps_send_ipi(remsd);
} else
#endif
local_irq_enable();
+preempt_check_resched_rt();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -6470,6 +6475,7 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
+preempt_check_resched_rt();
}
EXPORT_SYMBOL(__napi_schedule);

@@ -11288,6 +11294,7 @@ static int dev_cpu_dead(unsigned int oldcpu)

raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+preempt_check_resched_rt();

#ifdef CONFIG_RPS
remsd = oldsd->rps_ipi_list;
