sched/preempt: Simplify irqentry_exit_cond_resched() callers
Currently callers of irqentry_exit_cond_resched() need to be aware of
whether the function should be indirected via a static call, leading to
ugly ifdeffery in callers.

Save them the hassle with a static inline wrapper that does the right
thing. The raw_irqentry_exit_cond_resched() will also be useful in
subsequent patches which will add conditional wrappers for preemption
functions.

Note: in arch/x86/entry/common.c, xen_pv_evtchn_do_upcall() always calls
irqentry_exit_cond_resched() directly, even when PREEMPT_DYNAMIC is in
use. I believe this is a latent bug (which this patch corrects), but I'm
not entirely certain this wasn't deliberate.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-4-mark.rutland@arm.com
commit 4624a14f4d (parent 8a69fe0be1)
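To make the "ugly ifdeffery" concrete: before this patch, a caller that wanted the dynamically switchable behaviour had to open-code the configuration check; afterwards the single wrapper does the selection. A minimal caller-side sketch, distilled from the irqentry_exit() hunk below (illustration only, not a new hunk):

/* Before: each caller had to know about CONFIG_PREEMPT_DYNAMIC. */
#ifdef CONFIG_PREEMPT_DYNAMIC
	static_call(irqentry_exit_cond_resched)();
#else
	irqentry_exit_cond_resched();
#endif

/* After: one call; the header maps it to static_call() or the raw function. */
	irqentry_exit_cond_resched();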
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -454,11 +454,14 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
  *
  * Conditional reschedule with additional sanity checks.
  */
-void irqentry_exit_cond_resched(void);
+void raw_irqentry_exit_cond_resched(void);
 #ifdef CONFIG_PREEMPT_DYNAMIC
-#define irqentry_exit_cond_resched_dynamic_enabled	irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_enabled	raw_irqentry_exit_cond_resched
 #define irqentry_exit_cond_resched_dynamic_disabled	NULL
-DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched()	static_call(irqentry_exit_cond_resched)()
+#else
+#define irqentry_exit_cond_resched()	raw_irqentry_exit_cond_resched()
 #endif
 
 /**
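The header hunk above leans on the kernel's static-call machinery. For readers unfamiliar with it, here is a minimal sketch of the pattern; the names my_hook, my_default and my_other are hypothetical and not part of this patch:

#include <linux/static_call.h>

static void my_default(void)
{
	/* default behaviour, analogous to raw_irqentry_exit_cond_resched() */
}

static void my_other(void)
{
	/* alternative behaviour */
}

/* Define the call key together with its initial target. */
DEFINE_STATIC_CALL(my_hook, my_default);

static void my_caller(void)
{
	/* Emits a direct call to the current target; patched in place on update. */
	static_call(my_hook)();
}

static void my_switch(void)
{
	/* Retarget the call site at runtime, as "preempt=" mode switching does. */
	static_call_update(my_hook, my_other);
}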
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -380,7 +380,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
 	return ret;
 }
 
-void irqentry_exit_cond_resched(void)
+void raw_irqentry_exit_cond_resched(void)
 {
 	if (!preempt_count()) {
 		/* Sanity check RCU and thread stack */
@@ -392,7 +392,7 @@ void irqentry_exit_cond_resched(void)
 	}
 }
 #ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
 #endif
 
 noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
@@ -420,13 +420,9 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
 	}
 
 	instrumentation_begin();
-	if (IS_ENABLED(CONFIG_PREEMPTION)) {
-#ifdef CONFIG_PREEMPT_DYNAMIC
-		static_call(irqentry_exit_cond_resched)();
-#else
+	if (IS_ENABLED(CONFIG_PREEMPTION))
 		irqentry_exit_cond_resched();
-#endif
-	}
+
 	/* Covers both tracing and lockdep */
 	trace_hardirqs_on();
 	instrumentation_end();
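On the latent-bug note in the commit message: because irqentry_exit_cond_resched() is now a macro that expands to static_call(irqentry_exit_cond_resched)() when CONFIG_PREEMPT_DYNAMIC is enabled (and to raw_irqentry_exit_cond_resched() otherwise), a caller that previously invoked the function directly, such as the Xen PV upcall path mentioned above, now picks up the dynamically selected preemption behaviour without any further caller-side change.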