arm64 fixes for -rc5
Just a couple of stragglers here:

 - Fix an issue migrating interrupts on CPU hotplug
 - Fix a potential information leak of TLS registers across an exec
   (Nathan has sent a corresponding patch for arch/arm/ to rmk)

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABCgAGBQJUEfKCAAoJELescNyEwWM0I/8H/RLpR9kvk0npB8lroFJZUJfa
yIveU5kWnFpEpycjkDDHTYmXbbAMni1t6wII4ofMErDtMkJMW3y11gAp2iUEdP8w
YNGSO9WV8uddbEamoDnO1jMS2eE1sHSSFjXN5529ygM00mAdSq/sIYUkGrjkbRmo
6DHWFvaHYjZDIAb1teFFqtuaL5c4SX+DTwInqwO0hXIPXfgjmSD9PDk8KXJN0Qiu
daX3sNHlFyb4Bh4Q2/aIvQHrkFPVcNUnekCwNoHGgYJ/FMjTV67Kb5SmnlV41rSu
GU4dUuc26gumgrOQ9Yhob2AU6RhC4Auuht7ck+STZWy5kllmjX5TLZMLXmrLIRM=
=0A4L
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Just a couple of stragglers here:

  - fix an issue migrating interrupts on CPU hotplug

  - fix a potential information leak of TLS registers across an exec
    (Nathan has sent a corresponding patch for arch/arm/ to rmk)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: flush TLS registers during exec
  arm64: use irq_set_affinity with force=false when migrating irqs
commit 9925cc1396
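The TLS item above is easy to probe from user space. The snippet below is a hypothetical check of my own (not part of this merge; the file name and expected values are assumptions): it dumps the two EL0-visible TLS registers. On a kernel carrying the fix, tpidrro_el0 should read back as zero in a freshly exec'd native 64-bit process, whereas an unpatched kernel can briefly expose the TLS pointer of the compat image that called exec. tpidr_el0 will normally show the C library's own TLS base, since the runtime reinitialises it during startup.

/* tls-probe.c: hypothetical arm64 user-space probe, not from this merge. */
#include <stdio.h>

int main(void)
{
	unsigned long tpidr, tpidrro;

	/* Both registers are readable at EL0; tpidrro_el0 is read-only there. */
	asm volatile("mrs %0, tpidr_el0"   : "=r" (tpidr));
	asm volatile("mrs %0, tpidrro_el0" : "=r" (tpidrro));

	printf("tpidr_el0   = %#lx\n", tpidr);
	printf("tpidrro_el0 = %#lx (expect 0 on a fixed kernel)\n", tpidrro);
	return 0;
}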
arch/arm64/kernel/irq.c

@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
 		return false;
 
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
 		ret = true;
+	}
 
-	/*
-	 * when using forced irq_set_affinity we must ensure that the cpu
-	 * being offlined is not present in the affinity mask, it may be
-	 * selected as the target CPU otherwise
-	 */
-	affinity = cpu_online_mask;
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);
 
 	return ret;
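To make the shape of the irq change clearer, here is a small user-space model of the new affinity selection (my own sketch; pick_affinity and the plain unsigned bitmasks are stand-ins for the kernel's cpumask API, not real kernel interfaces): the fallback to the online mask, and the decision to report the interrupt as migrated, now happen only when the current affinity contains no online CPU, and the chip's irq_set_affinity is then called with force=false.

/* Simplified model of migrate_one_irq()'s affinity choice; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

/* Bitmask stand-ins for cpumask_t: bit N set means CPU N. */
static unsigned int pick_affinity(unsigned int affinity, unsigned int online,
				  bool *migrated)
{
	*migrated = false;

	/* Models cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids. */
	if ((affinity & online) == 0) {
		affinity = online;	/* fall back to all online CPUs */
		*migrated = true;
	}

	return affinity;
}

int main(void)
{
	bool migrated;

	/* IRQ bound to CPU1 only while CPU1 goes offline (CPUs 0, 2, 3 online). */
	unsigned int mask = pick_affinity(0x2, 0xd, &migrated);

	printf("new affinity %#x, migrated=%d\n", mask, migrated);
	return 0;
}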
arch/arm64/kernel/process.c

@@ -230,9 +230,27 @@ void exit_thread(void)
 {
 }
 
+static void tls_thread_flush(void)
+{
+	asm ("msr tpidr_el0, xzr");
+
+	if (is_compat_task()) {
+		current->thread.tp_value = 0;
+
+		/*
+		 * We need to ensure ordering between the shadow state and the
+		 * hardware state, so that we don't corrupt the hardware state
+		 * with a stale shadow state during context switch.
+		 */
+		barrier();
+		asm ("msr tpidrro_el0, xzr");
+	}
+}
+
 void flush_thread(void)
 {
 	fpsimd_flush_thread();
+	tls_thread_flush();
 	flush_ptrace_hw_breakpoint(current);
 }
 
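The barrier() between the shadow and hardware updates in tls_thread_flush() is the kernel's compiler barrier. The sketch below is my own simplification (ordinary variables stand in for thread.tp_value and for the tpidrro_el0 write, which cannot be issued from EL0): the asm behind the real register write has no memory operands, so without the barrier the compiler could sink the shadow store past it, and a context switch landing in that window would reload the hardware register from the stale shadow value.

/* Ordering sketch only; models tls_thread_flush(), not kernel source. */
#define barrier() __asm__ __volatile__("" : : : "memory")

static unsigned long shadow_tp_value;		/* stands in for thread.tp_value */
static volatile unsigned long hw_tpidrro_el0;	/* stands in for the msr target  */

static void flush_compat_tls(void)
{
	shadow_tp_value = 0;	/* shadow state first...                     */
	barrier();		/* ...kept ordered against...                */
	hw_tpidrro_el0 = 0;	/* ...the (modelled) hardware register write */
}

int main(void)
{
	flush_compat_tls();
	return 0;
}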
arch/arm64/kernel/sys_compat.c

@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
 
 	case __ARM_NR_compat_set_tls:
 		current->thread.tp_value = regs->regs[0];
+
+		/*
+		 * Protect against register corruption from context switch.
+		 * See comment in tls_thread_flush.
+		 */
+		barrier();
 		asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
 		return 0;
 
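For completeness, the path above is what a 32-bit (compat) process hits when it uses the ARM private set_tls call. The snippet below is a hypothetical AArch32 test, not part of the patch; it relies on the long-standing ARM ABI values __ARM_NR_set_tls (0x0f0005) and the __kuser_get_tls helper at 0xffff0fe0, and would be built as a 32-bit binary and run under an arm64 kernel with compat support.

/* Hypothetical 32-bit test exercising the compat set_tls path shown above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __ARM_NR_set_tls 0x0f0005	/* ARM private syscall: set TLS */

/* Kernel-provided user helper that returns the current TLS value. */
typedef unsigned int (*kuser_get_tls_t)(void);
#define kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

int main(void)
{
	unsigned int old_tls = kuser_get_tls();
	unsigned int new_tls = 0x12345678, probe;

	/* Lands in compat_arm_syscall(), case __ARM_NR_compat_set_tls. */
	syscall(__ARM_NR_set_tls, new_tls);
	probe = kuser_get_tls();

	/* Restore the C library's thread pointer before touching stdio. */
	syscall(__ARM_NR_set_tls, old_tls);

	printf("set %#x, read back %#x\n", new_tls, probe);
	return 0;
}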