mm: mmu_notifier fix for tlb_end_vma
The generic tlb_end_vma does not call the invalidate_range mmu notifier, and it resets the mmu_gather range, which means the notifier won't be called on part of the range in case of an unmap that spans multiple vmas.

ARM64 seems to be the only arch I could see that has notifiers and uses the generic tlb_end_vma. I have not actually tested it.

[ Catalin and Will point out that ARM64 currently only uses the notifiers for KVM, which doesn't use the ->invalidate_range() callback right now, so it's a bug, but one that happens to not affect them. So not necessary for stable. - Linus ]

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
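As a rough illustration of the ordering problem (a standalone user-space sketch with made-up names, not kernel code): if the gather range is reset at each vma boundary without telling the secondary MMU, an unmap spanning two vmas only ever reports the last vma's range; notifying before the reset, as tlb_flush_mmu_tlbonly() does in the patch below, keeps every sub-range visible.

/*
 * Minimal model of the bug (hypothetical names, not kernel code).
 * The "gather" accumulates an address range; ending a vma resets it.
 * If the secondary-MMU notifier only fires at the very end, whatever
 * was flushed at a vma boundary is never reported.
 */
#include <stdio.h>

struct gather {
	unsigned long start, end;	/* accumulated range, end == 0 means empty */
};

static void gather_page(struct gather *g, unsigned long addr)
{
	if (!g->end || addr < g->start)
		g->start = addr;
	if (addr + 4096 > g->end)
		g->end = addr + 4096;
}

static void notify_secondary_mmu(struct gather *g)
{
	if (g->end)
		printf("notify range [%#lx, %#lx)\n", g->start, g->end);
}

/* Old behaviour: flush and reset at the vma boundary, nobody is told. */
static void end_vma_old(struct gather *g)
{
	g->start = g->end = 0;		/* range lost before any notification */
}

/* Fixed behaviour: notify first, then reset, like tlb_flush_mmu_tlbonly(). */
static void end_vma_fixed(struct gather *g)
{
	notify_secondary_mmu(g);
	g->start = g->end = 0;
}

int main(void)
{
	struct gather g = { 0, 0 };

	/* Unmap spanning two vmas, old scheme: only the second range survives. */
	gather_page(&g, 0x100000); end_vma_old(&g);
	gather_page(&g, 0x200000);
	notify_secondary_mmu(&g);	/* reports only [0x200000, 0x201000) */

	/* Same unmap, fixed scheme: both sub-ranges get reported. */
	gather_page(&g, 0x100000); end_vma_fixed(&g);
	gather_page(&g, 0x200000);
	notify_secondary_mmu(&g);
	return 0;
}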
commit fd1102f0aa
parent d86564a2f0
include/asm-generic/tlb.h

@@ -15,6 +15,7 @@
 #ifndef _ASM_GENERIC__TLB_H
 #define _ASM_GENERIC__TLB_H
 
+#include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -138,6 +139,16 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	if (!tlb->end)
+		return;
+
+	tlb_flush(tlb);
+	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+	__tlb_reset_range(tlb);
+}
+
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
@@ -186,10 +197,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 
 #define __tlb_end_vma(tlb, vma)					\
 	do {							\
-		if (!tlb->fullmm && tlb->end) {			\
-			tlb_flush(tlb);				\
-			__tlb_reset_range(tlb);			\
-		}						\
+		if (!tlb->fullmm)				\
+			tlb_flush_mmu_tlbonly(tlb);		\
 	} while (0)
 
 #ifndef tlb_end_vma
mm/memory.c
@@ -238,16 +238,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	__tlb_reset_range(tlb);
 }
 
-static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-	if (!tlb->end)
-		return;
-
-	tlb_flush(tlb);
-	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
-	__tlb_reset_range(tlb);
-}
-
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;