mm/vmemmap: allow architectures to override how vmemmap optimization works
Architectures like powerpc would like to use different page table allocators and mapping mechanisms to implement vmemmap optimization. Similar to vmemmap_populate, allow architectures to implement their own vmemmap_populate_compound_pages.

Link: https://lkml.kernel.org/r/20230724190759.483013-5-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 40135fc718
parent c1a6c536fb
@@ -358,6 +358,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
 	return 0;
 }
 
+#ifndef vmemmap_populate_compound_pages
 /*
  * For compound pages bigger than section size (e.g. x86 1G compound
  * pages with 2M subsection size) fill the rest of sections as tail
@@ -446,6 +447,8 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
 	return 0;
 }
 
+#endif
+
 struct page * __meminit __populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
 		struct dev_pagemap *pgmap)
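For reference, below is a rough sketch of how an architecture could use the new hook. The header and file paths, the fallback body, and the includes are illustrative assumptions, not part of this patch: defining the macro vmemmap_populate_compound_pages in an arch header compiles out the generic #ifndef-guarded version above, and the architecture then supplies its own implementation with the same prototype.

/* Assumed arch header, e.g. arch/foo/include/asm/pgtable.h */
#define vmemmap_populate_compound_pages vmemmap_populate_compound_pages
int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
					      unsigned long start,
					      unsigned long end, int node,
					      struct dev_pagemap *pgmap);

/* Assumed arch implementation, e.g. arch/foo/mm/init.c */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memremap.h>

int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
					      unsigned long start,
					      unsigned long end, int node,
					      struct dev_pagemap *pgmap)
{
	/*
	 * Placeholder body: a real port would use its own page table
	 * allocator and mapping scheme to map tail-page vmemmap entries
	 * onto shared backing pages.  Falling back to ordinary,
	 * non-deduplicated population keeps this sketch functional.
	 */
	return vmemmap_populate(start, end, node, NULL);
}

This is the mechanism the commit message refers to: an architecture such as powerpc can now drive vmemmap optimization with its own page table allocators and mappings instead of the generic scheme.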