14 hotfixes, 6 of which are cc:stable.

All except the nilfs2 fix affect MM and all are singletons - see the
changelogs for details.

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZmOJLgAKCRDdBJ7gKXxA
jinQAQC0AjAhN7zuxfCb9ljCsqyyAfsWbeyXAlqdhuRt2xZONgD+Nv2XwSUw0ZUv
xHGgPodMCrmEvuLo048qRpdJRbYo8gw=
=sM9B
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2024-06-07-15-24' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "14 hotfixes, 6 of which are cc:stable.

  All except the nilfs2 fix affect MM and all are singletons - see the
  changelogs for details"

* tag 'mm-hotfixes-stable-2024-06-07-15-24' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nilfs2: fix nilfs_empty_dir() misjudgment and long loop on I/O errors
  mm: fix xyz_noprof functions calling profiled functions
  codetag: avoid race at alloc_slab_obj_exts
  mm/hugetlb: do not call vma_add_reservation upon ENOMEM
  mm/ksm: fix ksm_zero_pages accounting
  mm/ksm: fix ksm_pages_scanned accounting
  kmsan: do not wipe out origin when doing partial unpoisoning
  vmalloc: check CONFIG_EXECMEM in is_vmalloc_or_module_addr()
  mm: page_alloc: fix highatomic typing in multi-block buddies
  nilfs2: fix potential kernel bug due to lack of writeback flag waiting
  memcg: remove the lockdep assert from __mod_objcg_mlstate()
  mm: arm64: fix the out-of-bounds issue in contpte_clear_young_dirty_ptes
  mm: huge_mm: fix undefined reference to `mthp_stats' for CONFIG_SYSFS=n
  mm: drop the 'anon_' prefix for swap-out mTHP counters
commit dc772f8237
@@ -467,11 +467,11 @@ anon_fault_fallback_charge
 	instead falls back to using huge pages with lower orders or
 	small pages even though the allocation was successful.
 
-anon_swpout
+swpout
 	is incremented every time a huge page is swapped out in one
 	piece without splitting.
 
-anon_swpout_fallback
+swpout_fallback
 	is incremented if a huge page has to be split before swapout.
 	Usually because failed to allocate some continuous swap space
 	for the huge page.
@@ -376,7 +376,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
 	 * clearing access/dirty for the whole block.
 	 */
 	unsigned long start = addr;
-	unsigned long end = start + nr;
+	unsigned long end = start + nr * PAGE_SIZE;
 
 	if (pte_cont(__ptep_get(ptep + nr - 1)))
 		end = ALIGN(end, CONT_PTE_SIZE);
@@ -386,7 +386,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
 		ptep = contpte_align_down(ptep);
 	}
 
-	__clear_young_dirty_ptes(vma, start, ptep, end - start, flags);
+	__clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
 }
 EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
 
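Note on the arm64 hunk above: `nr` is a count of PTEs while `start`/`end` are byte addresses, so the range has to be scaled by PAGE_SIZE in one direction and divided back before a PTE count is passed on. The following is a minimal userspace sketch of that conversion, not kernel code; the PAGE_SIZE value and the addresses are assumptions for illustration only.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size, illustration only */

int main(void)
{
	unsigned long addr = 0x200000UL;	/* arbitrary start address */
	unsigned long nr = 3;			/* number of PTEs covered */
	unsigned long start = addr;
	unsigned long end = start + nr * PAGE_SIZE;	/* pages -> byte range */

	/* convert the byte range back to a PTE count before passing it on */
	assert((end - start) / PAGE_SIZE == nr);
	printf("[%#lx, %#lx) spans %lu ptes\n", start, end, (end - start) / PAGE_SIZE);
	return 0;
}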
@@ -607,7 +607,7 @@ int nilfs_empty_dir(struct inode *inode)
 
 		kaddr = nilfs_get_folio(inode, i, &folio);
 		if (IS_ERR(kaddr))
-			continue;
+			return 0;
 
 		de = (struct nilfs_dir_entry *)kaddr;
 		kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
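The nilfs_empty_dir() change replaces `continue` with `return 0`, so a directory folio that cannot be read makes the directory be treated as not empty instead of being silently skipped (and, with a corrupted size, looped over for a long time). A hedged userspace sketch of the same fail-fast pattern; the page-reading helper and the page count are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical reader: returns NULL on I/O error */
static const char *read_dir_page(unsigned long index)
{
	return index == 1 ? NULL : "";	/* pretend page 1 fails to read */
}

/* Return true only when every page could be read and proved empty. */
static bool dir_is_empty(unsigned long npages)
{
	for (unsigned long i = 0; i < npages; i++) {
		const char *page = read_dir_page(i);

		if (!page)
			return false;	/* fail fast: never claim "empty" on error */
		/* ... scan directory entries on the page here ... */
	}
	return true;
}

int main(void)
{
	printf("empty: %d\n", dir_is_empty(3));	/* prints 0 because page 1 failed */
	return 0;
}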
@@ -1652,6 +1652,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 			if (bh->b_folio != bd_folio) {
 				if (bd_folio) {
 					folio_lock(bd_folio);
+					folio_wait_writeback(bd_folio);
 					folio_clear_dirty_for_io(bd_folio);
 					folio_start_writeback(bd_folio);
 					folio_unlock(bd_folio);
@@ -1665,6 +1666,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 			if (bh == segbuf->sb_super_root) {
 				if (bh->b_folio != bd_folio) {
 					folio_lock(bd_folio);
+					folio_wait_writeback(bd_folio);
 					folio_clear_dirty_for_io(bd_folio);
 					folio_start_writeback(bd_folio);
 					folio_unlock(bd_folio);
@@ -1681,6 +1683,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 	}
 	if (bd_folio) {
 		folio_lock(bd_folio);
+		folio_wait_writeback(bd_folio);
 		folio_clear_dirty_for_io(bd_folio);
 		folio_start_writeback(bd_folio);
 		folio_unlock(bd_folio);
@@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
 	mm = get_task_mm(task);
 	if (mm) {
 		seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
-		seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
+		seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
 		seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
 		seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
 		mmput(mm);
@@ -269,8 +269,8 @@ enum mthp_stat_item {
 	MTHP_STAT_ANON_FAULT_ALLOC,
 	MTHP_STAT_ANON_FAULT_FALLBACK,
 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
-	MTHP_STAT_ANON_SWPOUT,
-	MTHP_STAT_ANON_SWPOUT_FALLBACK,
+	MTHP_STAT_SWPOUT,
+	MTHP_STAT_SWPOUT_FALLBACK,
 	__MTHP_STAT_COUNT
 };
 
@@ -278,6 +278,7 @@ struct mthp_stat {
 	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
 };
 
+#ifdef CONFIG_SYSFS
 DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
 
 static inline void count_mthp_stat(int order, enum mthp_stat_item item)
@@ -287,6 +288,11 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item)
 
 	this_cpu_inc(mthp_stats.stats[order][item]);
 }
+#else
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
 
 #define transparent_hugepage_use_zero_page()			\
 	(transparent_hugepage_flags &				\
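The huge_mm.h hunks put DECLARE_PER_CPU() and the real count_mthp_stat() under #ifdef CONFIG_SYSFS and add an empty stub for the CONFIG_SYSFS=n case, so callers still build when the per-cpu `mthp_stats` object does not exist. A stand-alone sketch of that stub pattern; the storage, names, and compile flag are illustrative, not the kernel's.

#include <stdio.h>

/* Toggle the configuration with: cc -DCONFIG_SYSFS demo.c -o demo */
#ifdef CONFIG_SYSFS
static unsigned long mthp_stats[16];		/* backing storage exists */

static inline void count_mthp_stat(int order)
{
	mthp_stats[order]++;
}
#else
/* no backing storage configured: provide a no-op so callers still compile */
static inline void count_mthp_stat(int order)
{
	(void)order;
}
#endif

int main(void)
{
	count_mthp_stat(4);	/* compiles and runs in both configurations */
	puts("ok");
	return 0;
}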
@@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
  */
 #define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
 
-extern unsigned long ksm_zero_pages;
+extern atomic_long_t ksm_zero_pages;
 
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+	atomic_long_inc(&ksm_zero_pages);
+	atomic_long_inc(&mm->ksm_zero_pages);
+}
+
 static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 	if (is_ksm_zero_pte(pte)) {
-		ksm_zero_pages--;
-		mm->ksm_zero_pages--;
+		atomic_long_dec(&ksm_zero_pages);
+		atomic_long_dec(&mm->ksm_zero_pages);
 	}
 }
 
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+	return atomic_long_read(&mm->ksm_zero_pages);
+}
+
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
 	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
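ksm_zero_pages moves from a plain unsigned long, incremented and decremented from several contexts, to an atomic_long_t, so concurrent updates can no longer be lost. A minimal userspace C11 sketch of why the plain counter is unsafe while the atomic one is not; build with -pthread, thread and iteration counts are arbitrary, and this is not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static long plain_count;
static atomic_long atomic_count;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		plain_count++;				/* racy read-modify-write */
		atomic_fetch_add(&atomic_count, 1);	/* safe under concurrency */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* plain_count usually ends up below 400000; atomic_count never does */
	printf("plain=%ld atomic=%ld\n", plain_count, atomic_load(&atomic_count));
	return 0;
}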
@@ -985,7 +985,7 @@ struct mm_struct {
 		 * Represent how many empty pages are merged with kernel zero
 		 * pages when enabling KSM use_zero_pages.
 		 */
-		unsigned long ksm_zero_pages;
+		atomic_long_t ksm_zero_pages;
 #endif /* CONFIG_KSM */
 #ifdef CONFIG_LRU_GEN_WALKS_MMU
 		struct {
@@ -1000,7 +1000,7 @@ struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
 		do {
 			cpuset_mems_cookie = read_mems_allowed_begin();
 			n = cpuset_mem_spread_node();
-			folio = __folio_alloc_node(gfp, order, n);
+			folio = __folio_alloc_node_noprof(gfp, order, n);
 		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
 
 		return folio;
@@ -558,15 +558,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
-DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
-DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
 
 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
 	&anon_fault_fallback_attr.attr,
 	&anon_fault_fallback_charge_attr.attr,
-	&anon_swpout_attr.attr,
-	&anon_swpout_fallback_attr.attr,
+	&swpout_attr.attr,
+	&swpout_fallback_attr.attr,
 	NULL,
 };
 
mm/hugetlb.c (16 changed lines)
@@ -5768,8 +5768,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 * do_exit() will not see it, and will keep the reservation
 		 * forever.
 		 */
-		if (adjust_reservation && vma_needs_reservation(h, vma, address))
-			vma_add_reservation(h, vma, address);
+		if (adjust_reservation) {
+			int rc = vma_needs_reservation(h, vma, address);
+
+			if (rc < 0)
+				/* Pressumably allocate_file_region_entries failed
+				 * to allocate a file_region struct. Clear
+				 * hugetlb_restore_reserve so that global reserve
+				 * count will not be incremented by free_huge_folio.
+				 * Act as if we consumed the reservation.
+				 */
+				folio_clear_hugetlb_restore_reserve(page_folio(page));
+			else if (rc)
+				vma_add_reservation(h, vma, address);
+		}
 
 		tlb_remove_page_size(tlb, page, huge_page_size(h));
 		/*
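The hugetlb hunk exists because vma_needs_reservation() effectively has a three-way result: negative on allocation failure (ENOMEM), zero when no reservation is needed, and positive when one is. The old code collapsed the error case into "needed", inflating the reserve count. A small sketch of handling such a tri-state return value; the helper names and messages are made up for illustration.

#include <stdio.h>

/* hypothetical helper: <0 on error, 0 when not needed, >0 when needed */
static int needs_reservation(int simulate)
{
	return simulate;
}

static void release_path(int simulate)
{
	int rc = needs_reservation(simulate);

	if (rc < 0)
		puts("ENOMEM: act as if the reservation was consumed");
	else if (rc)
		puts("add the reservation back");
	else
		puts("nothing to do");
}

int main(void)
{
	release_path(-1);	/* error branch */
	release_path(0);	/* not needed */
	release_path(1);	/* needed */
	return 0;
}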
@@ -196,8 +196,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
 				      u32 origin, bool checked)
 {
 	u64 address = (u64)addr;
-	void *shadow_start;
-	u32 *origin_start;
+	u32 *shadow_start, *origin_start;
 	size_t pad = 0;
 
 	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
@@ -225,8 +224,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
 	origin_start =
 		(u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
 
-	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
-		origin_start[i] = origin;
+	/*
+	 * If the new origin is non-zero, assume that the shadow byte is also non-zero,
+	 * and unconditionally overwrite the old origin slot.
+	 * If the new origin is zero, overwrite the old origin slot iff the
+	 * corresponding shadow slot is zero.
+	 */
+	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
+		if (origin || !shadow_start[i])
+			origin_start[i] = origin;
+	}
 }
 
 struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
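The KMSAN hunk changes the origin-setting loop so that a zero (initialized) origin only overwrites a slot whose corresponding shadow slot is also zero; otherwise a partial unpoisoning would wipe the origin of bytes that are still poisoned. A userspace sketch of the same conditional overwrite over toy arrays; the slot count and values are illustrative, not KMSAN metadata.

#include <stdint.h>
#include <stdio.h>

#define SLOTS 4

int main(void)
{
	uint32_t shadow[SLOTS] = { 0, 0xff, 0, 0xff };	/* nonzero = still poisoned */
	uint32_t origin[SLOTS] = { 1, 2, 3, 4 };
	uint32_t new_origin = 0;			/* unpoisoning: new origin is 0 */

	for (int i = 0; i < SLOTS; i++) {
		/* keep the old origin wherever the shadow says "still poisoned" */
		if (new_origin || !shadow[i])
			origin[i] = new_origin;
	}

	for (int i = 0; i < SLOTS; i++)
		printf("slot %d: shadow=%#x origin=%u\n", i, shadow[i], origin[i]);
	return 0;	/* origins 2 and 4 survive; 1 and 3 are cleared */
}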
mm/ksm.c (17 changed lines)
@@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
 static bool ksm_smart_scan = true;
 
 /* The number of zero pages which is placed by KSM */
-unsigned long ksm_zero_pages;
+atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
 
 /* The number of pages that have been skipped due to "smart scanning" */
 static unsigned long ksm_pages_skipped;
@@ -1429,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 		 * the dirty bit in zero page's PTE is set.
 		 */
 		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
-		ksm_zero_pages++;
-		mm->ksm_zero_pages++;
+		ksm_map_zero_page(mm);
 		/*
 		 * We're replacing an anonymous page with a zero page, which is
 		 * not anonymous. We need to do proper accounting otherwise we
@@ -2754,18 +2753,16 @@ static void ksm_do_scan(unsigned int scan_npages)
 {
 	struct ksm_rmap_item *rmap_item;
 	struct page *page;
-	unsigned int npages = scan_npages;
 
-	while (npages-- && likely(!freezing(current))) {
+	while (scan_npages-- && likely(!freezing(current))) {
 		cond_resched();
 		rmap_item = scan_get_next_rmap_item(&page);
 		if (!rmap_item)
 			return;
 		cmp_and_merge_page(page, rmap_item);
 		put_page(page);
+		ksm_pages_scanned++;
 	}
-
-	ksm_pages_scanned += scan_npages - npages;
 }
 
 static int ksmd_should_run(void)
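The ksm_do_scan() hunk moves the ksm_pages_scanned update into the loop body: the old code computed the number of scanned pages only after the loop, so the early return taken when scan_get_next_rmap_item() finds nothing left skipped the update and the pages scanned in that pass were never counted. A tiny userspace sketch of per-iteration accounting surviving an early return; the scan function and its arguments are illustrative only.

#include <stdio.h>

static unsigned long pages_scanned;

/* Bump the counter as each page is processed, so an early return still
 * leaves the completed work accounted for. */
static void scan(unsigned int scan_npages, unsigned int items_available)
{
	while (scan_npages--) {
		if (!items_available)
			return;		/* nothing left to scan this pass */
		items_available--;
		pages_scanned++;	/* counted even though we may return early later */
	}
}

int main(void)
{
	scan(100, 3);	/* only 3 items exist; the pass returns early */
	printf("pages_scanned=%lu\n", pages_scanned);	/* 3, not 0 */
	return 0;
}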
@@ -3376,7 +3373,7 @@ static void wait_while_offlining(void)
 #ifdef CONFIG_PROC_FS
 long ksm_process_profit(struct mm_struct *mm)
 {
-	return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
+	return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
 		mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
 }
 #endif /* CONFIG_PROC_FS */
@@ -3665,7 +3662,7 @@ KSM_ATTR_RO(pages_skipped);
 static ssize_t ksm_zero_pages_show(struct kobject *kobj,
 				struct kobj_attribute *attr, char *buf)
 {
-	return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
+	return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
 }
 KSM_ATTR_RO(ksm_zero_pages);
 
@@ -3674,7 +3671,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
 {
 	long general_profit;
 
-	general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
+	general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
 			ksm_rmap_items * sizeof(struct ksm_rmap_item);
 
 	return sysfs_emit(buf, "%ld\n", general_profit);
@@ -3147,8 +3147,6 @@ static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
 	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
 
-	lockdep_assert_irqs_disabled();
-
 	rcu_read_lock();
 	memcg = obj_cgroup_memcg(objcg);
 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
@@ -273,7 +273,7 @@ mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
 {
 	mempool_t *pool;
 
-	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
+	pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
 
@@ -1955,10 +1955,12 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 }
 
 /*
- * Reserve a pageblock for exclusive use of high-order atomic allocations if
- * there are no empty page blocks that contain a page with a suitable order
+ * Reserve the pageblock(s) surrounding an allocation request for
+ * exclusive use of high-order atomic allocations if there are no
+ * empty page blocks that contain a page with a suitable order
  */
-static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
+static void reserve_highatomic_pageblock(struct page *page, int order,
+					 struct zone *zone)
 {
 	int mt;
 	unsigned long max_managed, flags;
@@ -1984,10 +1986,17 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
-	if (migratetype_is_mergeable(mt))
-		if (move_freepages_block(zone, page, mt,
-					 MIGRATE_HIGHATOMIC) != -1)
-			zone->nr_reserved_highatomic += pageblock_nr_pages;
+	if (!migratetype_is_mergeable(mt))
+		goto out_unlock;
+
+	if (order < pageblock_order) {
+		if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
+			goto out_unlock;
+		zone->nr_reserved_highatomic += pageblock_nr_pages;
+	} else {
+		change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
+		zone->nr_reserved_highatomic += 1 << order;
+	}
 
 out_unlock:
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -1999,7 +2008,7 @@ out_unlock:
  * intense memory pressure but failed atomic allocations should be easier
  * to recover from than an OOM.
  *
- * If @force is true, try to unreserve a pageblock even though highatomic
+ * If @force is true, try to unreserve pageblocks even though highatomic
 * pageblock is exhausted.
 */
 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
@@ -2041,6 +2050,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		 * adjust the count once.
 		 */
 		if (is_migrate_highatomic(mt)) {
+			unsigned long size;
 			/*
 			 * It should never happen but changes to
 			 * locking could inadvertently allow a per-cpu
@@ -2048,9 +2058,9 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * while unreserving so be safe and watch for
 			 * underflows.
 			 */
-			zone->nr_reserved_highatomic -= min(
-					pageblock_nr_pages,
-					zone->nr_reserved_highatomic);
+			size = max(pageblock_nr_pages, 1UL << order);
+			size = min(size, zone->nr_reserved_highatomic);
+			zone->nr_reserved_highatomic -= size;
 		}
 
 		/*
@@ -2062,11 +2072,19 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		 * of pageblocks that cannot be completely freed
 		 * may increase.
 		 */
-		ret = move_freepages_block(zone, page, mt,
-					   ac->migratetype);
+		if (order < pageblock_order)
+			ret = move_freepages_block(zone, page, mt,
+						   ac->migratetype);
+		else {
+			move_to_free_list(page, zone, order, mt,
+					  ac->migratetype);
+			change_pageblock_range(page, order,
+					       ac->migratetype);
+			ret = 1;
+		}
 		/*
-		 * Reserving this block already succeeded, so this should
-		 * not fail on zone boundaries.
+		 * Reserving the block(s) already succeeded,
+		 * so this should not fail on zone boundaries.
 		 */
 		WARN_ON_ONCE(ret == -1);
 		if (ret > 0) {
@@ -3406,7 +3424,7 @@ try_this_zone:
 			 * if the pageblock should be reserved for the future
 			 */
 			if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
-				reserve_highatomic_pageblock(page, zone);
+				reserve_highatomic_pageblock(page, order, zone);
 
 			return page;
 		} else {
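With multi-block buddies, reserve_highatomic_pageblock() now takes the allocation order: an order below pageblock_order still retypes one pageblock and accounts pageblock_nr_pages, while a larger buddy has its whole range retyped and accounted as 1 << order pages. A small arithmetic sketch of the two accounting branches; the pageblock order of 9 and the function shape are assumptions for illustration, not the kernel API.

#include <stdio.h>

#define PAGEBLOCK_ORDER 9				/* assumed, common default */
#define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)

static unsigned long nr_reserved_highatomic;

static void reserve(unsigned int order)
{
	if (order < PAGEBLOCK_ORDER)
		nr_reserved_highatomic += PAGEBLOCK_NR_PAGES;	/* one full pageblock */
	else
		nr_reserved_highatomic += 1UL << order;		/* the whole buddy range */
}

int main(void)
{
	reserve(3);	/* small allocation: account 512 pages */
	reserve(10);	/* multi-block buddy: account 1024 pages */
	printf("reserved %lu pages\n", nr_reserved_highatomic);	/* 1536 */
	return 0;
}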
@@ -217,7 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
 		count_memcg_folio_events(folio, THP_SWPOUT, 1);
 		count_vm_event(THP_SWPOUT);
 	}
-	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
+	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
 #endif
 	count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
@@ -1952,7 +1952,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 #ifdef CONFIG_MEMCG
 	new_exts |= MEMCG_DATA_OBJEXTS;
 #endif
-	old_exts = slab->obj_exts;
+	old_exts = READ_ONCE(slab->obj_exts);
 	handle_failed_objexts_alloc(old_exts, vec, objects);
 	if (new_slab) {
 		/*
@@ -1961,7 +1961,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		 * be simply assigned.
 		 */
 		slab->obj_exts = new_exts;
-	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+	} else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
+		   cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
 		/*
 		 * If the slab is already in use, somebody can allocate and
 		 * assign slabobj_exts in parallel. In this case the existing
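The slub hunk closes a race: slab->obj_exts is read once with READ_ONCE(), and if another CPU already installed a vector (any bits outside the flag mask are set), the freshly allocated one is discarded instead of being cmpxchg'd over it. A userspace C11 sketch of the same publish-or-back-off pattern; the flag mask, types, and function are illustrative, not the slab allocator's.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FLAGS_MASK 0x3UL	/* low bits carry flags, not a pointer */

static _Atomic uintptr_t obj_exts;

/* Try to publish `mine`; back off if someone else already published a vector. */
static int publish(uintptr_t mine)
{
	uintptr_t old = atomic_load(&obj_exts);		/* READ_ONCE() equivalent */

	if ((old & ~FLAGS_MASK) ||
	    !atomic_compare_exchange_strong(&obj_exts, &old, mine))
		return 0;	/* lost the race: the caller should free its own vector */
	return 1;
}

int main(void)
{
	printf("first publish: %d\n", publish(0x1000));	/* wins */
	printf("second publish: %d\n", publish(0x2000));	/* loses, keeps the existing one */
	return 0;
}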
mm/util.c (10 changed lines)
@@ -705,7 +705,7 @@ void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flag
 
 	if (oldsize >= newsize)
 		return (void *)p;
-	newp = kvmalloc(newsize, flags);
+	newp = kvmalloc_noprof(newsize, flags);
 	if (!newp)
 		return NULL;
 	memcpy(newp, p, oldsize);
@@ -726,7 +726,7 @@ void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
-	return __vmalloc(bytes, flags);
+	return __vmalloc_noprof(bytes, flags);
 }
 EXPORT_SYMBOL(__vmalloc_array_noprof);
 
@@ -737,7 +737,7 @@ EXPORT_SYMBOL(__vmalloc_array_noprof);
  */
 void *vmalloc_array_noprof(size_t n, size_t size)
 {
-	return __vmalloc_array(n, size, GFP_KERNEL);
+	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_array_noprof);
 
@@ -749,7 +749,7 @@ EXPORT_SYMBOL(vmalloc_array_noprof);
  */
 void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
 {
-	return __vmalloc_array(n, size, flags | __GFP_ZERO);
+	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
 }
 EXPORT_SYMBOL(__vcalloc_noprof);
 
@@ -760,7 +760,7 @@ EXPORT_SYMBOL(__vcalloc_noprof);
  */
 void *vcalloc_noprof(size_t n, size_t size)
 {
-	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
+	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vcalloc_noprof);
 
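The mm/util.c, mm/mempool.c, and mm/filemap.c hunks all follow the same rule from the allocation-profiling scheme: a *_noprof implementation must call other _noprof variants internally, because the profiled names are wrapper macros that add an accounting hook at the call site, and nesting them would charge the allocation to the wrong place (or count it twice). Below is a hedged userspace sketch of that wrapper-macro convention; the hook counter, the macro, and the helper names are stand-ins, not the kernel's alloc_tag API.

#include <stdio.h>
#include <stdlib.h>

static unsigned long alloc_hook_hits;	/* stand-in for a per-callsite tag */

/* The plain name is a macro: account once at the outermost call site... */
#define my_alloc(size) (alloc_hook_hits++, my_alloc_noprof(size))

/* ...so internal helpers call the _noprof entry point directly. */
static void *my_alloc_noprof(size_t size)
{
	return malloc(size);
}

static void *my_calloc_noprof(size_t n, size_t size)
{
	/* overflow check elided for brevity */
	return my_alloc_noprof(n * size);	/* correct: no extra accounting */
}

int main(void)
{
	void *a = my_alloc(32);			/* accounted once, here */
	void *b = my_calloc_noprof(4, 8);	/* accounted by its own wrapper, if any */

	printf("hook hits: %lu\n", alloc_hook_hits);	/* prints 1 */
	free(a);
	free(b);
	return 0;
}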
@@ -722,7 +722,7 @@ int is_vmalloc_or_module_addr(const void *x)
 	 * and fall back on vmalloc() if that fails. Others
 	 * just put it in the vmalloc space.
 	 */
-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
 	if (addr >= MODULES_VADDR && addr < MODULES_END)
 		return 1;
@@ -1227,7 +1227,7 @@ retry:
 							THP_SWPOUT_FALLBACK, 1);
 					count_vm_event(THP_SWPOUT_FALLBACK);
 				}
-				count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+				count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
 #endif
 				if (!add_to_swap(folio))
 					goto activate_locked_split;