mm: memcg: add NULL check to obj_cgroup_put()
9 out of 16 callers perform a NULL check before calling obj_cgroup_put(). Move the NULL check in the function, similar to mem_cgroup_put(). The unlikely() NULL check in current_objcg_update() was left alone to avoid dropping the unlikely() annotation as this is a fast path. Link: https://lkml.kernel.org/r/20240316015803.2777252-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed <yosryahmed@google.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Roman Gushchin <roman.gushchin@linux.dev> Cc: Michal Hocko <mhocko@kernel.org> Cc: Muchun Song <muchun.song@linux.dev> Cc: Shakeel Butt <shakeel.butt@linux.dev> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
5b0a67008b
commit
91b71e78b8
|
@ -818,7 +818,8 @@ static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
|
|||
|
||||
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
|
||||
{
|
||||
percpu_ref_put(&objcg->refcnt);
|
||||
if (objcg)
|
||||
percpu_ref_put(&objcg->refcnt);
|
||||
}
|
||||
|
||||
static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
|
||||
|
|
|
@ -759,8 +759,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
|
|||
rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
|
||||
rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
|
||||
}
|
||||
if (ma->objcg)
|
||||
obj_cgroup_put(ma->objcg);
|
||||
obj_cgroup_put(ma->objcg);
|
||||
destroy_mem_alloc(ma, rcu_in_progress);
|
||||
}
|
||||
if (ma->caches) {
|
||||
|
@ -776,8 +775,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
|
|||
rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
|
||||
}
|
||||
}
|
||||
if (ma->objcg)
|
||||
obj_cgroup_put(ma->objcg);
|
||||
obj_cgroup_put(ma->objcg);
|
||||
destroy_mem_alloc(ma, rcu_in_progress);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2369,8 +2369,7 @@ static void drain_local_stock(struct work_struct *dummy)
|
|||
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
|
||||
|
||||
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
|
||||
if (old)
|
||||
obj_cgroup_put(old);
|
||||
obj_cgroup_put(old);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3145,8 +3144,7 @@ static struct obj_cgroup *current_objcg_update(void)
|
|||
if (old) {
|
||||
old = (struct obj_cgroup *)
|
||||
((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
|
||||
if (old)
|
||||
obj_cgroup_put(old);
|
||||
obj_cgroup_put(old);
|
||||
|
||||
old = NULL;
|
||||
}
|
||||
|
@ -3418,8 +3416,7 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
|
|||
mod_objcg_mlstate(objcg, pgdat, idx, nr);
|
||||
|
||||
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
|
||||
if (old)
|
||||
obj_cgroup_put(old);
|
||||
obj_cgroup_put(old);
|
||||
}
|
||||
|
||||
static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
|
||||
|
@ -3546,8 +3543,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
|
|||
}
|
||||
|
||||
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
|
||||
if (old)
|
||||
obj_cgroup_put(old);
|
||||
obj_cgroup_put(old);
|
||||
|
||||
if (nr_pages)
|
||||
obj_cgroup_uncharge_pages(objcg, nr_pages);
|
||||
|
@ -5468,8 +5464,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
|
|||
{
|
||||
int node;
|
||||
|
||||
if (memcg->orig_objcg)
|
||||
obj_cgroup_put(memcg->orig_objcg);
|
||||
obj_cgroup_put(memcg->orig_objcg);
|
||||
|
||||
for_each_node(node)
|
||||
free_mem_cgroup_per_node_info(memcg, node);
|
||||
|
@ -6620,8 +6615,7 @@ static void mem_cgroup_exit(struct task_struct *task)
|
|||
|
||||
objcg = (struct obj_cgroup *)
|
||||
((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
|
||||
if (objcg)
|
||||
obj_cgroup_put(objcg);
|
||||
obj_cgroup_put(objcg);
|
||||
|
||||
/*
|
||||
* Some kernel allocations can happen after this point,
|
||||
|
|
|
@ -1618,8 +1618,7 @@ put_pool:
|
|||
freepage:
|
||||
zswap_entry_cache_free(entry);
|
||||
reject:
|
||||
if (objcg)
|
||||
obj_cgroup_put(objcg);
|
||||
obj_cgroup_put(objcg);
|
||||
check_old:
|
||||
/*
|
||||
* If the zswap store fails or zswap is disabled, we must invalidate the
|
||||
|
|
Loading…
Reference in New Issue