pcpcntr: add group allocation/free

Allocations and frees are globally serialized on the pcpu lock (and the
CPU hotplug lock if enabled, which is the case on Debian).

At least one frequent consumer allocates 4 back-to-back counters (and
frees them in the same manner), exacerbating the problem.

While this does not fully remedy scalability issues, it is a step
towards that goal and provides immediate relief.
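
For illustration only (not part of this patch), a consumer that today issues
four back-to-back percpu_counter_init()/percpu_counter_destroy() calls could
switch to the grouped API; the "stats" array and NR_STATS below are
hypothetical names:

        #include <linux/percpu_counter.h>

        #define NR_STATS 4
        static struct percpu_counter stats[NR_STATS];

        static int stats_init(void)
        {
                /*
                 * One percpu allocation and one trip through the hotplug
                 * list lock cover all four counters.
                 */
                return percpu_counter_init_many(stats, 0, GFP_KERNEL, NR_STATS);
        }

        static void stats_exit(void)
        {
                /* Unlinks all four counters and frees the shared percpu region. */
                percpu_counter_destroy_many(stats, NR_STATS);
        }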

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Reviewed-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: Vegard Nossum <vegard.nossum@oracle.com>
Link: https://lore.kernel.org/r/20230823050609.2228718-2-mjguzik@gmail.com
[Dennis: reflowed a few lines]
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Author: Mateusz Guzik, 2023-08-23 07:06:08 +02:00
Committer: Dennis Zhou
commit c439d5e8a0, parent f7d77dfc91
2 changed files with 76 additions and 25 deletions

include/linux/percpu_counter.h

@@ -30,17 +30,28 @@ struct percpu_counter {
 
 extern int percpu_counter_batch;
 
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
-                          struct lock_class_key *key);
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+                               gfp_t gfp, u32 nr_counters,
+                               struct lock_class_key *key);
 
-#define percpu_counter_init(fbc, value, gfp)                            \
+#define percpu_counter_init_many(fbc, value, gfp, nr_counters)          \
        ({                                                               \
                static struct lock_class_key __key;                      \
                                                                         \
-               __percpu_counter_init(fbc, value, gfp, &__key);          \
+               __percpu_counter_init_many(fbc, value, gfp, nr_counters, \
+                                          &__key);                      \
        })
 
-void percpu_counter_destroy(struct percpu_counter *fbc);
+#define percpu_counter_init(fbc, value, gfp)                             \
+       percpu_counter_init_many(fbc, value, gfp, 1)
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+       percpu_counter_destroy_many(fbc, 1);
+}
+
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
                               s32 batch);

@@ -116,11 +127,27 @@ struct percpu_counter {
        s64 count;
 };
 
+static inline int percpu_counter_init_many(struct percpu_counter *fbc,
+                                           s64 amount, gfp_t gfp,
+                                           u32 nr_counters)
+{
+       u32 i;
+
+       for (i = 0; i < nr_counters; i++)
+               fbc[i].count = amount;
+
+       return 0;
+}
+
 static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                                       gfp_t gfp)
 {
-       fbc->count = amount;
-       return 0;
+       return percpu_counter_init_many(fbc, amount, gfp, 1);
+}
+
+static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
+                                               u32 nr_counters)
+{
 }
 
 static inline void percpu_counter_destroy(struct percpu_counter *fbc)

lib/percpu_counter.c

@@ -151,48 +151,72 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
-                          struct lock_class_key *key)
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+                               gfp_t gfp, u32 nr_counters,
+                               struct lock_class_key *key)
 {
        unsigned long flags __maybe_unused;
+       size_t counter_size;
+       s32 __percpu *counters;
+       u32 i;
 
-       raw_spin_lock_init(&fbc->lock);
-       lockdep_set_class(&fbc->lock, key);
-       fbc->count = amount;
-       fbc->counters = alloc_percpu_gfp(s32, gfp);
-       if (!fbc->counters)
+       counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
+       counters = __alloc_percpu_gfp(nr_counters * counter_size,
+                                     __alignof__(*counters), gfp);
+       if (!counters) {
+               fbc[0].counters = NULL;
                return -ENOMEM;
+       }
 
-       debug_percpu_counter_activate(fbc);
+       for (i = 0; i < nr_counters; i++) {
+               raw_spin_lock_init(&fbc[i].lock);
+               lockdep_set_class(&fbc[i].lock, key);
+#ifdef CONFIG_HOTPLUG_CPU
+               INIT_LIST_HEAD(&fbc[i].list);
+#endif
+               fbc[i].count = amount;
+               fbc[i].counters = (void *)counters + (i * counter_size);
+
+               debug_percpu_counter_activate(&fbc[i]);
+       }
 
 #ifdef CONFIG_HOTPLUG_CPU
-       INIT_LIST_HEAD(&fbc->list);
        spin_lock_irqsave(&percpu_counters_lock, flags);
-       list_add(&fbc->list, &percpu_counters);
+       for (i = 0; i < nr_counters; i++)
+               list_add(&fbc[i].list, &percpu_counters);
        spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
        return 0;
 }
-EXPORT_SYMBOL(__percpu_counter_init);
+EXPORT_SYMBOL(__percpu_counter_init_many);
 
-void percpu_counter_destroy(struct percpu_counter *fbc)
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
 {
        unsigned long flags __maybe_unused;
+       u32 i;
 
-       if (!fbc->counters)
+       if (WARN_ON_ONCE(!fbc))
                return;
 
-       debug_percpu_counter_deactivate(fbc);
+       if (!fbc[0].counters)
+               return;
+
+       for (i = 0; i < nr_counters; i++)
+               debug_percpu_counter_deactivate(&fbc[i]);
 
 #ifdef CONFIG_HOTPLUG_CPU
        spin_lock_irqsave(&percpu_counters_lock, flags);
-       list_del(&fbc->list);
+       for (i = 0; i < nr_counters; i++)
+               list_del(&fbc[i].list);
        spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
-       free_percpu(fbc->counters);
-       fbc->counters = NULL;
+
+       free_percpu(fbc[0].counters);
+       for (i = 0; i < nr_counters; i++)
+               fbc[i].counters = NULL;
 }
-EXPORT_SYMBOL(percpu_counter_destroy);
+EXPORT_SYMBOL(percpu_counter_destroy_many);
 
 int percpu_counter_batch __read_mostly = 32;
 EXPORT_SYMBOL(percpu_counter_batch);
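
As a sketch of the resulting layout (an illustration, not code from the
patch): with s32 counters, counter_size is ALIGN(sizeof(s32),
__alignof__(s32)), i.e. 4 bytes on common configurations, so a group of
nr_counters shares a single percpu region of nr_counters * counter_size
bytes, and counter i's per-CPU slot sits at byte offset i * counter_size.
The helper below is hypothetical and only mirrors the offset arithmetic in
__percpu_counter_init_many():

        /*
         * Hypothetical helper; mirrors
         * fbc[i].counters = (void *)counters + (i * counter_size).
         */
        static inline s32 __percpu *group_counter_base(s32 __percpu *counters,
                                                       u32 i)
        {
                size_t counter_size = ALIGN(sizeof(s32), __alignof__(s32));

                return (void *)counters + (i * counter_size);
        }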