commit 2b18593e4b

Merge tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fix from Borislav Petkov:

 - A single data race fix on the perf event cleanup path to avoid endless
   loops due to insufficient locking

* tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
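For context, here is a minimal userspace sketch of the syscall path that reaches perf_event_set_output(): one event's ring buffer is shared with a second event via PERF_EVENT_IOC_SET_OUTPUT, while the mapping can be torn down concurrently by munmap() -> perf_mmap_close(). Attribute values and the buffer size are illustrative assumptions, error handling is omitted, and this is not a reliable reproducer of the race.

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CPU_CLOCK;

		int a = perf_event_open(&attr, 0, -1, -1, 0);
		int b = perf_event_open(&attr, 0, -1, -1, 0);

		/* Map a's ring buffer: 1 metadata page + 2^3 data pages. */
		void *rb = mmap(NULL, 9 * 4096, PROT_READ | PROT_WRITE,
				MAP_SHARED, a, 0);

		/* Redirect b's output into a's buffer. This ioctl is the
		 * entry point into perf_event_set_output(), and it is what
		 * races against a concurrent munmap(rb). */
		ioctl(b, PERF_EVENT_IOC_SET_OUTPUT, a);

		munmap(rb, 9 * 4096);
		return 0;
	}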
@@ -6253,10 +6253,10 @@ again:
 
 		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
 			/*
-			 * Raced against perf_mmap_close() through
-			 * perf_event_set_output(). Try again, hope for better
-			 * luck.
+			 * Raced against perf_mmap_close(); remove the
+			 * event and try again.
 			 */
+			ring_buffer_attach(event, NULL);
 			mutex_unlock(&event->mmap_mutex);
 			goto again;
 		}
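The retry path above leans on atomic_inc_not_zero(): a reference is taken only if the count has not already dropped to zero, i.e. only if perf_mmap_close() has not begun tearing the buffer down. The added ring_buffer_attach(event, NULL) is what breaks the endless loop: without it, each retry would find the same dead buffer and fail again forever. As a minimal userspace sketch of the inc-not-zero idiom in C11 atomics (not the kernel's implementation):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Take a reference only if the object is still live (count > 0). */
	static bool inc_not_zero(atomic_int *count)
	{
		int old = atomic_load(count);

		while (old != 0) {
			/* On failure, 'old' is reloaded with the current
			 * value and the loop re-decides. */
			if (atomic_compare_exchange_weak(count, &old, old + 1))
				return true;
		}
		return false;	/* count already hit zero: caller lost the race */
	}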
@@ -11825,14 +11825,25 @@ err_size:
 	goto out;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+	if (b < a)
+		swap(a, b);
+
+	mutex_lock(a);
+	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
 	struct perf_buffer *rb = NULL;
 	int ret = -EINVAL;
 
-	if (!output_event)
+	if (!output_event) {
+		mutex_lock(&event->mmap_mutex);
 		goto set;
+	}
 
 	/* don't allow circular references */
 	if (event == output_event)
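mutex_lock_double(), moved above its new caller in this hunk, is the standard ABBA-deadlock avoidance idiom: when two locks of the same class must be held at once, acquire them in a stable global order, here by address. The mutex_lock_nested(b, SINGLE_DEPTH_NESTING) call only tells lockdep that taking two locks of one class is intentional. A userspace analogue with pthreads, as a sketch (pthreads has no lockdep annotation to mirror):

	#include <pthread.h>

	/* Acquire two mutexes in address order, so two threads locking the
	 * same pair with swapped arguments cannot deadlock. */
	static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if (b < a) {	/* order by address */
			pthread_mutex_t *tmp = a;

			a = b;
			b = tmp;
		}
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	}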
@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 	    event->pmu != output_event->pmu)
 		goto out;
 
+	/*
+	 * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
+	 * output_event is already on rb->event_list, and the list iteration
+	 * restarts after every removal, it is guaranteed this new event is
+	 * observed *OR* if output_event is already removed, it's guaranteed we
+	 * observe !rb->mmap_count.
+	 */
+	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-	mutex_lock(&event->mmap_mutex);
 	/* Can't redirect output if we've got an active mmap() */
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
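The guarantee stated in the new comment rests on perf_mmap_close() restarting its walk of rb->event_list after every removal: a newly attached event is therefore either visited by the walk, or its attach path observes !rb->mmap_count and bails out. A generic, single-threaded sketch of that restart-after-removal walk, showing only the shape of the pattern rather than the kernel's code:

	struct node { struct node *next; int dead; };

	/* Restart from the head whenever a node is unlinked; any node added
	 * before the walk finishes is therefore guaranteed to be visited. */
	static void reap(struct node **head)
	{
	again:
		for (struct node **pp = head; *pp; pp = &(*pp)->next) {
			if ((*pp)->dead) {
				*pp = (*pp)->next;	/* unlink */
				goto again;		/* list changed: start over */
			}
		}
	}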
@@ -11881,6 +11899,12 @@ set:
 		rb = ring_buffer_get(output_event);
 		if (!rb)
 			goto unlock;
+
+		/* did we race against perf_mmap_close() */
+		if (!atomic_read(&rb->mmap_count)) {
+			ring_buffer_put(rb);
+			goto unlock;
+		}
 	}
 
 	ring_buffer_attach(event, rb);
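The recheck added here is the acquire-then-revalidate pattern: ring_buffer_get() only pins the buffer's memory, it does not prove the buffer is still mapped, so rb->mmap_count is re-tested under the mutexes and the reference dropped if the close won the race. A hedged sketch of that shape, with illustrative names that are not kernel API:

	#include <stdatomic.h>
	#include <stddef.h>

	struct buf { atomic_int refcount; atomic_int mmap_count; };

	/* Existence (refcount) and liveness (mmap_count) are separate
	 * properties, so liveness must be revalidated after the get. */
	static struct buf *buf_get_live(struct buf *b)
	{
		atomic_fetch_add(&b->refcount, 1);		/* get */
		if (atomic_load(&b->mmap_count) == 0) {		/* still mapped? */
			atomic_fetch_sub(&b->refcount, 1);	/* put: lost the race */
			return NULL;
		}
		return b;
	}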
@@ -11888,20 +11912,13 @@ set:
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
+	if (output_event)
+		mutex_unlock(&output_event->mmap_mutex);
 
 out:
 	return ret;
 }
 
-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-	if (b < a)
-		swap(a, b);
-
-	mutex_lock(a);
-	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
 	bool nmi_safe = false;