dm snapshot: wait for chunks in destructor
If there are several snapshots sharing an origin and one is removed while the origin is being written to, the snapshot's mempool may get deleted while elements are still referenced.

Prior to dm-snapshot-use-per-device-mempools.patch the pending exceptions may still have been referenced after the snapshot was destroyed, but this was not a problem because the shared mempool was still there.

This patch fixes the problem by tracking the number of mempool elements in use.

The scenario:

- You have an origin and two snapshots, 1 and 2.
- Someone writes to the origin.
- This creates two exceptions, one in each snapshot. Snapshot 1's will be the primary exception; snapshot 2's pending_exception->primary_pe will point to the exception in snapshot 1.
- The exceptions are relocated. Relocation of exception 1 finishes, but its pending_exception is still allocated because it is referenced by an exception from snapshot 2.
- The user lvremoves snapshot 1. This calls just suspend (which does nothing) and the destructor. md->pending is zero (no I/O has been submitted to the snapshot by the md layer), so it won't help us.
- The destructor waits for kcopyd jobs to finish on snapshot 1, but there are none.
- The destructor on snapshot 1 cleans up everything.
- The relocation of the exception on snapshot 2 finishes and drops its reference on primary_pe, freeing it. But primary_pe points to the pending exception created for snapshot 1, so the memory is freed into a mempool that no longer exists.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 60c856c8e2
commit 879129d208
drivers/md/dm-snap.c

@@ -370,6 +370,7 @@ static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snaps
 	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
 							     GFP_NOIO);
 
+	atomic_inc(&s->pending_exceptions_count);
 	pe->snap = s;
 
 	return pe;
@@ -377,7 +378,11 @@ static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snaps
 
 static void free_pending_exception(struct dm_snap_pending_exception *pe)
 {
-	mempool_free(pe, pe->snap->pending_pool);
+	struct dm_snapshot *s = pe->snap;
+
+	mempool_free(pe, s->pending_pool);
+	smp_mb__before_atomic_dec();
+	atomic_dec(&s->pending_exceptions_count);
 }
 
 static void insert_completed_exception(struct dm_snapshot *s,
@@ -602,6 +607,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	s->valid = 1;
 	s->active = 0;
+	atomic_set(&s->pending_exceptions_count, 0);
 	init_rwsem(&s->lock);
 	spin_lock_init(&s->pe_lock);
 	s->ti = ti;
@@ -728,6 +734,14 @@ static void snapshot_dtr(struct dm_target *ti)
 	/* After this returns there can be no new kcopyd jobs. */
 	unregister_snapshot(s);
 
+	while (atomic_read(&s->pending_exceptions_count))
+		yield();
+	/*
+	 * Ensure instructions in mempool_destroy aren't reordered
+	 * before atomic_read.
+	 */
+	smp_mb();
+
 #ifdef CONFIG_DM_DEBUG
 	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
 		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));

drivers/md/dm-snap.h

@@ -160,6 +160,8 @@ struct dm_snapshot {
 
 	mempool_t *pending_pool;
 
+	atomic_t pending_exceptions_count;
+
 	struct exception_table pending;
 	struct exception_table complete;
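For readers unfamiliar with this pattern, below is a minimal user-space sketch of the lifetime counting the patch introduces. All names here are illustrative, not the kernel's; C11 atomics and sched_yield() stand in for atomic_t, the smp_* barriers and yield().

/* Sketch of the patch's counting pattern: count an element before it is
 * handed out, drop the count only after it is back in the pool, and make
 * the destructor spin until the count reaches zero before tearing down. */
#include <stdatomic.h>
#include <sched.h>
#include <stdlib.h>

static atomic_int pending_count;	/* plays the role of s->pending_exceptions_count */

struct pending { int dummy; };

static struct pending *alloc_pending(void)
{
	struct pending *pe = malloc(sizeof(*pe));	/* like mempool_alloc() */
	atomic_fetch_add(&pending_count, 1);		/* count before handing out */
	return pe;
}

static void free_pending(struct pending *pe)
{
	free(pe);					/* like mempool_free() */
	/* Release ordering: the free must be visible before the counter
	 * drops, mirroring smp_mb__before_atomic_dec() in the patch. */
	atomic_fetch_sub_explicit(&pending_count, 1, memory_order_release);
}

static void destroy(void)
{
	/* Wait until every outstanding element has been returned,
	 * mirroring the while/yield loop added to snapshot_dtr(). */
	while (atomic_load_explicit(&pending_count, memory_order_acquire))
		sched_yield();
	/* The acquire above plays the role of the smp_mb() that keeps
	 * the pool teardown from being reordered before the read; only
	 * now is it safe to destroy the pool the elements came from. */
}

The pairing matters: without the barrier on the decrement side and the one before teardown, the destructor could observe a zero count while the final free is still in flight, which is exactly the use-after-destroy the commit message describes.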