bcachefs: move replica_set from bch_dev to bch_fs

This is needed for the next patch - the write submit path has to be able
to allocate a replica bio even when we weren't able to get a ref on the
device.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author: Kent Overstreet <kent.overstreet@linux.dev>
Date:   2024-04-30 20:32:44 -04:00
Parent: 633cf06944
Commit: dbd0408087

3 changed files with 5 additions and 10 deletions
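
In rough terms, moving the bioset from bch_dev to bch_fs means a replica
clone can be allocated from the filesystem-wide pool before the target
device has been resolved or pinned. A minimal sketch of that allocation,
assuming the surrounding bcachefs types; the helper name below is
illustrative and not part of this patch:

	/*
	 * Sketch only: clone a replica bio from the fs-wide bioset, so the
	 * allocation no longer depends on holding a ref on a struct bch_dev.
	 */
	static struct bio *clone_replica_bio(struct bch_fs *c, struct bio *src)
	{
		return bio_alloc_clone(NULL, src, GFP_NOFS, &c->replica_set);
	}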


@@ -569,9 +569,6 @@ struct bch_dev {
 	struct bch_devs_mask	self;
 
-	/* biosets used in cloned bios for writing multiple replicas */
-	struct bio_set		replica_set;
-
 	/*
 	 * Buckets:
 	 * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
@@ -995,6 +992,7 @@ struct bch_fs {
 	struct bio_set		bio_read;
 	struct bio_set		bio_read_split;
 	struct bio_set		bio_write;
+	struct bio_set		replica_set;
 	struct mutex		bio_bounce_pages_lock;
 	mempool_t		bio_bounce_pages;
 	struct bucket_nocow_lock_table


@@ -412,8 +412,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 
 		if (to_entry(ptr + 1) < ptrs.end) {
-			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
-						GFP_NOFS, &ca->replica_set));
+			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));
 
 			n->bio.bi_end_io	= wbio->bio.bi_end_io;
 			n->bio.bi_private	= wbio->bio.bi_private;
@@ -1667,13 +1666,14 @@ void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
 void bch2_fs_io_write_exit(struct bch_fs *c)
 {
 	mempool_exit(&c->bio_bounce_pages);
+	bioset_exit(&c->replica_set);
 	bioset_exit(&c->bio_write);
 }
 
 int bch2_fs_io_write_init(struct bch_fs *c)
 {
-	if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
-			BIOSET_NEED_BVECS))
+	if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
+	    bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
 		return -BCH_ERR_ENOMEM_bio_write_init;
 
 	if (mempool_init_page_pool(&c->bio_bounce_pages,


@@ -1193,7 +1193,6 @@ static void bch2_dev_free(struct bch_dev *ca)
 	bch2_dev_journal_exit(ca);
 
 	free_percpu(ca->io_done);
-	bioset_exit(&ca->replica_set);
 	bch2_dev_buckets_free(ca);
 	free_page((unsigned long) ca->sb_read_scratch);
@@ -1317,8 +1316,6 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
 			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
 	    !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
 	    bch2_dev_buckets_alloc(c, ca) ||
-	    bioset_init(&ca->replica_set, 4,
-			offsetof(struct bch_write_bio, bio), 0) ||
 	    !(ca->io_done = alloc_percpu(*ca->io_done)))
 		goto err;