bd_inode series
Replacement of bdev->bd_inode with sane(r) set of primitives.

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQQqUNBr3gm4hGXdBJlZ7Krx/gZQ6wUCZkwjlgAKCRBZ7Krx/gZQ
66OmAP9nhZLASn/iM2+979I6O0GW+vid+uLh48uW3d+LbsmVIgD9GYpR+cuLQ/xj
mJESWfYKOVSpFFSrqlzKg9PQlU/GFgs=
=6LRp
-----END PGP SIGNATURE-----

Merge tag 'pull-bd_inode-1' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull bdev bd_inode updates from Al Viro:
 "Replacement of bdev->bd_inode with sane(r) set of primitives by me
  and Yu Kuai"

* tag 'pull-bd_inode-1' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  RIP ->bd_inode
  dasd_format(): killing the last remaining user of ->bd_inode
  nilfs_attach_log_writer(): use ->bd_mapping->host instead of ->bd_inode
  block/bdev.c: use the knowledge of inode/bdev coallocation
  gfs2: more obvious initializations of mapping->host
  fs/buffer.c: massage the remaining users of ->bd_inode to ->bd_mapping
  blk_ioctl_{discard,zeroout}(): we only want ->bd_inode->i_mapping here...
  grow_dev_folio(): we only want ->bd_inode->i_mapping there
  use ->bd_mapping instead of ->bd_inode->i_mapping
  block_device: add a pointer to struct address_space (page cache of bdev)
  missing helpers: bdev_unhash(), bdev_drop()
  block: move two helpers into bdev.c
  block2mtd: prevent direct access of bd_inode
  dm-vdo: use bdev_nr_bytes(bdev) instead of i_size_read(bdev->bd_inode)
  blkdev_write_iter(): saner way to get inode and bdev
  bcachefs: remove dead function bdev_sectors()
  ext4: remove block_device_ejected()
  erofs_buf: store address_space instead of inode
  erofs: switch erofs_bread() to passing offset instead of block number
commit 38da32ee70
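For code outside the block layer, the conversion this merge asks for is mechanical: bdev->bd_inode->i_mapping becomes bdev->bd_mapping, device sizes come from bdev_nr_bytes()/bdev_nr_sectors(), and only block/bdev.c itself still touches the backing inode, through the new BD_INODE() helper. A minimal before/after sketch; demo_sync_and_report() is a hypothetical function, not part of the series:

	#include <linux/blkdev.h>
	#include <linux/pagemap.h>

	/* Hypothetical helper, shown only to illustrate the new primitives. */
	static int demo_sync_and_report(struct block_device *bdev)
	{
		/* old: struct address_space *m = bdev->bd_inode->i_mapping; */
		struct address_space *m = bdev->bd_mapping;
		/* old: loff_t size = i_size_read(bdev->bd_inode); */
		loff_t size = bdev_nr_bytes(bdev);

		pr_info("%pg: %lld bytes\n", bdev, size);
		return filemap_write_and_wait(m);
	}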
 block/bdev.c | 66
@@ -43,6 +43,11 @@ static inline struct bdev_inode *BDEV_I(struct inode *inode)
 	return container_of(inode, struct bdev_inode, vfs_inode);
 }
 
+static inline struct inode *BD_INODE(struct block_device *bdev)
+{
+	return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
+}
+
 struct block_device *I_BDEV(struct inode *inode)
 {
 	return &BDEV_I(inode)->bdev;
@@ -57,7 +62,7 @@ EXPORT_SYMBOL(file_bdev);
 
 static void bdev_write_inode(struct block_device *bdev)
 {
-	struct inode *inode = bdev->bd_inode;
+	struct inode *inode = BD_INODE(bdev);
 	int ret;
 
 	spin_lock(&inode->i_lock);
@@ -76,7 +81,7 @@ static void bdev_write_inode(struct block_device *bdev)
 /* Kill _all_ buffers and pagecache , dirty or not.. */
 static void kill_bdev(struct block_device *bdev)
 {
-	struct address_space *mapping = bdev->bd_inode->i_mapping;
+	struct address_space *mapping = bdev->bd_mapping;
 
 	if (mapping_empty(mapping))
 		return;
@@ -88,7 +93,7 @@ static void kill_bdev(struct block_device *bdev)
 /* Invalidate clean unused buffers and pagecache. */
 void invalidate_bdev(struct block_device *bdev)
 {
-	struct address_space *mapping = bdev->bd_inode->i_mapping;
+	struct address_space *mapping = bdev->bd_mapping;
 
 	if (mapping->nrpages) {
 		invalidate_bh_lrus();
@@ -116,7 +121,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
 		goto invalidate;
 	}
 
-	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
+	truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
 	if (!(mode & BLK_OPEN_EXCL))
 		bd_abort_claiming(bdev, truncate_bdev_range);
 	return 0;
@@ -126,7 +131,7 @@ invalidate:
 	 * Someone else has handle exclusively open. Try invalidating instead.
 	 * The 'end' argument is inclusive so the rounding is safe.
 	 */
-	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
+	return invalidate_inode_pages2_range(bdev->bd_mapping,
 					     lstart >> PAGE_SHIFT,
 					     lend >> PAGE_SHIFT);
 }
@@ -134,14 +139,14 @@ invalidate:
 static void set_init_blocksize(struct block_device *bdev)
 {
 	unsigned int bsize = bdev_logical_block_size(bdev);
-	loff_t size = i_size_read(bdev->bd_inode);
+	loff_t size = i_size_read(BD_INODE(bdev));
 
 	while (bsize < PAGE_SIZE) {
 		if (size & bsize)
 			break;
 		bsize <<= 1;
 	}
-	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+	BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
 }
 
 int set_blocksize(struct file *file, int size)
@@ -198,7 +203,7 @@ int sync_blockdev_nowait(struct block_device *bdev)
 {
 	if (!bdev)
 		return 0;
-	return filemap_flush(bdev->bd_inode->i_mapping);
+	return filemap_flush(bdev->bd_mapping);
 }
 EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
 
@@ -210,13 +215,13 @@ int sync_blockdev(struct block_device *bdev)
 {
 	if (!bdev)
 		return 0;
-	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
+	return filemap_write_and_wait(bdev->bd_mapping);
 }
 EXPORT_SYMBOL(sync_blockdev);
 
 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
 {
-	return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
+	return filemap_write_and_wait_range(bdev->bd_mapping,
 			lstart, lend);
 }
 EXPORT_SYMBOL(sync_blockdev_range);
@@ -418,7 +423,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
 	spin_lock_init(&bdev->bd_size_lock);
 	mutex_init(&bdev->bd_holder_lock);
 	bdev->bd_partno = partno;
-	bdev->bd_inode = inode;
+	bdev->bd_mapping = &inode->i_data;
 	bdev->bd_queue = disk->queue;
 	if (partno)
 		bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
@@ -436,19 +441,30 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
 void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
 {
 	spin_lock(&bdev->bd_size_lock);
-	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+	i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT);
 	bdev->bd_nr_sectors = sectors;
 	spin_unlock(&bdev->bd_size_lock);
 }
 
 void bdev_add(struct block_device *bdev, dev_t dev)
 {
+	struct inode *inode = BD_INODE(bdev);
 	if (bdev_stable_writes(bdev))
-		mapping_set_stable_writes(bdev->bd_inode->i_mapping);
+		mapping_set_stable_writes(bdev->bd_mapping);
 	bdev->bd_dev = dev;
-	bdev->bd_inode->i_rdev = dev;
-	bdev->bd_inode->i_ino = dev;
-	insert_inode_hash(bdev->bd_inode);
+	inode->i_rdev = dev;
+	inode->i_ino = dev;
+	insert_inode_hash(inode);
+}
+
+void bdev_unhash(struct block_device *bdev)
+{
+	remove_inode_hash(BD_INODE(bdev));
+}
+
+void bdev_drop(struct block_device *bdev)
+{
+	iput(BD_INODE(bdev));
 }
 
 long nr_blockdev_pages(void)
@@ -923,7 +939,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
 		bdev_file->f_mode |= FMODE_NOWAIT;
 	if (mode & BLK_OPEN_RESTRICT_WRITES)
 		bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
-	bdev_file->f_mapping = bdev->bd_inode->i_mapping;
+	bdev_file->f_mapping = bdev->bd_mapping;
 	bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
 	bdev_file->private_data = holder;
 
@@ -985,13 +1001,13 @@ struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
 		return ERR_PTR(-ENXIO);
 
 	flags = blk_to_file_flags(mode);
-	bdev_file = alloc_file_pseudo_noaccount(bdev->bd_inode,
+	bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev),
 			blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
 	if (IS_ERR(bdev_file)) {
 		blkdev_put_no_open(bdev);
 		return bdev_file;
 	}
-	ihold(bdev->bd_inode);
+	ihold(BD_INODE(bdev));
 
 	ret = bdev_open(bdev, mode, holder, hops, bdev_file);
 	if (ret) {
@@ -1266,6 +1282,18 @@ void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
 	blkdev_put_no_open(bdev);
 }
 
+bool disk_live(struct gendisk *disk)
+{
+	return !inode_unhashed(BD_INODE(disk->part0));
+}
+EXPORT_SYMBOL_GPL(disk_live);
+
+unsigned int block_size(struct block_device *bdev)
+{
+	return 1 << BD_INODE(bdev)->i_blkbits;
+}
+EXPORT_SYMBOL_GPL(block_size);
+
 static int __init setup_bdev_allow_write_mounted(char *str)
 {
 	if (kstrtobool(str, &bdev_allow_write_mounted))
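Outside block/bdev.c, the page cache of a device is now reached only through bd_mapping; the backing inode stays private to bdev.c. A hedged sketch of reading the first page of a device's page cache, mirroring the pattern the bcache and btrfs hunks below use (demo_read_first_page() is made up for illustration):

	#include <linux/blkdev.h>
	#include <linux/pagemap.h>

	static struct page *demo_read_first_page(struct block_device *bdev)
	{
		/* returns an uptodate page, or ERR_PTR() on I/O error */
		return read_cache_page_gfp(bdev->bd_mapping, 0, GFP_KERNEL);
	}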
@@ -416,7 +416,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
 		op = REQ_OP_ZONE_RESET;
 
 		/* Invalidate the page cache, including dirty pages. */
-		filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+		filemap_invalidate_lock(bdev->bd_mapping);
 		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
 		if (ret)
 			goto fail;
@@ -438,7 +438,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
 
 fail:
 	if (cmd == BLKRESETZONE)
-		filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+		filemap_invalidate_unlock(bdev->bd_mapping);
 
 	return ret;
 }
@@ -499,6 +499,8 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
 
 struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
 void bdev_add(struct block_device *bdev, dev_t dev);
+void bdev_unhash(struct block_device *bdev);
+void bdev_drop(struct block_device *bdev);
 
 int blk_alloc_ext_minor(void);
 void blk_free_ext_minor(unsigned int minor);
@@ -663,8 +663,8 @@ static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
 static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
-	struct block_device *bdev = I_BDEV(file->f_mapping->host);
-	struct inode *bd_inode = bdev->bd_inode;
+	struct inode *bd_inode = bdev_file_inode(file);
+	struct block_device *bdev = I_BDEV(bd_inode);
 	loff_t size = bdev_nr_bytes(bdev);
 	size_t shorted = 0;
 	ssize_t ret;
@@ -653,7 +653,7 @@ void del_gendisk(struct gendisk *disk)
 	 */
 	mutex_lock(&disk->open_mutex);
 	xa_for_each(&disk->part_tbl, idx, part)
-		remove_inode_hash(part->bd_inode);
+		bdev_unhash(part);
 	mutex_unlock(&disk->open_mutex);
 
 	/*
@@ -742,7 +742,7 @@ void invalidate_disk(struct gendisk *disk)
 	struct block_device *bdev = disk->part0;
 
 	invalidate_bdev(bdev);
-	bdev->bd_inode->i_mapping->wb_err = 0;
+	bdev->bd_mapping->wb_err = 0;
 	set_capacity(disk, 0);
 }
 EXPORT_SYMBOL(invalidate_disk);
@@ -1191,7 +1191,7 @@ static void disk_release(struct device *dev)
 	if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk)
 		disk->fops->free_disk(disk);
 
-	iput(disk->part0->bd_inode);	/* frees the disk */
+	bdev_drop(disk->part0);	/* frees the disk */
 }
 
 static int block_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -1379,7 +1379,7 @@ out_erase_part0:
 out_destroy_part_tbl:
 	xa_destroy(&disk->part_tbl);
 	disk->part0->bd_disk = NULL;
-	iput(disk->part0->bd_inode);
+	bdev_drop(disk->part0);
 out_free_bdi:
 	bdi_put(disk->bdi);
 out_free_bioset:
@@ -96,7 +96,6 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
 		unsigned long arg)
 {
 	unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
-	struct inode *inode = bdev->bd_inode;
 	uint64_t range[2], start, len, end;
 	struct bio *prev = NULL, *bio;
 	sector_t sector, nr_sects;
@@ -126,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
 	    end > bdev_nr_bytes(bdev))
 		return -EINVAL;
 
-	filemap_invalidate_lock(inode->i_mapping);
+	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
 	if (err)
 		goto fail;
@@ -157,7 +156,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
 out_unplug:
 	blk_finish_plug(&plug);
 fail:
-	filemap_invalidate_unlock(inode->i_mapping);
+	filemap_invalidate_unlock(bdev->bd_mapping);
 	return err;
 }
 
@@ -182,12 +181,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
 	if (start + len > bdev_nr_bytes(bdev))
 		return -EINVAL;
 
-	filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
 	if (!err)
 		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
 						GFP_KERNEL);
-	filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+	filemap_invalidate_unlock(bdev->bd_mapping);
 	return err;
 }
 
@@ -197,7 +196,6 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
 {
 	uint64_t range[2];
 	uint64_t start, end, len;
-	struct inode *inode = bdev->bd_inode;
 	int err;
 
 	if (!(mode & BLK_OPEN_WRITE))
@@ -220,7 +218,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
 		return -EINVAL;
 
 	/* Invalidate the page cache, including dirty pages */
-	filemap_invalidate_lock(inode->i_mapping);
+	filemap_invalidate_lock(bdev->bd_mapping);
 	err = truncate_bdev_range(bdev, mode, start, end);
 	if (err)
 		goto fail;
@@ -229,7 +227,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
 				   BLKDEV_ZERO_NOUNMAP);
 
 fail:
-	filemap_invalidate_unlock(inode->i_mapping);
+	filemap_invalidate_unlock(bdev->bd_mapping);
 	return err;
 }
 
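The three ioctl paths above share one shape: take the invalidate lock on bdev->bd_mapping, drop cached pages over the affected byte range, issue the operation, unlock. A condensed sketch, assuming it is compiled inside block/ (truncate_bdev_range() is declared in the private block/blk.h); demo_discard() itself is hypothetical:

	#include <linux/blkdev.h>
	#include "blk.h"

	static int demo_discard(struct block_device *bdev, blk_mode_t mode,
				loff_t start, loff_t len)
	{
		int err;

		filemap_invalidate_lock(bdev->bd_mapping);
		err = truncate_bdev_range(bdev, mode, start, start + len - 1);
		if (!err)
			err = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
						   len >> SECTOR_SHIFT, GFP_KERNEL);
		filemap_invalidate_unlock(bdev->bd_mapping);
		return err;
	}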
@@ -243,7 +243,7 @@ static const struct attribute_group *part_attr_groups[] = {
 static void part_release(struct device *dev)
 {
 	put_disk(dev_to_bdev(dev)->bd_disk);
-	iput(dev_to_bdev(dev)->bd_inode);
+	bdev_drop(dev_to_bdev(dev));
 }
 
 static int part_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -469,7 +469,7 @@ int bdev_del_partition(struct gendisk *disk, int partno)
 	 * Just delete the partition and invalidate it.
 	 */
 
-	remove_inode_hash(part->bd_inode);
+	bdev_unhash(part);
 	invalidate_bdev(part);
 	drop_partition(part);
 	ret = 0;
@@ -652,7 +652,7 @@ rescan:
 		 * it cannot be looked up any more even when openers
 		 * still hold references.
 		 */
-		remove_inode_hash(part->bd_inode);
+		bdev_unhash(part);
 
 		/*
 		 * If @disk->open_partitions isn't elevated but there's
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
 
 void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
 {
-	struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
+	struct address_space *mapping = state->disk->part0->bd_mapping;
 	struct folio *folio;
 
 	if (n >= get_capacity(state->disk)) {
@@ -171,7 +171,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
 	struct page *page;
 	unsigned int i;
 
-	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+	page = read_cache_page_gfp(bdev->bd_mapping,
 				   SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
 	if (IS_ERR(page))
 		return "IO error";
@@ -878,7 +878,7 @@ static int parse_device_config(int argc, char **argv, struct dm_target *ti,
 	}
 
 	if (config->version == 0) {
-		u64 device_size = i_size_read(config->owned_device->bdev->bd_inode);
+		u64 device_size = bdev_nr_bytes(config->owned_device->bdev);
 
 		config->physical_blocks = device_size / VDO_BLOCK_SIZE;
 	}
@@ -1011,7 +1011,7 @@ static void vdo_status(struct dm_target *ti, status_type_t status_type,
 
 static block_count_t __must_check get_underlying_device_block_count(const struct vdo *vdo)
 {
-	return i_size_read(vdo_get_backing_device(vdo)->bd_inode) / VDO_BLOCK_SIZE;
+	return bdev_nr_bytes(vdo_get_backing_device(vdo)) / VDO_BLOCK_SIZE;
 }
 
 static int __must_check process_vdo_message_locked(struct vdo *vdo, unsigned int argc,
@@ -90,7 +90,7 @@ void uds_put_io_factory(struct io_factory *factory)
 
 size_t uds_get_writable_size(struct io_factory *factory)
 {
-	return i_size_read(factory->bdev->bd_inode);
+	return bdev_nr_bytes(factory->bdev);
 }
 
 /* Create a struct dm_bufio_client for an index region starting at offset. */
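The dm-vdo changes are call-for-call substitutions: bdev_nr_bytes() returns the same loff_t value the bdev inode's i_size carried, without dereferencing the inode. Sketch (demo_block_count() is hypothetical; 4096 stands in for VDO_BLOCK_SIZE):

	#include <linux/blkdev.h>

	static u64 demo_block_count(struct block_device *bdev)
	{
		/* was: i_size_read(bdev->bd_inode) / VDO_BLOCK_SIZE */
		return bdev_nr_bytes(bdev) / 4096;
	}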
@@ -265,6 +265,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
 	struct file *bdev_file;
 	struct block_device *bdev;
 	struct block2mtd_dev *dev;
+	loff_t size;
 	char *name;
 
 	if (!devname)
@@ -291,7 +292,8 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
 		goto err_free_block2mtd;
 	}
 
-	if ((long)bdev->bd_inode->i_size % erase_size) {
+	size = bdev_nr_bytes(bdev);
+	if ((long)size % erase_size) {
 		pr_err("erasesize must be a divisor of device size\n");
 		goto err_free_block2mtd;
 	}
@@ -309,7 +311,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
 
 	dev->mtd.name = name;
 
-	dev->mtd.size = bdev->bd_inode->i_size & PAGE_MASK;
+	dev->mtd.size = size & PAGE_MASK;
 	dev->mtd.erasesize = erase_size;
 	dev->mtd.writesize = 1;
 	dev->mtd.writebufsize = PAGE_SIZE;
@@ -215,7 +215,7 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
 	 * enabling the device later.
 	 */
 	if (fdata->start_unit == 0) {
-		block->gdp->part0->bd_inode->i_blkbits =
+		block->gdp->part0->bd_mapping->host->i_blkbits =
 			blksize_bits(fdata->blksize);
 	}
 
@@ -32,7 +32,7 @@
  */
 unsigned char *scsi_bios_ptable(struct block_device *dev)
 {
-	struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping;
+	struct address_space *mapping = bdev_whole(dev)->bd_mapping;
 	unsigned char *res = NULL;
 	struct folio *folio;
 
@@ -445,11 +445,6 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 void bch2_bio_map(struct bio *bio, void *base, size_t);
 int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
 
-static inline sector_t bdev_sectors(struct block_device *bdev)
-{
-	return bdev->bd_inode->i_size >> 9;
-}
-
 #define closure_bio_submit(bio, cl)					\
 do {									\
 	closure_get(cl);						\
@@ -3656,7 +3656,7 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
 	struct btrfs_super_block *super;
 	struct page *page;
 	u64 bytenr, bytenr_orig;
-	struct address_space *mapping = bdev->bd_inode->i_mapping;
+	struct address_space *mapping = bdev->bd_mapping;
 	int ret;
 
 	bytenr_orig = btrfs_sb_offset(copy_num);
@@ -3743,7 +3743,7 @@ static int write_dev_supers(struct btrfs_device *device,
 			    struct btrfs_super_block *sb, int max_mirrors)
 {
 	struct btrfs_fs_info *fs_info = device->fs_info;
-	struct address_space *mapping = device->bdev->bd_inode->i_mapping;
+	struct address_space *mapping = device->bdev->bd_mapping;
 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 	int i;
 	int ret;
@@ -3861,7 +3861,7 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
 		    device->commit_total_bytes)
 			break;
 
-		folio = filemap_get_folio(device->bdev->bd_inode->i_mapping,
+		folio = filemap_get_folio(device->bdev->bd_mapping,
 					  bytenr >> PAGE_SHIFT);
 		/* If the folio has been removed, then we know it completed. */
 		if (IS_ERR(folio))
@@ -1290,7 +1290,7 @@ static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev
 		return ERR_PTR(-EINVAL);
 
 	/* pull in the page with our super */
-	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
+	page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL);
 
 	if (IS_ERR(page))
 		return ERR_CAST(page);
@@ -118,7 +118,7 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
 		return -ENOENT;
 	} else if (full[0] && full[1]) {
 		/* Compare two super blocks */
-		struct address_space *mapping = bdev->bd_inode->i_mapping;
+		struct address_space *mapping = bdev->bd_mapping;
 		struct page *page[BTRFS_NR_SB_LOG_ZONES];
 		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
 		int i;
 fs/buffer.c | 26
@@ -189,8 +189,8 @@ EXPORT_SYMBOL(end_buffer_write_sync);
 static struct buffer_head *
 __find_get_block_slow(struct block_device *bdev, sector_t block)
 {
-	struct inode *bd_inode = bdev->bd_inode;
-	struct address_space *bd_mapping = bd_inode->i_mapping;
+	struct address_space *bd_mapping = bdev->bd_mapping;
+	const int blkbits = bd_mapping->host->i_blkbits;
 	struct buffer_head *ret = NULL;
 	pgoff_t index;
 	struct buffer_head *bh;
@@ -199,7 +199,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
 	int all_mapped = 1;
 	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
 
-	index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
+	index = ((loff_t)block << blkbits) / PAGE_SIZE;
 	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
 	if (IS_ERR(folio))
 		goto out;
@@ -233,7 +233,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
 		       (unsigned long long)block,
 		       (unsigned long long)bh->b_blocknr,
 		       bh->b_state, bh->b_size, bdev,
-		       1 << bd_inode->i_blkbits);
+		       1 << blkbits);
 	}
 out_unlock:
 	spin_unlock(&bd_mapping->i_private_lock);
@@ -1041,12 +1041,12 @@ static sector_t folio_init_buffers(struct folio *folio,
 static bool grow_dev_folio(struct block_device *bdev, sector_t block,
 		pgoff_t index, unsigned size, gfp_t gfp)
 {
-	struct inode *inode = bdev->bd_inode;
+	struct address_space *mapping = bdev->bd_mapping;
 	struct folio *folio;
 	struct buffer_head *bh;
 	sector_t end_block = 0;
 
-	folio = __filemap_get_folio(inode->i_mapping, index,
+	folio = __filemap_get_folio(mapping, index,
 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
 	if (IS_ERR(folio))
 		return false;
@@ -1080,10 +1080,10 @@ static bool grow_dev_folio(struct block_device *bdev, sector_t block,
 	 * lock to be atomic wrt __find_get_block(), which does not
 	 * run under the folio lock.
 	 */
-	spin_lock(&inode->i_mapping->i_private_lock);
+	spin_lock(&mapping->i_private_lock);
 	link_dev_buffers(folio, bh);
 	end_block = folio_init_buffers(folio, bdev, size);
-	spin_unlock(&inode->i_mapping->i_private_lock);
+	spin_unlock(&mapping->i_private_lock);
 unlock:
 	folio_unlock(folio);
 	folio_put(folio);
@@ -1486,7 +1486,7 @@ struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
 {
 	struct buffer_head *bh;
 
-	gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
 
 	/*
 	 * Prefer looping in the allocator rather than here, at least that
@@ -1719,16 +1719,16 @@ EXPORT_SYMBOL(create_empty_buffers);
 */
 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 {
-	struct inode *bd_inode = bdev->bd_inode;
-	struct address_space *bd_mapping = bd_inode->i_mapping;
+	struct address_space *bd_mapping = bdev->bd_mapping;
+	const int blkbits = bd_mapping->host->i_blkbits;
 	struct folio_batch fbatch;
-	pgoff_t index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
+	pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
 	pgoff_t end;
 	int i, count;
 	struct buffer_head *bh;
 	struct buffer_head *head;
 
-	end = ((loff_t)(block + len - 1) << bd_inode->i_blkbits) / PAGE_SIZE;
+	end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
 	folio_batch_init(&fbatch);
 	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
 		count = folio_batch_count(&fbatch);
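With bd_inode gone, fs/buffer.c derives the device block size from the mapping's host inode instead; the index arithmetic itself is unchanged. A sketch of the computation (demo_block_to_index() is made up):

	#include <linux/blkdev.h>
	#include <linux/pagemap.h>

	static pgoff_t demo_block_to_index(struct block_device *bdev, sector_t block)
	{
		const int blkbits = bdev->bd_mapping->host->i_blkbits;

		/* which page of the device page cache holds this block? */
		return ((loff_t)block << blkbits) / PAGE_SIZE;
	}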
@@ -183,7 +183,7 @@ static int next_buffer;
 static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
 				unsigned int len)
 {
-	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+	struct address_space *mapping = sb->s_bdev->bd_mapping;
 	struct file_ra_state ra = {};
 	struct page *pages[BLKS_PER_BUF];
 	unsigned i, blocknr, buffer;
@@ -29,11 +29,9 @@ void erofs_put_metabuf(struct erofs_buf *buf)
 * Derive the block size from inode->i_blkbits to make compatible with
 * anonymous inode in fscache mode.
 */
-void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
+void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
 		enum erofs_kmap_type type)
 {
-	struct inode *inode = buf->inode;
-	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
 	pgoff_t index = offset >> PAGE_SHIFT;
 	struct page *page = buf->page;
 	struct folio *folio;
@@ -43,7 +41,7 @@ void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
 		erofs_put_metabuf(buf);
 
 	nofs_flag = memalloc_nofs_save();
-	folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
+	folio = read_cache_folio(buf->mapping, index, NULL, NULL);
 	memalloc_nofs_restore(nofs_flag);
 	if (IS_ERR(folio))
 		return folio;
@@ -68,16 +66,16 @@ void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
 void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
 {
 	if (erofs_is_fscache_mode(sb))
-		buf->inode = EROFS_SB(sb)->s_fscache->inode;
+		buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping;
 	else
-		buf->inode = sb->s_bdev->bd_inode;
+		buf->mapping = sb->s_bdev->bd_mapping;
 }
 
 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
 			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
 {
 	erofs_init_metabuf(buf, sb);
-	return erofs_bread(buf, blkaddr, type);
+	return erofs_bread(buf, erofs_pos(sb, blkaddr), type);
 }
 
 static int erofs_map_blocks_flatmode(struct inode *inode,
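After this conversion erofs_bread() takes a byte offset and hands back a pointer already adjusted to that offset within the mapped page, so callers drop their erofs_blkoff() additions; block-number callers convert with erofs_pos(). A hedged sketch assuming fs/erofs context (demo_read_dir_block() is made up):

	#include "internal.h"

	static void *demo_read_dir_block(struct inode *dir, erofs_blk_t blk,
					 struct erofs_buf *buf)
	{
		buf->mapping = dir->i_mapping;
		/* byte offset = block number << blkbits, via erofs_pos() */
		return erofs_bread(buf, erofs_pos(dir->i_sb, blk), EROFS_KMAP);
	}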
@@ -58,12 +58,12 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
 	int err = 0;
 	bool initial = true;
 
-	buf.inode = dir;
+	buf.mapping = dir->i_mapping;
 	while (ctx->pos < dirsize) {
 		struct erofs_dirent *de;
 		unsigned int nameoff, maxsize;
 
-		de = erofs_bread(&buf, i, EROFS_KMAP);
+		de = erofs_bread(&buf, erofs_pos(sb, i), EROFS_KMAP);
 		if (IS_ERR(de)) {
 			erofs_err(sb, "fail to readdir of logical block %u of nid %llu",
 				  i, EROFS_I(dir)->nid);
@@ -216,7 +216,7 @@ enum erofs_kmap_type {
 };
 
 struct erofs_buf {
-	struct inode *inode;
+	struct address_space *mapping;
 	struct page *page;
 	void *base;
 	enum erofs_kmap_type kmap_type;
@@ -402,7 +402,7 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
 			  erofs_off_t *offset, int *lengthp);
 void erofs_unmap_metabuf(struct erofs_buf *buf);
 void erofs_put_metabuf(struct erofs_buf *buf);
-void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
+void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
 		  enum erofs_kmap_type type);
 void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
@@ -99,8 +99,8 @@ static void *erofs_find_target_block(struct erofs_buf *target,
 	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 	struct erofs_dirent *de;
 
-	buf.inode = dir;
-	de = erofs_bread(&buf, mid, EROFS_KMAP);
+	buf.mapping = dir->i_mapping;
+	de = erofs_bread(&buf, erofs_pos(dir->i_sb, mid), EROFS_KMAP);
 	if (!IS_ERR(de)) {
 		const int nameoff = nameoff_from_disk(de->nameoff, bsz);
 		const int ndirents = nameoff / sizeof(*de);
@@ -171,7 +171,7 @@ int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid,
 
 	qn.name = name->name;
 	qn.end = name->name + name->len;
-	buf.inode = dir;
+	buf.mapping = dir->i_mapping;
 
 	ndirents = 0;
 	de = erofs_find_target_block(&buf, dir, &qn, &ndirents);
@@ -132,11 +132,11 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
 	int len, i, cnt;
 
 	*offset = round_up(*offset, 4);
-	ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
+	ptr = erofs_bread(buf, *offset, EROFS_KMAP);
 	if (IS_ERR(ptr))
 		return ptr;
 
-	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
+	len = le16_to_cpu(*(__le16 *)ptr);
 	if (!len)
 		len = U16_MAX + 1;
 	buffer = kmalloc(len, GFP_KERNEL);
@@ -148,12 +148,12 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
 	for (i = 0; i < len; i += cnt) {
 		cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
 			    len - i);
-		ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
+		ptr = erofs_bread(buf, *offset, EROFS_KMAP);
 		if (IS_ERR(ptr)) {
 			kfree(buffer);
 			return ptr;
 		}
-		memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
+		memcpy(buffer + i, ptr, cnt);
 		*offset += cnt;
 	}
 	return buffer;
@@ -81,13 +81,13 @@ static int erofs_init_inode_xattrs(struct inode *inode)
 	it.pos = erofs_iloc(inode) + vi->inode_isize;
 
 	/* read in shared xattr array (non-atomic, see kmalloc below) */
-	it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), EROFS_KMAP);
+	it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP);
 	if (IS_ERR(it.kaddr)) {
 		ret = PTR_ERR(it.kaddr);
 		goto out_unlock;
 	}
 
-	ih = it.kaddr + erofs_blkoff(sb, it.pos);
+	ih = it.kaddr;
 	vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter);
 	vi->xattr_shared_count = ih->h_shared_count;
 	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
@@ -102,16 +102,14 @@ static int erofs_init_inode_xattrs(struct inode *inode)
 	it.pos += sizeof(struct erofs_xattr_ibody_header);
 
 	for (i = 0; i < vi->xattr_shared_count; ++i) {
-		it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos),
-				       EROFS_KMAP);
+		it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP);
 		if (IS_ERR(it.kaddr)) {
 			kfree(vi->xattr_shared_xattrs);
 			vi->xattr_shared_xattrs = NULL;
 			ret = PTR_ERR(it.kaddr);
 			goto out_unlock;
 		}
-		vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)
-				(it.kaddr + erofs_blkoff(sb, it.pos)));
+		vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)it.kaddr);
 		it.pos += sizeof(__le32);
 	}
 	erofs_put_metabuf(&it.buf);
@@ -185,12 +183,11 @@ static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it,
 	void *src;
 
 	for (processed = 0; processed < len; processed += slice) {
-		it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
-					EROFS_KMAP);
+		it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
 		if (IS_ERR(it->kaddr))
 			return PTR_ERR(it->kaddr);
 
-		src = it->kaddr + erofs_blkoff(sb, it->pos);
+		src = it->kaddr;
 		slice = min_t(unsigned int, sb->s_blocksize -
 				erofs_blkoff(sb, it->pos), len - processed);
 		memcpy(it->buffer + it->buffer_ofs, src, slice);
@@ -208,8 +205,7 @@ static int erofs_listxattr_foreach(struct erofs_xattr_iter *it)
 	int err;
 
 	/* 1. handle xattr entry */
-	entry = *(struct erofs_xattr_entry *)
-			(it->kaddr + erofs_blkoff(it->sb, it->pos));
+	entry = *(struct erofs_xattr_entry *)it->kaddr;
 	it->pos += sizeof(struct erofs_xattr_entry);
 
 	base_index = entry.e_name_index;
@@ -259,8 +255,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
 	unsigned int slice, processed, value_sz;
 
 	/* 1. handle xattr entry */
-	entry = *(struct erofs_xattr_entry *)
-			(it->kaddr + erofs_blkoff(sb, it->pos));
+	entry = *(struct erofs_xattr_entry *)it->kaddr;
 	it->pos += sizeof(struct erofs_xattr_entry);
 	value_sz = le16_to_cpu(entry.e_value_size);
 
@@ -291,8 +286,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
 
 	/* 2. handle xattr name */
 	for (processed = 0; processed < entry.e_name_len; processed += slice) {
-		it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
-					EROFS_KMAP);
+		it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
 		if (IS_ERR(it->kaddr))
 			return PTR_ERR(it->kaddr);
 
@@ -300,7 +294,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
 				sb->s_blocksize - erofs_blkoff(sb, it->pos),
 				entry.e_name_len - processed);
 		if (memcmp(it->name.name + it->infix_len + processed,
-			   it->kaddr + erofs_blkoff(sb, it->pos), slice))
+			   it->kaddr, slice))
 			return -ENOATTR;
 		it->pos += slice;
 	}
@@ -336,13 +330,11 @@ static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it,
 	it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz;
 
 	while (remaining) {
-		it->kaddr = erofs_bread(&it->buf, erofs_blknr(it->sb, it->pos),
-					EROFS_KMAP);
+		it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
 		if (IS_ERR(it->kaddr))
 			return PTR_ERR(it->kaddr);
 
-		entry_sz = erofs_xattr_entry_size(it->kaddr +
-				erofs_blkoff(it->sb, it->pos));
+		entry_sz = erofs_xattr_entry_size(it->kaddr);
 		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
 		if (remaining < entry_sz) {
 			DBG_BUGON(1);
@@ -375,8 +367,7 @@ static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it,
 	for (i = 0; i < vi->xattr_shared_count; ++i) {
 		it->pos = erofs_pos(sb, sbi->xattr_blkaddr) +
 				vi->xattr_shared_xattrs[i] * sizeof(__le32);
-		it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
-					EROFS_KMAP);
+		it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP);
 		if (IS_ERR(it->kaddr))
 			return PTR_ERR(it->kaddr);
 
@@ -492,7 +483,7 @@ int erofs_xattr_prefixes_init(struct super_block *sb)
 		return -ENOMEM;
 
 	if (sbi->packed_inode)
-		buf.inode = sbi->packed_inode;
+		buf.mapping = sbi->packed_inode->i_mapping;
 	else
 		erofs_init_metabuf(&buf, sb);
 
@@ -936,16 +936,16 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
 	if (!packed_inode)
 		return -EFSCORRUPTED;
 
-	buf.inode = packed_inode;
+	buf.mapping = packed_inode->i_mapping;
 	for (; cur < end; cur += cnt, pos += cnt) {
 		cnt = min_t(unsigned int, end - cur,
 			    sb->s_blocksize - erofs_blkoff(sb, pos));
-		src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
+		src = erofs_bread(&buf, pos, EROFS_KMAP);
 		if (IS_ERR(src)) {
 			erofs_put_metabuf(&buf);
 			return PTR_ERR(src);
 		}
-		memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt);
+		memcpy_to_page(page, cur, src, cnt);
 	}
 	erofs_put_metabuf(&buf);
 	return 0;
@@ -192,7 +192,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 					(PAGE_SHIFT - inode->i_blkbits);
 			if (!ra_has_index(&file->f_ra, index))
 				page_cache_sync_readahead(
-					sb->s_bdev->bd_inode->i_mapping,
+					sb->s_bdev->bd_mapping,
 					&file->f_ra, file,
 					index, 1);
 			file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
@@ -206,7 +206,7 @@ static void ext4_journal_abort_handle(const char *caller, unsigned int line,
 
 static void ext4_check_bdev_write_error(struct super_block *sb)
 {
-	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+	struct address_space *mapping = sb->s_bdev->bd_mapping;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	int err;
 
@@ -244,7 +244,7 @@ static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
 struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
 				  blk_opf_t op_flags)
 {
-	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping,
+	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
 			~__GFP_FS) | __GFP_MOVABLE;
 
 	return __ext4_sb_bread_gfp(sb, block, op_flags, gfp);
@@ -253,7 +253,7 @@ struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
 struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
 					    sector_t block)
 {
-	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping,
+	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
 			~__GFP_FS);
 
 	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
@@ -492,22 +492,6 @@ static void ext4_maybe_update_superblock(struct super_block *sb)
 		schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
 }
 
-/*
- * The del_gendisk() function uninitializes the disk-specific data
- * structures, including the bdi structure, without telling anyone
- * else.  Once this happens, any attempt to call mark_buffer_dirty()
- * (for example, by ext4_commit_super), will cause a kernel OOPS.
- * This is a kludge to prevent these oops until we can put in a proper
- * hook in del_gendisk() to inform the VFS and file system layers.
- */
-static int block_device_ejected(struct super_block *sb)
-{
-	struct inode *bd_inode = sb->s_bdev->bd_inode;
-	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
-
-	return bdi->dev == NULL;
-}
-
 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 {
 	struct super_block *sb = journal->j_private;
@@ -5563,7 +5547,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
 	 * used to detect the metadata async write error.
 	 */
 	spin_lock_init(&sbi->s_bdev_wb_lock);
-	errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+	errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
				 &sbi->s_bdev_wb_err);
 	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
 	ext4_orphan_cleanup(sb, es);
@@ -6164,8 +6148,6 @@ static int ext4_commit_super(struct super_block *sb)
 
 	if (!sbh)
 		return -EINVAL;
-	if (block_device_ejected(sb))
-		return -ENODEV;
 
 	ext4_update_super(sb);
 
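The removed block_device_ejected() reached the backing_dev_info through the bdev inode, the last such use in ext4; write-error detection on the bdev goes through errseq on bd_mapping instead, as the __ext4_fill_super() hunk above shows. A simplified sketch of that pattern (demo_bdev_write_error() is hypothetical):

	#include <linux/fs.h>
	#include <linux/errseq.h>

	static bool demo_bdev_write_error(struct super_block *sb, errseq_t *since)
	{
		/* errseq_check_and_advance() both tests and moves the cursor */
		return errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
						since) != 0;
	}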
@@ -1267,7 +1267,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	mapping = gfs2_glock2aspace(gl);
 	if (mapping) {
 		mapping->a_ops = &gfs2_meta_aops;
-		mapping->host = s->s_bdev->bd_inode;
+		mapping->host = s->s_bdev->bd_mapping->host;
 		mapping->flags = 0;
 		mapping_set_gfp_mask(mapping, GFP_NOFS);
 		mapping->i_private_data = NULL;
@@ -114,7 +114,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
 	address_space_init_once(mapping);
 	mapping->a_ops = &gfs2_rgrp_aops;
-	mapping->host = sb->s_bdev->bd_inode;
+	mapping->host = sb->s_bdev->bd_mapping->host;
 	mapping->flags = 0;
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
 	mapping->i_private_data = NULL;
@@ -2009,7 +2009,7 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
 			byte_count = (block_stop - block_start + 1) *
 					journal->j_blocksize;
 
-			truncate_inode_pages_range(journal->j_dev->bd_inode->i_mapping,
+			truncate_inode_pages_range(journal->j_dev->bd_mapping,
 					byte_start, byte_stop);
 
 			if (flags & JBD2_JOURNAL_FLUSH_DISCARD) {
@@ -2784,7 +2784,7 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
 		if (!nilfs->ns_writer)
 			return -ENOMEM;
 
-		inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
+		inode_attach_wb(nilfs->ns_bdev->bd_mapping->host, NULL);
 
 		err = nilfs_segctor_start_thread(nilfs->ns_writer);
 		if (unlikely(err))
@@ -50,7 +50,7 @@ struct block_device {
 	bool			bd_write_holder;
 	bool			bd_has_submit_bio;
 	dev_t			bd_dev;
-	struct inode		*bd_inode;	/* will die */
+	struct address_space	*bd_mapping;	/* page cache */
 
 	atomic_t		bd_openers;
 	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
@@ -212,11 +212,6 @@ struct gendisk {
 	struct blk_independent_access_ranges *ia_ranges;
 };
 
-static inline bool disk_live(struct gendisk *disk)
-{
-	return !inode_unhashed(disk->part0->bd_inode);
-}
-
 /**
  * disk_openers - returns how many openers are there for a disk
  * @disk: disk to check
@@ -1371,11 +1366,6 @@ static inline unsigned int blksize_bits(unsigned int size)
 	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
 }
 
-static inline unsigned int block_size(struct block_device *bdev)
-{
-	return 1 << bdev->bd_inode->i_blkbits;
-}
-
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
 
@@ -1543,6 +1533,8 @@ void blkdev_put_no_open(struct block_device *bdev);
 
 struct block_device *I_BDEV(struct inode *inode);
 struct block_device *file_bdev(struct file *bdev_file);
+bool disk_live(struct gendisk *disk);
+unsigned int block_size(struct block_device *bdev);
 
 #ifdef CONFIG_BLOCK
 void invalidate_bdev(struct block_device *bdev);
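disk_live() and block_size() move out of line because their bodies now need BD_INODE(), which is private to block/bdev.c; callers keep the same signatures. A typical call site (demo_report() is hypothetical):

	#include <linux/blkdev.h>

	static void demo_report(struct gendisk *disk)
	{
		if (disk_live(disk))
			pr_info("%s: block size %u\n", disk->disk_name,
				block_size(disk->part0));
	}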
@@ -364,7 +364,7 @@ static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
 {
 	gfp_t gfp;
 
-	gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
 	gfp |= __GFP_NOFAIL;
 
 	return bdev_getblk(bdev, block, size, gfp);
@@ -375,7 +375,7 @@ static inline struct buffer_head *__getblk(struct block_device *bdev,
 {
 	gfp_t gfp;
 
-	gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
 	gfp |= __GFP_MOVABLE | __GFP_NOFAIL;
 
 	return bdev_getblk(bdev, block, size, gfp);
@@ -1694,7 +1694,7 @@ static inline void jbd2_journal_abort_handle(handle_t *handle)
 
 static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
 {
-	struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+	struct address_space *mapping = journal->j_fs_dev->bd_mapping;
 
 	/*
 	 * Save the original wb_err value of client fs's bdev mapping which
@@ -1705,7 +1705,7 @@ static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
 
 static inline int jbd2_check_fs_dev_write_error(journal_t *journal)
 {
-	struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+	struct address_space *mapping = journal->j_fs_dev->bd_mapping;
 
 	return errseq_check(&mapping->wb_err,
 			    READ_ONCE(journal->j_fs_dev_wb_err));