Merge tag 'vfio-v6.10-rc4' of https://github.com/awilliam/linux-vfio

Pull VFIO fixes from Alex Williamson:
 "Fix long standing lockdep issue of using remap_pfn_range() from the
  vfio-pci fault handler for mapping device MMIO. Commit ba168b52bf
  ("mm: use rwsem assertion macros for mmap_lock") now exposes this as a
  warning forcing this to be addressed.

  remap_pfn_range() was used here to efficiently map the entire vma, but
  it really never should have been used in the fault handler and doesn't
  handle concurrency, which introduced complex locking. We also needed
  to track vmas mapping the device memory in order to zap those vmas
  when the memory is disabled, resulting in a vma list.

  Instead of all that mess, set up an address space on the device fd
  such that we can use unmap_mapping_range() for zapping to avoid the
  tracking overhead and use the standard vmf_insert_pfn() to insert
  mappings on fault.

  For now we'll iterate the vma and opportunistically try to insert
  mappings for the entire vma. This aligns with typical use cases, but
  hopefully in the future we can drop the iterative approach and make
  use of huge_fault instead, once vmf_insert_pfn{pud,pmd}() learn to
  handle pfnmaps"

* tag 'vfio-v6.10-rc4' of https://github.com/awilliam/linux-vfio:
  vfio/pci: Insert full vma on mmap'd MMIO fault
  vfio/pci: Use unmap_mapping_range()
  vfio: Create vfio_fs_type with inode per device
Author: Linus Torvalds
Date:   2024-06-14 18:46:53 -07:00
Commit: 68132b3536

6 changed files with 128 additions and 210 deletions

--- a/drivers/vfio/device_cdev.c
+++ b/drivers/vfio/device_cdev.c

@@ -39,6 +39,13 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
 
         filep->private_data = df;
 
+        /*
+         * Use the pseudo fs inode on the device to link all mmaps
+         * to the same address space, allowing us to unmap all vmas
+         * associated to this device using unmap_mapping_range().
+         */
+        filep->f_mapping = device->inode->i_mapping;
+
         return 0;
 
 err_put_registration:

--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c

@@ -286,6 +286,13 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
          */
         filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
 
+        /*
+         * Use the pseudo fs inode on the device to link all mmaps
+         * to the same address space, allowing us to unmap all vmas
+         * associated to this device using unmap_mapping_range().
+         */
+        filep->f_mapping = device->inode->i_mapping;
+
         if (device->group->type == VFIO_NO_IOMMU)
                 dev_warn(device->dev, "vfio-noiommu device opened by user "
                          "(%s:%d)\n", current->comm, task_pid_nr(current));

--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c

@@ -1610,100 +1610,20 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_write);
 
-/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
-static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
+static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev)
 {
-        struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+        struct vfio_device *core_vdev = &vdev->vdev;
+        loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX);
+        loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX);
+        loff_t len = end - start;
 
-        /*
-         * Lock ordering:
-         * vma_lock is nested under mmap_lock for vm_ops callback paths.
-         * The memory_lock semaphore is used by both code paths calling
-         * into this function to zap vmas and the vm_ops.fault callback
-         * to protect the memory enable state of the device.
-         *
-         * When zapping vmas we need to maintain the mmap_lock => vma_lock
-         * ordering, which requires using vma_lock to walk vma_list to
-         * acquire an mm, then dropping vma_lock to get the mmap_lock and
-         * reacquiring vma_lock.  This logic is derived from similar
-         * requirements in uverbs_user_mmap_disassociate().
-         *
-         * mmap_lock must always be the top-level lock when it is taken.
-         * Therefore we can only hold the memory_lock write lock when
-         * vma_list is empty, as we'd need to take mmap_lock to clear
-         * entries.  vma_list can only be guaranteed empty when holding
-         * vma_lock, thus memory_lock is nested under vma_lock.
-         *
-         * This enables the vm_ops.fault callback to acquire vma_lock,
-         * followed by memory_lock read lock, while already holding
-         * mmap_lock without risk of deadlock.
-         */
-        while (1) {
-                struct mm_struct *mm = NULL;
-
-                if (try) {
-                        if (!mutex_trylock(&vdev->vma_lock))
-                                return 0;
-                } else {
-                        mutex_lock(&vdev->vma_lock);
-                }
-                while (!list_empty(&vdev->vma_list)) {
-                        mmap_vma = list_first_entry(&vdev->vma_list,
-                                                    struct vfio_pci_mmap_vma,
-                                                    vma_next);
-                        mm = mmap_vma->vma->vm_mm;
-                        if (mmget_not_zero(mm))
-                                break;
-
-                        list_del(&mmap_vma->vma_next);
-                        kfree(mmap_vma);
-                        mm = NULL;
-                }
-                if (!mm)
-                        return 1;
-                mutex_unlock(&vdev->vma_lock);
-
-                if (try) {
-                        if (!mmap_read_trylock(mm)) {
-                                mmput(mm);
-                                return 0;
-                        }
-                } else {
-                        mmap_read_lock(mm);
-                }
-                if (try) {
-                        if (!mutex_trylock(&vdev->vma_lock)) {
-                                mmap_read_unlock(mm);
-                                mmput(mm);
-                                return 0;
-                        }
-                } else {
-                        mutex_lock(&vdev->vma_lock);
-                }
-                list_for_each_entry_safe(mmap_vma, tmp,
-                                         &vdev->vma_list, vma_next) {
-                        struct vm_area_struct *vma = mmap_vma->vma;
-
-                        if (vma->vm_mm != mm)
-                                continue;
-
-                        list_del(&mmap_vma->vma_next);
-                        kfree(mmap_vma);
-
-                        zap_vma_ptes(vma, vma->vm_start,
-                                     vma->vm_end - vma->vm_start);
-                }
-                mutex_unlock(&vdev->vma_lock);
-                mmap_read_unlock(mm);
-                mmput(mm);
-        }
+        unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true);
 }
 
 void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
 {
-        vfio_pci_zap_and_vma_lock(vdev, false);
         down_write(&vdev->memory_lock);
-        mutex_unlock(&vdev->vma_lock);
+        vfio_pci_zap_bars(vdev);
 }
 
 u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
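
The single unmap_mapping_range() above can cover every BAR because vfio-pci
encodes the region index in the high bits of the device fd's file offset. A
worked sketch of the arithmetic, quoting the macros from
include/linux/vfio_pci_core.h and the vfio uapi from memory (verify against
the tree):

/*
 * #define VFIO_PCI_OFFSET_SHIFT   40
 * #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
 *
 * With VFIO_PCI_BAR0_REGION_INDEX == 0 and VFIO_PCI_ROM_REGION_INDEX == 6:
 *
 *   start = (u64)0 << 40 = 0x0
 *   end   = (u64)6 << 40 = 0x600_0000_0000
 *   len   = end - start
 *
 * i.e. the offsets of BAR0..BAR5 form one contiguous range in the device
 * fd's address space, so a single call zaps all BAR mappings while leaving
 * regions at or above the ROM offset untouched.
 */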
@@ -1725,99 +1645,56 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 c
         up_write(&vdev->memory_lock);
 }
 
-/* Caller holds vma_lock */
-static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
-                              struct vm_area_struct *vma)
-{
-        struct vfio_pci_mmap_vma *mmap_vma;
-
-        mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
-        if (!mmap_vma)
-                return -ENOMEM;
-
-        mmap_vma->vma = vma;
-        list_add(&mmap_vma->vma_next, &vdev->vma_list);
-
-        return 0;
-}
-
-/*
- * Zap mmaps on open so that we can fault them in on access and therefore
- * our vma_list only tracks mappings accessed since last zap.
- */
-static void vfio_pci_mmap_open(struct vm_area_struct *vma)
-{
-        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-}
-
-static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+static unsigned long vma_to_pfn(struct vm_area_struct *vma)
 {
         struct vfio_pci_core_device *vdev = vma->vm_private_data;
-        struct vfio_pci_mmap_vma *mmap_vma;
+        int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+        u64 pgoff;
 
-        mutex_lock(&vdev->vma_lock);
-        list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
-                if (mmap_vma->vma == vma) {
-                        list_del(&mmap_vma->vma_next);
-                        kfree(mmap_vma);
-                        break;
-                }
-        }
-        mutex_unlock(&vdev->vma_lock);
+        pgoff = vma->vm_pgoff &
+                ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+
+        return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
 }
 
 static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct vfio_pci_core_device *vdev = vma->vm_private_data;
-        struct vfio_pci_mmap_vma *mmap_vma;
-        vm_fault_t ret = VM_FAULT_NOPAGE;
+        unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
+        unsigned long addr = vma->vm_start;
+        vm_fault_t ret = VM_FAULT_SIGBUS;
+
+        pfn = vma_to_pfn(vma);
 
-        mutex_lock(&vdev->vma_lock);
         down_read(&vdev->memory_lock);
 
-        /*
-         * Memory region cannot be accessed if the low power feature is engaged
-         * or memory access is disabled.
-         */
-        if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
-                ret = VM_FAULT_SIGBUS;
-                goto up_out;
-        }
+        if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
+                goto out_unlock;
+
+        ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+        if (ret & VM_FAULT_ERROR)
+                goto out_unlock;
 
         /*
-         * We populate the whole vma on fault, so we need to test whether
-         * the vma has already been mapped, such as for concurrent faults
-         * to the same vma.  io_remap_pfn_range() will trigger a BUG_ON if
-         * we ask it to fill the same range again.
+         * Pre-fault the remainder of the vma, abort further insertions and
+         * suppress error if fault is encountered during pre-fault.
          */
-        list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
-                if (mmap_vma->vma == vma)
-                        goto up_out;
-        }
-
-        if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                               vma->vm_end - vma->vm_start,
-                               vma->vm_page_prot)) {
-                ret = VM_FAULT_SIGBUS;
-                zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-                goto up_out;
-        }
+        for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
+                if (addr == vmf->address)
+                        continue;
 
-        if (__vfio_pci_add_vma(vdev, vma)) {
-                ret = VM_FAULT_OOM;
-                zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-        }
+                if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+                        break;
+        }
 
-up_out:
+out_unlock:
         up_read(&vdev->memory_lock);
-        mutex_unlock(&vdev->vma_lock);
+
         return ret;
 }
 
 static const struct vm_operations_struct vfio_pci_mmap_ops = {
-        .open = vfio_pci_mmap_open,
-        .close = vfio_pci_mmap_close,
         .fault = vfio_pci_mmap_fault,
 };
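
The decode side in vma_to_pfn() is the inverse of that offset encoding. A
worked example with hypothetical numbers, assuming 4K pages and an mmap of
BAR2 at file offset VFIO_PCI_INDEX_TO_OFFSET(2):

/*
 *   vma->vm_pgoff = (2ULL << 40) >> 12           = 2 << 28
 *   index         = vm_pgoff >> (40 - 12)        = 2
 *   pgoff         = vm_pgoff & ((1U << 28) - 1)  = 0
 *
 * so vma_to_pfn() returns pci_resource_start(pdev, 2) >> PAGE_SHIFT, the
 * first pfn of BAR2.  Concurrent faults on the same address are benign:
 * vmf_insert_pfn() finds the pte already populated and returns
 * VM_FAULT_NOPAGE, which is why the old vma list and the
 * io_remap_pfn_range() BUG_ON dance are no longer needed.
 */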
@@ -1880,11 +1757,12 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
 
         vma->vm_private_data = vdev;
         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-        vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
         /*
-         * See remap_pfn_range(), called from vfio_pci_fault() but we can't
-         * change vm_flags within the fault handler.  Set them now.
+         * Set vm_flags now, they should not be changed in the fault handler.
+         * We want the same flags and page protection (decrypted above) as
+         * io_remap_pfn_range() would set.
          *
          * VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64,
          * allowing KVM stage 2 device mapping attributes to use Normal-NC
@@ -2202,8 +2080,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
         mutex_init(&vdev->ioeventfds_lock);
         INIT_LIST_HEAD(&vdev->dummy_resources_list);
         INIT_LIST_HEAD(&vdev->ioeventfds_list);
-        mutex_init(&vdev->vma_lock);
-        INIT_LIST_HEAD(&vdev->vma_list);
         INIT_LIST_HEAD(&vdev->sriov_pfs_item);
         init_rwsem(&vdev->memory_lock);
         xa_init(&vdev->ctx);
@@ -2219,7 +2095,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
         mutex_destroy(&vdev->igate);
         mutex_destroy(&vdev->ioeventfds_lock);
-        mutex_destroy(&vdev->vma_lock);
         kfree(vdev->region);
         kfree(vdev->pm_save);
 }
 
@@ -2497,26 +2372,15 @@ unwind:
         return ret;
 }
 
-/*
- * We need to get memory_lock for each device, but devices can share mmap_lock,
- * therefore we need to zap and hold the vma_lock for each device, and only then
- * get each memory_lock.
- */
 static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
                                       struct vfio_pci_group_info *groups,
                                       struct iommufd_ctx *iommufd_ctx)
 {
-        struct vfio_pci_core_device *cur_mem;
-        struct vfio_pci_core_device *cur_vma;
-        struct vfio_pci_core_device *cur;
+        struct vfio_pci_core_device *vdev;
         struct pci_dev *pdev;
-        bool is_mem = true;
         int ret;
 
         mutex_lock(&dev_set->lock);
-        cur_mem = list_first_entry(&dev_set->device_list,
-                                   struct vfio_pci_core_device,
-                                   vdev.dev_set_list);
 
         pdev = vfio_pci_dev_set_resettable(dev_set);
         if (!pdev) {
@@ -2533,7 +2397,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
         if (ret)
                 goto err_unlock;
 
-        list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
+        list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) {
                 bool owned;
 
                 /*
@@ -2557,38 +2421,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
                  * Otherwise, reset is not allowed.
                  */
                 if (iommufd_ctx) {
-                        int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
+                        int devid = vfio_iommufd_get_dev_id(&vdev->vdev,
                                                             iommufd_ctx);
 
                         owned = (devid > 0 || devid == -ENOENT);
                 } else {
-                        owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
+                        owned = vfio_dev_in_groups(&vdev->vdev, groups);
                 }
 
                 if (!owned) {
                         ret = -EINVAL;
-                        goto err_undo;
+                        break;
                 }
 
                 /*
-                 * Locking multiple devices is prone to deadlock, runaway and
-                 * unwind if we hit contention.
+                 * Take the memory write lock for each device and zap BAR
+                 * mappings to prevent the user accessing the device while in
+                 * reset.  Locking multiple devices is prone to deadlock,
+                 * runaway and unwind if we hit contention.
                  */
-                if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
+                if (!down_write_trylock(&vdev->memory_lock)) {
                         ret = -EBUSY;
-                        goto err_undo;
+                        break;
                 }
-        }
-        cur_vma = NULL;
 
-        list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
-                if (!down_write_trylock(&cur_mem->memory_lock)) {
-                        ret = -EBUSY;
-                        goto err_undo;
-                }
-                mutex_unlock(&cur_mem->vma_lock);
+                vfio_pci_zap_bars(vdev);
+        }
+
+        if (!list_entry_is_head(vdev,
+                                &dev_set->device_list, vdev.dev_set_list)) {
+                vdev = list_prev_entry(vdev, vdev.dev_set_list);
+                goto err_undo;
         }
-        cur_mem = NULL;
 
         /*
          * The pci_reset_bus() will reset all the devices in the bus.
@@ -2599,25 +2463,22 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
          * cause the PCI config space reset without restoring the original
          * state (saved locally in 'vdev->pm_save').
          */
-        list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
-                vfio_pci_set_power_state(cur, PCI_D0);
+        list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
+                vfio_pci_set_power_state(vdev, PCI_D0);
 
         ret = pci_reset_bus(pdev);
 
-err_undo:
-        list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
-                if (cur == cur_mem)
-                        is_mem = false;
-                if (cur == cur_vma)
-                        break;
-
-                if (is_mem)
-                        up_write(&cur->memory_lock);
-                else
-                        mutex_unlock(&cur->vma_lock);
-        }
+        vdev = list_last_entry(&dev_set->device_list,
+                               struct vfio_pci_core_device, vdev.dev_set_list);
 
-        list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
-                pm_runtime_put(&cur->pdev->dev);
+err_undo:
+        list_for_each_entry_from_reverse(vdev, &dev_set->device_list,
+                                         vdev.dev_set_list)
+                up_write(&vdev->memory_lock);
+
+        list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
+                pm_runtime_put(&vdev->pdev->dev);
 
 err_unlock:
         mutex_unlock(&dev_set->lock);
         return ret;
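
The unwind above hinges on where the list cursor stops: on contention the
loop breaks with vdev pointing at the device whose lock was not taken, steps
back one entry, and releases in reverse; after a complete pass vdev is reset
to the last entry so the same reverse walk drops every lock once the reset
is done. A minimal sketch of the idiom, using a hypothetical struct item:

#include <linux/list.h>
#include <linux/rwsem.h>

struct item {
        struct list_head node;
        struct rw_semaphore lock;
};

/* Take every lock or none; on contention unwind from the last one taken. */
static int lock_all(struct list_head *head)
{
        struct item *it;

        list_for_each_entry(it, head, node) {
                if (!down_write_trylock(&it->lock))
                        break;          /* 'it' holds no lock of its own */
        }

        if (list_entry_is_head(it, head, node))
                return 0;               /* full pass, every lock held */

        it = list_prev_entry(it, node); /* last successfully locked item */

        list_for_each_entry_from_reverse(it, head, node)
                up_write(&it->lock);

        return -EBUSY;
}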

--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c

@@ -22,8 +22,10 @@
 #include <linux/list.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
+#include <linux/mount.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
+#include <linux/pseudo_fs.h>
 #include <linux/rwsem.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -43,9 +45,13 @@
 #define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
 #define DRIVER_DESC     "VFIO - User Level meta-driver"
 
+#define VFIO_MAGIC 0x5646494f /* "VFIO" */
+
 static struct vfio {
         struct class                    *device_class;
         struct ida                      device_ida;
+        struct vfsmount                 *vfs_mount;
+        int                             fs_count;
 } vfio;
 
 #ifdef CONFIG_VFIO_NOIOMMU
@@ -186,6 +192,8 @@ static void vfio_device_release(struct device *dev)
         if (device->ops->release)
                 device->ops->release(device);
 
+        iput(device->inode);
+        simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
         kvfree(device);
 }
 
@@ -228,6 +236,34 @@ out_free:
 }
 EXPORT_SYMBOL_GPL(_vfio_alloc_device);
 
+static int vfio_fs_init_fs_context(struct fs_context *fc)
+{
+        return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type vfio_fs_type = {
+        .name = "vfio",
+        .owner = THIS_MODULE,
+        .init_fs_context = vfio_fs_init_fs_context,
+        .kill_sb = kill_anon_super,
+};
+
+static struct inode *vfio_fs_inode_new(void)
+{
+        struct inode *inode;
+        int ret;
+
+        ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count);
+        if (ret)
+                return ERR_PTR(ret);
+
+        inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb);
+        if (IS_ERR(inode))
+                simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+
+        return inode;
+}
+
 /*
  * Initialize a vfio_device so it can be registered to vfio core.
  */
@@ -246,6 +282,11 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
         init_completion(&device->comp);
         device->dev = dev;
         device->ops = ops;
+        device->inode = vfio_fs_inode_new();
+        if (IS_ERR(device->inode)) {
+                ret = PTR_ERR(device->inode);
+                goto out_inode;
+        }
 
         if (ops->init) {
                 ret = ops->init(device);
@@ -260,6 +301,9 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
         return 0;
 
 out_uninit:
+        iput(device->inode);
+        simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+out_inode:
         vfio_release_device_set(device);
         ida_free(&vfio.device_ida, device->index);
         return ret;
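
A design note on why the inode is allocated per device rather than shared:
unmap_mapping_range() operates on a single address_space, so with one shared
inode a zap on behalf of one device would tear down the BAR mappings of every
other open device as well. Illustrative only, for hypothetical devices a and b:

/*
 * unmap_mapping_range(a->inode->i_mapping, start, len, true) reaches only
 * vmas whose file was opened against a's inode; b's mappings hang off a
 * different i_mapping and stay intact.
 */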

--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h

@@ -64,6 +64,7 @@ struct vfio_device {
         struct completion comp;
         struct iommufd_access *iommufd_access;
         void (*put_kvm)(struct kvm *kvm);
+        struct inode *inode;
 #if IS_ENABLED(CONFIG_IOMMUFD)
         struct iommufd_device *iommufd_device;
         u8 iommufd_attached:1;

--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h

@@ -93,8 +93,6 @@ struct vfio_pci_core_device {
         struct list_head        sriov_pfs_item;
         struct vfio_pci_core_device *sriov_pf_core_dev;
         struct notifier_block   nb;
-        struct mutex            vma_lock;
-        struct list_head        vma_list;
         struct rw_semaphore     memory_lock;
 };