KVM: pfncache: check the need for invalidation under read lock first
When processing mmu_notifier invalidations for gpc caches, pre-check for
overlap with the invalidation event while holding gpc->lock for read, and
only take gpc->lock for write if the cache needs to be invalidated. Doing
a pre-check without taking gpc->lock for write avoids unnecessarily
contending the lock for unrelated invalidations, which is very beneficial
for caches that are heavily used (but rarely subjected to mmu_notifier
invalidations).

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-20-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
commit 9fa336e343
parent 615451d8cb
@@ -29,14 +29,30 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 
 	spin_lock(&kvm->gpc_lock);
 	list_for_each_entry(gpc, &kvm->gpc_list, list) {
-		write_lock_irq(&gpc->lock);
+		read_lock_irq(&gpc->lock);
 
 		/* Only a single page so no need to care about length */
 		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
 		    gpc->uhva >= start && gpc->uhva < end) {
-			gpc->valid = false;
+			read_unlock_irq(&gpc->lock);
+
+			/*
+			 * There is a small window here where the cache could
+			 * be modified, and invalidation would no longer be
+			 * necessary. Hence check again whether invalidation
+			 * is still necessary once the write lock has been
+			 * acquired.
+			 */
+
+			write_lock_irq(&gpc->lock);
+			if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+			    gpc->uhva >= start && gpc->uhva < end)
+				gpc->valid = false;
+			write_unlock_irq(&gpc->lock);
+			continue;
 		}
-		write_unlock_irq(&gpc->lock);
+
+		read_unlock_irq(&gpc->lock);
 	}
 	spin_unlock(&kvm->gpc_lock);
 }
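For illustration, here is a minimal userspace sketch of the same pattern,
using pthread rwlocks in place of the kernel's rwlock_t. The struct cache
type, the needs_invalidation() helper, and cache_invalidate_range() are
hypothetical names for this sketch, not kernel APIs; it only shows why the
overlap check must be repeated once the read lock has been dropped and the
write lock taken.

/* Minimal userspace sketch of the locking pattern; link with -lpthread. */
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for a gfn_to_pfn_cache: one cached page at 'uhva'. */
struct cache {
	pthread_rwlock_t lock;	/* init with PTHREAD_RWLOCK_INITIALIZER */
	bool valid;
	unsigned long uhva;
};

/* Caller must hold c->lock (read or write). */
static bool needs_invalidation(const struct cache *c, unsigned long start,
			       unsigned long end)
{
	/* Only a single page is cached, so there is no length to consider. */
	return c->valid && c->uhva >= start && c->uhva < end;
}

void cache_invalidate_range(struct cache *c, unsigned long start,
			    unsigned long end)
{
	/* Fast path: pre-check the overlap under the read lock only. */
	pthread_rwlock_rdlock(&c->lock);
	if (!needs_invalidation(c, start, end)) {
		pthread_rwlock_unlock(&c->lock);
		return;		/* unrelated invalidation: write lock never taken */
	}
	pthread_rwlock_unlock(&c->lock);

	/*
	 * Slow path: the cache may have been refreshed or invalidated in
	 * the window between dropping the read lock and taking the write
	 * lock, so the check must be repeated before invalidating.
	 */
	pthread_rwlock_wrlock(&c->lock);
	if (needs_invalidation(c, start, end))
		c->valid = false;
	pthread_rwlock_unlock(&c->lock);
}

The design trade-off is the same as in the patch above: unrelated
invalidations take only the read lock, so they never serialize against
readers of a heavily used cache, at the cost of a second overlap check in
the (rare) slow path where invalidation is actually needed.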