Commit 357a18ad authored by David Woodhouse, committed by Paolo Bonzini

KVM: Kill kvm_map_gfn() / kvm_unmap_gfn() and gfn_to_pfn_cache

In commit 7e2175eb ("KVM: x86: Fix recording of guest steal time /
preempted status") I removed the only user of these functions because
it was basically impossible to use them safely.

There are two stages to the GFN->PFN mapping; first through the KVM
memslots to a userspace HVA and then through the page tables to
translate that HVA to an underlying PFN. Invalidations of the former
were being handled correctly, but no attempt was made to use the MMU
notifiers to invalidate the cache when the HVA->PFN mapping changed.
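Roughly, the two stages look like this (an illustrative sketch with a
made-up helper name, not the actual kvm_main.c code; assumes
<linux/kvm_host.h> and <linux/mm.h>):

static kvm_pfn_t example_gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long hva;
	struct page *page;

	/*
	 * Stage 1: GFN -> HVA through the memslots. Invalidated when the
	 * memslot generation changes, which the old cache did check for.
	 */
	hva = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(hva))
		return KVM_PFN_NOSLOT;

	/*
	 * Stage 2: HVA -> PFN through the userspace page tables. This can
	 * change at any time (fork, KSM, swap, ...) and only the MMU
	 * notifiers can report it -- which the old cache never hooked.
	 * A real caller must also drop the pin, e.g. via kvm_release_pfn().
	 */
	if (get_user_pages_unlocked(hva, 1, &page, FOLL_WRITE) != 1)
		return KVM_PFN_ERR_FAULT;

	return page_to_pfn(page);
}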

As a prelude to reinventing the gfn_to_pfn_cache with more usable
semantics, rip it out entirely and untangle the implementation of
the unsafe kvm_vcpu_map()/kvm_vcpu_unmap() functions from it.
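For context, a kvm_vcpu_map() user keeps the simple map/access/unmap
shape; roughly like this (illustrative only, hypothetical helper name,
error handling trimmed):

static int example_write_guest_page(struct kvm_vcpu *vcpu, gpa_t gpa,
				    const void *data, unsigned int len)
{
	struct kvm_host_map map;

	/* Pin the page backing @gpa and map it into the kernel. */
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
		return -EFAULT;

	memcpy(map.hva + offset_in_page(gpa), data, len);

	/* 'true' marks the page dirty before the pin is dropped. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}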

All current users of kvm_vcpu_map() also look broken right now, and
will be dealt with separately. They broadly fall into two classes:

* Those which map, access the data and immediately unmap. This is
  mostly gratuitous and could just as well use the existing user
  HVA, and could probably benefit from a gfn_to_hva_cache as they
  do so (a rough sketch of such a conversion follows this list).

* Those which keep the mapping around for a longer time, perhaps
  even using the PFN directly from the guest. These will need to
  be converted to the new gfn_to_pfn_cache and then kvm_vcpu_map()
  can be removed too.
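
As an illustration of the first class, a map/access/unmap user could be
converted along these lines (a hedged sketch with a made-up helper name;
kvm_gfn_to_hva_cache_init() would normally run once at setup or after an
invalidation, not on every write):

static int example_update_shared_page(struct kvm_vcpu *vcpu,
				      struct gfn_to_hva_cache *ghc,
				      gpa_t gpa, void *data,
				      unsigned long len)
{
	/* (Re)validate the GPA -> HVA translation against the memslots. */
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, len))
		return -EFAULT;

	/* Copy through the cached userspace HVA; no page pinning involved. */
	return kvm_write_guest_cached(vcpu->kvm, ghc, data, len);
}
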
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20211115165030.7422-8-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cee66664
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -866,7 +866,7 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -942,12 +942,8 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -53,13 +53,6 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
-struct gfn_to_pfn_cache {
-	u64 generation;
-	gfn_t gfn;
-	kvm_pfn_t pfn;
-	bool dirty;
-};
-
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 /*
  * Memory caches are used to preallocate memory ahead of various MMU flows,
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2548,72 +2548,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
 {
 	if (pfn == 0)
 		return;
 
-	if (cache)
-		cache->pfn = cache->gfn = 0;
-
 	if (dirty)
 		kvm_release_pfn_dirty(pfn);
 	else
 		kvm_release_pfn_clean(pfn);
 }
 
-static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
-				 struct gfn_to_pfn_cache *cache, u64 gen)
-{
-	kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
-	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
-	cache->gfn = gfn;
-	cache->dirty = false;
-	cache->generation = gen;
-}
-
-static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map,
-			 struct gfn_to_pfn_cache *cache,
-			 bool atomic)
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
-	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	if (cache) {
-		if (!cache->pfn || cache->gfn != gfn ||
-			cache->generation != gen) {
-			if (atomic)
-				return -EAGAIN;
-			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
-		}
-		pfn = cache->pfn;
-	} else {
-		if (atomic)
-			return -EAGAIN;
-		pfn = gfn_to_pfn_memslot(slot, gfn);
-	}
+	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		if (atomic)
-			hva = kmap_atomic(page);
-		else
-			hva = kmap(page);
+		hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else if (!atomic) {
-		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 	} else {
-		return -EINVAL;
+		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 #endif
 	}
 
@@ -2627,27 +2591,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 
 	return 0;
 }
-
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic)
-{
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
-			cache, atomic);
-}
-EXPORT_SYMBOL_GPL(kvm_map_gfn);
-
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-{
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
-			NULL, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-static void __kvm_unmap_gfn(struct kvm *kvm,
-			struct kvm_memory_slot *memslot,
-			struct kvm_host_map *map,
-			struct gfn_to_pfn_cache *cache,
-			bool dirty, bool atomic)
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
 	if (!map)
 		return;
@@ -2655,45 +2601,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE) {
-		if (atomic)
-			kunmap_atomic(map->hva);
-		else
-			kunmap(map->page);
-	}
+	if (map->page != KVM_UNMAPPED_PAGE)
+		kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
-	else if (!atomic)
-		memunmap(map->hva);
 	else
-		WARN_ONCE(1, "Unexpected unmapping in atomic context");
+		memunmap(map->hva);
 #endif
 
 	if (dirty)
-		mark_page_dirty_in_slot(kvm, memslot, map->gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
 
-	if (cache)
-		cache->dirty |= dirty;
-	else
-		kvm_release_pfn(map->pfn, dirty, NULL);
+	kvm_release_pfn(map->pfn, dirty);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
-
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
-{
-	__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
-			cache, dirty, atomic);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-{
-	__kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
-			map, NULL, dirty, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)