Commit 48987781 authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: introduce gfn_to_page_many_atomic() function

Introduce this function to get the pages of consecutive gfns in one call; it
reduces get_user_pages() overhead and is used by a later patch.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 887c08ac
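
The batching this enables, in a minimal sketch (illustrative only, not part of
the patch; prefetch_example() and PTE_PREFETCH_NUM are hypothetical names):
instead of one get_user_pages() round trip per gfn via gfn_to_page(), a caller
can pin a run of consecutive gfns with a single __get_user_pages_fast() walk.

/*
 * Illustrative sketch only -- not from this patch.  The names
 * prefetch_example and PTE_PREFETCH_NUM are made up.
 */
#define PTE_PREFETCH_NUM 8

static void prefetch_example(struct kvm *kvm, gfn_t gfn)
{
	struct page *pages[PTE_PREFETCH_NUM];
	int i, got;

	/* One batched fast-gup walk instead of PTE_PREFETCH_NUM lookups. */
	got = gfn_to_page_many_atomic(kvm, gfn, pages, PTE_PREFETCH_NUM);
	if (got <= 0)
		return;	/* bad hva, slot too small, or nothing pinned */

	for (i = 0; i < got; i++) {
		/* ... consume pages[i], e.g. install an spte ... */
		kvm_release_page_clean(pages[i]);
	}
}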
include/linux/kvm_host.h
@@ -289,6 +289,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+			    int nr_pages);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
...
virt/kvm/kvm_main.c
@@ -927,15 +927,25 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
-unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+				     gfn_t *nr_pages)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
+
+	if (nr_pages)
+		*nr_pages = slot->npages - (gfn - slot->base_gfn);
+
 	return gfn_to_hva_memslot(slot, gfn);
 }
+
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm, gfn, NULL);
+}
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
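
A worked example of the nr_pages out-parameter above (values hypothetical):
gfn_to_hva_many() reports how many pages remain in the memslot starting at
gfn, which is what lets gfn_to_page_many_atomic() in the next hunk refuse a
batch that would cross the slot boundary.

/*
 * Hypothetical numbers: slot->base_gfn = 0x1000, slot->npages = 512,
 * gfn = 0x10f0.  Then
 *	*nr_pages = 512 - (0x10f0 - 0x1000) = 512 - 240 = 272
 * i.e. 272 consecutive pages starting at gfn are still backed by
 * this memslot.
 */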
@@ -1010,6 +1020,23 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 	return hva_to_pfn(kvm, addr, false);
 }
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+			    int nr_pages)
+{
+	unsigned long addr;
+	gfn_t entry;
+
+	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	if (kvm_is_error_hva(addr))
+		return -1;
+
+	if (entry < nr_pages)
+		return 0;
+
+	return __get_user_pages_fast(addr, nr_pages, 1, pages);
+}
+EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
...
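
On the "atomic" in the name: __get_user_pages_fast() walks the page tables
without taking mmap_sem and never sleeps, so the new helper is usable inside
a non-sleeping critical section. A sketch of that constraint (the surrounding
caller and locking are assumed, not shown in this patch):

/* Sketch, assuming a caller that already holds kvm->mmu_lock. */
spin_lock(&kvm->mmu_lock);
ret = gfn_to_page_many_atomic(kvm, gfn, pages, nr);	/* never sleeps */
spin_unlock(&kvm->mmu_lock);

/*
 * By contrast, gfn_to_page() can fault memory in and sleep, so it
 * must not be called with mmu_lock held.
 */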