Commit 2aa9c199 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Move x86's version of struct kvm_mmu_memory_cache to common code

Move x86's 'struct kvm_mmu_memory_cache' to common code in anticipation
of moving the entire x86 implementation to common KVM and reusing
it for arm64 and MIPS.  Add a new architecture-specific asm/kvm_types.h
to control the existence and parameters of the struct.  The new header
is needed to avoid a chicken-and-egg problem with asm/kvm_host.h, as all
architectures define instances of the struct in their vCPU structs.

Add an asm-generic version of kvm_types.h to avoid having empty files on
PPC and s390 in the long term, and for arm64 and mips in the short term.
Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-15-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 94ce87ef
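
To make the chicken-and-egg constraint concrete, the sketch below condenses the
header layering this patch creates.  It is an illustration, not a quote of the
real files, and the vCPU field name is a placeholder rather than x86's actual one.

/*
 * include/linux/kvm_types.h cannot include asm/kvm_host.h to learn the
 * cache size, because asm/kvm_host.h (via linux/kvm_host.h) needs these
 * types for the caches embedded in its vCPU struct.  The tiny
 * asm/kvm_types.h breaks the cycle: it carries only the opt-in knob.
 */

/* arch/<arch>/include/asm/kvm_types.h: opt in and size the caches. */
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40

/* include/linux/kvm_types.h: the struct exists only for opted-in arches. */
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
struct kvm_mmu_memory_cache {
	int nobjs;
	gfp_t gfp_zero;
	struct kmem_cache *kmem_cache;
	void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
};
#endif

/* arch/<arch>/include/asm/kvm_host.h: embeds the caches in its vCPU. */
struct kvm_vcpu_arch {
	/* ... other fields ... */
	struct kvm_mmu_memory_cache mmu_page_cache;	/* placeholder name */
};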
arch/arm64/include/asm/Kbuild
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 generic-y += early_ioremap.h
+generic-y += kvm_types.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += qrwlock.h

arch/mips/include/asm/Kbuild
@@ -5,6 +5,7 @@ generated-y += syscall_table_64_n32.h
 generated-y += syscall_table_64_n64.h
 generated-y += syscall_table_64_o32.h
 generic-y += export.h
+generic-y += kvm_types.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += parport.h

arch/powerpc/include/asm/Kbuild
@@ -4,6 +4,7 @@ generated-y += syscall_table_64.h
 generated-y += syscall_table_c32.h
 generated-y += syscall_table_spu.h
 generic-y += export.h
+generic-y += kvm_types.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += vtime.h

arch/s390/include/asm/Kbuild
@@ -6,5 +6,6 @@ generated-y += unistd_nr.h
 generic-y += asm-offsets.h
 generic-y += export.h
+generic-y += kvm_types.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h

arch/x86/include/asm/kvm_host.h
@@ -193,8 +193,6 @@ struct x86_exception;
 enum x86_intercept;
 enum x86_intercept_stage;
 
-#define KVM_NR_MEM_OBJS 40
-
 #define KVM_NR_DB_REGS 4
 
 #define DR6_BD (1 << 13)
@@ -245,17 +243,6 @@ enum x86_intercept_stage;
 
 struct kvm_kernel_irq_routing_entry;
 
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-	int nobjs;
-	gfp_t gfp_zero;
-	struct kmem_cache *kmem_cache;
-	void *objects[KVM_NR_MEM_OBJS];
-};
-
 /*
  * the pages used as guest page table on soft mmu are tracked by
  * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used

arch/x86/include/asm/kvm_types.h (new file)
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_KVM_TYPES_H
+#define _ASM_X86_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+
+#endif /* _ASM_X86_KVM_TYPES_H */

include/asm-generic/kvm_types.h (new file)
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KVM_TYPES_H
+#define _ASM_GENERIC_KVM_TYPES_H
+
+#endif

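As a hypothetical illustration of the opt-in path the commit message
anticipates, an architecture such as arm64 would later replace its
'generic-y += kvm_types.h' Kbuild line with a real header along these
lines (not part of this commit; the value 40 is simply borrowed from x86):

/* arch/arm64/include/asm/kvm_types.h -- hypothetical, not in this commit */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM64_KVM_TYPES_H
#define _ASM_ARM64_KVM_TYPES_H

/* Cache depth is an arch decision; 40 mirrors x86's choice. */
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40

#endif /* _ASM_ARM64_KVM_TYPES_H */

Dropping the generic-y line makes Kbuild pick up the arch's own header
instead of the asm-generic stub.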
include/linux/kvm_types.h
@@ -20,6 +20,8 @@ enum kvm_mr_change;
 
 #include <linux/types.h>
 
+#include <asm/kvm_types.h>
+
 /*
  * Address types:
  *
@@ -58,4 +60,21 @@ struct gfn_to_pfn_cache {
 	bool dirty;
 };
 
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+/*
+ * Memory caches are used to preallocate memory ahead of various MMU flows,
+ * e.g. page fault handlers.  Gracefully handling allocation failures deep in
+ * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
+ * holding MMU locks.  Note, these caches act more like prefetch buffers than
+ * classical caches, i.e. objects are not returned to the cache on being freed.
+ */
+struct kvm_mmu_memory_cache {
+	int nobjs;
+	gfp_t gfp_zero;
+	struct kmem_cache *kmem_cache;
+	void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
+};
+#endif
+
 #endif /* __KVM_TYPES_H__ */
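
The comment above describes a prefetch-buffer discipline: fill the cache in a
context where sleeping is allowed, then draw from it where allocation is
forbidden.  A minimal sketch of that pattern, modeled loosely on x86's mmu.c
helpers of this era (names and details here are illustrative, not the moved
code verbatim):

/* Fill the cache to at least @min objects; may sleep.  Illustrative only. */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;

	while (mc->nobjs < min) {
		if (mc->kmem_cache)
			obj = kmem_cache_zalloc(mc->kmem_cache, GFP_KERNEL_ACCOUNT);
		else
			obj = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | mc->gfp_zero);
		if (!obj)
			return -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Called under mmu_lock; cannot fail if the cache was topped up first. */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);
	return mc->objects[--mc->nobjs];
}

A page fault handler tops up every cache it might need before taking mmu_lock,
so the walk itself never has to cope with allocation failure.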