Commit 564429a6 authored by Paolo Bonzini's avatar Paolo Bonzini

KVM: rename CONFIG_HAVE_KVM_GMEM_* to CONFIG_HAVE_KVM_ARCH_GMEM_*

Add "ARCH" to the symbols; shortly, the "prepare" phase will include both
the arch-independent step to clear out contents left in the page by the
host, and the arch-dependent step enabled by CONFIG_HAVE_KVM_GMEM_PREPARE.
For consistency do the same for CONFIG_HAVE_KVM_GMEM_INVALIDATE as well.
Reviewed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7fbdda31
...@@ -141,8 +141,8 @@ config KVM_AMD_SEV ...@@ -141,8 +141,8 @@ config KVM_AMD_SEV
depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m) depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
select ARCH_HAS_CC_PLATFORM select ARCH_HAS_CC_PLATFORM
select KVM_GENERIC_PRIVATE_MEM select KVM_GENERIC_PRIVATE_MEM
select HAVE_KVM_GMEM_PREPARE select HAVE_KVM_ARCH_GMEM_PREPARE
select HAVE_KVM_GMEM_INVALIDATE select HAVE_KVM_ARCH_GMEM_INVALIDATE
help help
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
with Encrypted State (SEV-ES) on AMD processors. with Encrypted State (SEV-ES) on AMD processors.
......
...@@ -13644,7 +13644,7 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) ...@@ -13644,7 +13644,7 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
} }
EXPORT_SYMBOL_GPL(kvm_arch_no_poll); EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
bool kvm_arch_gmem_prepare_needed(struct kvm *kvm) bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
{ {
return kvm->arch.vm_type == KVM_X86_SNP_VM; return kvm->arch.vm_type == KVM_X86_SNP_VM;
...@@ -13656,7 +13656,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord ...@@ -13656,7 +13656,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
} }
#endif #endif
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{ {
kvm_x86_call(gmem_invalidate)(start, end); kvm_x86_call(gmem_invalidate)(start, end);
......
...@@ -2445,7 +2445,7 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm, ...@@ -2445,7 +2445,7 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
} }
#endif /* CONFIG_KVM_PRIVATE_MEM */ #endif /* CONFIG_KVM_PRIVATE_MEM */
#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order); int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
bool kvm_arch_gmem_prepare_needed(struct kvm *kvm); bool kvm_arch_gmem_prepare_needed(struct kvm *kvm);
#endif #endif
...@@ -2477,7 +2477,7 @@ typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, ...@@ -2477,7 +2477,7 @@ typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages, long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque); kvm_gmem_populate_cb post_populate, void *opaque);
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end); void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
#endif #endif
......
...@@ -113,10 +113,10 @@ config KVM_GENERIC_PRIVATE_MEM ...@@ -113,10 +113,10 @@ config KVM_GENERIC_PRIVATE_MEM
select KVM_PRIVATE_MEM select KVM_PRIVATE_MEM
bool bool
config HAVE_KVM_GMEM_PREPARE config HAVE_KVM_ARCH_GMEM_PREPARE
bool bool
depends on KVM_PRIVATE_MEM depends on KVM_PRIVATE_MEM
config HAVE_KVM_GMEM_INVALIDATE config HAVE_KVM_ARCH_GMEM_INVALIDATE
bool bool
depends on KVM_PRIVATE_MEM depends on KVM_PRIVATE_MEM
...@@ -27,7 +27,7 @@ static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index) ...@@ -27,7 +27,7 @@ static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct folio *folio) static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct folio *folio)
{ {
#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
struct list_head *gmem_list = &inode->i_mapping->i_private_list; struct list_head *gmem_list = &inode->i_mapping->i_private_list;
struct kvm_gmem *gmem; struct kvm_gmem *gmem;
...@@ -353,7 +353,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol ...@@ -353,7 +353,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
return MF_DELAYED; return MF_DELAYED;
} }
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
static void kvm_gmem_free_folio(struct folio *folio) static void kvm_gmem_free_folio(struct folio *folio)
{ {
struct page *page = folio_page(folio, 0); struct page *page = folio_page(folio, 0);
...@@ -368,7 +368,7 @@ static const struct address_space_operations kvm_gmem_aops = { ...@@ -368,7 +368,7 @@ static const struct address_space_operations kvm_gmem_aops = {
.dirty_folio = noop_dirty_folio, .dirty_folio = noop_dirty_folio,
.migrate_folio = kvm_gmem_migrate_folio, .migrate_folio = kvm_gmem_migrate_folio,
.error_remove_folio = kvm_gmem_error_folio, .error_remove_folio = kvm_gmem_error_folio,
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
.free_folio = kvm_gmem_free_folio, .free_folio = kvm_gmem_free_folio,
#endif #endif
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment