Commit 1d55934e authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvm-x86-svm-6.9' of https://github.com/kvm-x86/linux into HEAD

KVM SVM changes for 6.9:

 - Add support for systems that are configured with SEV and SEV-ES+ enabled,
   but have all ASIDs assigned to SEV-ES+ guests, which effectively makes SEV
   unusable.  Cleanup ASID handling to make supporting this scenario less
   brittle/ugly.

 - Return -EINVAL instead of -EBUSY if userspace attempts to invoke
   KVM_SEV{,ES}_INIT on an SEV+ guest.  The operation is simply invalid, and
   not related to resource contention in any way.
parents bf3a69c6 fdd58834
...@@ -84,9 +84,10 @@ struct enc_region { ...@@ -84,9 +84,10 @@ struct enc_region {
}; };
/* Called with the sev_bitmap_lock held, or on shutdown */ /* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(int min_asid, int max_asid) static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
{ {
int ret, asid, error = 0; int ret, error = 0;
unsigned int asid;
/* Check if there are any ASIDs to reclaim before performing a flush */ /* Check if there are any ASIDs to reclaim before performing a flush */
asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid); asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
...@@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm) ...@@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
} }
/* Must be called with the sev_bitmap_lock held */ /* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid) static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{ {
if (sev_flush_asids(min_asid, max_asid)) if (sev_flush_asids(min_asid, max_asid))
return false; return false;
...@@ -143,8 +144,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) ...@@ -143,8 +144,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
static int sev_asid_new(struct kvm_sev_info *sev) static int sev_asid_new(struct kvm_sev_info *sev)
{ {
int asid, min_asid, max_asid, ret; /*
* SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
* SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
* Note: min ASID can end up larger than the max if basic SEV support is
* effectively disabled by disallowing use of ASIDs for SEV guests.
*/
unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
unsigned int asid;
bool retry = true; bool retry = true;
int ret;
if (min_asid > max_asid)
return -ENOTTY;
WARN_ON(sev->misc_cg); WARN_ON(sev->misc_cg);
sev->misc_cg = get_current_misc_cg(); sev->misc_cg = get_current_misc_cg();
...@@ -157,12 +170,6 @@ static int sev_asid_new(struct kvm_sev_info *sev) ...@@ -157,12 +170,6 @@ static int sev_asid_new(struct kvm_sev_info *sev)
mutex_lock(&sev_bitmap_lock); mutex_lock(&sev_bitmap_lock);
/*
* SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
* SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
*/
min_asid = sev->es_active ? 1 : min_sev_asid;
max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again: again:
asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
if (asid > max_asid) { if (asid > max_asid) {
...@@ -179,7 +186,8 @@ static int sev_asid_new(struct kvm_sev_info *sev) ...@@ -179,7 +186,8 @@ static int sev_asid_new(struct kvm_sev_info *sev)
mutex_unlock(&sev_bitmap_lock); mutex_unlock(&sev_bitmap_lock);
return asid; sev->asid = asid;
return 0;
e_uncharge: e_uncharge:
sev_misc_cg_uncharge(sev); sev_misc_cg_uncharge(sev);
put_misc_cg(sev->misc_cg); put_misc_cg(sev->misc_cg);
...@@ -187,7 +195,7 @@ static int sev_asid_new(struct kvm_sev_info *sev) ...@@ -187,7 +195,7 @@ static int sev_asid_new(struct kvm_sev_info *sev)
return ret; return ret;
} }
static int sev_get_asid(struct kvm *kvm) static unsigned int sev_get_asid(struct kvm *kvm)
{ {
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
...@@ -247,21 +255,19 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) ...@@ -247,21 +255,19 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{ {
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_platform_init_args init_args = {0}; struct sev_platform_init_args init_args = {0};
int asid, ret; int ret;
if (kvm->created_vcpus) if (kvm->created_vcpus)
return -EINVAL; return -EINVAL;
ret = -EBUSY;
if (unlikely(sev->active)) if (unlikely(sev->active))
return ret; return -EINVAL;
sev->active = true; sev->active = true;
sev->es_active = argp->id == KVM_SEV_ES_INIT; sev->es_active = argp->id == KVM_SEV_ES_INIT;
asid = sev_asid_new(sev); ret = sev_asid_new(sev);
if (asid < 0) if (ret)
goto e_no_asid; goto e_no_asid;
sev->asid = asid;
init_args.probe = false; init_args.probe = false;
ret = sev_platform_init(&init_args); ret = sev_platform_init(&init_args);
...@@ -287,8 +293,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) ...@@ -287,8 +293,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{ {
unsigned int asid = sev_get_asid(kvm);
struct sev_data_activate activate; struct sev_data_activate activate;
int asid = sev_get_asid(kvm);
int ret; int ret;
/* activate ASID on the given handle */ /* activate ASID on the given handle */
...@@ -2240,8 +2246,10 @@ void __init sev_hardware_setup(void) ...@@ -2240,8 +2246,10 @@ void __init sev_hardware_setup(void)
goto out; goto out;
} }
sev_asid_count = max_sev_asid - min_sev_asid + 1; if (min_sev_asid <= max_sev_asid) {
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); sev_asid_count = max_sev_asid - min_sev_asid + 1;
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
}
sev_supported = true; sev_supported = true;
/* SEV-ES support requested? */ /* SEV-ES support requested? */
...@@ -2272,7 +2280,9 @@ void __init sev_hardware_setup(void) ...@@ -2272,7 +2280,9 @@ void __init sev_hardware_setup(void)
out: out:
if (boot_cpu_has(X86_FEATURE_SEV)) if (boot_cpu_has(X86_FEATURE_SEV))
pr_info("SEV %s (ASIDs %u - %u)\n", pr_info("SEV %s (ASIDs %u - %u)\n",
sev_supported ? "enabled" : "disabled", sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
"unusable" :
"disabled",
min_sev_asid, max_sev_asid); min_sev_asid, max_sev_asid);
if (boot_cpu_has(X86_FEATURE_SEV_ES)) if (boot_cpu_has(X86_FEATURE_SEV_ES))
pr_info("SEV-ES %s (ASIDs %u - %u)\n", pr_info("SEV-ES %s (ASIDs %u - %u)\n",
...@@ -2320,7 +2330,7 @@ int sev_cpu_init(struct svm_cpu_data *sd) ...@@ -2320,7 +2330,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
*/ */
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va) static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{ {
int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid; unsigned int asid = sev_get_asid(vcpu->kvm);
/* /*
* Note! The address must be a kernel address, as regular page walk * Note! The address must be a kernel address, as regular page walk
...@@ -2638,7 +2648,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm) ...@@ -2638,7 +2648,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
void pre_sev_run(struct vcpu_svm *svm, int cpu) void pre_sev_run(struct vcpu_svm *svm, int cpu)
{ {
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
int asid = sev_get_asid(svm->vcpu.kvm); unsigned int asid = sev_get_asid(svm->vcpu.kvm);
/* Assign the asid allocated with this SEV guest */ /* Assign the asid allocated with this SEV guest */
svm->asid = asid; svm->asid = asid;
......
...@@ -735,13 +735,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit, ...@@ -735,13 +735,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
* Tracepoint for nested #vmexit because of interrupt pending * Tracepoint for nested #vmexit because of interrupt pending
*/ */
TRACE_EVENT(kvm_invlpga, TRACE_EVENT(kvm_invlpga,
TP_PROTO(__u64 rip, int asid, u64 address), TP_PROTO(__u64 rip, unsigned int asid, u64 address),
TP_ARGS(rip, asid, address), TP_ARGS(rip, asid, address),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( __u64, rip ) __field( __u64, rip )
__field( int, asid ) __field( unsigned int, asid )
__field( __u64, address ) __field( __u64, address )
), ),
TP_fast_assign( TP_fast_assign(
...@@ -750,7 +750,7 @@ TRACE_EVENT(kvm_invlpga, ...@@ -750,7 +750,7 @@ TRACE_EVENT(kvm_invlpga,
__entry->address = address; __entry->address = address;
), ),
TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
__entry->rip, __entry->asid, __entry->address) __entry->rip, __entry->asid, __entry->address)
); );
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment