Commit bb2baeb2 authored by Mingwei Zhang, committed by Paolo Bonzini

KVM: SVM: improve the code readability for ASID management

KVM SEV code uses bitmaps to manage ASID states. ASID 0 is always skipped
because it is never used by a VM. Thus, in the existing code, an ASID value and
its bitmap position always have an 'offset-by-1' relationship.

SEV and SEV-ES share the same ASID space, so KVM uses a dynamic range
[min_asid, max_asid] to handle SEV and SEV-ES ASIDs separately.

The existing code mixes up the ASID value and its bitmap position by using the
same variable, 'min_asid', for both.

Fix the min_asid usage so that it is consistent with its name, and allocate an
extra bitmap slot for ASID 0 so that every ASID value is equal to its bitmap
position. Add a comment on the ASID bitmap allocation to clarify the size
change.
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Marc Orr <marcorr@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Alper Gun <alpergun@google.com>
Cc: Dionna Glaze <dionnaglaze@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Vipin Sharma <vipinsh@google.com>
Cc: Peter Gonda <pgonda@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Message-Id: <20210802180903.159381-1-mizhang@google.com>
[Fix up sev_asid_free to also index by ASID, as suggested by Sean
 Christopherson, and use nr_asids in sev_cpu_init. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 179c6c27
@@ -64,6 +64,7 @@ static DEFINE_MUTEX(sev_bitmap_lock);
 unsigned int max_sev_asid;
 static unsigned int min_sev_asid;
 static unsigned long sev_me_mask;
+static unsigned int nr_asids;
 static unsigned long *sev_asid_bitmap;
 static unsigned long *sev_reclaim_asid_bitmap;
 
@@ -78,11 +79,11 @@ struct enc_region {
 /* Called with the sev_bitmap_lock held, or on shutdown */
 static int sev_flush_asids(int min_asid, int max_asid)
 {
-	int ret, pos, error = 0;
+	int ret, asid, error = 0;
 
 	/* Check if there are any ASIDs to reclaim before performing a flush */
-	pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid);
-	if (pos >= max_asid)
+	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
+	if (asid > max_asid)
 		return -EBUSY;
 
 	/*
@@ -115,15 +116,15 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
 	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
 	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
-		   max_sev_asid);
-	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
+		   nr_asids);
+	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
 
 	return true;
 }
 
 static int sev_asid_new(struct kvm_sev_info *sev)
 {
-	int pos, min_asid, max_asid, ret;
+	int asid, min_asid, max_asid, ret;
 	bool retry = true;
 	enum misc_res_type type;
 
@@ -143,11 +144,11 @@ static int sev_asid_new(struct kvm_sev_info *sev)
 	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
 	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
 	 */
-	min_asid = sev->es_active ? 0 : min_sev_asid - 1;
+	min_asid = sev->es_active ? 1 : min_sev_asid;
 	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
 again:
-	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
-	if (pos >= max_asid) {
+	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+	if (asid > max_asid) {
 		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
 			retry = false;
 			goto again;
@@ -157,11 +158,11 @@ static int sev_asid_new(struct kvm_sev_info *sev)
 		goto e_uncharge;
 	}
 
-	__set_bit(pos, sev_asid_bitmap);
+	__set_bit(asid, sev_asid_bitmap);
 
 	mutex_unlock(&sev_bitmap_lock);
 
-	return pos + 1;
+	return asid;
 
 e_uncharge:
 	misc_cg_uncharge(type, sev->misc_cg, 1);
 	put_misc_cg(sev->misc_cg);
@@ -179,13 +180,12 @@ static int sev_get_asid(struct kvm *kvm)
 static void sev_asid_free(struct kvm_sev_info *sev)
 {
 	struct svm_cpu_data *sd;
-	int cpu, pos;
+	int cpu;
 	enum misc_res_type type;
 
 	mutex_lock(&sev_bitmap_lock);
 
-	pos = sev->asid - 1;
-	__set_bit(pos, sev_reclaim_asid_bitmap);
+	__set_bit(sev->asid, sev_reclaim_asid_bitmap);
 
 	for_each_possible_cpu(cpu) {
 		sd = per_cpu(svm_data, cpu);
@@ -1857,12 +1857,17 @@ void __init sev_hardware_setup(void)
 	min_sev_asid = edx;
 	sev_me_mask = 1UL << (ebx & 0x3f);
 
-	/* Initialize SEV ASID bitmaps */
-	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+	/*
+	 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
+	 * even though it's never used, so that the bitmap is indexed by the
+	 * actual ASID.
+	 */
+	nr_asids = max_sev_asid + 1;
+	sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
 	if (!sev_asid_bitmap)
 		goto out;
 
-	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+	sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
 	if (!sev_reclaim_asid_bitmap) {
 		bitmap_free(sev_asid_bitmap);
 		sev_asid_bitmap = NULL;
@@ -1907,7 +1912,7 @@ void sev_hardware_teardown(void)
 		return;
 
 	/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
-	sev_flush_asids(0, max_sev_asid);
+	sev_flush_asids(1, max_sev_asid);
 
 	bitmap_free(sev_asid_bitmap);
 	bitmap_free(sev_reclaim_asid_bitmap);
@@ -1921,7 +1926,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
 	if (!sev_enabled)
 		return 0;
 
-	sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
+	sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
 	if (!sd->sev_vmcbs)
 		return -ENOMEM;