Commit e9f12b5f authored by Andrew Jones, committed by Anup Patel

RISC-V: KVM: Implement SBI STA extension

Add a select SCHED_INFO to the KVM config in order to get run_delay
info. Then implement SBI STA's set-steal-time-shmem function and
kvm_riscv_vcpu_record_steal_time() to provide the steal-time info
to guests.
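
For reference, a guest registers one 64-byte, 64-byte-aligned shared memory
area per hart before steal-time data is published into it. The snippet below
is only an illustration of that guest-side call, not part of this patch (the
function and variable names are made up for the example); it assumes the SBI
v2.0 STA extension (EID 0x535441 "STA", FID 0 for STEAL_TIME_SET_SHMEM) and
the SBI_EXT_STA* and struct sbi_sta_struct definitions expected in asm/sbi.h:

#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <asm/page.h>
#include <asm/sbi.h>

/* One 64-byte, 64-byte-aligned steal-time area per hart. */
static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);

/* Register this hart's area with the SBI implementation (here: KVM). */
static int sta_set_shmem(void)
{
	phys_addr_t pa = __pa(this_cpu_ptr(&steal_time));
	unsigned long lo = (unsigned long)pa;
	unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? upper_32_bits((u64)pa) : 0;
	struct sbiret ret;

	/* a0/a1 carry the physical address, a2 the flags (must be zero). */
	ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM,
			lo, hi, 0, 0, 0, 0);

	return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
}
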
Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
parent f61ce890
@@ -32,6 +32,7 @@ config KVM
	select KVM_XFER_TO_GUEST_WORK
	select MMU_NOTIFIER
	select PREEMPT_NOTIFIERS
	select SCHED_INFO
	help
	  Support hosting virtualized guest machines.
......
@@ -6,9 +6,15 @@
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sizes.h>

#include <asm/bug.h>
#include <asm/current.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/page.h>
#include <asm/sbi.h>
#include <asm/uaccess.h>

void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
@@ -19,14 +25,100 @@ void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
	gpa_t shmem = vcpu->arch.sta.shmem;
	u64 last_steal = vcpu->arch.sta.last_steal;
	u32 *sequence_ptr, sequence;
	u64 *steal_ptr, steal;
	unsigned long hva;
	gfn_t gfn;

	if (shmem == INVALID_GPA)
		return;

	/*
	 * shmem is 64-byte aligned (see the enforcement in
	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
	 * is 64 bytes, so we know all its offsets are in the same page.
	 */
	gfn = shmem >> PAGE_SHIFT;
	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

	if (WARN_ON(kvm_is_error_hva(hva))) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return;
	}

	sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
			       offsetof(struct sbi_sta_struct, sequence));
	steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
			    offsetof(struct sbi_sta_struct, steal));

	/*
	 * Bump the sequence counter to an odd value before updating the
	 * steal field so the guest can detect an in-progress update.
	 */
	if (WARN_ON(get_user(sequence, sequence_ptr)))
		return;

	sequence = le32_to_cpu(sequence);
	sequence += 1;

	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
		return;

	/* Accumulate the run_delay observed since the last update. */
	if (!WARN_ON(get_user(steal, steal_ptr))) {
		steal = le64_to_cpu(steal);
		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.sta.last_steal - last_steal;
		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
	}

	/* Bump the sequence counter back to an even value: update done. */
	sequence += 1;
	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));

	kvm_vcpu_mark_page_dirty(vcpu, gfn);
}
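
For illustration (not part of this patch), the two sequence bumps above form a
seqcount-style publication protocol: an odd value tells the guest an update is
in flight, and a changed value tells it to retry. A guest-side reader might
look roughly like the sketch below (hypothetical function name), assuming the
struct sbi_sta_struct layout in asm/sbi.h with little-endian sequence and
steal fields (steal in nanoseconds) and the generic virt_rmb() barrier:

#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/sbi.h>

/* Illustrative guest-side read of the accumulated steal time. */
static u64 sta_read_steal(struct sbi_sta_struct *st)
{
	__le32 seq;
	__le64 steal;

	do {
		seq = READ_ONCE(st->sequence);
		virt_rmb();
		steal = READ_ONCE(st->steal);
		virt_rmb();
		/* Retry while an update is in flight (odd) or seq moved on. */
	} while ((le32_to_cpu(seq) & 1) ||
		 READ_ONCE(st->sequence) != seq);

	return le64_to_cpu(steal);
}
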
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long shmem_phys_lo = cp->a0;
	unsigned long shmem_phys_hi = cp->a1;
	u32 flags = cp->a2;
	struct sbi_sta_struct zero_sta = {0};
	unsigned long hva;
	bool writable;
	gpa_t shmem;
	int ret;

	if (flags != 0)
		return SBI_ERR_INVALID_PARAM;

	if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
	    shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
		vcpu->arch.sta.shmem = INVALID_GPA;
		return 0;
	}

	/* The shared memory area must be 64-byte aligned. */
	if (shmem_phys_lo & (SZ_64 - 1))
		return SBI_ERR_INVALID_PARAM;

	shmem = shmem_phys_lo;
	if (shmem_phys_hi != 0) {
		if (IS_ENABLED(CONFIG_32BIT))
			shmem |= ((gpa_t)shmem_phys_hi << 32);
		else
			return SBI_ERR_INVALID_ADDRESS;
	}

	hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
	if (kvm_is_error_hva(hva) || !writable)
		return SBI_ERR_INVALID_ADDRESS;

	/* Zero the structure the guest will read steal-time data from. */
	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
	if (ret)
		return SBI_ERR_FAILURE;

	vcpu->arch.sta.shmem = shmem;
	vcpu->arch.sta.last_steal = current->sched_info.run_delay;

	return 0;
}
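
For illustration (not part of this patch), a guest disables steal-time
reporting for a hart by passing the all-ones SBI_STA_SHMEM_DISABLE value in
both address registers, which the function above maps to INVALID_GPA.
Assuming the SBI_EXT_STA* constant names from asm/sbi.h, the call would look
like:

	/* Stop steal-time updates for this hart, e.g. before offlining it. */
	struct sbiret ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM,
				      SBI_STA_SHMEM_DISABLE, SBI_STA_SHMEM_DISABLE,
				      0, 0, 0, 0);
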
static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -52,7 +144,7 @@ static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	return !!sched_info_on();
 }
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
......
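
For illustration (not part of this patch), making the probe callback return
!!sched_info_on() means guests only see the STA extension advertised when the
host actually tracks run_delay. A guest-side availability check, assuming the
SBI_EXT_STA extension ID definition and the existing sbi_probe_extension()
helper, would be along the lines of:

	if (sbi_probe_extension(SBI_EXT_STA) > 0)
		pr_info("SBI STA (steal-time accounting) extension detected\n");
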