Commit e67391ca authored by Paolo Bonzini

Merge tag 'kvm-riscv-fixes-6.8-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv fixes for 6.8, take #1

- Fix steal-time related sparse warnings
parents 2f8ebe43 f072b272
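
Editor's note: both fixes below are endianness-annotation cleanups. The SBI steal-time shared-memory fields are little-endian, and sparse warns when such values land in plain CPU-endian u32/u64 variables. A minimal sketch of the pattern, using a hypothetical helper that is not part of this commit:

#include <linux/types.h>	/* u32, __le32 */
#include <asm/byteorder.h>	/* le32_to_cpu() */

/* Hypothetical helper, for illustration only: read a
 * little-endian field without tripping sparse.  Declaring the
 * temporary as __le32 keeps the restricted type until the
 * explicit le32_to_cpu() conversion; assigning the raw value
 * to a plain u32 instead would draw a "restricted __le32"
 * style warning.
 */
static u32 read_le32_field(const __le32 *field)
{
	__le32 raw = *field;

	return le32_to_cpu(raw);
}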
arch/riscv/kernel/paravirt.c
@@ -41,7 +41,7 @@ static int __init parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
-DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
+static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
 
 static bool __init has_pv_steal_clock(void)
 {
@@ -91,8 +91,8 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
 static u64 pv_time_steal_clock(int cpu)
 {
 	struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
-	u32 sequence;
-	u64 steal;
+	__le32 sequence;
+	__le64 steal;
 
 	/*
 	 * Check the sequence field before and after reading the steal
...
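
Editor's note: the comment preserved at the end of this hunk describes a seqcount-style lockless read: the hypervisor bumps sequence around each update of steal, so the guest must retry while sequence is odd (update in progress) or changes between reads. A sketch of that reader, assuming the kernel's READ_ONCE() and virt_rmb() helpers; the exact loop in paravirt.c may differ:

/* Sketch only, not the verbatim pv_time_steal_clock() body. */
static u64 read_steal_time(struct sbi_sta_struct *st)
{
	__le32 sequence;
	__le64 steal;

	do {
		sequence = READ_ONCE(st->sequence);
		virt_rmb();	/* order sequence read before steal */
		steal = READ_ONCE(st->steal);
		virt_rmb();	/* order steal read before re-check */
	} while ((le32_to_cpu(sequence) & 1) ||
		 sequence != READ_ONCE(st->sequence));

	return le64_to_cpu(steal);
}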
arch/riscv/kvm/vcpu_sbi_sta.c
@@ -26,8 +26,12 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 {
 	gpa_t shmem = vcpu->arch.sta.shmem;
 	u64 last_steal = vcpu->arch.sta.last_steal;
-	u32 *sequence_ptr, sequence;
-	u64 *steal_ptr, steal;
+	__le32 __user *sequence_ptr;
+	__le64 __user *steal_ptr;
+	__le32 sequence_le;
+	__le64 steal_le;
+	u32 sequence;
+	u64 steal;
 	unsigned long hva;
 	gfn_t gfn;
 
@@ -47,22 +51,22 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 		return;
 	}
 
-	sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+	sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
 			       offsetof(struct sbi_sta_struct, sequence));
-	steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+	steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
 			    offsetof(struct sbi_sta_struct, steal));
-	if (WARN_ON(get_user(sequence, sequence_ptr)))
+	if (WARN_ON(get_user(sequence_le, sequence_ptr)))
 		return;
-	sequence = le32_to_cpu(sequence);
+	sequence = le32_to_cpu(sequence_le);
 	sequence += 1;
 
 	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
 		return;
 
-	if (!WARN_ON(get_user(steal, steal_ptr))) {
-		steal = le64_to_cpu(steal);
+	if (!WARN_ON(get_user(steal_le, steal_ptr))) {
+		steal = le64_to_cpu(steal_le);
 		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
 		steal += vcpu->arch.sta.last_steal - last_steal;
 		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
...
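
Editor's note: on the host side the same idea extends to the user address space. Casting the mapped HVA to __le32 __user * / __le64 __user * lets sparse check both the endianness of the values and that the pointers are only dereferenced through get_user()/put_user(). A condensed, hypothetical round trip (the real code above also tracks run_delay and follows the sequence protocol):

#include <linux/uaccess.h>	/* get_user(), put_user() */

/* Sketch only: increment a little-endian u32 that lives in
 * guest-shared memory mapped at a user address.
 */
static int bump_guest_le32(__le32 __user *ptr)
{
	__le32 val_le;
	u32 val;

	if (get_user(val_le, ptr))	/* fetch raw little-endian bits */
		return -EFAULT;

	val = le32_to_cpu(val_le) + 1;	/* convert, modify in CPU order */

	return put_user(cpu_to_le32(val), ptr) ? -EFAULT : 0;
}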