Commit f1d2cfbf authored by Paolo Bonzini, committed by Stefan Bader

KVM: x86: pass kvm_vcpu to kvm_read_guest_virt and kvm_write_guest_virt_system

commit ce14e868 upstream.

In the next patch the emulator's .read_std and .write_std callbacks will
grow another argument, which is not needed in kvm_read_guest_virt and
kvm_write_guest_virt_system's callers.  Since we have to make separate
functions, let's give the currently existing names a nicer interface, too.
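
In short, callers that are not emulator callbacks no longer have to reach
through vcpu->arch.emulate_ctxt only for the callee to convert straight
back with emul_to_vcpu().  A minimal before/after sketch of one such call
site (nested_vmx_check_vmptr, see the diff below):

	/* before: pass the emulator context, callee derives the vcpu */
	kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
			    sizeof(vmptr), &e);

	/* after: pass the vcpu directly */
	kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e);

The emulator's .read_std and .write_std hooks keep taking a
struct x86_emulate_ctxt * via thin wrappers (emulator_read_std and
emulator_write_std) around the same helpers.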

Fixes: 129a72a0 ("KVM: x86: Introduce segmented_write_std", 2017-01-12)
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>

CVE-2018-3620
CVE-2018-3646
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent c277c3f5
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6713,8 +6713,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-				sizeof(vmptr), &e)) {
+	if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
@@ -7232,8 +7231,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 				vmx_instruction_info, true, &gva))
 			return 1;
 		/* _system ok, as nested_vmx_check_permission verified cpl=0 */
-		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
-			     &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+		kvm_write_guest_virt_system(vcpu, gva, &field_value,
+					    (is_long_mode(vcpu) ? 8 : 4), NULL);
 	}

 	nested_vmx_succeed(vcpu);
@@ -7268,8 +7267,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		if (get_vmx_mem_address(vcpu, exit_qualification,
 				vmx_instruction_info, false, &gva))
 			return 1;
-		if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
-			   &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+		if (kvm_read_guest_virt(vcpu, gva, &field_value,
+					(is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
 			kvm_inject_page_fault(vcpu, &e);
 			return 1;
 		}
@@ -7359,9 +7358,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 			vmx_instruction_info, true, &vmcs_gva))
 		return 1;
 	/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
-	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
-				 (void *)&to_vmx(vcpu)->nested.current_vmptr,
-				 sizeof(u64), &e)) {
+	if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
+					(void *)&to_vmx(vcpu)->nested.current_vmptr,
+					sizeof(u64), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
@@ -7415,8 +7414,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
 			vmx_instruction_info, false, &gva))
 		return 1;
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
-				sizeof(operand), &e)) {
+	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
@@ -7475,8 +7473,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
 			vmx_instruction_info, false, &gva))
 		return 1;
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
-				sizeof(u32), &e)) {
+	if (kvm_read_guest_virt(vcpu, gva, &vpid, sizeof(u32), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4270,11 +4270,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 }

-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 			       gva_t addr, void *val, unsigned int bytes,
 			       struct x86_exception *exception)
 {
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
@@ -4282,9 +4281,9 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);

-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
-				      gva_t addr, void *val, unsigned int bytes,
-				      struct x86_exception *exception)
+static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
+			     gva_t addr, void *val, unsigned int bytes,
+			     struct x86_exception *exception)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
@@ -4299,18 +4298,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
 	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
 }

-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
-				gva_t addr, void *val,
-				unsigned int bytes,
-				struct x86_exception *exception)
+static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+				       struct kvm_vcpu *vcpu, u32 access,
+				       struct x86_exception *exception)
 {
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	void *data = val;
 	int r = X86EMUL_CONTINUE;

 	while (bytes) {
 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
-							     PFERR_WRITE_MASK,
+							     access,
 							     exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
@@ -4331,6 +4328,22 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 out:
 	return r;
 }
+
+static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
+			      unsigned int bytes, struct x86_exception *exception)
+{
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+					   PFERR_WRITE_MASK, exception);
+}
+
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+				unsigned int bytes, struct x86_exception *exception)
+{
+	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+					   PFERR_WRITE_MASK, exception);
+}
 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);

 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
@@ -5050,8 +5063,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
 static const struct x86_emulate_ops emulate_ops = {
 	.read_gpr            = emulator_read_gpr,
 	.write_gpr           = emulator_write_gpr,
-	.read_std            = kvm_read_guest_virt_system,
-	.write_std           = kvm_write_guest_virt_system,
+	.read_std            = emulator_read_std,
+	.write_std           = emulator_write_std,
 	.read_phys           = kvm_read_guest_phys_system,
 	.fetch               = kvm_fetch_guest_virt,
 	.read_emulated       = emulator_read_emulated,
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -164,11 +164,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 	gva_t addr, void *val, unsigned int bytes,
 	struct x86_exception *exception);

-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
 	gva_t addr, void *val, unsigned int bytes,
 	struct x86_exception *exception);