Commit c76de350 authored by Jan Kiszka, committed by Marcelo Tosatti

KVM: SVM: Clean up and enhance mov dr emulation

Enhance the mov dr instruction emulation used by SVM so that it properly
handles dr4/5: these alias to dr6/7 if cr4.de is cleared. Otherwise, return
EMULATE_FAIL, which lets our only possible caller in that scenario,
ud_interception, re-inject #UD.

We do not need to inject faults ourselves; SVM does this for us, since
exceptions take precedence over instruction interceptions. For the same
reason, the value overflow checks can be removed.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent fd7373cc
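The re-injection path referred to in the commit message is ud_interception in svm.c, which feeds the emulator's verdict back to the guest. The following is a rough sketch of how that caller behaves in this tree (paraphrased for illustration; it is not part of this patch):

static int ud_interception(struct vcpu_svm *svm)
{
        int er;

        /* Try to emulate the faulting instruction, e.g. a mov to/from dr4/dr5. */
        er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
        if (er != EMULATE_DONE)
                /* Emulation reported EMULATE_FAIL: hand the #UD back to the guest. */
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
}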
@@ -506,9 +506,8 @@ struct kvm_x86_ops {
         void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
         void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
         void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
-        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
-                       int *exception);
+        int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
+        int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
         void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
         unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
         void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
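With the slimmed-down hooks above, the SVM backend simply registers its handlers in its kvm_x86_ops instance. A minimal sketch of the relevant initializer entries in svm.c, with the surrounding fields elided, for orientation only:

static struct kvm_x86_ops svm_x86_ops = {
        /* ... */
        .get_dr = svm_get_dr,   /* now returns EMULATE_DONE or EMULATE_FAIL */
        .set_dr = svm_set_dr,   /* no explicit exception out-parameter anymore */
        /* ... */
};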
@@ -1122,76 +1122,70 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
         svm->vmcb->control.asid = sd->next_asid++;
 }
 
-static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
+static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
-        unsigned long val;
 
         switch (dr) {
         case 0 ... 3:
-                val = vcpu->arch.db[dr];
+                *dest = vcpu->arch.db[dr];
                 break;
+        case 4:
+                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+                        return EMULATE_FAIL; /* will re-inject UD */
+                /* fall through */
         case 6:
                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-                        val = vcpu->arch.dr6;
+                        *dest = vcpu->arch.dr6;
                 else
-                        val = svm->vmcb->save.dr6;
+                        *dest = svm->vmcb->save.dr6;
                 break;
+        case 5:
+                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+                        return EMULATE_FAIL; /* will re-inject UD */
+                /* fall through */
         case 7:
                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-                        val = vcpu->arch.dr7;
+                        *dest = vcpu->arch.dr7;
                 else
-                        val = svm->vmcb->save.dr7;
+                        *dest = svm->vmcb->save.dr7;
                 break;
-        default:
-                val = 0;
         }
 
-        return val;
+        return EMULATE_DONE;
 }
 
-static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
-                       int *exception)
+static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
 
-        *exception = 0;
-
         switch (dr) {
         case 0 ... 3:
                 vcpu->arch.db[dr] = value;
                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                         vcpu->arch.eff_db[dr] = value;
-                return;
-        case 4 ... 5:
-                if (vcpu->arch.cr4 & X86_CR4_DE)
-                        *exception = UD_VECTOR;
-                return;
+                break;
+        case 4:
+                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+                        return EMULATE_FAIL; /* will re-inject UD */
+                /* fall through */
         case 6:
-                if (value & 0xffffffff00000000ULL) {
-                        *exception = GP_VECTOR;
-                        return;
-                }
                 vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
-                return;
+                break;
+        case 5:
+                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+                        return EMULATE_FAIL; /* will re-inject UD */
+                /* fall through */
         case 7:
-                if (value & 0xffffffff00000000ULL) {
-                        *exception = GP_VECTOR;
-                        return;
-                }
                 vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                         svm->vmcb->save.dr7 = vcpu->arch.dr7;
                         vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
                 }
-                return;
-        default:
-                /* FIXME: Possible case? */
-                printk(KERN_DEBUG "%s: unexpected dr %u\n",
-                       __func__, dr);
-                *exception = UD_VECTOR;
-                return;
+                break;
         }
+
+        return EMULATE_DONE;
 }
 
 static int pf_interception(struct vcpu_svm *svm)
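The (value & DR6_VOLATILE) | DR6_FIXED_1 and DR7 updates above rely on mask constants from kvm_host.h. For orientation only, they look roughly like this in this tree; treat the exact values as illustrative of which bits are guest-writable versus forced to 1, not as part of this patch:

#define DR6_FIXED_1     0xffff0ff0      /* reserved DR6 bits that always read as 1 */
#define DR6_VOLATILE    0x0000e00f      /* B0-B3, BD, BS, BT: bits the guest may change */

#define DR7_BP_EN_MASK  0x000000ff      /* L0-L3/G0-G3 breakpoint enable bits */
#define DR7_FIXED_1     0x00000400      /* reserved DR7 bit that always reads as 1 */
#define DR7_VOLATILE    0xffff23ff      /* guest-controllable DR7 bits */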
@@ -3270,29 +3270,14 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
-        struct kvm_vcpu *vcpu = ctxt->vcpu;
-
-        switch (dr) {
-        case 0 ... 3:
-                *dest = kvm_x86_ops->get_dr(vcpu, dr);
-                return X86EMUL_CONTINUE;
-        default:
-                pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
-                return X86EMUL_UNHANDLEABLE;
-        }
+        return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
 }
 
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 {
         unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
-        int exception;
 
-        kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
-        if (exception) {
-                /* FIXME: better handling */
-                return X86EMUL_UNHANDLEABLE;
-        }
-        return X86EMUL_CONTINUE;
+        return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
 }
 
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)