Commit 20b97fea authored by Abel Gordon's avatar Abel Gordon Committed by Gleb Natapov

KVM: nVMX: Refactor handle_vmwrite

Refactor existing code so we reuse vmcs12_write_any to copy fields from the
shadow vmcs specified by the link pointer (used by the processor,
implementation-specific) to the VMCS12 software format used by L0 to hold
the fields in L1 memory address space.
Signed-off-by: default avatarAbel Gordon <abelg@il.ibm.com>
Reviewed-by: default avatarOrit Wasserman <owasserm@redhat.com>
Signed-off-by: default avatarGleb Natapov <gleb@redhat.com>
parent 4607c2d7
...@@ -5842,6 +5842,33 @@ static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu, ...@@ -5842,6 +5842,33 @@ static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
} }
} }
/*
 * Write @field_value into @field of the current vmcs12 — the software
 * format L0 uses to hold the shadow VMCS fields in L1's address space.
 *
 * The value is truncated to the field's natural width (u16/u32/u64/
 * natural_width) before being stored.
 *
 * Returns true on success, false if @field is not a known/supported
 * VMCS component.
 */
static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu,
				    unsigned long field, u64 field_value)
{
	short offset = vmcs_field_to_offset(field);
	char *p;

	/*
	 * Validate the field before doing any pointer arithmetic: a
	 * negative offset must not be added to the vmcs12 base pointer.
	 */
	if (offset < 0)
		return false;

	p = ((char *) get_vmcs12(vcpu)) + offset;

	switch (vmcs_field_type(field)) {
	case VMCS_FIELD_TYPE_U16:
		*(u16 *)p = field_value;
		return true;
	case VMCS_FIELD_TYPE_U32:
		*(u32 *)p = field_value;
		return true;
	case VMCS_FIELD_TYPE_U64:
		*(u64 *)p = field_value;
		return true;
	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
		*(natural_width *)p = field_value;
		return true;
	default:
		return false; /* can never happen. */
	}
}
/* /*
* VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
* used before) all generate the same failure when it is missing. * used before) all generate the same failure when it is missing.
...@@ -5906,8 +5933,6 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) ...@@ -5906,8 +5933,6 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
gva_t gva; gva_t gva;
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
char *p;
short offset;
/* The value to write might be 32 or 64 bits, depending on L1's long /* The value to write might be 32 or 64 bits, depending on L1's long
* mode, and eventually we need to write that into a field of several * mode, and eventually we need to write that into a field of several
* possible lengths. The code below first zero-extends the value to 64 * possible lengths. The code below first zero-extends the value to 64
...@@ -5944,28 +5969,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) ...@@ -5944,28 +5969,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return 1; return 1;
} }
offset = vmcs_field_to_offset(field); if (!vmcs12_write_any(vcpu, field, field_value)) {
if (offset < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
skip_emulated_instruction(vcpu);
return 1;
}
p = ((char *) get_vmcs12(vcpu)) + offset;
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_U16:
*(u16 *)p = field_value;
break;
case VMCS_FIELD_TYPE_U32:
*(u32 *)p = field_value;
break;
case VMCS_FIELD_TYPE_U64:
*(u64 *)p = field_value;
break;
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
*(natural_width *)p = field_value;
break;
default:
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
skip_emulated_instruction(vcpu); skip_emulated_instruction(vcpu);
return 1; return 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment