Commit d08b4858 authored by Carlos Bilbao, committed by Paolo Bonzini

KVM: SVM: Name and check reserved fields with structs offset

Rename the reserved fields of all structs in arch/x86/include/asm/svm.h
after their offset within the struct. Add compile-time checks for these
offsets in the same place where the other BUILD_BUG_ON checks for the
structs are.

This also fixes the fact that the fields of struct sev_es_save_area are
named by their order of appearance, yet they currently jump from
reserved_5 to reserved_7.

Link: https://lkml.org/lkml/2022/10/22/376
Signed-off-by: Carlos Bilbao <carlos.bilbao@amd.com>
Message-Id: <20221024164448.203351-1-carlos.bilbao@amd.com>
[Use ASSERT_STRUCT_OFFSET + fix a couple wrong offsets. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 07a368b3
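
For context on the checks added below: because each reserved field is named after its own offset, that offset can be token-pasted back into the field name and verified at build time. A minimal standalone sketch of the idea, using a made-up struct and a made-up CHECK_RESERVED_OFFSET macro built on C11 _Static_assert (not the kernel's ASSERT_STRUCT_OFFSET):

#include <stddef.h>
#include <stdint.h>

/* Illustrative struct only; reserved fields are named after their offsets. */
struct demo_save_area {
	uint64_t efer;			/* offset 0x0 */
	uint8_t  reserved_0x8[8];	/* offset 0x8 */
	uint64_t cr4;			/* offset 0x10 */
} __attribute__((packed));

/* Paste the offset into the field name, then assert the offset at build time. */
#define CHECK_RESERVED_OFFSET(type, off)					\
	_Static_assert(offsetof(struct type, reserved ## _ ## off) == (off),	\
		       "reserved field at " #off " moved in struct " #type)

CHECK_RESERVED_OFFSET(demo_save_area, 0x8);

int main(void)
{
	return 0;
}

If a field is added or resized without renaming the reserved field that follows it, the assertion (like BUILD_BUG_RESERVED_OFFSET in the hunks below) stops the build.
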
@@ -293,12 +293,13 @@ struct vmcb_save_area {
 	struct vmcb_seg ldtr;
 	struct vmcb_seg idtr;
 	struct vmcb_seg tr;
-	u8 reserved_1[42];
+	/* Reserved fields are named following their struct offset */
+	u8 reserved_0xa0[42];
 	u8 vmpl;
 	u8 cpl;
-	u8 reserved_2[4];
+	u8 reserved_0xcc[4];
 	u64 efer;
-	u8 reserved_3[112];
+	u8 reserved_0xd8[112];
 	u64 cr4;
 	u64 cr3;
 	u64 cr0;
@@ -306,7 +307,7 @@ struct vmcb_save_area {
 	u64 dr6;
 	u64 rflags;
 	u64 rip;
-	u8 reserved_4[88];
+	u8 reserved_0x180[88];
 	u64 rsp;
 	u64 s_cet;
 	u64 ssp;
@@ -321,14 +322,14 @@ struct vmcb_save_area {
 	u64 sysenter_esp;
 	u64 sysenter_eip;
 	u64 cr2;
-	u8 reserved_5[32];
+	u8 reserved_0x248[32];
 	u64 g_pat;
 	u64 dbgctl;
 	u64 br_from;
 	u64 br_to;
 	u64 last_excp_from;
 	u64 last_excp_to;
-	u8 reserved_6[72];
+	u8 reserved_0x298[72];
 	u32 spec_ctrl;	/* Guest version of SPEC_CTRL at 0x2E0 */
 } __packed;
@@ -349,12 +350,12 @@ struct sev_es_save_area {
 	u64 vmpl2_ssp;
 	u64 vmpl3_ssp;
 	u64 u_cet;
-	u8 reserved_1[2];
+	u8 reserved_0xc8[2];
 	u8 vmpl;
 	u8 cpl;
-	u8 reserved_2[4];
+	u8 reserved_0xcc[4];
 	u64 efer;
-	u8 reserved_3[104];
+	u8 reserved_0xd8[104];
 	u64 xss;
 	u64 cr4;
 	u64 cr3;
@@ -371,7 +372,7 @@ struct sev_es_save_area {
 	u64 dr1_addr_mask;
 	u64 dr2_addr_mask;
 	u64 dr3_addr_mask;
-	u8 reserved_4[24];
+	u8 reserved_0x1c0[24];
 	u64 rsp;
 	u64 s_cet;
 	u64 ssp;
@@ -386,21 +387,21 @@ struct sev_es_save_area {
 	u64 sysenter_esp;
 	u64 sysenter_eip;
 	u64 cr2;
-	u8 reserved_5[32];
+	u8 reserved_0x248[32];
 	u64 g_pat;
 	u64 dbgctl;
 	u64 br_from;
 	u64 br_to;
 	u64 last_excp_from;
 	u64 last_excp_to;
-	u8 reserved_7[80];
+	u8 reserved_0x298[80];
 	u32 pkru;
-	u8 reserved_8[20];
-	u64 reserved_9;	/* rax already available at 0x01f8 */
+	u32 tsc_aux;
+	u8 reserved_0x2f0[24];
 	u64 rcx;
 	u64 rdx;
 	u64 rbx;
-	u64 reserved_10;	/* rsp already available at 0x01d8 */
+	u64 reserved_0x320;	/* rsp already available at 0x01d8 */
 	u64 rbp;
 	u64 rsi;
 	u64 rdi;
@@ -412,7 +413,7 @@ struct sev_es_save_area {
 	u64 r13;
 	u64 r14;
 	u64 r15;
-	u8 reserved_11[16];
+	u8 reserved_0x380[16];
 	u64 guest_exit_info_1;
 	u64 guest_exit_info_2;
 	u64 guest_exit_int_info;
@@ -425,7 +426,7 @@ struct sev_es_save_area {
 	u64 pcpu_id;
 	u64 event_inj;
 	u64 xcr0;
-	u8 reserved_12[16];
+	u8 reserved_0x3f0[16];
 
 	/* Floating point area */
 	u64 x87_dp;
@@ -443,23 +444,23 @@ struct sev_es_save_area {
 } __packed;
 
 struct ghcb_save_area {
-	u8 reserved_1[203];
+	u8 reserved_0x0[203];
 	u8 cpl;
-	u8 reserved_2[116];
+	u8 reserved_0xcc[116];
 	u64 xss;
-	u8 reserved_3[24];
+	u8 reserved_0x148[24];
 	u64 dr7;
-	u8 reserved_4[16];
+	u8 reserved_0x168[16];
 	u64 rip;
-	u8 reserved_5[88];
+	u8 reserved_0x180[88];
 	u64 rsp;
-	u8 reserved_6[24];
+	u8 reserved_0x1e0[24];
 	u64 rax;
-	u8 reserved_7[264];
+	u8 reserved_0x200[264];
 	u64 rcx;
 	u64 rdx;
 	u64 rbx;
-	u8 reserved_8[8];
+	u8 reserved_0x320[8];
 	u64 rbp;
 	u64 rsi;
 	u64 rdi;
@@ -471,12 +472,12 @@ struct ghcb_save_area {
 	u64 r13;
 	u64 r14;
 	u64 r15;
-	u8 reserved_9[16];
+	u8 reserved_0x380[16];
 	u64 sw_exit_code;
 	u64 sw_exit_info_1;
 	u64 sw_exit_info_2;
 	u64 sw_scratch;
-	u8 reserved_10[56];
+	u8 reserved_0x3b0[56];
 	u64 xcr0;
 	u8 valid_bitmap[16];
 	u64 x87_state_gpa;
@@ -490,7 +491,7 @@ struct ghcb {
 	u8 shared_buffer[GHCB_SHARED_BUF_SIZE];
 
-	u8 reserved_1[10];
+	u8 reserved_0xff0[10];
 	u16 protocol_version;	/* negotiated SEV-ES/GHCB protocol version */
 	u32 ghcb_usage;
 } __packed;
@@ -502,6 +503,9 @@ struct ghcb {
 #define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024
 #define EXPECTED_GHCB_SIZE PAGE_SIZE
 
+#define BUILD_BUG_RESERVED_OFFSET(x, y) \
+	ASSERT_STRUCT_OFFSET(struct x, reserved ## _ ## y, y)
+
 static inline void __unused_size_checks(void)
 {
 	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE);
@@ -509,6 +513,39 @@ static inline void __unused_size_checks(void)
 	BUILD_BUG_ON(sizeof(struct sev_es_save_area) != EXPECTED_SEV_ES_SAVE_AREA_SIZE);
 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
 	BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);
+
+	/* Check offsets of reserved fields */
+	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xa0);
+	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xcc);
+	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xd8);
+	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x180);
+	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x248);
+	BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x298);
+
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xc8);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xcc);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xd8);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x2f0);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380);
+	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0);
+
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x0);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0xcc);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x148);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x168);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x180);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x1e0);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x200);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x320);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x380);
+	BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x3b0);
+
+	BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0);
 }
 
 struct vmcb {
@@ -2648,7 +2648,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	ghcb_scratch_beg = control->ghcb_gpa +
 			   offsetof(struct ghcb, shared_buffer);
 	ghcb_scratch_end = control->ghcb_gpa +
-			   offsetof(struct ghcb, reserved_1);
+			   offsetof(struct ghcb, reserved_0xff0);
 
 	/*
 	 * If the scratch area begins within the GHCB, it must be
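
The setup_vmgexit_scratch() hunk works because reserved_0xff0 is the field immediately after shared_buffer in struct ghcb, so its offset is the exclusive end of the shared buffer. A standalone sketch of that bounds computation under assumed, illustrative names (demo_ghcb and scratch_in_shared_buffer are not KVM code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative layout only: a buffer followed by a reserved field. */
struct demo_ghcb {
	uint8_t shared_buffer[2032];
	uint8_t reserved_0x7f0[16];	/* first field after the buffer */
} __attribute__((packed));

/* True if [addr, addr + len) lies inside the shared buffer; the next
 * field's offset serves as the exclusive end bound, as in the hunk above. */
static bool scratch_in_shared_buffer(uint64_t ghcb_gpa, uint64_t addr, uint64_t len)
{
	uint64_t beg = ghcb_gpa + offsetof(struct demo_ghcb, shared_buffer);
	uint64_t end = ghcb_gpa + offsetof(struct demo_ghcb, reserved_0x7f0);

	return addr >= beg && addr <= end && len <= end - addr;
}

int main(void)
{
	/* Example: a 16-byte scratch area at the very start of the shared buffer. */
	return scratch_in_shared_buffer(0x1000, 0x1000, 16) ? 0 : 1;
}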