Commit b2c13c23 authored by Alexey Makhalov, committed by Borislav Petkov (AMD)

x86/vmware: Use VMware hypercall API

Remove VMWARE_CMD macro and move to vmware_hypercall API.
No functional changes intended.

Use u32/u64 instead of uint32_t/uint64_t across the file.
Signed-off-by: Alexey Makhalov <alexey.makhalov@broadcom.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240613191650.9913-6-alexey.makhalov@broadcom.com
parent 90328eaa
@@ -49,54 +49,16 @@
 #define STEALCLOCK_DISABLED        0
 #define STEALCLOCK_ENABLED         1
 
-#define VMWARE_PORT(cmd, eax, ebx, ecx, edx)				\
-	__asm__("inl (%%dx), %%eax" :					\
-		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
-		"a"(VMWARE_HYPERVISOR_MAGIC),				\
-		"c"(VMWARE_CMD_##cmd),					\
-		"d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) :		\
-		"memory")
-
-#define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx)				\
-	__asm__("vmcall" :						\
-		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
-		"a"(VMWARE_HYPERVISOR_MAGIC),				\
-		"c"(VMWARE_CMD_##cmd),					\
-		"d"(0), "b"(UINT_MAX) :					\
-		"memory")
-
-#define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx)				\
-	__asm__("vmmcall" :						\
-		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
-		"a"(VMWARE_HYPERVISOR_MAGIC),				\
-		"c"(VMWARE_CMD_##cmd),					\
-		"d"(0), "b"(UINT_MAX) :					\
-		"memory")
-
-#define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do {		\
-	switch (vmware_hypercall_mode) {			\
-	case CPUID_VMWARE_FEATURES_ECX_VMCALL:			\
-		VMWARE_VMCALL(cmd, eax, ebx, ecx, edx);		\
-		break;						\
-	case CPUID_VMWARE_FEATURES_ECX_VMMCALL:			\
-		VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx);	\
-		break;						\
-	default:						\
-		VMWARE_PORT(cmd, eax, ebx, ecx, edx);		\
-		break;						\
-	}							\
-} while (0)
-
 struct vmware_steal_time {
 	union {
-		uint64_t clock;	/* stolen time counter in units of vtsc */
+		u64 clock;	/* stolen time counter in units of vtsc */
 		struct {
 			/* only for little-endian */
-			uint32_t clock_low;
-			uint32_t clock_high;
+			u32 clock_low;
+			u32 clock_high;
 		};
 	};
-	uint64_t reserved[7];
+	u64 reserved[7];
 };
 
 static unsigned long vmware_tsc_khz __ro_after_init;
@@ -166,9 +128,10 @@ unsigned long vmware_hypercall_slow(unsigned long cmd,
 
 static inline int __vmware_platform(void)
 {
-	uint32_t eax, ebx, ecx, edx;
-	VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx);
-	return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
+	u32 eax, ebx, ecx;
+
+	eax = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &ebx, &ecx);
+	return eax != UINT_MAX && ebx == VMWARE_HYPERVISOR_MAGIC;
 }
 
 static unsigned long vmware_get_tsc_khz(void)
@@ -220,21 +183,12 @@ static void __init vmware_cyc2ns_setup(void)
 	pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
 }
 
-static int vmware_cmd_stealclock(uint32_t arg1, uint32_t arg2)
+static int vmware_cmd_stealclock(u32 addr_hi, u32 addr_lo)
 {
-	uint32_t result, info;
+	u32 info;
 
-	asm volatile (VMWARE_HYPERCALL :
-		"=a"(result),
-		"=c"(info) :
-		"a"(VMWARE_HYPERVISOR_MAGIC),
-		"b"(0),
-		"c"(VMWARE_CMD_STEALCLOCK),
-		"d"(0),
-		"S"(arg1),
-		"D"(arg2) :
-		"memory");
-
-	return result;
+	return vmware_hypercall5(VMWARE_CMD_STEALCLOCK, 0, 0, addr_hi, addr_lo,
+				 &info);
 }
 
 static bool stealclock_enable(phys_addr_t pa)
@@ -269,15 +223,15 @@ static bool vmware_is_stealclock_available(void)
  * Return:
  *	The steal clock reading in ns.
  */
-static uint64_t vmware_steal_clock(int cpu)
+static u64 vmware_steal_clock(int cpu)
 {
 	struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
-	uint64_t clock;
+	u64 clock;
 
 	if (IS_ENABLED(CONFIG_64BIT))
 		clock = READ_ONCE(steal->clock);
 	else {
-		uint32_t initial_high, low, high;
+		u32 initial_high, low, high;
 
 		do {
 			initial_high = READ_ONCE(steal->clock_high);
@@ -289,7 +243,7 @@ static uint64_t vmware_steal_clock(int cpu)
 			high = READ_ONCE(steal->clock_high);
 		} while (initial_high != high);
 
-		clock = ((uint64_t)high << 32) | low;
+		clock = ((u64)high << 32) | low;
 	}
 
 	return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
@@ -443,13 +397,13 @@ static void __init vmware_set_capabilities(void)
 
 static void __init vmware_platform_setup(void)
 {
-	uint32_t eax, ebx, ecx, edx;
-	uint64_t lpj, tsc_khz;
+	u32 eax, ebx, ecx;
+	u64 lpj, tsc_khz;
 
-	VMWARE_CMD(GETHZ, eax, ebx, ecx, edx);
+	eax = vmware_hypercall3(VMWARE_CMD_GETHZ, UINT_MAX, &ebx, &ecx);
 
 	if (ebx != UINT_MAX) {
-		lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
+		lpj = tsc_khz = eax | (((u64)ebx) << 32);
 		do_div(tsc_khz, 1000);
 		WARN_ON(tsc_khz >> 32);
 		pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
@@ -500,7 +454,7 @@ static u8 __init vmware_select_hypercall(void)
  * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
  * intentionally defaults to 0.
  */
-static uint32_t __init vmware_platform(void)
+static u32 __init vmware_platform(void)
 {
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		unsigned int eax;
@@ -528,8 +482,9 @@ static uint32_t __init vmware_platform(void)
 /* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
 static bool __init vmware_legacy_x2apic_available(void)
 {
-	uint32_t eax, ebx, ecx, edx;
-	VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
+	u32 eax;
+
+	eax = vmware_hypercall1(VMWARE_CMD_GETVCPU_INFO, 0);
 	return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) &&
 	       (eax & BIT(VMWARE_CMD_LEGACY_X2APIC));
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment