Commit 43972cf2 authored by Linus Torvalds

Merge tag 'x86_urgent_for_v6.5_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Do not parse the confidential computing blob on non-AMD hardware as
   it leads to an EFI config table ending up unmapped

 - Use the correct segment selector in the 32-bit version of getcpu() in
   the vDSO

 - Make sure vDSO and VVAR regions are placed in the 47-bit VA range
   even on 5-level paging systems

 - Add models 0x90-0x91 to the range of AMD Zenbleed-affected CPUs

* tag 'x86_urgent_for_v6.5_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu/amd: Enable Zenbleed fix for AMD Custom APU 0405
  x86/mm: Fix VDSO and VVAR placement on 5-level paging machines
  x86/linkage: Fix typo of BUILD_VDSO in asm/linkage.h
  x86/vdso: Choose the right GDT_ENTRY_CPUNODE for 32-bit getcpu() on 64-bit kernel
  x86/sev: Do not try to parse for the CC blob on non-AMD hardware
parents 272b86ba 6dbef74a
@@ -63,7 +63,14 @@ void load_stage2_idt(void)
 	set_idt_entry(X86_TRAP_PF, boot_page_fault);
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
-	set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+	/*
+	 * Clear the second stage #VC handler in case guest types
+	 * needing #VC have not been detected.
+	 */
+	if (sev_status & BIT(1))
+		set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+	else
+		set_idt_entry(X86_TRAP_VC, NULL);
 #endif
 
 	load_boot_idt(&boot_idt_desc);
...
@@ -404,13 +404,46 @@ void sev_enable(struct boot_params *bp)
 	if (bp)
 		bp->cc_blob_address = 0;
 
+	/*
+	 * Do an initial SEV capability check before snp_init() which
+	 * loads the CPUID page and the same checks afterwards are done
+	 * without the hypervisor and are trustworthy.
+	 *
+	 * If the HV fakes SEV support, the guest will crash'n'burn
+	 * which is good enough.
+	 */
+
+	/* Check for the SME/SEV support leaf */
+	eax = 0x80000000;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	if (eax < 0x8000001f)
+		return;
+
+	/*
+	 * Check for the SME/SEV feature:
+	 *   CPUID Fn8000_001F[EAX]
+	 *   - Bit 0 - Secure Memory Encryption support
+	 *   - Bit 1 - Secure Encrypted Virtualization support
+	 *   CPUID Fn8000_001F[EBX]
+	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
+	 */
+	eax = 0x8000001f;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	/* Check whether SEV is supported */
+	if (!(eax & BIT(1)))
+		return;
+
 	/*
 	 * Setup/preliminary detection of SNP. This will be sanity-checked
 	 * against CPUID/MSR values later.
 	 */
 	snp = snp_init(bp);
 
-	/* Check for the SME/SEV support leaf */
+	/* Now repeat the checks with the SNP CPUID table. */
+
+	/* Recheck the SME/SEV support leaf */
 	eax = 0x80000000;
 	ecx = 0;
 	native_cpuid(&eax, &ebx, &ecx, &edx);
@@ -418,7 +451,7 @@ void sev_enable(struct boot_params *bp)
 		return;
 
 	/*
-	 * Check for the SME/SEV feature:
+	 * Recheck for the SME/SEV feature:
 	 *   CPUID Fn8000_001F[EAX]
 	 *   - Bit 0 - Secure Memory Encryption support
 	 *   - Bit 1 - Secure Encrypted Virtualization support
...
@@ -299,8 +299,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 
 	/* Round the lowest possible end address up to a PMD boundary. */
 	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= TASK_SIZE_MAX)
-		end = TASK_SIZE_MAX;
+	if (end >= DEFAULT_MAP_WINDOW)
+		end = DEFAULT_MAP_WINDOW;
 	end -= len;
 
 	if (end > start) {
...
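For context on the hunk above: with 5-level paging, TASK_SIZE_MAX on x86-64 reaches the 56-bit user address limit, while DEFAULT_MAP_WINDOW keeps the legacy 47-bit window, so clamping the search end to the latter keeps the vDSO and VVAR placement hint below the 47-bit boundary. A minimal user-space sketch of the two limits (assuming 4 KiB pages; the constants only approximate the kernel's actual macro definitions):

/* Hedged illustration, not kernel code: the 4-level (47-bit) vs 5-level
 * (56-bit) user VA limits that DEFAULT_MAP_WINDOW and TASK_SIZE_MAX
 * roughly correspond to on x86-64, assuming 4 KiB pages. */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096UL;
	unsigned long window_47 = (1UL << 47) - page_size;	/* ~DEFAULT_MAP_WINDOW */
	unsigned long window_56 = (1UL << 56) - page_size;	/* ~TASK_SIZE_MAX, 5-level paging */

	printf("47-bit map window end: 0x%lx\n", window_47);
	printf("56-bit map window end: 0x%lx\n", window_56);
	return 0;
}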
@@ -21,7 +21,7 @@
 #define FUNCTION_PADDING
 #endif
 
-#if (CONFIG_FUNCTION_ALIGNMENT > 8) && !defined(__DISABLE_EXPORTS) && !defined(BULID_VDSO)
+#if (CONFIG_FUNCTION_ALIGNMENT > 8) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
 # define __FUNC_ALIGN		__ALIGN; FUNCTION_PADDING
 #else
 # define __FUNC_ALIGN		__ALIGN
...
@@ -56,7 +56,7 @@
 
 #define GDT_ENTRY_INVALID_SEG	0
 
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && !defined(BUILD_VDSO32_64)
 /*
  * The layout of the per-CPU GDT under Linux:
  *
...
@@ -73,6 +73,7 @@ static const int amd_erratum_1054[] =
 static const int amd_zenbleed[] =
 	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
 			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+			   AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
 			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
 
 static const int amd_div0[] =
...
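The added 0x90-0x91 entry extends the Zen 2 Zenbleed list to the family 0x17 models used by the AMD Custom APU 0405. As a rough, hypothetical user-space check (not kernel code; it uses the compiler-provided __get_cpuid() from cpuid.h), family and model can be decoded from CPUID leaf 1 and compared against that added range:

/* Hedged sketch: decode family/model from CPUID leaf 1 roughly the way the
 * kernel does and test against the newly added 0x90-0x91 model range.
 * The 0x0-0xf stepping range in AMD_MODEL_RANGE() covers all steppings. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family = (eax >> 8) & 0xf;
	model  = (eax >> 4) & 0xf;
	if (family == 0xf)			/* extended family field */
		family += (eax >> 20) & 0xff;
	if (family >= 0x6)			/* extended model field */
		model += ((eax >> 16) & 0xf) << 4;

	if (family == 0x17 && model >= 0x90 && model <= 0x91)
		printf("family 0x%x model 0x%x: inside the added Zenbleed range\n",
		       family, model);
	else
		printf("family 0x%x model 0x%x: outside the added range\n",
		       family, model);
	return 0;
}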