Commit 107cd253 authored by Tom Lendacky, committed by Ingo Molnar

x86/mm: Encrypt the initrd earlier for BSP microcode update

Currently the BSP microcode update code examines the initrd very early
in the boot process.  If SME is active, the initrd is treated as being
encrypted but it has not been encrypted (in place) yet.  Update the
early boot code that encrypts the kernel to also encrypt the initrd so
that early BSP microcode updates work.

Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180110192634.6026.10452.stgit@tlendack-t1.amdoffice.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cc5f01e2
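
The core of the change is that sme_encrypt_kernel() now receives the boot_params
pointer, so it can locate the initrd itself rather than relying on the much later
reserve_initrd() path. As a minimal userspace sketch, this is how the 64-bit
initrd address and size are assembled from the split 32-bit boot_params fields
(the struct is a pared-down stand-in for the real struct boot_params, and the
values are invented):

#include <stdint.h>
#include <stdio.h>

/* Pared-down stand-in for the relevant x86 struct boot_params fields. */
struct boot_params_sketch {
        struct {
                uint32_t ramdisk_image;  /* low 32 bits of initrd address */
                uint32_t ramdisk_size;   /* low 32 bits of initrd size */
        } hdr;
        uint32_t ext_ramdisk_image;      /* high 32 bits of initrd address */
        uint32_t ext_ramdisk_size;       /* high 32 bits of initrd size */
};

int main(void)
{
        struct boot_params_sketch bp = {
                .hdr = { .ramdisk_image = 0x37c00000, .ramdisk_size = 0x01500000 },
                .ext_ramdisk_image = 0x1,  /* initrd loaded above 4GB */
                .ext_ramdisk_size = 0x0,
        };

        /* Same assembly the patch performs in sme_encrypt_kernel(). */
        uint64_t initrd_start = (uint64_t)bp.hdr.ramdisk_image |
                                ((uint64_t)bp.ext_ramdisk_image << 32);
        uint64_t initrd_len = (uint64_t)bp.hdr.ramdisk_size |
                              ((uint64_t)bp.ext_ramdisk_size << 32);

        printf("initrd: %#llx, %llu bytes\n",
               (unsigned long long)initrd_start,
               (unsigned long long)initrd_len);
        return 0;
}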
@@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
 void __init sme_early_init(void);
-void __init sme_encrypt_kernel(void);
+void __init sme_encrypt_kernel(struct boot_params *bp);
 void __init sme_enable(struct boot_params *bp);
 int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
@@ -67,7 +67,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
 static inline void __init sme_early_init(void) { }
-static inline void __init sme_encrypt_kernel(void) { }
+static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 static inline bool sme_active(void) { return false; }

@@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
        p = fixup_pointer(&phys_base, physaddr);
        *p += load_delta - sme_get_me_mask();
 
-       /* Encrypt the kernel (if SME is active) */
-       sme_encrypt_kernel();
+       /* Encrypt the kernel and related (if SME is active) */
+       sme_encrypt_kernel(bp);
 
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
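
For reference, sme_get_me_mask() returns the SME encryption mask, the "C-bit"
that marks a page-table entry as encrypted; here it is folded into phys_base so
that later physical-address arithmetic stays consistent. A small sketch of
working with such a mask (the bit position below is illustrative only; hardware
reports the real one via CPUID function 0x8000001F):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /*
         * Illustrative only: the C-bit position is CPU-specific and is
         * read from CPUID 0x8000001F at boot; bit 47 is just a plausible
         * example value.
         */
        uint64_t sme_me_mask = 1ULL << 47;

        uint64_t paddr = 0x37c00000;                    /* hypothetical page address */
        uint64_t encrypted = paddr | sme_me_mask;       /* PTE marked encrypted */
        uint64_t decrypted = encrypted & ~sme_me_mask;  /* C-bit stripped */

        printf("encrypted PTE addr %#llx, stripped %#llx\n",
               (unsigned long long)encrypted, (unsigned long long)decrypted);
        return 0;
}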
@@ -364,16 +364,6 @@ static void __init reserve_initrd(void)
            !ramdisk_image || !ramdisk_size)
                return;         /* No initrd provided by bootloader */
 
-       /*
-        * If SME is active, this memory will be marked encrypted by the
-        * kernel when it is accessed (including relocation). However, the
-        * ramdisk image was loaded decrypted by the bootloader, so make
-        * sure that it is encrypted before accessing it. For SEV the
-        * ramdisk will already be encrypted, so only do this for SME.
-        */
-       if (sme_active())
-               sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
-
        initrd_start = 0;
 
        mapped_size = memblock_mem_size(max_pfn_mapped);

@@ -738,11 +738,12 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
        return total;
 }
 
-void __init sme_encrypt_kernel(void)
+void __init sme_encrypt_kernel(struct boot_params *bp)
 {
        unsigned long workarea_start, workarea_end, workarea_len;
        unsigned long execute_start, execute_end, execute_len;
        unsigned long kernel_start, kernel_end, kernel_len;
+       unsigned long initrd_start, initrd_end, initrd_len;
        struct sme_populate_pgd_data ppd;
        unsigned long pgtable_area_len;
        unsigned long decrypted_base;
@@ -751,14 +752,15 @@ void __init sme_encrypt_kernel(void)
                return;
 
        /*
-        * Prepare for encrypting the kernel by building new pagetables with
-        * the necessary attributes needed to encrypt the kernel in place.
+        * Prepare for encrypting the kernel and initrd by building new
+        * pagetables with the necessary attributes needed to encrypt the
+        * kernel in place.
         *
         * One range of virtual addresses will map the memory occupied
-        * by the kernel as encrypted.
+        * by the kernel and initrd as encrypted.
         *
         * Another range of virtual addresses will map the memory occupied
-        * by the kernel as decrypted and write-protected.
+        * by the kernel and initrd as decrypted and write-protected.
         *
         * The use of write-protect attribute will prevent any of the
         * memory from being cached.
@@ -769,6 +771,20 @@ void __init sme_encrypt_kernel(void)
        kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
        kernel_len = kernel_end - kernel_start;
 
+       initrd_start = 0;
+       initrd_end = 0;
+       initrd_len = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+       initrd_len = (unsigned long)bp->hdr.ramdisk_size |
+                    ((unsigned long)bp->ext_ramdisk_size << 32);
+       if (initrd_len) {
+               initrd_start = (unsigned long)bp->hdr.ramdisk_image |
+                              ((unsigned long)bp->ext_ramdisk_image << 32);
+               initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
+               initrd_len = initrd_end - initrd_start;
+       }
+#endif
+
        /* Set the encryption workarea to be immediately after the kernel */
        workarea_start = kernel_end;
@@ -791,6 +807,8 @@
         */
        pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
        pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
+       if (initrd_len)
+               pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
 
        /* PUDs and PMDs needed in the current pagetables for the workarea */
        pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
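
sme_pgtable_calc() itself is outside this hunk; the sketch below is only an
illustrative approximation of the worst-case sizing such a helper has to do for
a range mapped with 2MB pages, doubled because each region gets both an
encrypted and a decrypted mapping. It is not the kernel's exact function:

#include <stdio.h>

#define PMD_SHIFT       21      /* 2MB pages */
#define PTRS_PER_TABLE  512UL
#define PAGE_SIZE       4096UL

/*
 * Illustrative worst-case page-table space for mapping 'len' bytes with
 * 2MB PMD entries: count the PMD tables holding those entries, then the
 * PUD tables pointing at the PMD tables.
 */
static unsigned long pgtable_bytes_for(unsigned long len)
{
        unsigned long pmd_entries = (len + (1UL << PMD_SHIFT) - 1) >> PMD_SHIFT;
        unsigned long pmd_tables = (pmd_entries + PTRS_PER_TABLE - 1) / PTRS_PER_TABLE;
        unsigned long pud_tables = (pmd_tables + PTRS_PER_TABLE - 1) / PTRS_PER_TABLE;

        return (pmd_tables + pud_tables) * PAGE_SIZE;
}

int main(void)
{
        unsigned long kernel_len = 64UL << 20;  /* hypothetical 64MB kernel */
        unsigned long initrd_len = 16UL << 20;  /* hypothetical 16MB initrd */

        /* Times two: one encrypted and one decrypted mapping per region. */
        unsigned long total = pgtable_bytes_for(kernel_len) * 2 +
                              pgtable_bytes_for(initrd_len) * 2;

        printf("worst-case page-table space: %lu bytes\n", total);
        return 0;
}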
@@ -829,9 +847,9 @@
        /*
         * A new pagetable structure is being built to allow for the kernel
-        * to be encrypted. It starts with an empty PGD that will then be
-        * populated with new PUDs and PMDs as the encrypted and decrypted
-        * kernel mappings are created.
+        * and initrd to be encrypted. It starts with an empty PGD that will
+        * then be populated with new PUDs and PMDs as the encrypted and
+        * decrypted kernel mappings are created.
         */
        ppd.pgd = ppd.pgtable_area;
        memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
@@ -844,6 +862,12 @@
         * the base of the mapping.
         */
        decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
+       if (initrd_len) {
+               unsigned long check_base;
+
+               check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
+               decrypted_base = max(decrypted_base, check_base);
+       }
        decrypted_base <<= PGDIR_SHIFT;
 
        /* Add encrypted kernel (identity) mappings */
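
The decrypted alias must land in a PGD slot beyond everything already mapped,
and with this patch the initrd's end address is taken into account as well. A
standalone sketch of that index arithmetic (PGDIR_SHIFT = 39 and PTRS_PER_PGD =
512 are the standard x86-64 4-level paging values; the addresses are made up):

#include <stdio.h>

#define PGDIR_SHIFT     39
#define PTRS_PER_PGD    512UL

static unsigned long pgd_index(unsigned long addr)
{
        return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
        /* Hypothetical identity-mapped physical layout. */
        unsigned long workarea_end = 0x40000000UL;      /* 1GB */
        unsigned long initrd_end   = 0x8080000000UL;    /* above 512GB */

        /* Next free PGD slot past the workarea... */
        unsigned long decrypted_base =
                (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);

        /* ...and, new with this patch, past the initrd as well. */
        unsigned long check_base =
                (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
        if (check_base > decrypted_base)
                decrypted_base = check_base;

        decrypted_base <<= PGDIR_SHIFT;
        printf("decrypted alias starts at %#lx\n", decrypted_base);
        return 0;
}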
@@ -858,6 +882,21 @@
        ppd.vaddr_end = kernel_end + decrypted_base;
        sme_map_range_decrypted_wp(&ppd);
 
+       if (initrd_len) {
+               /* Add encrypted initrd (identity) mappings */
+               ppd.paddr = initrd_start;
+               ppd.vaddr = initrd_start;
+               ppd.vaddr_end = initrd_end;
+               sme_map_range_encrypted(&ppd);
+
+               /*
+                * Add decrypted, write-protected initrd (non-identity) mappings
+                */
+               ppd.paddr = initrd_start;
+               ppd.vaddr = initrd_start + decrypted_base;
+               ppd.vaddr_end = initrd_end + decrypted_base;
+               sme_map_range_decrypted_wp(&ppd);
+       }
+
        /* Add decrypted workarea mappings to both kernel mappings */
        ppd.paddr = workarea_start;
        ppd.vaddr = workarea_start;
@@ -873,6 +912,11 @@
        sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
                            kernel_len, workarea_start, (unsigned long)ppd.pgd);
 
+       if (initrd_len)
+               sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
+                                   initrd_len, workarea_start,
+                                   (unsigned long)ppd.pgd);
+
        /*
         * At this point we are running encrypted. Remove the mappings for
         * the decrypted areas - all that is needed for this is to remove
@@ -882,6 +926,12 @@
        ppd.vaddr_end = kernel_end + decrypted_base;
        sme_clear_pgd(&ppd);
 
+       if (initrd_len) {
+               ppd.vaddr = initrd_start + decrypted_base;
+               ppd.vaddr_end = initrd_end + decrypted_base;
+               sme_clear_pgd(&ppd);
+       }
+
        ppd.vaddr = workarea_start + decrypted_base;
        ppd.vaddr_end = workarea_end + decrypted_base;
        sme_clear_pgd(&ppd);
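
Taken together, sme_encrypt_kernel() now repeats one pattern per region (kernel,
then initrd): map it encrypted at its identity address, map a decrypted
write-protected alias at decrypted_base, encrypt through the workarea, and
finally tear the alias down. The sketch below condenses that pattern with stub
types and print-only helpers so it compiles standalone; encrypt_region() and
the stubs are hypothetical stand-ins, not kernel code:

#include <stdio.h>

/* Stub stand-ins for the kernel's SME types and helpers (print only). */
struct sme_populate_pgd_data {
        void *pgd;
        unsigned long paddr, vaddr, vaddr_end;
};

static void sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
        printf("map encrypted    %#lx-%#lx\n", ppd->vaddr, ppd->vaddr_end);
}

static void sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
        printf("map decrypted/wp %#lx-%#lx\n", ppd->vaddr, ppd->vaddr_end);
}

static void sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
        printf("clear mapping    %#lx-%#lx\n", ppd->vaddr, ppd->vaddr_end);
}

static void sme_encrypt_execute(unsigned long enc, unsigned long dec,
                                unsigned long len, unsigned long workarea,
                                unsigned long pgd)
{
        (void)workarea;
        (void)pgd;
        printf("encrypt in place %#lx (%lu bytes) via alias %#lx\n", enc, len, dec);
}

/* Hypothetical condensation of the per-region flow; ordering preserved. */
static void encrypt_region(struct sme_populate_pgd_data *ppd,
                           unsigned long start, unsigned long end,
                           unsigned long decrypted_base,
                           unsigned long workarea_start)
{
        ppd->paddr = start;
        ppd->vaddr = start;
        ppd->vaddr_end = end;
        sme_map_range_encrypted(ppd);           /* identity, encrypted */

        ppd->paddr = start;
        ppd->vaddr = start + decrypted_base;
        ppd->vaddr_end = end + decrypted_base;
        sme_map_range_decrypted_wp(ppd);        /* alias, decrypted + WP */

        sme_encrypt_execute(start, start + decrypted_base, end - start,
                            workarea_start, (unsigned long)ppd->pgd);

        ppd->vaddr = start + decrypted_base;    /* alias no longer needed */
        ppd->vaddr_end = end + decrypted_base;
        sme_clear_pgd(ppd);
}

int main(void)
{
        struct sme_populate_pgd_data ppd = { 0 };

        encrypt_region(&ppd, 0x1000000, 0x5000000, 1UL << 39, 0x5000000);
        return 0;
}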
@@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute)
        /*
         * Entry parameters:
-        *   RDI - virtual address for the encrypted kernel mapping
-        *   RSI - virtual address for the decrypted kernel mapping
-        *   RDX - length of kernel
+        *   RDI - virtual address for the encrypted mapping
+        *   RSI - virtual address for the decrypted mapping
+        *   RDX - length to encrypt
         *   RCX - virtual address of the encryption workarea, including:
         *     - stack page (PAGE_SIZE)
         *     - encryption routine page (PAGE_SIZE)
@@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute)
        addq    $PAGE_SIZE, %rax        /* Workarea encryption routine */
 
        push    %r12
-       movq    %rdi, %r10              /* Encrypted kernel */
-       movq    %rsi, %r11              /* Decrypted kernel */
-       movq    %rdx, %r12              /* Kernel length */
+       movq    %rdi, %r10              /* Encrypted area */
+       movq    %rsi, %r11              /* Decrypted area */
+       movq    %rdx, %r12              /* Area length */
 
        /* Copy encryption routine into the workarea */
        movq    %rax, %rdi              /* Workarea encryption routine */
@@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute)
        rep     movsb
 
        /* Setup registers for call */
-       movq    %r10, %rdi              /* Encrypted kernel */
-       movq    %r11, %rsi              /* Decrypted kernel */
+       movq    %r10, %rdi              /* Encrypted area */
+       movq    %r11, %rsi              /* Decrypted area */
        movq    %r8, %rdx               /* Pagetables used for encryption */
-       movq    %r12, %rcx              /* Kernel length */
+       movq    %r12, %rcx              /* Area length */
        movq    %rax, %r8               /* Workarea encryption routine */
        addq    $PAGE_SIZE, %r8         /* Workarea intermediate copy buffer */
@@ -71,7 +71,7 @@ ENDPROC(sme_encrypt_execute)
 ENTRY(__enc_copy)
 /*
- * Routine used to encrypt kernel.
+ * Routine used to encrypt memory in place.
  * This routine must be run outside of the kernel proper since
  * the kernel will be encrypted during the process. So this
  * routine is defined here and then copied to an area outside
@@ -79,19 +79,19 @@ ENTRY(__enc_copy)
  * during execution.
  *
  * On entry the registers must be:
- *   RDI - virtual address for the encrypted kernel mapping
- *   RSI - virtual address for the decrypted kernel mapping
+ *   RDI - virtual address for the encrypted mapping
+ *   RSI - virtual address for the decrypted mapping
  *   RDX - address of the pagetables to use for encryption
- *   RCX - length of kernel
+ *   RCX - length of area
  *    R8 - intermediate copy buffer
  *
  *   RAX - points to this routine
  *
- * The kernel will be encrypted by copying from the non-encrypted
- * kernel space to an intermediate buffer and then copying from the
- * intermediate buffer back to the encrypted kernel space. The physical
- * addresses of the two kernel space mappings are the same which
- * results in the kernel being encrypted "in place".
+ * The area will be encrypted by copying from the non-encrypted
+ * memory space to an intermediate buffer and then copying from the
+ * intermediate buffer back to the encrypted memory space. The physical
+ * addresses of the two mappings are the same which results in the area
+ * being encrypted "in place".
  */
        /* Enable the new page tables */
        mov     %rdx, %cr3
@@ -106,9 +106,9 @@
        push    %r15
        push    %r12
 
-       movq    %rcx, %r9               /* Save kernel length */
-       movq    %rdi, %r10              /* Save encrypted kernel address */
-       movq    %rsi, %r11              /* Save decrypted kernel address */
+       movq    %rcx, %r9               /* Save area length */
+       movq    %rdi, %r10              /* Save encrypted area address */
+       movq    %rsi, %r11              /* Save decrypted area address */
 
        /* Set the PAT register PA5 entry to write-protect */
        movl    $MSR_IA32_CR_PAT, %ecx
@@ -128,13 +128,13 @@
        movq    %r9, %r12
 
 2:
-       movq    %r11, %rsi              /* Source - decrypted kernel */
+       movq    %r11, %rsi              /* Source - decrypted area */
        movq    %r8, %rdi               /* Dest - intermediate copy buffer */
        movq    %r12, %rcx
        rep     movsb
 
        movq    %r8, %rsi               /* Source - intermediate copy buffer */
-       movq    %r10, %rdi              /* Dest - encrypted kernel */
+       movq    %r10, %rdi              /* Dest - encrypted area */
        movq    %r12, %rcx
        rep     movsb
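
The in-place trick in __enc_copy is easier to follow in C: the same physical
bytes are visible through two virtual mappings, reads go through the decrypted
one, writes go through the encrypted one, and data is bounced through an
intermediate buffer chunk by chunk. A userspace analogy (separate buffers stand
in for the two mappings; nothing is actually encrypted here, and the chunk size
is illustrative):

#include <stdio.h>
#include <string.h>

#define CHUNK 4096      /* illustrative; the real routine uses a fixed workarea buffer */

/*
 * Userspace analogy of __enc_copy: 'dec' and 'enc' stand in for the
 * decrypted and encrypted virtual mappings of the same physical area.
 * Here they are distinct buffers; in the kernel they alias the same
 * memory, and the hardware encrypts on the write path.
 */
static void enc_copy(unsigned char *enc, const unsigned char *dec,
                     size_t len, unsigned char *workarea)
{
        while (len) {
                size_t n = len < CHUNK ? len : CHUNK;

                memcpy(workarea, dec, n);       /* read via decrypted mapping */
                memcpy(enc, workarea, n);       /* write via encrypted mapping */
                dec += n;
                enc += n;
                len -= n;
        }
}

int main(void)
{
        unsigned char src[3 * CHUNK], dst[3 * CHUNK], buf[CHUNK];

        memset(src, 0xAB, sizeof(src));
        enc_copy(dst, src, sizeof(src), buf);
        printf("copied %zu bytes via intermediate buffer\n", sizeof(dst));
        return 0;
}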