Commit 634cd4b6 authored by Linus Torvalds

Merge branch 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull EFI updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Cleanup of the GOP [graphics output] handling code in the EFI stub

   - Complete refactoring of the mixed mode handling in the x86 EFI stub

   - Overhaul of the x86 EFI boot/runtime code

   - Increase robustness for mixed mode code

   - Add the ability to disable DMA at the root port level in the EFI
     stub

   - Get rid of RWX mappings in the EFI memory map and page tables,
     where possible

   - Move the support code for the old EFI memory mapping style into its
     only user, the SGI UV1+ support code.

   - Plus misc fixes, updates and smaller cleanups.

  ... and due to interactions with the RWX changes, another round of PAT
  cleanups make a guest appearance via the EFI tree - with no side
  effects intended"

* 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (75 commits)
  efi/x86: Disable instrumentation in the EFI runtime handling code
  efi/libstub/x86: Fix EFI server boot failure
  efi/x86: Disallow efi=old_map in mixed mode
  x86/boot/compressed: Relax sed symbol type regex for LLVM ld.lld
  efi/x86: avoid KASAN false positives when accessing the 1:1 mapping
  efi: Fix handling of multiple efi_fake_mem= entries
  efi: Fix efi_memmap_alloc() leaks
  efi: Add tracking for dynamically allocated memmaps
  efi: Add a flags parameter to efi_memory_map
  efi: Fix comment for efi_mem_type() wrt absent physical addresses
  efi/arm: Defer probe of PCIe backed efifb on DT systems
  efi/x86: Limit EFI old memory map to SGI UV machines
  efi/x86: Avoid RWX mappings for all of DRAM
  efi/x86: Don't map the entire kernel text RW for mixed mode
  x86/mm: Fix NX bit clearing issue in kernel_map_pages_in_pgd
  efi/libstub/x86: Fix unused-variable warning
  efi/libstub/x86: Use mandatory 16-byte stack alignment in mixed mode
  efi/libstub/x86: Use const attribute for efi_is_64bit()
  efi: Allow disabling PCI busmastering on bridges during boot
  efi/x86: Allow translating 64-bit arguments for mixed mode calls
  ...
parents d99391ec ac6119e7
...@@ -1165,10 +1165,10 @@ ...@@ -1165,10 +1165,10 @@
efi= [EFI] efi= [EFI]
Format: { "old_map", "nochunk", "noruntime", "debug", Format: { "old_map", "nochunk", "noruntime", "debug",
"nosoftreserve" } "nosoftreserve", "disable_early_pci_dma",
"no_disable_early_pci_dma" }
old_map [X86-64]: switch to the old ioremap-based EFI old_map [X86-64]: switch to the old ioremap-based EFI
runtime services mapping. 32-bit still uses this one by runtime services mapping. [Needs CONFIG_X86_UV=y]
default.
nochunk: disable reading files in "chunks" in the EFI nochunk: disable reading files in "chunks" in the EFI
boot stub, as chunking can cause problems with some boot stub, as chunking can cause problems with some
firmware implementations. firmware implementations.
...@@ -1180,6 +1180,10 @@ ...@@ -1180,6 +1180,10 @@
claim. Specify efi=nosoftreserve to disable this claim. Specify efi=nosoftreserve to disable this
reservation and treat the memory by its base type reservation and treat the memory by its base type
(i.e. EFI_CONVENTIONAL_MEMORY / "System RAM"). (i.e. EFI_CONVENTIONAL_MEMORY / "System RAM").
disable_early_pci_dma: Disable the busmaster bit on all
PCI bridges while in the EFI boot stub
no_disable_early_pci_dma: Leave the busmaster bit set
on all PCI bridges while in the EFI boot stub
efi_no_storage_paranoia [EFI; X86] efi_no_storage_paranoia [EFI; X86]
Using this parameter you can use more than 50% of Using this parameter you can use more than 50% of
......
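A minimal usage example for the new stub options (the command line below is illustrative, not taken from the patch): booting with

    efi=disable_early_pci_dma

makes the boot stub clear the busmaster bit on all PCI bridges before it hands control to the kernel proper, while

    efi=no_disable_early_pci_dma

keeps busmastering enabled on kernels that were configured to disable it by default.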
#ifndef _ASM_ALPHA_VMALLOC_H
#define _ASM_ALPHA_VMALLOC_H
#endif /* _ASM_ALPHA_VMALLOC_H */
#ifndef _ASM_ARC_VMALLOC_H
#define _ASM_ARC_VMALLOC_H
#endif /* _ASM_ARC_VMALLOC_H */
...@@ -50,19 +50,16 @@ void efi_virtmap_unload(void); ...@@ -50,19 +50,16 @@ void efi_virtmap_unload(void);
/* arch specific definitions used by the stub code */ /* arch specific definitions used by the stub code */
#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) #define efi_bs_call(func, ...) efi_system_table()->boottime->func(__VA_ARGS__)
#define __efi_call_early(f, ...) f(__VA_ARGS__) #define efi_rt_call(func, ...) efi_system_table()->runtime->func(__VA_ARGS__)
#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__) #define efi_is_native() (true)
#define efi_is_64bit() (false)
#define efi_table_attr(table, attr, instance) \ #define efi_table_attr(inst, attr) (inst->attr)
((table##_t *)instance)->attr
#define efi_call_proto(protocol, f, instance, ...) \ #define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg); struct screen_info *alloc_screen_info(void);
void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si); void free_screen_info(struct screen_info *si);
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{ {
......
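A rough before/after sketch of the accessor change (the call sites are hypothetical, not lifted from this series): the old helpers required a sys_table_arg pointer in scope and, for table attributes, an explicit type name to cast through, while the new ones obtain the system table via efi_system_table() and operate on already-typed pointers.

    /* Old style */
    status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, (void **)&buf);
    status = efi_call_runtime(get_time, &time, NULL);
    rev    = efi_table_attr(efi_system_table, hdr.revision, sys_table_arg);

    /* New style */
    status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, (void **)&buf);
    status = efi_rt_call(get_time, &time, NULL);
    rev    = efi_table_attr(efi_system_table(), hdr.revision);

This is what allows the stub to drop the system table argument from most function signatures, as seen in the alloc_screen_info()/free_screen_info() prototypes above.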
#ifndef _ASM_ARM_VMALLOC_H
#define _ASM_ARM_VMALLOC_H
#endif /* _ASM_ARM_VMALLOC_H */
...@@ -93,21 +93,17 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, ...@@ -93,21 +93,17 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1)); return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
} }
#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) #define efi_bs_call(func, ...) efi_system_table()->boottime->func(__VA_ARGS__)
#define __efi_call_early(f, ...) f(__VA_ARGS__) #define efi_rt_call(func, ...) efi_system_table()->runtime->func(__VA_ARGS__)
#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__) #define efi_is_native() (true)
#define efi_is_64bit() (true)
#define efi_table_attr(table, attr, instance) \ #define efi_table_attr(inst, attr) (inst->attr)
((table##_t *)instance)->attr
#define efi_call_proto(protocol, f, instance, ...) \ #define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
#define alloc_screen_info(x...) &screen_info #define alloc_screen_info(x...) &screen_info
static inline void free_screen_info(efi_system_table_t *sys_table_arg, static inline void free_screen_info(struct screen_info *si)
struct screen_info *si)
{ {
} }
......
#ifndef _ASM_ARM64_VMALLOC_H
#define _ASM_ARM64_VMALLOC_H
#endif /* _ASM_ARM64_VMALLOC_H */
#ifndef _ASM_C6X_VMALLOC_H
#define _ASM_C6X_VMALLOC_H
#endif /* _ASM_C6X_VMALLOC_H */
#ifndef _ASM_CSKY_VMALLOC_H
#define _ASM_CSKY_VMALLOC_H
#endif /* _ASM_CSKY_VMALLOC_H */
#ifndef _ASM_H8300_VMALLOC_H
#define _ASM_H8300_VMALLOC_H
#endif /* _ASM_H8300_VMALLOC_H */
#ifndef _ASM_HEXAGON_VMALLOC_H
#define _ASM_HEXAGON_VMALLOC_H
#endif /* _ASM_HEXAGON_VMALLOC_H */
#ifndef _ASM_IA64_VMALLOC_H
#define _ASM_IA64_VMALLOC_H
#endif /* _ASM_IA64_VMALLOC_H */
#ifndef _ASM_M68K_VMALLOC_H
#define _ASM_M68K_VMALLOC_H
#endif /* _ASM_M68K_VMALLOC_H */
#ifndef _ASM_MICROBLAZE_VMALLOC_H
#define _ASM_MICROBLAZE_VMALLOC_H
#endif /* _ASM_MICROBLAZE_VMALLOC_H */
#ifndef _ASM_MIPS_VMALLOC_H
#define _ASM_MIPS_VMALLOC_H
#endif /* _ASM_MIPS_VMALLOC_H */
#ifndef _ASM_NDS32_VMALLOC_H
#define _ASM_NDS32_VMALLOC_H
#endif /* _ASM_NDS32_VMALLOC_H */
#ifndef _ASM_NIOS2_VMALLOC_H
#define _ASM_NIOS2_VMALLOC_H
#endif /* _ASM_NIOS2_VMALLOC_H */
#ifndef _ASM_OPENRISC_VMALLOC_H
#define _ASM_OPENRISC_VMALLOC_H
#endif /* _ASM_OPENRISC_VMALLOC_H */
#ifndef _ASM_PARISC_VMALLOC_H
#define _ASM_PARISC_VMALLOC_H
#endif /* _ASM_PARISC_VMALLOC_H */
#ifndef _ASM_POWERPC_VMALLOC_H
#define _ASM_POWERPC_VMALLOC_H
#endif /* _ASM_POWERPC_VMALLOC_H */
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H
#endif /* _ASM_RISCV_VMALLOC_H */
#ifndef _ASM_S390_VMALLOC_H
#define _ASM_S390_VMALLOC_H
#endif /* _ASM_S390_VMALLOC_H */
#ifndef _ASM_SH_VMALLOC_H
#define _ASM_SH_VMALLOC_H
#endif /* _ASM_SH_VMALLOC_H */
#ifndef _ASM_SPARC_VMALLOC_H
#define _ASM_SPARC_VMALLOC_H
#endif /* _ASM_SPARC_VMALLOC_H */
#ifndef _ASM_UM_VMALLOC_H
#define _ASM_UM_VMALLOC_H
#endif /* _ASM_UM_VMALLOC_H */
#ifndef _ASM_UNICORE32_VMALLOC_H
#define _ASM_UNICORE32_VMALLOC_H
#endif /* _ASM_UNICORE32_VMALLOC_H */
...@@ -1513,7 +1513,7 @@ config X86_CPA_STATISTICS ...@@ -1513,7 +1513,7 @@ config X86_CPA_STATISTICS
bool "Enable statistic for Change Page Attribute" bool "Enable statistic for Change Page Attribute"
depends on DEBUG_FS depends on DEBUG_FS
---help--- ---help---
Expose statistics about the Change Page Attribute mechanims, which Expose statistics about the Change Page Attribute mechanism, which
helps to determine the effectiveness of preserving large and huge helps to determine the effectiveness of preserving large and huge
page mappings when mapping protections are changed. page mappings when mapping protections are changed.
...@@ -1994,6 +1994,7 @@ config EFI ...@@ -1994,6 +1994,7 @@ config EFI
config EFI_STUB config EFI_STUB
bool "EFI stub support" bool "EFI stub support"
depends on EFI && !X86_USE_3DNOW depends on EFI && !X86_USE_3DNOW
depends on $(cc-option,-mabi=ms) || X86_32
select RELOCATABLE select RELOCATABLE
---help--- ---help---
This kernel feature allows a bzImage to be loaded directly This kernel feature allows a bzImage to be loaded directly
......
...@@ -88,7 +88,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE ...@@ -88,7 +88,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
quiet_cmd_zoffset = ZOFFSET $@ quiet_cmd_zoffset = ZOFFSET $@
cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
......
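For reference, a hedged example of what the zoffset rule produces (the address is made up): an nm output line such as

    0000000000003b70 T efi64_stub_entry

is rewritten into

    #define ZO_efi64_stub_entry 0x0000000000003b70

The change widens the accepted symbol-type letter from a fixed upper-case set to any letter, so symbols that LLVM's ld.lld classifies with a different type character than GNU ld are still picked up.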
...@@ -89,7 +89,7 @@ vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o ...@@ -89,7 +89,7 @@ vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o \
$(objtree)/drivers/firmware/efi/libstub/lib.a $(objtree)/drivers/firmware/efi/libstub/lib.a
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
......
This diff is collapsed.
...@@ -12,22 +12,20 @@ ...@@ -12,22 +12,20 @@
#define DESC_TYPE_CODE_DATA (1 << 0) #define DESC_TYPE_CODE_DATA (1 << 0)
typedef struct { typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
u32 get_mode;
u32 set_mode;
u32 blt;
} efi_uga_draw_protocol_32_t;
typedef struct {
u64 get_mode;
u64 set_mode;
u64 blt;
} efi_uga_draw_protocol_64_t;
typedef struct { union efi_uga_draw_protocol {
void *get_mode; struct {
efi_status_t (__efiapi *get_mode)(efi_uga_draw_protocol_t *,
u32*, u32*, u32*, u32*);
void *set_mode; void *set_mode;
void *blt; void *blt;
} efi_uga_draw_protocol_t; };
struct {
u32 get_mode;
u32 set_mode;
u32 blt;
} mixed_mode;
};
#endif /* BOOT_COMPRESSED_EBOOT_H */ #endif /* BOOT_COMPRESSED_EBOOT_H */
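The union above is the general mixed-mode pattern used throughout this series: the anonymous struct holds native, __efiapi-annotated function pointers, while the mixed_mode view mirrors the 32-bit firmware layout as plain u32 slots. Roughly (a sketch of the dispatch the x86 stub performs, not the literal macro from the patch):

    u32 width, height, depth, refresh;
    efi_status_t status;

    if (efi_is_native()) {
        /* Native firmware: call through the typed function pointer. */
        status = uga->get_mode(uga, &width, &height, &depth, &refresh);
    } else {
        /* Mixed mode: the same slot holds a 32-bit entry point, reached
         * through the thunk defined further down. Stub memory sits below
         * 4 GiB, so truncating the pointer arguments to 32 bits is safe. */
        status = __efi64_thunk(uga->mixed_mode.get_mode, uga,
                               &width, &height, &depth, &refresh);
    }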
/* SPDX-License-Identifier: GPL-2.0 */
/*
* EFI call stub for IA32.
*
* This stub allows us to make EFI calls in physical mode with interrupts
* turned off. Note that this implementation is different from the one in
* arch/x86/platform/efi/efi_stub_32.S because we're _already_ in physical
* mode at this point.
*/
#include <linux/linkage.h>
#include <asm/page_types.h>
/*
* efi_call_phys(void *, ...) is a function with variable parameters.
* All the callers of this function ensure that all the parameters are 4 bytes wide.
*/
/*
* In the gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee
* saved. Save all of them at the beginning of this function and restore them
* at the end, no matter how many are actually used, because we cannot assume
* that the EFI runtime service functions follow the gcc calling convention.
*/
.text
SYM_FUNC_START(efi_call_phys)
/*
* 0. The function can only be called from within the Linux kernel, so CS has
* been set to 0x0010 and DS and SS to 0x0018. In EFI the values of these
* registers are the same and the corresponding GDT entries are identical, so
* nothing needs to be done about the segment registers or the GDT beyond
* switching the GDT base register in the prologue and epilogue.
*/
/*
* 1. Because we haven't been relocated by this point we need to
* use relative addressing.
*/
call 1f
1: popl %edx
subl $1b, %edx
/*
* 2. Now on the top of stack is the return
* address in the caller of efi_call_phys(), then parameter 1,
* parameter 2, ..., param n. To make things easy, we save the return
* address of efi_call_phys in a global variable.
*/
popl %ecx
movl %ecx, saved_return_addr(%edx)
/* get the function pointer into ECX*/
popl %ecx
movl %ecx, efi_rt_function_ptr(%edx)
/*
* 3. Call the physical function.
*/
call *%ecx
/*
* 4. Balance the stack. And because EAX contain the return value,
* we'd better not clobber it. We need to calculate our address
* again because %ecx and %edx are not preserved across EFI function
* calls.
*/
call 1f
1: popl %edx
subl $1b, %edx
movl efi_rt_function_ptr(%edx), %ecx
pushl %ecx
/*
* 5. Push the saved return address onto the stack and return.
*/
movl saved_return_addr(%edx), %ecx
pushl %ecx
ret
SYM_FUNC_END(efi_call_phys)
.previous
.data
saved_return_addr:
.long 0
efi_rt_function_ptr:
.long 0
#include <asm/segment.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include "../../platform/efi/efi_stub_64.S"
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
* needs to be able to service interrupts. * needs to be able to service interrupts.
* *
* On the plus side, we don't have to worry about mangling 64-bit * On the plus side, we don't have to worry about mangling 64-bit
* addresses into 32-bits because we're executing with an identify * addresses into 32-bits because we're executing with an identity
* mapped pagetable and haven't transitioned to 64-bit virtual addresses * mapped pagetable and haven't transitioned to 64-bit virtual addresses
* yet. * yet.
*/ */
...@@ -23,16 +23,13 @@ ...@@ -23,16 +23,13 @@
.code64 .code64
.text .text
SYM_FUNC_START(efi64_thunk) SYM_FUNC_START(__efi64_thunk)
push %rbp push %rbp
push %rbx push %rbx
subq $8, %rsp leaq 1f(%rip), %rbp
leaq efi_exit32(%rip), %rax leaq efi_gdt64(%rip), %rbx
movl %eax, 4(%rsp) movl %ebx, 2(%rbx) /* Fixup the gdt base address */
leaq efi_gdt64(%rip), %rax
movl %eax, (%rsp)
movl %eax, 2(%rax) /* Fixup the gdt base address */
movl %ds, %eax movl %ds, %eax
push %rax push %rax
...@@ -48,15 +45,10 @@ SYM_FUNC_START(efi64_thunk) ...@@ -48,15 +45,10 @@ SYM_FUNC_START(efi64_thunk)
movl %esi, 0x0(%rsp) movl %esi, 0x0(%rsp)
movl %edx, 0x4(%rsp) movl %edx, 0x4(%rsp)
movl %ecx, 0x8(%rsp) movl %ecx, 0x8(%rsp)
movq %r8, %rsi movl %r8d, 0xc(%rsp)
movl %esi, 0xc(%rsp) movl %r9d, 0x10(%rsp)
movq %r9, %rsi
movl %esi, 0x10(%rsp)
sgdt save_gdt(%rip) sgdt 0x14(%rsp)
leaq 1f(%rip), %rbx
movq %rbx, func_rt_ptr(%rip)
/* /*
* Switch to gdt with 32-bit segments. This is the firmware GDT * Switch to gdt with 32-bit segments. This is the firmware GDT
...@@ -71,9 +63,9 @@ SYM_FUNC_START(efi64_thunk) ...@@ -71,9 +63,9 @@ SYM_FUNC_START(efi64_thunk)
pushq %rax pushq %rax
lretq lretq
1: addq $32, %rsp 1: lgdt 0x14(%rsp)
addq $32, %rsp
lgdt save_gdt(%rip) movq %rdi, %rax
pop %rbx pop %rbx
movl %ebx, %ss movl %ebx, %ss
...@@ -85,26 +77,13 @@ SYM_FUNC_START(efi64_thunk) ...@@ -85,26 +77,13 @@ SYM_FUNC_START(efi64_thunk)
/* /*
* Convert 32-bit status code into 64-bit. * Convert 32-bit status code into 64-bit.
*/ */
test %rax, %rax roll $1, %eax
jz 1f rorq $1, %rax
movl %eax, %ecx
andl $0x0fffffff, %ecx
andl $0xf0000000, %eax
shl $32, %rax
or %rcx, %rax
1:
addq $8, %rsp
pop %rbx pop %rbx
pop %rbp pop %rbp
ret ret
SYM_FUNC_END(efi64_thunk) SYM_FUNC_END(__efi64_thunk)
SYM_FUNC_START_LOCAL(efi_exit32)
movq func_rt_ptr(%rip), %rax
push %rax
mov %rdi, %rax
ret
SYM_FUNC_END(efi_exit32)
.code32 .code32
/* /*
...@@ -144,9 +123,7 @@ SYM_FUNC_START_LOCAL(efi_enter32) ...@@ -144,9 +123,7 @@ SYM_FUNC_START_LOCAL(efi_enter32)
*/ */
cli cli
movl 56(%esp), %eax lgdtl (%ebx)
movl %eax, 2(%eax)
lgdtl (%eax)
movl %cr4, %eax movl %cr4, %eax
btsl $(X86_CR4_PAE_BIT), %eax btsl $(X86_CR4_PAE_BIT), %eax
...@@ -163,9 +140,8 @@ SYM_FUNC_START_LOCAL(efi_enter32) ...@@ -163,9 +140,8 @@ SYM_FUNC_START_LOCAL(efi_enter32)
xorl %eax, %eax xorl %eax, %eax
lldt %ax lldt %ax
movl 60(%esp), %eax
pushl $__KERNEL_CS pushl $__KERNEL_CS
pushl %eax pushl %ebp
/* Enable paging */ /* Enable paging */
movl %cr0, %eax movl %cr0, %eax
...@@ -181,13 +157,6 @@ SYM_DATA_START(efi32_boot_gdt) ...@@ -181,13 +157,6 @@ SYM_DATA_START(efi32_boot_gdt)
.quad 0 .quad 0
SYM_DATA_END(efi32_boot_gdt) SYM_DATA_END(efi32_boot_gdt)
SYM_DATA_START_LOCAL(save_gdt)
.word 0
.quad 0
SYM_DATA_END(save_gdt)
SYM_DATA_LOCAL(func_rt_ptr, .quad 0)
SYM_DATA_START(efi_gdt64) SYM_DATA_START(efi_gdt64)
.word efi_gdt64_end - efi_gdt64 .word efi_gdt64_end - efi_gdt64
.long 0 /* Filled out by user */ .long 0 /* Filled out by user */
......
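The two-instruction status conversion above replaces the old test/shift/or sequence: roll $1 rotates the 32-bit status left so the error flag (bit 31) lands in bit 0, with the upper half of the register cleared by the 32-bit write, and rorq $1 then rotates the full 64-bit value right, parking that flag in bit 63 where 64-bit EFI status codes keep it. An illustrative C equivalent (not part of the patch):

    /* "roll $1, %eax; rorq $1, %rax" expressed in C. */
    static unsigned long efi32_status_to_efi64(unsigned int status)
    {
        unsigned long v;

        /* roll: 32-bit rotate left; the 32-bit result zero-extends,
         * so bits 32..63 are cleared. */
        v = ((status << 1) | (status >> 31)) & 0xffffffffUL;

        /* rorq: 64-bit rotate right moves the error flag (now bit 0)
         * up to bit 63 and restores the payload in bits 0..30. */
        return (v >> 1) | (v << 63);
    }

For example, the 32-bit EFI_UNSUPPORTED value 0x80000003 becomes 0x8000000000000003.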
...@@ -145,67 +145,16 @@ SYM_FUNC_START(startup_32) ...@@ -145,67 +145,16 @@ SYM_FUNC_START(startup_32)
SYM_FUNC_END(startup_32) SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_STUB #ifdef CONFIG_EFI_STUB
/*
* We don't need the return address, so set up the stack so efi_main() can find
* its arguments.
*/
SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp
call 1f
1: popl %esi
subl $1b, %esi
popl %ecx
movl %ecx, efi32_config(%esi) /* Handle */
popl %ecx
movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */
/* Relocate efi_config->call() */
leal efi32_config(%esi), %eax
add %esi, 40(%eax)
pushl %eax
call make_boot_params
cmpl $0, %eax
je fail
movl %esi, BP_code32_start(%eax)
popl %ecx
pushl %eax
pushl %ecx
jmp 2f /* Skip efi_config initialization */
SYM_FUNC_END(efi_pe_entry)
SYM_FUNC_START(efi32_stub_entry) SYM_FUNC_START(efi32_stub_entry)
SYM_FUNC_START_ALIAS(efi_stub_entry)
add $0x4, %esp add $0x4, %esp
popl %ecx
popl %edx
call 1f
1: popl %esi
subl $1b, %esi
movl %ecx, efi32_config(%esi) /* Handle */
movl %edx, efi32_config+8(%esi) /* EFI System table pointer */
/* Relocate efi_config->call() */
leal efi32_config(%esi), %eax
add %esi, 40(%eax)
pushl %eax
2:
call efi_main call efi_main
cmpl $0, %eax
movl %eax, %esi movl %eax, %esi
jne 2f
fail:
/* EFI init failed, so hang. */
hlt
jmp fail
2:
movl BP_code32_start(%esi), %eax movl BP_code32_start(%esi), %eax
leal startup_32(%eax), %eax leal startup_32(%eax), %eax
jmp *%eax jmp *%eax
SYM_FUNC_END(efi32_stub_entry) SYM_FUNC_END(efi32_stub_entry)
SYM_FUNC_END_ALIAS(efi_stub_entry)
#endif #endif
.text .text
...@@ -262,15 +211,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) ...@@ -262,15 +211,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
jmp *%eax jmp *%eax
SYM_FUNC_END(.Lrelocated) SYM_FUNC_END(.Lrelocated)
#ifdef CONFIG_EFI_STUB
.data
efi32_config:
.fill 5,8,0
.long efi_call_phys
.long 0
.byte 0
#endif
/* /*
* Stack and heap for uncompression * Stack and heap for uncompression
*/ */
......
...@@ -208,10 +208,12 @@ SYM_FUNC_START(startup_32) ...@@ -208,10 +208,12 @@ SYM_FUNC_START(startup_32)
pushl $__KERNEL_CS pushl $__KERNEL_CS
leal startup_64(%ebp), %eax leal startup_64(%ebp), %eax
#ifdef CONFIG_EFI_MIXED #ifdef CONFIG_EFI_MIXED
movl efi32_config(%ebp), %ebx movl efi32_boot_args(%ebp), %edi
cmp $0, %ebx cmp $0, %edi
jz 1f jz 1f
leal handover_entry(%ebp), %eax leal efi64_stub_entry(%ebp), %eax
movl %esi, %edx
movl efi32_boot_args+4(%ebp), %esi
1: 1:
#endif #endif
pushl %eax pushl %eax
...@@ -232,17 +234,14 @@ SYM_FUNC_START(efi32_stub_entry) ...@@ -232,17 +234,14 @@ SYM_FUNC_START(efi32_stub_entry)
popl %edx popl %edx
popl %esi popl %esi
leal (BP_scratch+4)(%esi), %esp
call 1f call 1f
1: pop %ebp 1: pop %ebp
subl $1b, %ebp subl $1b, %ebp
movl %ecx, efi32_config(%ebp) movl %ecx, efi32_boot_args(%ebp)
movl %edx, efi32_config+8(%ebp) movl %edx, efi32_boot_args+4(%ebp)
sgdtl efi32_boot_gdt(%ebp) sgdtl efi32_boot_gdt(%ebp)
movb $0, efi_is64(%ebp)
leal efi32_config(%ebp), %eax
movl %eax, efi_config(%ebp)
/* Disable paging */ /* Disable paging */
movl %cr0, %eax movl %cr0, %eax
...@@ -450,70 +449,17 @@ trampoline_return: ...@@ -450,70 +449,17 @@ trampoline_return:
SYM_CODE_END(startup_64) SYM_CODE_END(startup_64)
#ifdef CONFIG_EFI_STUB #ifdef CONFIG_EFI_STUB
.org 0x390
/* The entry point for the PE/COFF executable is efi_pe_entry. */ SYM_FUNC_START(efi64_stub_entry)
SYM_FUNC_START(efi_pe_entry) SYM_FUNC_START_ALIAS(efi_stub_entry)
movq %rcx, efi64_config(%rip) /* Handle */ and $~0xf, %rsp /* realign the stack */
movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */
leaq efi64_config(%rip), %rax
movq %rax, efi_config(%rip)
call 1f
1: popq %rbp
subq $1b, %rbp
/*
* Relocate efi_config->call().
*/
addq %rbp, efi64_config+40(%rip)
movq %rax, %rdi
call make_boot_params
cmpq $0,%rax
je fail
mov %rax, %rsi
leaq startup_32(%rip), %rax
movl %eax, BP_code32_start(%rsi)
jmp 2f /* Skip the relocation */
handover_entry:
call 1f
1: popq %rbp
subq $1b, %rbp
/*
* Relocate efi_config->call().
*/
movq efi_config(%rip), %rax
addq %rbp, 40(%rax)
2:
movq efi_config(%rip), %rdi
call efi_main call efi_main
movq %rax,%rsi movq %rax,%rsi
cmpq $0,%rax
jne 2f
fail:
/* EFI init failed, so hang. */
hlt
jmp fail
2:
movl BP_code32_start(%esi), %eax movl BP_code32_start(%esi), %eax
leaq startup_64(%rax), %rax leaq startup_64(%rax), %rax
jmp *%rax jmp *%rax
SYM_FUNC_END(efi_pe_entry)
.org 0x390
SYM_FUNC_START(efi64_stub_entry)
movq %rdi, efi64_config(%rip) /* Handle */
movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */
leaq efi64_config(%rip), %rax
movq %rax, efi_config(%rip)
movq %rdx, %rsi
jmp handover_entry
SYM_FUNC_END(efi64_stub_entry) SYM_FUNC_END(efi64_stub_entry)
SYM_FUNC_END_ALIAS(efi_stub_entry)
#endif #endif
.text .text
...@@ -682,24 +628,11 @@ SYM_DATA_START_LOCAL(gdt) ...@@ -682,24 +628,11 @@ SYM_DATA_START_LOCAL(gdt)
.quad 0x0000000000000000 /* TS continued */ .quad 0x0000000000000000 /* TS continued */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
#ifdef CONFIG_EFI_STUB
SYM_DATA_LOCAL(efi_config, .quad 0)
#ifdef CONFIG_EFI_MIXED #ifdef CONFIG_EFI_MIXED
SYM_DATA_START(efi32_config) SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0)
.fill 5,8,0 SYM_DATA(efi_is64, .byte 1)
.quad efi64_thunk
.byte 0
SYM_DATA_END(efi32_config)
#endif #endif
SYM_DATA_START(efi64_config)
.fill 5,8,0
.quad efi_call
.byte 1
SYM_DATA_END(efi64_config)
#endif /* CONFIG_EFI_STUB */
/* /*
* Stack and heap for uncompression * Stack and heap for uncompression
*/ */
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/percpu-defs.h> #include <linux/percpu-defs.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/intel_ds.h> #include <asm/intel_ds.h>
#include <asm/pgtable_areas.h>
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
...@@ -134,15 +135,6 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks); ...@@ -134,15 +135,6 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
extern void setup_cpu_entry_areas(void); extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
/* Single page reserved for the readonly IDT mapping: */
#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
#define CPU_ENTRY_AREA_MAP_SIZE \
(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
extern struct cpu_entry_area *get_cpu_entry_area(int cpu); extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
static inline struct entry_stack *cpu_entry_stack(int cpu) static inline struct entry_stack *cpu_entry_stack(int cpu)
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAT_H #ifndef _ASM_X86_MEMTYPE_H
#define _ASM_X86_PAT_H #define _ASM_X86_MEMTYPE_H
#include <linux/types.h> #include <linux/types.h>
#include <asm/pgtable_types.h> #include <asm/pgtable_types.h>
bool pat_enabled(void); extern bool pat_enabled(void);
void pat_disable(const char *reason); extern void pat_disable(const char *reason);
extern void pat_init(void); extern void pat_init(void);
extern void init_cache_modes(void); extern void init_cache_modes(void);
extern int reserve_memtype(u64 start, u64 end, extern int memtype_reserve(u64 start, u64 end,
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
extern int free_memtype(u64 start, u64 end); extern int memtype_free(u64 start, u64 end);
extern int kernel_map_sync_memtype(u64 base, unsigned long size, extern int memtype_kernel_map_sync(u64 base, unsigned long size,
enum page_cache_mode pcm); enum page_cache_mode pcm);
int io_reserve_memtype(resource_size_t start, resource_size_t end, extern int memtype_reserve_io(resource_size_t start, resource_size_t end,
enum page_cache_mode *pcm); enum page_cache_mode *pcm);
void io_free_memtype(resource_size_t start, resource_size_t end); extern void memtype_free_io(resource_size_t start, resource_size_t end);
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn); extern bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
#endif /* _ASM_X86_PAT_H */ #endif /* _ASM_X86_MEMTYPE_H */
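The renamed interface keeps the reserve/sync/free pairing of the old PAT API, just with a consistent memtype_ prefix. A minimal caller sketch (phys and size are placeholders; the pattern mirrors the __ioremap_caller() hunk further down):

    enum page_cache_mode new_pcm;

    /* Reserve the physical range with the requested cache mode; PAT may
     * downgrade the request and reports the effective mode in new_pcm. */
    if (memtype_reserve(phys, phys + size, _PAGE_CACHE_MODE_UC, &new_pcm))
        return NULL;

    /* Keep any existing kernel mapping of the range consistent with it. */
    memtype_kernel_map_sync(phys, size, new_pcm);

    /* ... map and use the range ... */

    /* Drop the reservation when the mapping goes away. */
    memtype_free(phys, phys + size);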
...@@ -69,14 +69,6 @@ struct ldt_struct { ...@@ -69,14 +69,6 @@ struct ldt_struct {
int slot; int slot;
}; };
/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
static inline void *ldt_slot_va(int slot)
{
return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}
/* /*
* Used for LDT copy/destruction. * Used for LDT copy/destruction.
*/ */
...@@ -99,87 +91,21 @@ static inline void destroy_context_ldt(struct mm_struct *mm) { } ...@@ -99,87 +91,21 @@ static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { } static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif #endif
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL #ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt; extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
/* READ_ONCE synchronizes with smp_store_release */
ldt = READ_ONCE(mm->context.ldt);
/*
* Any change to mm->context.ldt is followed by an IPI to all
* CPUs with the mm active. The LDT will not be freed until
* after the IPI is handled by all such CPUs. This means that,
* if the ldt_struct changes before we return, the values we see
* will be safe, and the new values will be loaded before we run
* any user code.
*
* NB: don't try to convert this to use RCU without extreme care.
* We would still need IRQs off, because we don't want to change
* the local LDT after an IPI loaded a newer value than the one
* that we can see.
*/
if (unlikely(ldt)) {
if (static_cpu_has(X86_FEATURE_PTI)) {
if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
/*
* Whoops -- either the new LDT isn't mapped
* (if slot == -1) or is mapped into a bogus
* slot (if slot > 1).
*/
clear_LDT();
return;
}
/*
* If page table isolation is enabled, ldt->entries
* will not be mapped in the userspace pagetables.
* Tell the CPU to access the LDT through the alias
* at ldt_slot_va(ldt->slot).
*/
set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
} else {
set_ldt(ldt->entries, ldt->nr_entries);
}
} else {
clear_LDT();
}
#else #else
static inline void load_mm_ldt(struct mm_struct *mm)
{
clear_LDT(); clear_LDT();
#endif
} }
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{ {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
* Load the LDT if either the old or new mm had an LDT.
*
* An mm will never go from having an LDT to not having an LDT. Two
* mms never share an LDT, so we don't gain anything by checking to
* see whether the LDT changed. There's also no guarantee that
* prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
* then prev->context.ldt will also be non-NULL.
*
* If we really cared, we could optimize the case where prev == next
* and we're exiting lazy mode. Most of the time, if this happens,
* we don't actually need to reload LDTR, but modify_ldt() is mostly
* used by legacy code and emulators where we don't need this level of
* performance.
*
* This uses | instead of || because it generates better code.
*/
if (unlikely((unsigned long)prev->context.ldt |
(unsigned long)next->context.ldt))
load_mm_ldt(next);
#endif
DEBUG_LOCKS_WARN_ON(preemptible()); DEBUG_LOCKS_WARN_ON(preemptible());
} }
#endif
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
/* /*
* Init a new mm. Used on mm copies, like at fork() * Init a new mm. Used on mm copies, like at fork()
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#define _ASM_X86_MTRR_H #define _ASM_X86_MTRR_H
#include <uapi/asm/mtrr.h> #include <uapi/asm/mtrr.h>
#include <asm/pat.h> #include <asm/memtype.h>
/* /*
...@@ -86,7 +86,7 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) ...@@ -86,7 +86,7 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
} }
static inline void mtrr_bp_init(void) static inline void mtrr_bp_init(void)
{ {
pat_disable("MTRRs disabled, skipping PAT initialization too."); pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
} }
#define mtrr_ap_init() do {} while (0) #define mtrr_ap_init() do {} while (0)
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/numa.h> #include <linux/numa.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/x86_init.h> #include <asm/x86_init.h>
struct pci_sysdata { struct pci_sysdata {
......
#ifndef _ASM_X86_PGTABLE_32_AREAS_H
#define _ASM_X86_PGTABLE_32_AREAS_H
#include <asm/cpu_entry_area.h>
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#ifndef __ASSEMBLY__
extern bool __vmalloc_start_set; /* set once high_memory is set */
#endif
#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
#define CPU_ENTRY_AREA_PAGES (NR_CPUS * DIV_ROUND_UP(sizeof(struct cpu_entry_area), PAGE_SIZE))
/* The +1 is for the readonly IDT page: */
#define CPU_ENTRY_AREA_BASE \
((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
#define LDT_BASE_ADDR \
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
#define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE)
#define PKMAP_BASE \
((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE)
#endif
#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
#endif /* _ASM_X86_PGTABLE_32_AREAS_H */
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_32_DEFS_H #ifndef _ASM_X86_PGTABLE_32_TYPES_H
#define _ASM_X86_PGTABLE_32_DEFS_H #define _ASM_X86_PGTABLE_32_TYPES_H
/* /*
* The Linux x86 paging architecture is 'compile-time dual-mode', it * The Linux x86 paging architecture is 'compile-time dual-mode', it
...@@ -20,55 +20,4 @@ ...@@ -20,55 +20,4 @@
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1)) #define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* Just any arbitrary offset to the start of the vmalloc VM area: the #endif /* _ASM_X86_PGTABLE_32_TYPES_H */
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#ifndef __ASSEMBLY__
extern bool __vmalloc_start_set; /* set once high_memory is set */
#endif
#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
/*
* This is an upper bound on sizeof(struct cpu_entry_area) / PAGE_SIZE.
* Define this here and validate with BUILD_BUG_ON() in cpu_entry_area.c
* to avoid include recursion hell.
*/
#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 43)
/* The +1 is for the readonly IDT page: */
#define CPU_ENTRY_AREA_BASE \
((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
#define LDT_BASE_ADDR \
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
#define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE)
#define PKMAP_BASE \
((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE)
#endif
#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
#ifndef _ASM_X86_PGTABLE_AREAS_H
#define _ASM_X86_PGTABLE_AREAS_H
#ifdef CONFIG_X86_32
# include <asm/pgtable_32_areas.h>
#endif
/* Single page reserved for the readonly IDT mapping: */
#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
#endif /* _ASM_X86_PGTABLE_AREAS_H */
...@@ -110,11 +110,6 @@ ...@@ -110,11 +110,6 @@
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
#define _PAGE_TABLE_NOENC (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |\
_PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE_NOENC (_PAGE_PRESENT | _PAGE_RW | \
_PAGE_ACCESSED | _PAGE_DIRTY)
/* /*
* Set of bits not changed in pte_modify. The pte's * Set of bits not changed in pte_modify. The pte's
* protection key is treated like _PAGE_RW, for * protection key is treated like _PAGE_RW, for
...@@ -142,74 +137,87 @@ enum page_cache_mode { ...@@ -142,74 +137,87 @@ enum page_cache_mode {
_PAGE_CACHE_MODE_UC = 3, _PAGE_CACHE_MODE_UC = 3,
_PAGE_CACHE_MODE_WT = 4, _PAGE_CACHE_MODE_WT = 4,
_PAGE_CACHE_MODE_WP = 5, _PAGE_CACHE_MODE_WP = 5,
_PAGE_CACHE_MODE_NUM = 8 _PAGE_CACHE_MODE_NUM = 8
}; };
#endif #endif
#define _PAGE_CACHE_MASK (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT) #define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC)) #define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP)) #define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP))
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define __PP _PAGE_PRESENT
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ #define __RW _PAGE_RW
_PAGE_ACCESSED | _PAGE_NX) #define _USR _PAGE_USER
#define ___A _PAGE_ACCESSED
#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \ #define ___D _PAGE_DIRTY
_PAGE_USER | _PAGE_ACCESSED) #define ___G _PAGE_GLOBAL
#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ #define __NX _PAGE_NX
_PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED)
#define __PAGE_KERNEL_EXEC \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
#define __PAGE_KERNEL_WP (__PAGE_KERNEL | _PAGE_CACHE_WP)
#define __PAGE_KERNEL_IO (__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE)
#ifndef __ASSEMBLY__ #define _ENC _PAGE_ENC
#define __WP _PAGE_CACHE_WP
#define __NC _PAGE_NOCACHE
#define _PSE _PAGE_PSE
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask)) #define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
#define __pg(x) __pgprot(x)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define PAGE_NONE __pg( 0| 0| 0|___A| 0| 0| 0|___G)
#define PAGE_SHARED __pg(__PP|__RW|_USR|___A|__NX| 0| 0| 0)
#define PAGE_SHARED_EXEC __pg(__PP|__RW|_USR|___A| 0| 0| 0| 0)
#define PAGE_COPY_NOEXEC __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_COPY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0)
#define PAGE_COPY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_READONLY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_READONLY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0)
#define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G)
#define _KERNPG_TABLE_NOENC (__PP|__RW| 0|___A| 0|___D| 0| 0)
#define _KERNPG_TABLE (__PP|__RW| 0|___A| 0|___D| 0| 0| _ENC)
#define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0)
#define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC)
#define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_RX (__PP| 0| 0|___A| 0|___D| 0|___G)
#define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC)
#define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G)
#define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW| 0|___A| 0|___D|_PSE|___G)
#define __PAGE_KERNEL_WP (__PP|__RW| 0|___A|__NX|___D| 0|___G| __WP)
#define __PAGE_KERNEL_IO __PAGE_KERNEL
#define __PAGE_KERNEL_IO_NOCACHE __PAGE_KERNEL_NOCACHE
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
_PAGE_DIRTY | _PAGE_ENC)
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
#define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _PAGE_ENC) #ifndef __ASSEMBLY__
#define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _PAGE_ENC)
#define __PAGE_KERNEL_NOENC (__PAGE_KERNEL) #define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _ENC)
#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP) #define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _ENC)
#define __PAGE_KERNEL_NOENC (__PAGE_KERNEL | 0)
#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP | 0)
#define default_pgprot(x) __pgprot((x) & __default_kernel_pte_mask) #define __pgprot_mask(x) __pgprot((x) & __default_kernel_pte_mask)
#define PAGE_KERNEL default_pgprot(__PAGE_KERNEL | _PAGE_ENC) #define PAGE_KERNEL __pgprot_mask(__PAGE_KERNEL | _ENC)
#define PAGE_KERNEL_NOENC default_pgprot(__PAGE_KERNEL) #define PAGE_KERNEL_NOENC __pgprot_mask(__PAGE_KERNEL | 0)
#define PAGE_KERNEL_RO default_pgprot(__PAGE_KERNEL_RO | _PAGE_ENC) #define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC)
#define PAGE_KERNEL_EXEC default_pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC) #define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC)
#define PAGE_KERNEL_EXEC_NOENC default_pgprot(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0)
#define PAGE_KERNEL_RX default_pgprot(__PAGE_KERNEL_RX | _PAGE_ENC) #define PAGE_KERNEL_RX __pgprot_mask(__PAGE_KERNEL_RX | _ENC)
#define PAGE_KERNEL_NOCACHE default_pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC) #define PAGE_KERNEL_NOCACHE __pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC)
#define PAGE_KERNEL_LARGE default_pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC) #define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC)
#define PAGE_KERNEL_LARGE_EXEC default_pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC) #define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
#define PAGE_KERNEL_VVAR default_pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC) #define PAGE_KERNEL_VVAR __pgprot_mask(__PAGE_KERNEL_VVAR | _ENC)
#define PAGE_KERNEL_IO default_pgprot(__PAGE_KERNEL_IO) #define PAGE_KERNEL_IO __pgprot_mask(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE default_pgprot(__PAGE_KERNEL_IO_NOCACHE) #define PAGE_KERNEL_IO_NOCACHE __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE)
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
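With the single-bit shorthands, every protection becomes one row of a fixed-width table, so the RW/NX/GLOBAL differences line up visually. As a hand-expanded example (using only the definitions above):

    PAGE_KERNEL
      = __pgprot_mask(__PAGE_KERNEL | _ENC)
      = __pgprot((_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_NX |
                  _PAGE_DIRTY | _PAGE_GLOBAL | _PAGE_ENC)
                 & __default_kernel_pte_mask)

which is the same set of bits the old __PAGE_KERNEL | _PAGE_ENC spelling produced.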
...@@ -449,9 +457,6 @@ static inline pteval_t pte_flags(pte_t pte) ...@@ -449,9 +457,6 @@ static inline pteval_t pte_flags(pte_t pte)
return native_pte_val(pte) & PTE_FLAGS_MASK; return native_pte_val(pte) & PTE_FLAGS_MASK;
} }
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM]; extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8]; extern uint8_t __pte2cachemode_tbl[8];
......
#ifndef _ASM_X86_VMALLOC_H
#define _ASM_X86_VMALLOC_H
#include <asm/pgtable_areas.h>
#endif /* _ASM_X86_VMALLOC_H */
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/microcode.h> #include <asm/microcode.h>
#include <asm/microcode_intel.h> #include <asm/microcode_intel.h>
#include <asm/intel-family.h> #include <asm/intel-family.h>
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include "mtrr.h" #include "mtrr.h"
......
...@@ -52,7 +52,7 @@ ...@@ -52,7 +52,7 @@
#include <asm/e820/api.h> #include <asm/e820/api.h>
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include "mtrr.h" #include "mtrr.h"
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
*/ */
#include <linux/cpu.h> #include <linux/cpu.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/processor.h> #include <asm/processor.h>
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/processor.h> #include <asm/processor.h>
#include "cpu.h" #include "cpu.h"
......
...@@ -177,7 +177,7 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr, ...@@ -177,7 +177,7 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
* acpi_rsdp=<addr> on kernel command line to make second kernel boot * acpi_rsdp=<addr> on kernel command line to make second kernel boot
* without efi. * without efi.
*/ */
if (efi_enabled(EFI_OLD_MEMMAP)) if (efi_have_uv1_memmap())
return 0; return 0;
params->secure_boot = boot_params.secure_boot; params->secure_boot = boot_params.secure_boot;
......
...@@ -28,6 +28,89 @@ ...@@ -28,6 +28,89 @@
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/syscalls.h> #include <asm/syscalls.h>
#include <asm/pgtable_areas.h>
/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
static inline void *ldt_slot_va(int slot)
{
return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}
void load_mm_ldt(struct mm_struct *mm)
{
struct ldt_struct *ldt;
/* READ_ONCE synchronizes with smp_store_release */
ldt = READ_ONCE(mm->context.ldt);
/*
* Any change to mm->context.ldt is followed by an IPI to all
* CPUs with the mm active. The LDT will not be freed until
* after the IPI is handled by all such CPUs. This means that,
* if the ldt_struct changes before we return, the values we see
* will be safe, and the new values will be loaded before we run
* any user code.
*
* NB: don't try to convert this to use RCU without extreme care.
* We would still need IRQs off, because we don't want to change
* the local LDT after an IPI loaded a newer value than the one
* that we can see.
*/
if (unlikely(ldt)) {
if (static_cpu_has(X86_FEATURE_PTI)) {
if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
/*
* Whoops -- either the new LDT isn't mapped
* (if slot == -1) or is mapped into a bogus
* slot (if slot > 1).
*/
clear_LDT();
return;
}
/*
* If page table isolation is enabled, ldt->entries
* will not be mapped in the userspace pagetables.
* Tell the CPU to access the LDT through the alias
* at ldt_slot_va(ldt->slot).
*/
set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
} else {
set_ldt(ldt->entries, ldt->nr_entries);
}
} else {
clear_LDT();
}
}
void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
/*
* Load the LDT if either the old or new mm had an LDT.
*
* An mm will never go from having an LDT to not having an LDT. Two
* mms never share an LDT, so we don't gain anything by checking to
* see whether the LDT changed. There's also no guarantee that
* prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
* then prev->context.ldt will also be non-NULL.
*
* If we really cared, we could optimize the case where prev == next
* and we're exiting lazy mode. Most of the time, if this happens,
* we don't actually need to reload LDTR, but modify_ldt() is mostly
* used by legacy code and emulators where we don't need this level of
* performance.
*
* This uses | instead of || because it generates better code.
*/
if (unlikely((unsigned long)prev->context.ldt |
(unsigned long)next->context.ldt))
load_mm_ldt(next);
DEBUG_LOCKS_WARN_ON(preemptible());
}
static void refresh_ldt_segments(void) static void refresh_ldt_segments(void)
{ {
......
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/unwind.h> #include <asm/unwind.h>
#include <asm/vsyscall.h> #include <asm/vsyscall.h>
#include <linux/vmalloc.h>
/* /*
* max_low_pfn_mapped: highest directly mapped pfn < 4 GB * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/io_apic.h> #include <asm/io_apic.h>
#include <asm/hpet.h> #include <asm/hpet.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/tsc.h> #include <asm/tsc.h>
#include <asm/iommu.h> #include <asm/iommu.h>
#include <asm/mach_traps.h> #include <asm/mach_traps.h>
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/cmpxchg.h> #include <asm/cmpxchg.h>
#include <asm/e820/api.h> #include <asm/e820/api.h>
#include <asm/io.h> #include <asm/io.h>
......
...@@ -12,8 +12,10 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg ...@@ -12,8 +12,10 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
endif endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o
obj-y += pat/
# Make sure __phys_addr has no stackprotector # Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector) nostackp := $(call cc-option, -fno-stack-protector)
...@@ -23,8 +25,6 @@ CFLAGS_mem_encrypt_identity.o := $(nostackp) ...@@ -23,8 +25,6 @@ CFLAGS_mem_encrypt_identity.o := $(nostackp)
CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
obj-$(CONFIG_X86_PAT) += pat_interval.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <asm/efi.h> /* efi_recover_from_page_fault()*/ #include <asm/efi.h> /* efi_recover_from_page_fault()*/
#include <asm/desc.h> /* store_idt(), ... */ #include <asm/desc.h> /* store_idt(), ... */
#include <asm/cpu_entry_area.h> /* exception stack */ #include <asm/cpu_entry_area.h> /* exception stack */
#include <asm/pgtable_areas.h> /* VMALLOC_START, ... */
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h> #include <asm/trace/exceptions.h>
......
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#include <asm/page_types.h> #include <asm/page_types.h>
#include <asm/cpu_entry_area.h> #include <asm/cpu_entry_area.h>
#include <asm/init.h> #include <asm/init.h>
#include <asm/pgtable_areas.h>
#include "mm_internal.h" #include "mm_internal.h"
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
*/ */
#include <asm/iomap.h> #include <asm/iomap.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/highmem.h> #include <linux/highmem.h>
...@@ -26,7 +26,7 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot) ...@@ -26,7 +26,7 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
if (!is_io_mapping_possible(base, size)) if (!is_io_mapping_possible(base, size))
return -EINVAL; return -EINVAL;
ret = io_reserve_memtype(base, base + size, &pcm); ret = memtype_reserve_io(base, base + size, &pcm);
if (ret) if (ret)
return ret; return ret;
...@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(iomap_create_wc); ...@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(iomap_create_wc);
void iomap_free(resource_size_t base, unsigned long size) void iomap_free(resource_size_t base, unsigned long size)
{ {
io_free_memtype(base, base + size); memtype_free_io(base, base + size);
} }
EXPORT_SYMBOL_GPL(iomap_free); EXPORT_SYMBOL_GPL(iomap_free);
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/setup.h> #include <asm/setup.h>
#include "physaddr.h" #include "physaddr.h"
...@@ -196,10 +196,10 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size, ...@@ -196,10 +196,10 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
phys_addr &= PHYSICAL_PAGE_MASK; phys_addr &= PHYSICAL_PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr; size = PAGE_ALIGN(last_addr+1) - phys_addr;
retval = reserve_memtype(phys_addr, (u64)phys_addr + size, retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
pcm, &new_pcm); pcm, &new_pcm);
if (retval) { if (retval) {
printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval); printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
return NULL; return NULL;
} }
...@@ -255,7 +255,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size, ...@@ -255,7 +255,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
area->phys_addr = phys_addr; area->phys_addr = phys_addr;
vaddr = (unsigned long) area->addr; vaddr = (unsigned long) area->addr;
if (kernel_map_sync_memtype(phys_addr, size, pcm)) if (memtype_kernel_map_sync(phys_addr, size, pcm))
goto err_free_area; goto err_free_area;
if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
...@@ -275,7 +275,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size, ...@@ -275,7 +275,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
err_free_area: err_free_area:
free_vm_area(area); free_vm_area(area);
err_free_memtype: err_free_memtype:
free_memtype(phys_addr, phys_addr + size); memtype_free(phys_addr, phys_addr + size);
return NULL; return NULL;
} }
...@@ -451,7 +451,7 @@ void iounmap(volatile void __iomem *addr) ...@@ -451,7 +451,7 @@ void iounmap(volatile void __iomem *addr)
return; return;
} }
free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p)); memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));
/* Finally remove it */ /* Finally remove it */
o = remove_vm_area((void __force *)addr); o = remove_vm_area((void __force *)addr);
......
# SPDX-License-Identifier: GPL-2.0
obj-y := set_memory.o memtype.o
obj-$(CONFIG_X86_PAT) += memtype_interval.o
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PAT_INTERNAL_H_ #ifndef __MEMTYPE_H_
#define __PAT_INTERNAL_H_ #define __MEMTYPE_H_
extern int pat_debug_enable; extern int pat_debug_enable;
...@@ -29,13 +29,13 @@ static inline char *cattr_name(enum page_cache_mode pcm) ...@@ -29,13 +29,13 @@ static inline char *cattr_name(enum page_cache_mode pcm)
} }
#ifdef CONFIG_X86_PAT #ifdef CONFIG_X86_PAT
extern int memtype_check_insert(struct memtype *new, extern int memtype_check_insert(struct memtype *entry_new,
enum page_cache_mode *new_type); enum page_cache_mode *new_type);
extern struct memtype *memtype_erase(u64 start, u64 end); extern struct memtype *memtype_erase(u64 start, u64 end);
extern struct memtype *memtype_lookup(u64 addr); extern struct memtype *memtype_lookup(u64 addr);
extern int memtype_copy_nth_element(struct memtype *out, loff_t pos); extern int memtype_copy_nth_element(struct memtype *entry_out, loff_t pos);
#else #else
static inline int memtype_check_insert(struct memtype *new, static inline int memtype_check_insert(struct memtype *entry_new,
enum page_cache_mode *new_type) enum page_cache_mode *new_type)
{ return 0; } { return 0; }
static inline struct memtype *memtype_erase(u64 start, u64 end) static inline struct memtype *memtype_erase(u64 start, u64 end)
...@@ -46,4 +46,4 @@ static inline int memtype_copy_nth_element(struct memtype *out, loff_t pos) ...@@ -46,4 +46,4 @@ static inline int memtype_copy_nth_element(struct memtype *out, loff_t pos)
{ return 0; } { return 0; }
#endif #endif
#endif /* __PAT_INTERNAL_H_ */ #endif /* __MEMTYPE_H_ */
...@@ -16,34 +16,36 @@ ...@@ -16,34 +16,36 @@
#include <linux/gfp.h> #include <linux/gfp.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include "pat_internal.h" #include "memtype.h"
/* /*
* The memtype tree keeps track of memory type for specific * The memtype tree keeps track of memory type for specific
* physical memory areas. Without proper tracking, conflicting memory * physical memory areas. Without proper tracking, conflicting memory
* types in different mappings can cause CPU cache corruption. * types in different mappings can cause CPU cache corruption.
* *
* The tree is an interval tree (augmented rbtree) with tree ordered * The tree is an interval tree (augmented rbtree) which tree is ordered
* on starting address. Tree can contain multiple entries for * by the starting address. The tree can contain multiple entries for
* different regions which overlap. All the aliases have the same * different regions which overlap. All the aliases have the same
* cache attributes of course. * cache attributes of course, as enforced by the PAT logic.
* *
* memtype_lock protects the rbtree. * memtype_lock protects the rbtree.
*/ */
static inline u64 memtype_interval_start(struct memtype *memtype)
static inline u64 interval_start(struct memtype *entry)
{ {
return memtype->start; return entry->start;
} }
static inline u64 memtype_interval_end(struct memtype *memtype) static inline u64 interval_end(struct memtype *entry)
{ {
return memtype->end - 1; return entry->end - 1;
} }
INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end, INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
memtype_interval_start, memtype_interval_end, interval_start, interval_end,
static, memtype_interval) static, interval)
static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED; static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
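The renamed helpers above feed the generic INTERVAL_TREE_DEFINE() template, which in turn emits the interval_insert(), interval_remove(), interval_iter_first() and interval_iter_next() functions used throughout this file. A minimal sketch of the resulting lookup pattern, under the same closed-interval convention (hence the "end - 1") the template is instantiated with; the walk_overlaps() wrapper itself is purely illustrative:

	/* Illustration only: visit every cached range overlapping [start, end). */
	static void walk_overlaps(u64 start, u64 end)
	{
		struct memtype *entry;

		entry = interval_iter_first(&memtype_rbroot, start, end - 1);
		while (entry) {
			/* entry->start, entry->end and entry->type describe one range */
			entry = interval_iter_next(entry, start, end - 1);
		}
	}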
...@@ -54,19 +56,20 @@ enum { ...@@ -54,19 +56,20 @@ enum {
static struct memtype *memtype_match(u64 start, u64 end, int match_type) static struct memtype *memtype_match(u64 start, u64 end, int match_type)
{ {
struct memtype *match; struct memtype *entry_match;
match = memtype_interval_iter_first(&memtype_rbroot, start, end-1); entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
while (match != NULL && match->start < end) {
while (entry_match != NULL && entry_match->start < end) {
if ((match_type == MEMTYPE_EXACT_MATCH) && if ((match_type == MEMTYPE_EXACT_MATCH) &&
(match->start == start) && (match->end == end)) (entry_match->start == start) && (entry_match->end == end))
return match; return entry_match;
if ((match_type == MEMTYPE_END_MATCH) && if ((match_type == MEMTYPE_END_MATCH) &&
(match->start < start) && (match->end == end)) (entry_match->start < start) && (entry_match->end == end))
return match; return entry_match;
match = memtype_interval_iter_next(match, start, end-1); entry_match = interval_iter_next(entry_match, start, end-1);
} }
return NULL; /* Returns NULL if there is no match */ return NULL; /* Returns NULL if there is no match */
...@@ -76,25 +79,25 @@ static int memtype_check_conflict(u64 start, u64 end, ...@@ -76,25 +79,25 @@ static int memtype_check_conflict(u64 start, u64 end,
enum page_cache_mode reqtype, enum page_cache_mode reqtype,
enum page_cache_mode *newtype) enum page_cache_mode *newtype)
{ {
struct memtype *match; struct memtype *entry_match;
enum page_cache_mode found_type = reqtype; enum page_cache_mode found_type = reqtype;
match = memtype_interval_iter_first(&memtype_rbroot, start, end-1); entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
if (match == NULL) if (entry_match == NULL)
goto success; goto success;
if (match->type != found_type && newtype == NULL) if (entry_match->type != found_type && newtype == NULL)
goto failure; goto failure;
dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end); dprintk("Overlap at 0x%Lx-0x%Lx\n", entry_match->start, entry_match->end);
found_type = match->type; found_type = entry_match->type;
match = memtype_interval_iter_next(match, start, end-1); entry_match = interval_iter_next(entry_match, start, end-1);
while (match) { while (entry_match) {
if (match->type != found_type) if (entry_match->type != found_type)
goto failure; goto failure;
match = memtype_interval_iter_next(match, start, end-1); entry_match = interval_iter_next(entry_match, start, end-1);
} }
success: success:
if (newtype) if (newtype)
...@@ -105,29 +108,29 @@ static int memtype_check_conflict(u64 start, u64 end, ...@@ -105,29 +108,29 @@ static int memtype_check_conflict(u64 start, u64 end,
failure: failure:
pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n", pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
current->comm, current->pid, start, end, current->comm, current->pid, start, end,
cattr_name(found_type), cattr_name(match->type)); cattr_name(found_type), cattr_name(entry_match->type));
return -EBUSY; return -EBUSY;
} }
int memtype_check_insert(struct memtype *new, int memtype_check_insert(struct memtype *entry_new, enum page_cache_mode *ret_type)
enum page_cache_mode *ret_type)
{ {
int err = 0; int err = 0;
err = memtype_check_conflict(new->start, new->end, new->type, ret_type); err = memtype_check_conflict(entry_new->start, entry_new->end, entry_new->type, ret_type);
if (err) if (err)
return err; return err;
if (ret_type) if (ret_type)
new->type = *ret_type; entry_new->type = *ret_type;
memtype_interval_insert(new, &memtype_rbroot); interval_insert(entry_new, &memtype_rbroot);
return 0; return 0;
} }
struct memtype *memtype_erase(u64 start, u64 end) struct memtype *memtype_erase(u64 start, u64 end)
{ {
struct memtype *data; struct memtype *entry_old;
/* /*
* Since the memtype_rbroot tree allows overlapping ranges, * Since the memtype_rbroot tree allows overlapping ranges,
...@@ -136,47 +139,53 @@ struct memtype *memtype_erase(u64 start, u64 end) ...@@ -136,47 +139,53 @@ struct memtype *memtype_erase(u64 start, u64 end)
* it then checks with END_MATCH, i.e. shrink the size of a node * it then checks with END_MATCH, i.e. shrink the size of a node
* from the end for the mremap case. * from the end for the mremap case.
*/ */
data = memtype_match(start, end, MEMTYPE_EXACT_MATCH); entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
if (!data) { if (!entry_old) {
data = memtype_match(start, end, MEMTYPE_END_MATCH); entry_old = memtype_match(start, end, MEMTYPE_END_MATCH);
if (!data) if (!entry_old)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
if (data->start == start) { if (entry_old->start == start) {
/* munmap: erase this node */ /* munmap: erase this node */
memtype_interval_remove(data, &memtype_rbroot); interval_remove(entry_old, &memtype_rbroot);
} else { } else {
/* mremap: update the end value of this node */ /* mremap: update the end value of this node */
memtype_interval_remove(data, &memtype_rbroot); interval_remove(entry_old, &memtype_rbroot);
data->end = start; entry_old->end = start;
memtype_interval_insert(data, &memtype_rbroot); interval_insert(entry_old, &memtype_rbroot);
return NULL; return NULL;
} }
return data; return entry_old;
} }
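Worked example of the two match modes described in the comment above, with illustrative physical addresses; a cached range [0x100000, 0x200000) is assumed to exist:

	/* munmap case - the freed range matches a node exactly (EXACT_MATCH):   */
	memtype_erase(0x100000, 0x200000);  /* node removed, entry returned so the
	                                       caller can free it                */

	/* mremap shrink case - only the tail of the range is freed (END_MATCH): */
	memtype_erase(0x180000, 0x200000);  /* node shrinks to [0x100000, 0x180000),
	                                       NULL is returned, nothing to free */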
struct memtype *memtype_lookup(u64 addr) struct memtype *memtype_lookup(u64 addr)
{ {
return memtype_interval_iter_first(&memtype_rbroot, addr, return interval_iter_first(&memtype_rbroot, addr, addr + PAGE_SIZE-1);
addr + PAGE_SIZE-1);
} }
#if defined(CONFIG_DEBUG_FS) /*
int memtype_copy_nth_element(struct memtype *out, loff_t pos) * Debugging helper, copy the Nth entry of the tree into a
* a copy for printout. This allows us to print out the tree
* via debugfs, without holding the memtype_lock too long:
*/
#ifdef CONFIG_DEBUG_FS
int memtype_copy_nth_element(struct memtype *entry_out, loff_t pos)
{ {
struct memtype *match; struct memtype *entry_match;
int i = 1; int i = 1;
match = memtype_interval_iter_first(&memtype_rbroot, 0, ULONG_MAX); entry_match = interval_iter_first(&memtype_rbroot, 0, ULONG_MAX);
while (match && pos != i) {
match = memtype_interval_iter_next(match, 0, ULONG_MAX); while (entry_match && pos != i) {
entry_match = interval_iter_next(entry_match, 0, ULONG_MAX);
i++; i++;
} }
if (match) { /* pos == i */ if (entry_match) { /* pos == i */
*out = *match; *entry_out = *entry_match;
return 0; return 0;
} else { } else {
return 1; return 1;
......
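The comment added above explains that memtype_copy_nth_element() exists so the debugfs interface can snapshot one entry at a time instead of holding memtype_lock while printing the whole tree. A hedged sketch of such a consumer (the helper below is hypothetical; the real one lives in the PAT debugfs code):

	/* Hypothetical debugfs consumer: print the pos-th cached range (1-based). */
	static int print_nth_memtype(struct seq_file *seq, loff_t pos)
	{
		struct memtype entry;

		if (memtype_copy_nth_element(&entry, pos))
			return -ENOENT;		/* fewer than pos entries in the tree */

		seq_printf(seq, "0x%Lx-0x%Lx %s\n",
			   entry.start, entry.end, cattr_name(entry.type));
		return 0;
	}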
...@@ -24,10 +24,10 @@ ...@@ -24,10 +24,10 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/set_memory.h> #include <asm/set_memory.h>
#include "mm_internal.h" #include "../mm_internal.h"
/* /*
* The current flushing context - we pass it instead of 5 arguments: * The current flushing context - we pass it instead of 5 arguments:
...@@ -331,7 +331,7 @@ static void cpa_flush_all(unsigned long cache) ...@@ -331,7 +331,7 @@ static void cpa_flush_all(unsigned long cache)
on_each_cpu(__cpa_flush_all, (void *) cache, 1); on_each_cpu(__cpa_flush_all, (void *) cache, 1);
} }
void __cpa_flush_tlb(void *data) static void __cpa_flush_tlb(void *data)
{ {
struct cpa_data *cpa = data; struct cpa_data *cpa = data;
unsigned int i; unsigned int i;
...@@ -1801,7 +1801,7 @@ int set_memory_uc(unsigned long addr, int numpages) ...@@ -1801,7 +1801,7 @@ int set_memory_uc(unsigned long addr, int numpages)
/* /*
* for now UC MINUS. see comments in ioremap() * for now UC MINUS. see comments in ioremap()
*/ */
ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_MODE_UC_MINUS, NULL); _PAGE_CACHE_MODE_UC_MINUS, NULL);
if (ret) if (ret)
goto out_err; goto out_err;
...@@ -1813,7 +1813,7 @@ int set_memory_uc(unsigned long addr, int numpages) ...@@ -1813,7 +1813,7 @@ int set_memory_uc(unsigned long addr, int numpages)
return 0; return 0;
out_free: out_free:
free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err: out_err:
return ret; return ret;
} }
...@@ -1839,14 +1839,14 @@ int set_memory_wc(unsigned long addr, int numpages) ...@@ -1839,14 +1839,14 @@ int set_memory_wc(unsigned long addr, int numpages)
{ {
int ret; int ret;
ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_MODE_WC, NULL); _PAGE_CACHE_MODE_WC, NULL);
if (ret) if (ret)
return ret; return ret;
ret = _set_memory_wc(addr, numpages); ret = _set_memory_wc(addr, numpages);
if (ret) if (ret)
free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
return ret; return ret;
} }
...@@ -1873,7 +1873,7 @@ int set_memory_wb(unsigned long addr, int numpages) ...@@ -1873,7 +1873,7 @@ int set_memory_wb(unsigned long addr, int numpages)
if (ret) if (ret)
return ret; return ret;
free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
return 0; return 0;
} }
EXPORT_SYMBOL(set_memory_wb); EXPORT_SYMBOL(set_memory_wb);
...@@ -2014,7 +2014,7 @@ static int _set_pages_array(struct page **pages, int numpages, ...@@ -2014,7 +2014,7 @@ static int _set_pages_array(struct page **pages, int numpages,
continue; continue;
start = page_to_pfn(pages[i]) << PAGE_SHIFT; start = page_to_pfn(pages[i]) << PAGE_SHIFT;
end = start + PAGE_SIZE; end = start + PAGE_SIZE;
if (reserve_memtype(start, end, new_type, NULL)) if (memtype_reserve(start, end, new_type, NULL))
goto err_out; goto err_out;
} }
...@@ -2040,7 +2040,7 @@ static int _set_pages_array(struct page **pages, int numpages, ...@@ -2040,7 +2040,7 @@ static int _set_pages_array(struct page **pages, int numpages,
continue; continue;
start = page_to_pfn(pages[i]) << PAGE_SHIFT; start = page_to_pfn(pages[i]) << PAGE_SHIFT;
end = start + PAGE_SIZE; end = start + PAGE_SIZE;
free_memtype(start, end); memtype_free(start, end);
} }
return -EINVAL; return -EINVAL;
} }
...@@ -2089,7 +2089,7 @@ int set_pages_array_wb(struct page **pages, int numpages) ...@@ -2089,7 +2089,7 @@ int set_pages_array_wb(struct page **pages, int numpages)
continue; continue;
start = page_to_pfn(pages[i]) << PAGE_SHIFT; start = page_to_pfn(pages[i]) << PAGE_SHIFT;
end = start + PAGE_SIZE; end = start + PAGE_SIZE;
free_memtype(start, end); memtype_free(start, end);
} }
return 0; return 0;
...@@ -2215,7 +2215,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, ...@@ -2215,7 +2215,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
.pgd = pgd, .pgd = pgd,
.numpages = numpages, .numpages = numpages,
.mask_set = __pgprot(0), .mask_set = __pgprot(0),
.mask_clr = __pgprot(0), .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
.flags = 0, .flags = 0,
}; };
...@@ -2224,12 +2224,6 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, ...@@ -2224,12 +2224,6 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
if (!(__supported_pte_mask & _PAGE_NX)) if (!(__supported_pte_mask & _PAGE_NX))
goto out; goto out;
if (!(page_flags & _PAGE_NX))
cpa.mask_clr = __pgprot(_PAGE_NX);
if (!(page_flags & _PAGE_RW))
cpa.mask_clr = __pgprot(_PAGE_RW);
if (!(page_flags & _PAGE_ENC)) if (!(page_flags & _PAGE_ENC))
cpa.mask_clr = pgprot_encrypted(cpa.mask_clr); cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
...@@ -2281,5 +2275,5 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address, ...@@ -2281,5 +2275,5 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
* be exposed to the rest of the kernel. Include these directly here. * be exposed to the rest of the kernel. Include these directly here.
*/ */
#ifdef CONFIG_CPA_DEBUG #ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c" #include "cpa-test.c"
#endif #endif
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/io.h> #include <asm/io.h>
#include <linux/vmalloc.h>
unsigned int __VMALLOC_RESERVE = 128 << 20; unsigned int __VMALLOC_RESERVE = 128 << 20;
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/page.h> #include <asm/page.h>
#include <linux/vmalloc.h>
#include "physaddr.h" #include "physaddr.h"
......
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/memblock.h> #include <linux/memblock.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/e820/api.h> #include <asm/e820/api.h>
#include <asm/pci_x86.h> #include <asm/pci_x86.h>
#include <asm/io_apic.h> #include <asm/io_apic.h>
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y KASAN_SANITIZE := n
GCOV_PROFILE := n
obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o

...@@ -66,9 +66,17 @@ void __init efi_map_region(efi_memory_desc_t *md) ...@@ -66,9 +66,17 @@ void __init efi_map_region(efi_memory_desc_t *md)
void __init efi_map_region_fixed(efi_memory_desc_t *md) {} void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
pgd_t * __init efi_call_phys_prolog(void) efi_status_t efi_call_svam(efi_set_virtual_address_map_t *__efiapi *,
u32, u32, u32, void *);
efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
unsigned long descriptor_size,
u32 descriptor_version,
efi_memory_desc_t *virtual_map)
{ {
struct desc_ptr gdt_descr; struct desc_ptr gdt_descr;
efi_status_t status;
unsigned long flags;
pgd_t *save_pgd; pgd_t *save_pgd;
/* Current pgd is swapper_pg_dir, we'll restore it later: */ /* Current pgd is swapper_pg_dir, we'll restore it later: */
...@@ -80,14 +88,18 @@ pgd_t * __init efi_call_phys_prolog(void) ...@@ -80,14 +88,18 @@ pgd_t * __init efi_call_phys_prolog(void)
gdt_descr.size = GDT_SIZE - 1; gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr); load_gdt(&gdt_descr);
return save_pgd; /* Disable interrupts around EFI calls: */
} local_irq_save(flags);
status = efi_call_svam(&efi.systab->runtime->set_virtual_address_map,
memory_map_size, descriptor_size,
descriptor_version, virtual_map);
local_irq_restore(flags);
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
load_fixmap_gdt(0); load_fixmap_gdt(0);
load_cr3(save_pgd); load_cr3(save_pgd);
__flush_tlb_all(); __flush_tlb_all();
return status;
} }
void __init efi_runtime_update_mappings(void) void __init efi_runtime_update_mappings(void)
......
...@@ -7,118 +7,43 @@ ...@@ -7,118 +7,43 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page_types.h> #include <asm/page_types.h>
/* __INIT
* efi_call_phys(void *, ...) is a function with variable parameters. SYM_FUNC_START(efi_call_svam)
* All the callers of this function assure that all the parameters are 4-bytes. push 8(%esp)
*/ push 8(%esp)
push %ecx
/* push %edx
* In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save.
* So we'd better save all of them at the beginning of this function and restore
* at the end no matter how many we use, because we can not assure EFI runtime
* service functions will comply with gcc calling convention, too.
*/
.text
SYM_FUNC_START(efi_call_phys)
/* /*
* 0. The function can only be called in Linux kernel. So CS has been * Switch to the flat mapped alias of this routine, by jumping to the
* set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found * address of label '1' after subtracting PAGE_OFFSET from it.
* the values of these registers are the same. And, the corresponding
* GDT entries are identical. So I will do nothing about segment reg
* and GDT, but change GDT base register in prolog and epilog.
*/
/*
* 1. Now I am running with EIP = <physical address> + PAGE_OFFSET.
* But to make it smoothly switch from virtual mode to flat mode.
* The mapping of lower virtual memory has been created in prolog and
* epilog.
*/ */
movl $1f, %edx movl $1f, %edx
subl $__PAGE_OFFSET, %edx subl $__PAGE_OFFSET, %edx
jmp *%edx jmp *%edx
1: 1:
/* /* disable paging */
* 2. Now on the top of stack is the return
* address in the caller of efi_call_phys(), then parameter 1,
* parameter 2, ..., param n. To make things easy, we save the return
* address of efi_call_phys in a global variable.
*/
popl %edx
movl %edx, saved_return_addr
/* get the function pointer into ECX*/
popl %ecx
movl %ecx, efi_rt_function_ptr
movl $2f, %edx
subl $__PAGE_OFFSET, %edx
pushl %edx
/*
* 3. Clear PG bit in %CR0.
*/
movl %cr0, %edx movl %cr0, %edx
andl $0x7fffffff, %edx andl $0x7fffffff, %edx
movl %edx, %cr0 movl %edx, %cr0
jmp 1f
1:
/* /* convert the stack pointer to a flat mapped address */
* 4. Adjust stack pointer.
*/
subl $__PAGE_OFFSET, %esp subl $__PAGE_OFFSET, %esp
/* /* call the EFI routine */
* 5. Call the physical function. call *(%eax)
*/
jmp *%ecx
2: /* convert ESP back to a kernel VA, and pop the outgoing args */
/* addl $__PAGE_OFFSET + 16, %esp
* 6. After EFI runtime service returns, control will return to
* following instruction. We'd better readjust stack pointer first.
*/
addl $__PAGE_OFFSET, %esp
/* /* re-enable paging */
* 7. Restore PG bit
*/
movl %cr0, %edx movl %cr0, %edx
orl $0x80000000, %edx orl $0x80000000, %edx
movl %edx, %cr0 movl %edx, %cr0
jmp 1f
1:
/*
* 8. Now restore the virtual mode from flat mode by
* adding EIP with PAGE_OFFSET.
*/
movl $1f, %edx
jmp *%edx
1:
/*
* 9. Balance the stack. And because EAX contain the return value,
* we'd better not clobber it.
*/
leal efi_rt_function_ptr, %edx
movl (%edx), %ecx
pushl %ecx
/*
* 10. Push the saved return address onto the stack and return.
*/
leal saved_return_addr, %edx
movl (%edx), %ecx
pushl %ecx
ret ret
SYM_FUNC_END(efi_call_phys) SYM_FUNC_END(efi_call_svam)
.previous
.data
saved_return_addr:
.long 0
efi_rt_function_ptr:
.long 0
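The "flat mapped alias" that the new efi_call_svam() jumps to is simply the physical-address view of the same code: on 32-bit x86 every lowmem kernel virtual address sits a fixed __PAGE_OFFSET above its physical address, so subtracting __PAGE_OFFSET (what __pa() does in C) yields an address that stays valid once paging is switched off. A tiny illustration, assuming the common 3G/1G split where the offset is 0xC0000000:

	/* Illustration only: lowmem virtual <-> physical aliasing on i386.     */
	void *virt = (void *)0xC0123000;			/* kernel lowmem address */
	unsigned long phys = (unsigned long)virt - PAGE_OFFSET;	/* = 0x00123000 */
	/* Jumping to 'phys' is what the "movl $1f, %edx; subl $__PAGE_OFFSET"  */
	/* sequence above does before clearing the PG bit in CR0.               */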
...@@ -8,41 +8,12 @@ ...@@ -8,41 +8,12 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/nospec-branch.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/page_types.h>
#define SAVE_XMM \ SYM_FUNC_START(__efi_call)
mov %rsp, %rax; \
subq $0x70, %rsp; \
and $~0xf, %rsp; \
mov %rax, (%rsp); \
mov %cr0, %rax; \
clts; \
mov %rax, 0x8(%rsp); \
movaps %xmm0, 0x60(%rsp); \
movaps %xmm1, 0x50(%rsp); \
movaps %xmm2, 0x40(%rsp); \
movaps %xmm3, 0x30(%rsp); \
movaps %xmm4, 0x20(%rsp); \
movaps %xmm5, 0x10(%rsp)
#define RESTORE_XMM \
movaps 0x60(%rsp), %xmm0; \
movaps 0x50(%rsp), %xmm1; \
movaps 0x40(%rsp), %xmm2; \
movaps 0x30(%rsp), %xmm3; \
movaps 0x20(%rsp), %xmm4; \
movaps 0x10(%rsp), %xmm5; \
mov 0x8(%rsp), %rsi; \
mov %rsi, %cr0; \
mov (%rsp), %rsp
SYM_FUNC_START(efi_call)
pushq %rbp pushq %rbp
movq %rsp, %rbp movq %rsp, %rbp
SAVE_XMM and $~0xf, %rsp
mov 16(%rbp), %rax mov 16(%rbp), %rax
subq $48, %rsp subq $48, %rsp
mov %r9, 32(%rsp) mov %r9, 32(%rsp)
...@@ -50,9 +21,7 @@ SYM_FUNC_START(efi_call) ...@@ -50,9 +21,7 @@ SYM_FUNC_START(efi_call)
mov %r8, %r9 mov %r8, %r9
mov %rcx, %r8 mov %rcx, %r8
mov %rsi, %rcx mov %rsi, %rcx
call *%rdi CALL_NOSPEC %rdi
addq $48, %rsp leave
RESTORE_XMM
popq %rbp
ret ret
SYM_FUNC_END(efi_call) SYM_FUNC_END(__efi_call)
...@@ -25,15 +25,16 @@ ...@@ -25,15 +25,16 @@
.text .text
.code64 .code64
SYM_FUNC_START(efi64_thunk) SYM_CODE_START(__efi64_thunk)
push %rbp push %rbp
push %rbx push %rbx
/* /*
* Switch to 1:1 mapped 32-bit stack pointer. * Switch to 1:1 mapped 32-bit stack pointer.
*/ */
movq %rsp, efi_saved_sp(%rip) movq %rsp, %rax
movq efi_scratch(%rip), %rsp movq efi_scratch(%rip), %rsp
push %rax
/* /*
* Calculate the physical address of the kernel text. * Calculate the physical address of the kernel text.
...@@ -41,113 +42,31 @@ SYM_FUNC_START(efi64_thunk) ...@@ -41,113 +42,31 @@ SYM_FUNC_START(efi64_thunk)
movq $__START_KERNEL_map, %rax movq $__START_KERNEL_map, %rax
subq phys_base(%rip), %rax subq phys_base(%rip), %rax
/* leaq 1f(%rip), %rbp
* Push some physical addresses onto the stack. This is easier leaq 2f(%rip), %rbx
* to do now in a code64 section while the assembler can address subq %rax, %rbp
* 64-bit values. Note that all the addresses on the stack are
* 32-bit.
*/
subq $16, %rsp
leaq efi_exit32(%rip), %rbx
subq %rax, %rbx
movl %ebx, 8(%rsp)
leaq __efi64_thunk(%rip), %rbx
subq %rax, %rbx subq %rax, %rbx
call *%rbx
movq efi_saved_sp(%rip), %rsp
pop %rbx
pop %rbp
retq
SYM_FUNC_END(efi64_thunk)
/* subq $28, %rsp
* We run this function from the 1:1 mapping. movl %ebx, 0x0(%rsp) /* return address */
* movl %esi, 0x4(%rsp)
* This function must be invoked with a 1:1 mapped stack. movl %edx, 0x8(%rsp)
*/ movl %ecx, 0xc(%rsp)
SYM_FUNC_START_LOCAL(__efi64_thunk) movl %r8d, 0x10(%rsp)
movl %ds, %eax movl %r9d, 0x14(%rsp)
push %rax
movl %es, %eax
push %rax
movl %ss, %eax
push %rax
subq $32, %rsp
movl %esi, 0x0(%rsp)
movl %edx, 0x4(%rsp)
movl %ecx, 0x8(%rsp)
movq %r8, %rsi
movl %esi, 0xc(%rsp)
movq %r9, %rsi
movl %esi, 0x10(%rsp)
leaq 1f(%rip), %rbx
movq %rbx, func_rt_ptr(%rip)
/* Switch to 32-bit descriptor */ /* Switch to 32-bit descriptor */
pushq $__KERNEL32_CS pushq $__KERNEL32_CS
leaq efi_enter32(%rip), %rax pushq %rdi /* EFI runtime service address */
pushq %rax
lretq lretq
1: addq $32, %rsp 1: movq 24(%rsp), %rsp
pop %rbx pop %rbx
movl %ebx, %ss pop %rbp
pop %rbx retq
movl %ebx, %es
pop %rbx
movl %ebx, %ds
/*
* Convert 32-bit status code into 64-bit.
*/
test %rax, %rax
jz 1f
movl %eax, %ecx
andl $0x0fffffff, %ecx
andl $0xf0000000, %eax
shl $32, %rax
or %rcx, %rax
1:
ret
SYM_FUNC_END(__efi64_thunk)
SYM_FUNC_START_LOCAL(efi_exit32)
movq func_rt_ptr(%rip), %rax
push %rax
mov %rdi, %rax
ret
SYM_FUNC_END(efi_exit32)
.code32 .code32
/* 2: pushl $__KERNEL_CS
* EFI service pointer must be in %edi. pushl %ebp
*
* The stack should represent the 32-bit calling convention.
*/
SYM_FUNC_START_LOCAL(efi_enter32)
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
call *%edi
/* We must preserve return value */
movl %eax, %edi
movl 72(%esp), %eax
pushl $__KERNEL_CS
pushl %eax
lret lret
SYM_FUNC_END(efi_enter32) SYM_CODE_END(__efi64_thunk)
.data
.balign 8
func_rt_ptr: .quad 0
efi_saved_sp: .quad 0
...@@ -244,7 +244,7 @@ EXPORT_SYMBOL_GPL(efi_query_variable_store); ...@@ -244,7 +244,7 @@ EXPORT_SYMBOL_GPL(efi_query_variable_store);
*/ */
void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
{ {
phys_addr_t new_phys, new_size; struct efi_memory_map_data data = { 0 };
struct efi_mem_range mr; struct efi_mem_range mr;
efi_memory_desc_t md; efi_memory_desc_t md;
int num_entries; int num_entries;
...@@ -272,24 +272,21 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) ...@@ -272,24 +272,21 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
num_entries = efi_memmap_split_count(&md, &mr.range); num_entries = efi_memmap_split_count(&md, &mr.range);
num_entries += efi.memmap.nr_map; num_entries += efi.memmap.nr_map;
new_size = efi.memmap.desc_size * num_entries; if (efi_memmap_alloc(num_entries, &data) != 0) {
new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
pr_err("Could not allocate boot services memmap\n"); pr_err("Could not allocate boot services memmap\n");
return; return;
} }
new = early_memremap(new_phys, new_size); new = early_memremap(data.phys_map, data.size);
if (!new) { if (!new) {
pr_err("Failed to map new boot services memmap\n"); pr_err("Failed to map new boot services memmap\n");
return; return;
} }
efi_memmap_insert(&efi.memmap, new, &mr); efi_memmap_insert(&efi.memmap, new, &mr);
early_memunmap(new, new_size); early_memunmap(new, data.size);
efi_memmap_install(new_phys, num_entries); efi_memmap_install(&data);
e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
e820__update_table(e820_table); e820__update_table(e820_table);
} }
...@@ -385,10 +382,10 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md) ...@@ -385,10 +382,10 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
/* /*
* To Do: Remove this check after adding functionality to unmap EFI boot * To Do: Remove this check after adding functionality to unmap EFI boot
* services code/data regions from direct mapping area because * services code/data regions from direct mapping area because the UV1
* "efi=old_map" maps EFI regions in swapper_pg_dir. * memory map maps EFI regions in swapper_pg_dir.
*/ */
if (efi_enabled(EFI_OLD_MEMMAP)) if (efi_have_uv1_memmap())
return; return;
/* /*
...@@ -396,7 +393,7 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md) ...@@ -396,7 +393,7 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
* EFI runtime calls, hence don't unmap EFI boot services code/data * EFI runtime calls, hence don't unmap EFI boot services code/data
* regions. * regions.
*/ */
if (!efi_is_native()) if (efi_is_mixed())
return; return;
if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages)) if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
...@@ -408,7 +405,7 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md) ...@@ -408,7 +405,7 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
void __init efi_free_boot_services(void) void __init efi_free_boot_services(void)
{ {
phys_addr_t new_phys, new_size; struct efi_memory_map_data data = { 0 };
efi_memory_desc_t *md; efi_memory_desc_t *md;
int num_entries = 0; int num_entries = 0;
void *new, *new_md; void *new, *new_md;
...@@ -463,14 +460,12 @@ void __init efi_free_boot_services(void) ...@@ -463,14 +460,12 @@ void __init efi_free_boot_services(void)
if (!num_entries) if (!num_entries)
return; return;
new_size = efi.memmap.desc_size * num_entries; if (efi_memmap_alloc(num_entries, &data) != 0) {
new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
pr_err("Failed to allocate new EFI memmap\n"); pr_err("Failed to allocate new EFI memmap\n");
return; return;
} }
new = memremap(new_phys, new_size, MEMREMAP_WB); new = memremap(data.phys_map, data.size, MEMREMAP_WB);
if (!new) { if (!new) {
pr_err("Failed to map new EFI memmap\n"); pr_err("Failed to map new EFI memmap\n");
return; return;
...@@ -494,7 +489,7 @@ void __init efi_free_boot_services(void) ...@@ -494,7 +489,7 @@ void __init efi_free_boot_services(void)
memunmap(new); memunmap(new);
if (efi_memmap_install(new_phys, num_entries)) { if (efi_memmap_install(&data) != 0) {
pr_err("Could not install new EFI memmap\n"); pr_err("Could not install new EFI memmap\n");
return; return;
} }
...@@ -559,7 +554,7 @@ int __init efi_reuse_config(u64 tables, int nr_tables) ...@@ -559,7 +554,7 @@ int __init efi_reuse_config(u64 tables, int nr_tables)
return ret; return ret;
} }
static const struct dmi_system_id sgi_uv1_dmi[] = { static const struct dmi_system_id sgi_uv1_dmi[] __initconst = {
{ NULL, "SGI UV1", { NULL, "SGI UV1",
{ DMI_MATCH(DMI_PRODUCT_NAME, "Stoutland Platform"), { DMI_MATCH(DMI_PRODUCT_NAME, "Stoutland Platform"),
DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
...@@ -582,8 +577,15 @@ void __init efi_apply_memmap_quirks(void) ...@@ -582,8 +577,15 @@ void __init efi_apply_memmap_quirks(void)
} }
/* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */ /* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */
if (dmi_check_system(sgi_uv1_dmi)) if (dmi_check_system(sgi_uv1_dmi)) {
set_bit(EFI_OLD_MEMMAP, &efi.flags); if (IS_ENABLED(CONFIG_X86_UV)) {
set_bit(EFI_UV1_MEMMAP, &efi.flags);
} else {
pr_warn("EFI runtime disabled, needs CONFIG_X86_UV=y on UV1\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
efi_memmap_unmap();
}
}
} }
/* /*
...@@ -721,7 +723,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr) ...@@ -721,7 +723,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
/* /*
* Make sure that an efi runtime service caused the page fault. * Make sure that an efi runtime service caused the page fault.
* "efi_mm" cannot be used to check if the page fault had occurred * "efi_mm" cannot be used to check if the page fault had occurred
* in the firmware context because efi=old_map doesn't use efi_pgd. * in the firmware context because the UV1 memmap doesn't use efi_pgd.
*/ */
if (efi_rts_work.efi_rts_id == EFI_NONE) if (efi_rts_work.efi_rts_id == EFI_NONE)
return; return;
......
...@@ -31,13 +31,16 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, ...@@ -31,13 +31,16 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
return BIOS_STATUS_UNIMPLEMENTED; return BIOS_STATUS_UNIMPLEMENTED;
/* /*
* If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
* callback method, which uses efi_call() directly, with the kernel page tables: * callback method, which uses efi_call() directly, with the kernel page tables:
*/ */
if (unlikely(efi_enabled(EFI_OLD_MEMMAP))) if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
kernel_fpu_begin();
ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5); ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
else kernel_fpu_end();
} else {
ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5); ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
}
return ret; return ret;
} }
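The new kernel_fpu_begin()/kernel_fpu_end() pair goes together with the efi_stub_64.S change above: efi_call() no longer saves and restores the XMM registers itself (the SAVE_XMM/RESTORE_XMM macros are gone), so this direct caller now guards the extended register state on its own. Minimal sketch of the resulting pattern, using the same call as in the hunk above:

	s64 ret;

	kernel_fpu_begin();	/* firmware may clobber FPU/SSE state */
	ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
	kernel_fpu_end();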
...@@ -214,3 +217,163 @@ int uv_bios_init(void) ...@@ -214,3 +217,163 @@ int uv_bios_init(void)
pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision); pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
return 0; return 0;
} }
static void __init early_code_mapping_set_exec(int executable)
{
efi_memory_desc_t *md;
if (!(__supported_pte_mask & _PAGE_NX))
return;
/* Make EFI service code area executable */
for_each_efi_memory_desc(md) {
if (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_BOOT_SERVICES_CODE)
efi_set_executable(md, executable);
}
}
void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
{
/*
* After the lock is released, the original page table is restored.
*/
int pgd_idx, i;
int nr_pgds;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
if (!pgd_present(*pgd))
continue;
for (i = 0; i < PTRS_PER_P4D; i++) {
p4d = p4d_offset(pgd,
pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
if (!p4d_present(*p4d))
continue;
pud = (pud_t *)p4d_page_vaddr(*p4d);
pud_free(&init_mm, pud);
}
p4d = (p4d_t *)pgd_page_vaddr(*pgd);
p4d_free(&init_mm, p4d);
}
kfree(save_pgd);
__flush_tlb_all();
early_code_mapping_set_exec(0);
}
pgd_t * __init efi_uv1_memmap_phys_prolog(void)
{
unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
pgd_t *save_pgd, *pgd_k, *pgd_efi;
p4d_t *p4d, *p4d_k, *p4d_efi;
pud_t *pud;
int pgd;
int n_pgds, i, j;
early_code_mapping_set_exec(1);
n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
if (!save_pgd)
return NULL;
/*
* Build 1:1 identity mapping for UV1 memmap usage. Note that
* PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
* it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
* address X, the pud_index(X) != pud_index(__va(X)), we can only copy
* PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
* This means here we can only reuse the PMD tables of the direct mapping.
*/
for (pgd = 0; pgd < n_pgds; pgd++) {
addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
pgd_efi = pgd_offset_k(addr_pgd);
save_pgd[pgd] = *pgd_efi;
p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
if (!p4d) {
pr_err("Failed to allocate p4d table!\n");
goto out;
}
for (i = 0; i < PTRS_PER_P4D; i++) {
addr_p4d = addr_pgd + i * P4D_SIZE;
p4d_efi = p4d + p4d_index(addr_p4d);
pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
if (!pud) {
pr_err("Failed to allocate pud table!\n");
goto out;
}
for (j = 0; j < PTRS_PER_PUD; j++) {
addr_pud = addr_p4d + j * PUD_SIZE;
if (addr_pud > (max_pfn << PAGE_SHIFT))
break;
vaddr = (unsigned long)__va(addr_pud);
pgd_k = pgd_offset_k(vaddr);
p4d_k = p4d_offset(pgd_k, vaddr);
pud[j] = *pud_offset(p4d_k, vaddr);
}
}
pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
}
__flush_tlb_all();
return save_pgd;
out:
efi_uv1_memmap_phys_epilog(save_pgd);
return NULL;
}
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
u32 type, u64 attribute)
{
unsigned long last_map_pfn;
if (type == EFI_MEMORY_MAPPED_IO)
return ioremap(phys_addr, size);
last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
unsigned long top = last_map_pfn << PAGE_SHIFT;
efi_ioremap(top, size - (top - phys_addr), type, attribute);
}
if (!(attribute & EFI_MEMORY_WB))
efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
return (void __iomem *)__va(phys_addr);
}
static int __init arch_parse_efi_cmdline(char *str)
{
if (!str) {
pr_warn("need at least one option\n");
return -EINVAL;
}
if (!efi_is_mixed() && parse_option_str(str, "old_map"))
set_bit(EFI_UV1_MEMMAP, &efi.flags);
return 0;
}
early_param("efi", arch_parse_efi_cmdline);
...@@ -31,7 +31,7 @@ static efi_system_table_t efi_systab_xen __initdata = { ...@@ -31,7 +31,7 @@ static efi_system_table_t efi_systab_xen __initdata = {
.con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ .con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.con_in = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ .con_in = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ .con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.con_out = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ .con_out = NULL, /* Not used under Xen. */
.stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ .stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ .stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
.runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR, .runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR,
......
...@@ -67,7 +67,7 @@ ...@@ -67,7 +67,7 @@
#include <asm/linkage.h> #include <asm/linkage.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/init.h> #include <asm/init.h>
#include <asm/pat.h> #include <asm/memtype.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/tlb.h> #include <asm/tlb.h>
......
#ifndef _ASM_XTENSA_VMALLOC_H
#define _ASM_XTENSA_VMALLOC_H
#endif /* _ASM_XTENSA_VMALLOC_H */
...@@ -215,6 +215,28 @@ config EFI_RCI2_TABLE ...@@ -215,6 +215,28 @@ config EFI_RCI2_TABLE
Say Y here for Dell EMC PowerEdge systems. Say Y here for Dell EMC PowerEdge systems.
config EFI_DISABLE_PCI_DMA
bool "Clear Busmaster bit on PCI bridges during ExitBootServices()"
help
Disable the busmaster bit in the control register on all PCI bridges
while calling ExitBootServices() and passing control to the runtime
kernel. System firmware may configure the IOMMU to prevent malicious
PCI devices from being able to attack the OS via DMA. However, since
firmware can't guarantee that the OS is IOMMU-aware, it will tear
down IOMMU configuration when ExitBootServices() is called. This
leaves a window between where a hostile device could still cause
damage before Linux configures the IOMMU again.
If you say Y here, the EFI stub will clear the busmaster bit on all
PCI bridges before ExitBootServices() is called. This will prevent
any malicious PCI devices from being able to perform DMA until the
kernel reenables busmastering after configuring the IOMMU.
This option will cause failures with some poorly behaved hardware
and should not be enabled without testing. The kernel commandline
options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
may be used to override this option.
endmenu endmenu
config UEFI_CPER config UEFI_CPER
......
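The EFI_DISABLE_PCI_DMA help text above describes the mechanism: before ExitBootServices(), clear the Bus Master Enable bit in the command register of every PCI bridge so downstream devices can no longer issue DMA. Inside the stub this is done through the EFI PCI I/O protocol (see the new pci.o libstub object further down), but the register-level operation is the same as in this ordinary post-boot kernel sketch, shown only to illustrate the bit involved; the helper name is hypothetical:

	#include <linux/pci.h>

	/* Illustration only: clear Bus Master Enable on every PCI bridge. */
	static void clear_bridge_busmastering(void)
	{
		struct pci_dev *bridge = NULL;
		u16 cmd;

		for_each_pci_dev(bridge) {
			if (!pci_is_bridge(bridge))
				continue;
			pci_read_config_word(bridge, PCI_COMMAND, &cmd);
			cmd &= ~PCI_COMMAND_MASTER;	/* stop forwarding DMA upstream */
			pci_write_config_word(bridge, PCI_COMMAND, cmd);
		}
	}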
...@@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr) ...@@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr)
* *
* Search in the EFI memory map for the region covering @phys_addr. * Search in the EFI memory map for the region covering @phys_addr.
* Returns the EFI memory type if the region was found in the memory * Returns the EFI memory type if the region was found in the memory
* map, EFI_RESERVED_TYPE (zero) otherwise. * map, -EINVAL otherwise.
*/ */
int efi_mem_type(unsigned long phys_addr) int efi_mem_type(unsigned long phys_addr)
{ {
......
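The comment fix above is worth noting for callers: on a miss, efi_mem_type() returns a negative errno (-EINVAL), not EFI_RESERVED_TYPE (zero), so zero can be treated as a real memory type again. A hedged caller-side sketch (the wrapper is hypothetical, <linux/efi.h> assumed):

	/* Hypothetical helper: is this physical address ordinary RAM per EFI? */
	static bool phys_addr_is_efi_ram(unsigned long phys_addr)
	{
		int type = efi_mem_type(phys_addr);

		if (type < 0)		/* address absent from the EFI memory map */
			return false;

		return type == EFI_CONVENTIONAL_MEMORY;
	}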
...@@ -39,7 +39,7 @@ OBJECT_FILES_NON_STANDARD := y ...@@ -39,7 +39,7 @@ OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
random.o random.o pci.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64 # include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
......
...@@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode"; ...@@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
#define get_efi_var(name, vendor, ...) \
efi_call_runtime(get_variable, \
(efi_char16_t *)(name), (efi_guid_t *)(vendor), \
__VA_ARGS__);
/* /*
* Determine whether we're in secure boot mode. * Determine whether we're in secure boot mode.
* *
* Please keep the logic in sync with * Please keep the logic in sync with
* arch/x86/xen/efi.c:xen_efi_get_secureboot(). * arch/x86/xen/efi.c:xen_efi_get_secureboot().
*/ */
enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) enum efi_secureboot_mode efi_get_secureboot(void)
{ {
u32 attr; u32 attr;
u8 secboot, setupmode, moksbstate; u8 secboot, setupmode, moksbstate;
...@@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) ...@@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
return efi_secureboot_mode_disabled; return efi_secureboot_mode_disabled;
secure_boot_enabled: secure_boot_enabled:
pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n"); pr_efi("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled; return efi_secureboot_mode_enabled;
out_efi_err: out_efi_err:
pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); pr_efi_err("Could not determine UEFI Secure Boot status.\n");
return efi_secureboot_mode_unknown; return efi_secureboot_mode_unknown;
} }