Commit 0279aa78 authored by Linus Torvalds

Merge tag 'x86-cleanups-2024-09-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Thomas Gleixner:
 "A set of cleanups across x86:

   - Use memremap() for the EISA probe instead of ioremap(). EISA is
     strictly memory and not MMIO

   - Cleanups and enhancements all over the place"

* tag 'x86-cleanups-2024-09-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/EISA: Dereference memory directly instead of using readl()
  x86/extable: Remove unused declaration fixup_bug()
  x86/boot/64: Strip percpu address space when setting up GDT descriptors
  x86/cpu: Clarify the error message when BIOS does not support SGX
  x86/kexec: Add comments around swap_pages() assembly to improve readability
  x86/kexec: Fix a comment of swap_pages() assembly
  x86/sgx: Fix a W=1 build warning in function comment
  x86/EISA: Use memremap() to probe for the EISA BIOS signature
  x86/mtrr: Remove obsolete declaration for mtrr_bp_restore()
  x86/cpu_entry_area: Annotate percpu_setup_exception_stacks() as __init
parents 5ba202a7 a678164a
@@ -37,7 +37,6 @@ struct pt_regs;
 extern int fixup_exception(struct pt_regs *regs, int trapnr,
                            unsigned long error_code, unsigned long fault_addr);
-extern int fixup_bug(struct pt_regs *regs, int trapnr);
 extern int ex_get_fixup_type(unsigned long ip);
 extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
...
@@ -69,7 +69,6 @@ extern int mtrr_add_page(unsigned long base, unsigned long size,
                         unsigned int type, bool increment);
 extern int mtrr_del(int reg, unsigned long base, unsigned long size);
 extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
-extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
 void mtrr_disable(void);
@@ -117,7 +116,6 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
        return 0;
 }
 #define mtrr_bp_init() do {} while (0)
-#define mtrr_bp_restore() do {} while (0)
 #define mtrr_disable() do {} while (0)
 #define mtrr_enable() do {} while (0)
 #define mtrr_generic_set_state() do {} while (0)
...
@@ -188,7 +188,7 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
 update_sgx:
        if (!(msr & FEAT_CTL_SGX_ENABLED)) {
                if (enable_sgx_kvm || enable_sgx_driver)
-                       pr_err_once("SGX disabled by BIOS.\n");
+                       pr_err_once("SGX disabled or unsupported by BIOS.\n");
                clear_cpu_cap(c, X86_FEATURE_SGX);
                return;
        }
...
@@ -733,7 +733,7 @@ int arch_memory_failure(unsigned long pfn, int flags)
        return 0;
 }
-/**
+/*
  * A section metric is concatenated in a way that @low bits 12-31 define the
  * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
  * metric.
...
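The hunk above demotes a "/**" opener to a plain "/*" comment: "make W=1" runs kernel-doc over every "/**" block and warns when the block does not properly document the function that follows, which is what the free-form section-metric note triggered. A minimal sketch of the distinction, with the metric calculation reconstructed from the comment's own bit-layout description (the function name and exact types here are assumptions, not taken from this hunk):

/**
 * sgx_calc_section_metric() - example of a well-formed kernel-doc block,
 * which W=1 builds parse and check against the function signature below.
 * @low:  low CPUID word; bits 12-31 supply metric bits 12-31
 * @high: high CPUID word; bits 0-19 supply metric bits 32-51
 */
static unsigned long long sgx_calc_section_metric(unsigned int low, unsigned int high)
{
        return ((unsigned long long)(high & 0xfffffu) << 32) | (low & 0xfffff000u);
}

/*
 * A plain comment (single leading asterisk) is ignored by kernel-doc, so
 * free-form notes that do not follow the kernel-doc format belong here.
 */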
@@ -11,15 +11,15 @@
 static __init int eisa_bus_probe(void)
 {
-       void __iomem *p;
+       u32 *p;
        if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
                return 0;
-       p = ioremap(0x0FFFD9, 4);
-       if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
+       p = memremap(0x0FFFD9, 4, MEMREMAP_WB);
+       if (p && *p == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
                EISA_bus = 1;
-       iounmap(p);
+       memunmap(p);
        return 0;
 }
 subsys_initcall(eisa_bus_probe);
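The EISA signature at physical address 0x0FFFD9 sits in ordinary system RAM (the BIOS area), not in device MMIO space, so memremap() plus a plain pointer dereference is the right interface; ioremap()/readl() are meant for true I/O memory. A minimal sketch of the same probe pattern, using a hypothetical helper name:

/* Hypothetical helper illustrating the memremap() probe pattern used above:
 * map a few bytes of normal RAM, read them through a plain pointer, unmap. */
static int __init probe_ram_signature(phys_addr_t pa, u32 expected)
{
        u32 *p = memremap(pa, sizeof(*p), MEMREMAP_WB);
        int found = 0;

        if (!p)
                return 0;

        found = (*p == expected);
        memunmap(p);
        return found;
}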
@@ -559,10 +559,11 @@ void early_setup_idt(void)
  */
 void __head startup_64_setup_gdt_idt(void)
 {
+       struct desc_struct *gdt = (void *)(__force unsigned long)init_per_cpu_var(gdt_page.gdt);
        void *handler = NULL;
        struct desc_ptr startup_gdt_descr = {
-               .address = (unsigned long)&RIP_REL_REF(init_per_cpu_var(gdt_page.gdt)),
+               .address = (unsigned long)&RIP_REL_REF(*gdt),
                .size = GDT_SIZE - 1,
        };
...
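The new gdt local strips the per-CPU address space from init_per_cpu_var(gdt_page.gdt) with a (__force unsigned long) cast before the address is fed into the descriptor, so the descriptor receives a plain address rather than a per-CPU-qualified pointer. A rough standalone sketch of the sparse address-space idea behind such a cast (the macro and symbol names here are simplified stand-ins, not the kernel's actual definitions):

/* Simplified stand-ins for the sparse annotations: under a sparse run
 * (__CHECKER__) the qualifier marks a separate address space, and __force
 * marks an intentional conversion out of it. Plain compilers see nothing. */
#ifdef __CHECKER__
# define __percpu_as    __attribute__((noderef, address_space(__percpu)))
# define __force        __attribute__((force))
#else
# define __percpu_as
# define __force
#endif

struct gdt_page_like { unsigned long entries[16]; };

static struct gdt_page_like boot_gdt __percpu_as;

static unsigned long gdt_plain_address(void)
{
        /* Without the __force cast, sparse reports an address-space mismatch
         * when the annotated symbol's address is used as a plain integer. */
        return (__force unsigned long)&boot_gdt;
}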
@@ -170,6 +170,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        wbinvd
 .Lsme_off:
+       /* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
        movq    %rcx, %r11
        call    swap_pages
@@ -258,7 +259,7 @@ SYM_CODE_END(virtual_mapped)
 /* Do the copies */
 SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
        UNWIND_HINT_END_OF_STACK
-       movq    %rdi, %rcx      /* Put the page_list in %rcx */
+       movq    %rdi, %rcx      /* Put the indirection_page in %rcx */
        xorl    %edi, %edi
        xorl    %esi, %esi
        jmp     1f
@@ -289,18 +290,21 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
        movq    %rcx, %rsi      /* For ever source page do a copy */
        andq    $0xfffffffffffff000, %rsi
-       movq    %rdi, %rdx
-       movq    %rsi, %rax
+       movq    %rdi, %rdx      /* Save destination page to %rdx */
+       movq    %rsi, %rax      /* Save source page to %rax */
+       /* copy source page to swap page */
        movq    %r10, %rdi
        movl    $512, %ecx
        rep ; movsq
+       /* copy destination page to source page */
        movq    %rax, %rdi
        movq    %rdx, %rsi
        movl    $512, %ecx
        rep ; movsq
+       /* copy swap page to destination page */
        movq    %rdx, %rdi
        movq    %r10, %rsi
        movl    $512, %ecx
...
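The added comments spell out what the three rep;movsq blocks do: for each indirection entry, the source and destination pages are exchanged through the scratch page held in %r10, 512 quadwords (one 4 KiB page) per copy. In C terms the per-page step is roughly the following sketch:

#include <string.h>

#define PAGE_SIZE 4096  /* 512 quadwords, matching the $512 movsq counts */

/* Rough C equivalent of one swap_pages iteration: exchange the contents of
 * the source and destination pages through a scratch "swap" page. */
static void swap_one_page(void *dest, void *source, void *swap_page)
{
        memcpy(swap_page, source, PAGE_SIZE);   /* source      -> swap page   */
        memcpy(source, dest, PAGE_SIZE);        /* destination -> source page */
        memcpy(dest, swap_page, PAGE_SIZE);     /* swap page   -> destination */
}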
@@ -164,7 +164,7 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
        }
 }
 #else
-static inline void percpu_setup_exception_stacks(unsigned int cpu)
+static void __init percpu_setup_exception_stacks(unsigned int cpu)
 {
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
...
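Annotating the #else variant as __init matches the main implementation above it: the function only runs during boot, so it can live in .init.text and be discarded afterwards, and modpost's section-mismatch checking stays consistent across both variants. A minimal, generic sketch of the __init contract (hypothetical function names, not this file's contents):

/* Sketch: an __init function is placed in .init.text and freed after boot;
 * it must only be called from other init-time code, otherwise modpost
 * reports a section mismatch. */
#include <linux/init.h>

static void __init one_time_setup(void)
{
        /* boot-time-only work */
}

static int __init example_init(void)
{
        one_time_setup();       /* fine: the caller is also __init */
        return 0;
}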