Commit b2da7df5 authored by Linus Torvalds

Merge tag 'x86_urgent_for_v5.18_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - A fix to disable PCI/MSI[-X] masking for XEN_HVM guests as that is
   solely controlled by the hypervisor

 - A build fix to make the function prototype (__warn()) as visible as
   the definition itself

 - A bunch of objtool annotation fixes which have accumulated over time

 - An ORC unwinder fix to handle bad input gracefully

 - We thought the microcode gets loaded in time on resume to back the
   microcode-emulated MSRs before they are restored, but we thought wrong.
   Fix the ordering so the microcode is reloaded first

 - Add new Intel model numbers

 - A spelling fix

* tag 'x86_urgent_for_v5.18_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/pci/xen: Disable PCI/MSI[-X] masking for XEN_HVM guests
  bug: Have __warn() prototype defined unconditionally
  x86/Kconfig: fix the spelling of 'becoming' in X86_KERNEL_IBT config
  objtool: Use offstr() to print address of missing ENDBR
  objtool: Print data address for "!ENDBR" data warnings
  x86/xen: Add ANNOTATE_NOENDBR to startup_xen()
  x86/uaccess: Add ENDBR to __put_user_nocheck*()
  x86/retpoline: Add ANNOTATE_NOENDBR for retpolines
  x86/static_call: Add ANNOTATE_NOENDBR to static call trampoline
  objtool: Enable unreachable warnings for CLANG LTO
  x86,objtool: Explicitly mark idtentry_body()s tail REACHABLE
  x86,objtool: Mark cpu_startup_entry() __noreturn
  x86,xen,objtool: Add UNWIND hint
  lib/strn*,objtool: Enforce user_access_begin() rules
  MAINTAINERS: Add x86 unwinding entry
  x86/unwind/orc: Recheck address range after stack info was updated
  x86/cpu: Load microcode during restore_processor_state()
  x86/cpu: Add new Alderlake and Raptorlake CPU model numbers
parents b70ed23c 7e0815b3
@@ -21443,6 +21443,15 @@ F: arch/x86/include/asm/uv/
 F: arch/x86/kernel/apic/x2apic_uv_x.c
 F: arch/x86/platform/uv/
 
+X86 STACK UNWINDING
+M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Peter Zijlstra <peterz@infradead.org>
+S: Supported
+F: arch/x86/include/asm/unwind*.h
+F: arch/x86/kernel/dumpstack.c
+F: arch/x86/kernel/stacktrace.c
+F: arch/x86/kernel/unwind_*.c
+
 X86 VDSO
 M: Andy Lutomirski <luto@kernel.org>
 L: linux-kernel@vger.kernel.org
......
@@ -1866,7 +1866,7 @@ config X86_KERNEL_IBT
 	  code with them to make this happen.
 
 	  In addition to building the kernel with IBT, seal all functions that
-	  are not indirect call targets, avoiding them ever becomming one.
+	  are not indirect call targets, avoiding them ever becoming one.
 
 	  This requires LTO like objtool runs and will slow down the build. It
 	  does significantly reduce the number of ENDBR instructions in the
......
@@ -337,6 +337,9 @@ SYM_CODE_END(ret_from_fork)
 	call	\cfunc
 
+	/* For some configurations \cfunc ends up being a noreturn. */
+	REACHABLE
+
 	jmp	error_return
 .endm
......
@@ -26,6 +26,7 @@
  *		_G	- parts with extra graphics on
  *		_X	- regular server parts
  *		_D	- micro server parts
+ *		_N,_P	- other mobile parts
  *
  * Historical OPTDIFFs:
  *
@@ -107,8 +108,10 @@
 
 #define INTEL_FAM6_ALDERLAKE		0x97	/* Golden Cove / Gracemont */
 #define INTEL_FAM6_ALDERLAKE_L		0x9A	/* Golden Cove / Gracemont */
+#define INTEL_FAM6_ALDERLAKE_N		0xBE
 
 #define INTEL_FAM6_RAPTORLAKE		0xB7
+#define INTEL_FAM6_RAPTORLAKE_P		0xBA
 
 /* "Small Core" Processors (Atom) */
......
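For context, model defines like the ones added above are typically consumed
through x86_cpu_id match tables. A minimal, hypothetical sketch (not part of
this series; the table name is made up for illustration):

	#include <asm/cpu_device_id.h>
	#include <asm/intel-family.h>

	/* Hypothetical match table referencing the newly added models. */
	static const struct x86_cpu_id example_cpu_ids[] = {
		X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
		X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
		{}
	};

The macro expands to an entry keyed on vendor Intel, family 6 and the given
INTEL_FAM6_* model number.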
@@ -131,10 +131,12 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool initrd_gone;
+void microcode_bsp_resume(void);
 #else
 static inline void __init load_ucode_bsp(void) { }
 static inline void load_ucode_ap(void) { }
 static inline void reload_early_microcode(void) { }
+static inline void microcode_bsp_resume(void) { }
 #endif
 
 #endif /* _ASM_X86_MICROCODE_H */
@@ -26,6 +26,7 @@
 	".align 4					\n"	\
 	".globl " STATIC_CALL_TRAMP_STR(name) "	\n"	\
 	STATIC_CALL_TRAMP_STR(name) ":			\n"	\
+	ANNOTATE_NOENDBR					\
 	insns "					\n"	\
 	".byte 0x53, 0x43, 0x54				\n"	\
 	".type " STATIC_CALL_TRAMP_STR(name) ", @function	\n"	\
......
@@ -758,9 +758,9 @@ static struct subsys_interface mc_cpu_interface = {
 };
 
 /**
- * mc_bp_resume - Update boot CPU microcode during resume.
+ * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
-static void mc_bp_resume(void)
+void microcode_bsp_resume(void)
 {
 	int cpu = smp_processor_id();
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -772,7 +772,7 @@ static void mc_bp_resume(void)
 }
 
 static struct syscore_ops mc_syscore_ops = {
-	.resume			= mc_bp_resume,
+	.resume			= microcode_bsp_resume,
 };
 
 static int mc_cpu_starting(unsigned int cpu)
......
@@ -339,11 +339,11 @@ static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
 	struct stack_info *info = &state->stack_info;
 	void *addr = (void *)_addr;
 
-	if (!on_stack(info, addr, len) &&
-	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
-		return false;
+	if (on_stack(info, addr, len))
+		return true;
 
-	return true;
+	return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
+		on_stack(info, addr, len);
 }
 
 static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
......
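For readability, here is the fixed helper from the hunk above restated with
explanatory comments (a condensed sketch, not a verbatim copy of the kernel
source). The old code returned true as soon as get_stack_info() found a stack
for the address, without re-checking that the whole addr..addr+len range fits
on that stack; the new code re-checks on_stack() against the updated info:

	static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
				    size_t len)
	{
		struct stack_info *info = &state->stack_info;
		void *addr = (void *)_addr;

		/* Fast path: the range lies on the stack we already know about. */
		if (on_stack(info, addr, len))
			return true;

		/*
		 * Otherwise look up the stack containing the address and re-check
		 * the full range against the updated stack info before trusting it.
		 */
		return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
			on_stack(info, addr, len);
	}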
@@ -48,6 +48,7 @@ SYM_FUNC_START(__put_user_1)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 1:	movb %al,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -62,6 +63,7 @@ SYM_FUNC_START(__put_user_2)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 2:	movw %ax,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -76,6 +78,7 @@ SYM_FUNC_START(__put_user_4)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 3:	movl %eax,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -90,6 +93,7 @@ SYM_FUNC_START(__put_user_8)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 4:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32
......
@@ -31,6 +31,7 @@
 	.align RETPOLINE_THUNK_SIZE
 SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 
 	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
 		      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
@@ -55,7 +56,6 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 	.align RETPOLINE_THUNK_SIZE
 SYM_CODE_START(__x86_indirect_thunk_array)
-	ANNOTATE_NOENDBR // apply_retpolines
 
 #define GEN(reg) THUNK reg
 #include <asm/GEN-for-each-reg.h>
......
@@ -467,7 +467,6 @@ static __init void xen_setup_pci_msi(void)
 		else
 			xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
 		xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
-		pci_msi_ignore_mask = 1;
 	} else if (xen_hvm_domain()) {
 		xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
 		xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
@@ -481,6 +480,11 @@ static __init void xen_setup_pci_msi(void)
 	 * in allocating the native domain and never use it.
 	 */
 	x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
+	/*
+	 * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
+	 * controlled by the hypervisor.
+	 */
+	pci_msi_ignore_mask = 1;
 }
 
 #else /* CONFIG_PCI_MSI */
......
@@ -50,6 +50,7 @@
 #define PVH_DS_SEL		(PVH_GDT_ENTRY_DS * 8)
 
 SYM_CODE_START_LOCAL(pvh_start_xen)
+	UNWIND_HINT_EMPTY
 	cld
 	lgdt (_pa(gdt))
......
@@ -25,6 +25,7 @@
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
 #include <asm/cpu_device_id.h>
+#include <asm/microcode.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -262,11 +263,18 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	x86_platform.restore_sched_clock_state();
 	mtrr_bp_restore();
 	perf_restore_debug_store();
-	msr_restore_context(ctxt);
 
 	c = &cpu_data(smp_processor_id());
 	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
 		init_ia32_feat_ctl(c);
+
+	microcode_bsp_resume();
+
+	/*
+	 * This needs to happen after the microcode has been updated upon resume
+	 * because some of the MSRs are "emulated" in microcode.
+	 */
+	msr_restore_context(ctxt);
 }
 
 /* Needed by apm.c */
......
@@ -45,6 +45,7 @@ SYM_CODE_END(hypercall_page)
 	__INIT
 SYM_CODE_START(startup_xen)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 	cld
 
 	/* Clear .bss */
......
@@ -21,6 +21,12 @@
 #include <linux/panic.h>
 #include <linux/printk.h>
 
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+	    struct pt_regs *regs, struct warn_args *args);
+
 #ifdef CONFIG_BUG
 
 #ifdef CONFIG_GENERIC_BUG
@@ -110,11 +116,6 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
 #endif
 
 /* used internally by panic.c */
-struct warn_args;
-struct pt_regs;
-
-void __warn(const char *file, int line, void *caller, unsigned taint,
-	    struct pt_regs *regs, struct warn_args *args);
 
 #ifndef WARN_ON
 #define WARN_ON(condition) ({						\
......
@@ -167,7 +167,7 @@ static inline int suspend_disable_secondary_cpus(void) { return 0; }
 static inline void suspend_enable_secondary_cpus(void) { }
 #endif /* !CONFIG_PM_SLEEP_SMP */
 
-void cpu_startup_entry(enum cpuhp_state state);
+void __noreturn cpu_startup_entry(enum cpuhp_state state);
 
 void cpu_idle_poll_ctrl(bool enable);
......
@@ -25,7 +25,7 @@
  * hit it), 'max' is the address space maximum (and we return
  * -EFAULT if we hit it).
  */
-static inline long do_strncpy_from_user(char *dst, const char __user *src,
+static __always_inline long do_strncpy_from_user(char *dst, const char __user *src,
 					unsigned long count, unsigned long max)
 {
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
......
@@ -20,7 +20,7 @@
  * if it fits in a aligned 'long'. The caller needs to check
  * the return value against "> max".
  */
-static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
 {
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 	unsigned long align, res = 0;
......
@@ -231,7 +231,7 @@ objtool_args =							\
 	$(if $(part-of-module), --module)			\
 	$(if $(CONFIG_X86_KERNEL_IBT), --lto --ibt)		\
 	$(if $(CONFIG_FRAME_POINTER),, --no-fp)			\
-	$(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), --no-unreachable)\
+	$(if $(CONFIG_GCOV_KERNEL), --no-unreachable)		\
 	$(if $(CONFIG_RETPOLINE), --retpoline)			\
 	$(if $(CONFIG_X86_SMAP), --uaccess)			\
 	$(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount)	\
......
@@ -140,7 +140,7 @@ objtool_link()
 		if ! is_enabled CONFIG_FRAME_POINTER; then
 			objtoolopt="${objtoolopt} --no-fp"
 		fi
-		if is_enabled CONFIG_GCOV_KERNEL || is_enabled CONFIG_LTO_CLANG; then
+		if is_enabled CONFIG_GCOV_KERNEL; then
 			objtoolopt="${objtoolopt} --no-unreachable"
 		fi
 		if is_enabled CONFIG_RETPOLINE; then
......
@@ -184,6 +184,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
 		"do_group_exit",
 		"stop_this_cpu",
 		"__invalid_creds",
+		"cpu_startup_entry",
 	};
 
 	if (!func)
@@ -3217,9 +3218,8 @@ validate_ibt_reloc(struct objtool_file *file, struct reloc *reloc)
 static void warn_noendbr(const char *msg, struct section *sec, unsigned long offset,
 			 struct instruction *dest)
 {
-	WARN_FUNC("%srelocation to !ENDBR: %s+0x%lx", sec, offset, msg,
-		  dest->func ? dest->func->name : dest->sec->name,
-		  dest->func ? dest->offset - dest->func->offset : dest->offset);
+	WARN_FUNC("%srelocation to !ENDBR: %s", sec, offset, msg,
+		  offstr(dest->sec, dest->offset));
 }
 
 static void validate_ibt_dest(struct objtool_file *file, struct instruction *insn,
@@ -3823,11 +3823,8 @@ static int validate_ibt(struct objtool_file *file)
 			struct instruction *dest;
 
 			dest = validate_ibt_reloc(file, reloc);
-			if (is_data && dest && !dest->noendbr) {
-				warn_noendbr("data ", reloc->sym->sec,
-					     reloc->sym->offset + reloc->addend,
-					     dest);
-			}
+			if (is_data && dest && !dest->noendbr)
+				warn_noendbr("data ", sec, reloc->offset, dest);
 		}
 	}
......