Commit a5dbd76a authored by Linus Torvalds

Merge tag 'x86-urgent-2024-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:

 - Prevent a deadlock on cpu_hotplug_lock in the aperf/mperf driver.

   A recent change in the ACPI code which consolidated code paths moved
   the invocation of init_freq_invariance_cppc() into a CPU hotplug
   handler. The first invocation on AMD CPUs ends up enabling a static
   branch, which deadlocks because the static branch enable tries to
   acquire cpu_hotplug_lock while that lock is already write-held by the
   hotplug machinery.

   Use static_branch_enable_cpuslocked() instead and take the hotplug
   lock for read in the Intel code path, which is invoked from the
   architecture code outside of the CPU hotplug operations. A minimal
   sketch of the locking pattern follows.
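
   The sketch below is illustrative only and not part of the patches;
   the static key example_key and both helper functions are made up:

      #include <linux/cpu.h>
      #include <linux/jump_label.h>

      static DEFINE_STATIC_KEY_FALSE(example_key);

      /*
       * Called from a CPU hotplug callback: cpu_hotplug_lock is already
       * write-held by the hotplug machinery, so only the _cpuslocked
       * variant is safe here.
       */
      static void example_enable_from_hotplug_callback(void)
      {
              static_branch_enable_cpuslocked(&example_key);
      }

      /*
       * Called from regular context outside of the hotplug operations:
       * take the hotplug lock for read around the cpuslocked enable.
       */
      static void example_enable_from_normal_context(void)
      {
              guard(cpus_read_lock)();
              static_branch_enable_cpuslocked(&example_key);
      }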

 - Fix the number of reserved bits in the sev_config structure bit field
   so that the bit field does not exceed 64 bits (illustrated below).
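
   Illustration with hypothetical field names: the declared widths of a
   u64 bit field must sum to at most 64 to keep the structure within a
   single 64-bit word, so adding a 1-bit flag requires shrinking the
   reserved part accordingly:

      #include <linux/types.h>

      struct example_config {
              u64 flag_a         : 1,
                  flag_b         : 1,
                  flag_c         : 1,
                  __reserved     : 61;    /* 1 + 1 + 1 + 61 == 64 */
      };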

 - Add missing Zen5 model numbers

 - Fix the alignment assumptions of pti_clone_pgtable() and
   pti_clone_entry_text() on 32-bit:

   The code assumes PMD-aligned code sections, but on 32-bit the kernel
   entry text is not PMD aligned. Depending on code size and location,
   which are configuration and compiler dependent, the entry text can
   cross a PMD boundary. As the start is not PMD aligned, adding PMD size
   to the start address produces an address beyond the end address, which
   results in partially mapped entry code for user space. That causes
   endless recursion on the first entry from user space (usually #PF).

   Cure this by rounding the address up in the addition so that it lands
   on the start address of the next PMD.

   pti_clone_entry_text() enforces a PMD-level mapping, but on 32-bit the
   tail of the range can end up PTE mapped, which makes the clone fail
   because the PMD covering the tail is not a large page mapping. Use
   PTI_LEVEL_KERNEL_IMAGE for the clone invocation, which resolves to PTE
   on 32-bit and PMD on 64-bit. A sketch of the stepping change follows.
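
   The user-space sketch below is illustrative only; the addresses are
   made up and PMD_SIZE is assumed to be 4M (32-bit PAE). The round_up()
   macro mirrors the kernel's helper for power-of-two alignments:

      #include <stdio.h>

      #define PMD_SIZE        0x400000UL      /* assumed: 4M PMDs (32-bit PAE) */
      /* Same result as the kernel's round_up() for power-of-two alignment. */
      #define round_up(x, y)  ((((x) - 1) | ((y) - 1)) + 1)

      int main(void)
      {
              unsigned long start = 0x3ff000UL;  /* entry text start, not PMD aligned   */
              unsigned long end   = 0x401000UL;  /* entry text crosses the PMD boundary */

              /*
               * Old stepping: overshoots 'end' after the first PMD, so the
               * loop stops and the tail in the second PMD is never cloned.
               */
              printf("old: next addr = %#lx (> end, loop stops)\n",
                     start + PMD_SIZE);

              /*
               * New stepping: lands exactly on the next PMD boundary, which
               * is still below 'end', so the loop also clones the tail.
               */
              printf("new: next addr = %#lx (< end, loop continues)\n",
                     round_up(start + 1, PMD_SIZE));

              return 0;
      }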

 - Zero the 8-byte case for get_user() on range check failure on 32-bit

   The recent consolidation of the 8-byte get_user() case broke the
   zeroing in the failure case again. Reestablish it by clearing ECX
   before the range check instead of afterwards, as the later clearing
   obviously cannot be reached when the range check fails. A sketch of
   the zeroing contract follows.
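
   The sketch below is illustrative only; the helper read_u64_from_user()
   is made up. It relies on the documented get_user() contract that the
   destination is zeroed on failure, which for the 8-byte case on 32-bit
   includes the upper half returned in ECX:

      #include <linux/errno.h>
      #include <linux/types.h>
      #include <linux/uaccess.h>

      static int read_u64_from_user(const u64 __user *uptr, u64 *out)
      {
              u64 val;

              /*
               * If the range check (or the access itself) fails, get_user()
               * must still leave 'val' zeroed rather than handing stale
               * register contents back to the caller.
               */
              if (get_user(val, uptr))
                      return -EFAULT;

              *out = val;
              return 0;
      }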

* tag 'x86-urgent-2024-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/uaccess: Zero the 8-byte get_range case on failure on 32-bit
  x86/mm: Fix pti_clone_entry_text() for i386
  x86/mm: Fix pti_clone_pgtable() alignment assumption
  x86/setup: Parse the builtin command line before merging
  x86/CPU/AMD: Add models 0x60-0x6f to the Zen5 range
  x86/sev: Fix __reserved field in sev_config
  x86/aperfmperf: Fix deadlock on cpu_hotplug_lock
parents 61ca6c78 dd35a093
@@ -163,7 +163,7 @@ struct sev_config {
               */
              use_cas           : 1,
 
-             __reserved        : 62;
+             __reserved        : 61;
 };
 
 static struct sev_config sev_cfg __read_mostly;
......
@@ -2,6 +2,10 @@
 #ifndef _ASM_X86_CMDLINE_H
 #define _ASM_X86_CMDLINE_H
 
+#include <asm/setup.h>
+
+extern char builtin_cmdline[COMMAND_LINE_SIZE];
+
 int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
 int cmdline_find_option(const char *cmdline_ptr, const char *option,
                         char *buffer, int bufsize);
......
@@ -462,7 +462,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                 switch (c->x86_model) {
                 case 0x00 ... 0x2f:
                 case 0x40 ... 0x4f:
-                case 0x70 ... 0x7f:
+                case 0x60 ... 0x7f:
                         setup_force_cpu_cap(X86_FEATURE_ZEN5);
                         break;
                 default:
......
@@ -306,7 +306,7 @@ static void freq_invariance_enable(void)
                 WARN_ON_ONCE(1);
                 return;
         }
-        static_branch_enable(&arch_scale_freq_key);
+        static_branch_enable_cpuslocked(&arch_scale_freq_key);
         register_freq_invariance_syscore_ops();
         pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
 }
@@ -323,8 +323,10 @@ static void __init bp_init_freq_invariance(void)
         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                 return;
 
-        if (intel_set_max_freq_ratio())
+        if (intel_set_max_freq_ratio()) {
+                guard(cpus_read_lock)();
                 freq_invariance_enable();
+        }
 }
 
 static void disable_freq_invariance_workfn(struct work_struct *work)
......
@@ -164,7 +164,7 @@ unsigned long saved_video_mode;
 static char __initdata command_line[COMMAND_LINE_SIZE];
 
 #ifdef CONFIG_CMDLINE_BOOL
-static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+char builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 bool builtin_cmdline_added __ro_after_init;
 #endif
......
@@ -207,18 +207,29 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
 
 int cmdline_find_option_bool(const char *cmdline, const char *option)
 {
-        if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
-                WARN_ON_ONCE(!builtin_cmdline_added);
+        int ret;
 
-        return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+        ret = __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+        if (ret > 0)
+                return ret;
+
+        if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
+                return __cmdline_find_option_bool(builtin_cmdline, COMMAND_LINE_SIZE, option);
+
+        return ret;
 }
 
 int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
                         int bufsize)
 {
-        if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
-                WARN_ON_ONCE(!builtin_cmdline_added);
+        int ret;
 
-        return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
-                                     buffer, bufsize);
+        ret = __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
+        if (ret > 0)
+                return ret;
+
+        if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
+                return __cmdline_find_option(builtin_cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
+
+        return ret;
 }
@@ -88,12 +88,14 @@ SYM_FUNC_END(__get_user_4)
 EXPORT_SYMBOL(__get_user_4)
 
 SYM_FUNC_START(__get_user_8)
+#ifndef CONFIG_X86_64
+        xor %ecx,%ecx
+#endif
         check_range size=8
         ASM_STAC
 #ifdef CONFIG_X86_64
         UACCESS movq (%_ASM_AX),%rdx
 #else
-        xor %ecx,%ecx
         UACCESS movl (%_ASM_AX),%edx
         UACCESS movl 4(%_ASM_AX),%ecx
 #endif
......
@@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
                          */
                         *target_pmd = *pmd;
 
-                        addr += PMD_SIZE;
+                        addr = round_up(addr + 1, PMD_SIZE);
 
                 } else if (level == PTI_CLONE_PTE) {
 
                         /* Walk the page-table down to the pte level */
                         pte = pte_offset_kernel(pmd, addr);
                         if (pte_none(*pte)) {
-                                addr += PAGE_SIZE;
+                                addr = round_up(addr + 1, PAGE_SIZE);
                                 continue;
                         }
@@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
                         /* Clone the PTE */
                         *target_pte = *pte;
 
-                        addr += PAGE_SIZE;
+                        addr = round_up(addr + 1, PAGE_SIZE);
 
                 } else {
                         BUG();
@@ -496,7 +496,7 @@ static void pti_clone_entry_text(void)
 {
         pti_clone_pgtable((unsigned long) __entry_text_start,
                           (unsigned long) __entry_text_end,
-                          PTI_CLONE_PMD);
+                          PTI_LEVEL_KERNEL_IMAGE);
 }
 
 /*
......