Commit 936fd005 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "A couple of fixes addressing the following issues:

   - The last polishing for the TLB code, removing the last BUG_ON() and
     the debug file along with tidying up the lazy TLB code.

   - Prevent a triple fault on 1st Gen. 486, caused by stupidly calling the
     early IDT setup only after the first function that faults and relies
     on the exception table to catch that fault.

   - Limit the mmap of /dev/mem to valid addresses

   - Prevent late microcode loading on Broadwell X

   - Remove a redundant assignment in the cache info code"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Limit mmap() of /dev/mem to valid physical addresses
  x86/mm: Remove debug/x86/tlb_defer_switch_to_init_mm
  x86/mm: Tidy up "x86/mm: Flush more aggressively in lazy TLB mode"
  x86/mm/64: Remove the last VM_BUG_ON() from the TLB code
  x86/microcode/intel: Disable late loading on model 79
  x86/idt: Initialize early IDT before cr4_init_shadow()
  x86/cpu/intel_cacheinfo: Remove redundant assignment to 'this_leaf'
parents 9e415a8e ce56a86e
@@ -110,6 +110,10 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
 #endif
 
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
 /**
  * virt_to_phys - map virtual addresses to physical
  * @address: address to remap
...
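For context (not part of this patch): defining ARCH_HAS_VALID_PHYS_ADDR_RANGE matters because the generic /dev/mem driver otherwise falls back to a permissive stub. A simplified sketch (not the kernel's exact source) of the caller side in drivers/char/mem.c, trimmed to the relevant check:

/* Simplified sketch of how drivers/char/mem.c consults these hooks. */
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
/* Generic fallback when the architecture does not provide a check. */
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        /*
         * With the x86 implementation added in arch/x86/mm/mmap.c later in
         * this diff, out-of-range offsets are rejected here.
         */
        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        /* ... permission checks and remap_pfn_range() follow ... */
        return 0;
}
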
@@ -82,12 +82,21 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
-/*
- * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
- * to init_mm when we switch to a kernel thread (e.g. the idle thread). If
- * it's false, then we immediately switch CR3 when entering a kernel thread.
- */
-DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+static inline bool tlb_defer_switch_to_init_mm(void)
+{
+        /*
+         * If we have PCID, then switching to init_mm is reasonably
+         * fast. If we don't have PCID, then switching to init_mm is
+         * quite slow, so we try to defer it in the hopes that we can
+         * avoid it entirely. The latter approach runs the risk of
+         * receiving otherwise unnecessary IPIs.
+         *
+         * This choice is just a heuristic. The tlb code can handle this
+         * function returning true or false regardless of whether we have
+         * PCID.
+         */
+        return !static_cpu_has(X86_FEATURE_PCID);
+}
 
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in
...
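For context (not part of this patch): the helper replaces the tlb_use_lazy_mode static key, and its return value is consumed by enter_lazy_tlb() (see the arch/x86/mm/tlb.c hunks below). A condensed sketch of that caller:

/* Condensed sketch of enter_lazy_tlb() as it looks after this series. */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
                return;

        if (tlb_defer_switch_to_init_mm()) {
                /*
                 * No PCID: keep the user CR3 loaded and stay lazy,
                 * accepting that we may receive TLB flush IPIs that a
                 * switch to init_mm would have avoided.
                 */
                this_cpu_write(cpu_tlbstate.is_lazy, true);
        } else {
                /* PCID: switching mms is cheap, go to init_mm right away. */
                switch_mm(NULL, &init_mm, NULL);
        }
}
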
...@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index, ...@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
unsigned int apicid, nshared, first, last; unsigned int apicid, nshared, first, last;
this_leaf = this_cpu_ci->info_list + index;
nshared = base->eax.split.num_threads_sharing + 1; nshared = base->eax.split.num_threads_sharing + 1;
apicid = cpu_data(cpu).apicid; apicid = cpu_data(cpu).apicid;
first = apicid - (apicid % nshared); first = apicid - (apicid % nshared);
......
@@ -34,6 +34,7 @@
 #include <linux/mm.h>
 
 #include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/setup.h>
...
@@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
 	return 0;
 }
 
+static bool is_blacklisted(unsigned int cpu)
+{
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
+		pr_err_once("late loading on model 79 is disabled.\n");
+		return true;
+	}
+
+	return false;
+}
+
 static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 					     bool refresh_fw)
 {
...
@@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 	const struct firmware *firmware;
 	enum ucode_state ret;
 
+	if (is_blacklisted(cpu))
+		return UCODE_NFOUND;
+
 	sprintf(name, "intel-ucode/%02x-%02x-%02x",
 		c->x86, c->x86_model, c->x86_mask);
 
...
@@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
+	if (is_blacklisted(cpu))
+		return UCODE_NFOUND;
+
 	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }
...
@@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void)
 
 asmlinkage __visible void __init i386_start_kernel(void)
 {
-	cr4_init_shadow();
-
+	/* Make sure IDT is set up before any exception happens */
 	idt_setup_early_handler();
 
+	cr4_init_shadow();
+
 	sanitize_boot_params(&boot_params);
 
 	x86_early_init_platform_quirks();
...
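Why the order matters (explanatory sketch, not part of this patch): cr4_init_shadow() ends up reading %cr4, which does not exist on a 1st Gen. 486, and the read relies on an exception-table fixup. That fixup can only run once the early IDT handler is installed; before idt_setup_early_handler(), the fault escalates into a triple fault and the machine resets. Roughly:

/* Rough illustration of an extable-protected CR4 read (hypothetical helper). */
static unsigned long read_cr4_or_zero(void)
{
        unsigned long val = 0;

        /*
         * "1:" faults on CPUs without CR4.  The exception table entry
         * makes the early IDT handler resume at "2:", leaving val = 0.
         * Without idt_setup_early_handler() having run, there is no
         * handler to perform the fixup.
         */
        asm volatile("1: mov %%cr4, %0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b)
                     : "+r" (val));

        return val;
}
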
@@ -174,3 +174,15 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 		return "[mpx]";
 	return NULL;
 }
+
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+	return addr + count <= __pa(high_memory);
+}
+
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+	phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
+
+	return valid_phys_addr_range(addr, count);
+}
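Observable effect, as a hedged userspace illustration (the offset below is made up, and the program needs root plus a /dev/mem that STRICT_DEVMEM does not already block): with this patch, mmap() of /dev/mem at a physical offset outside the valid range fails instead of creating a mapping to nonexistent memory.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/mem", O_RDONLY);
        if (fd < 0) {
                perror("open /dev/mem");
                return 1;
        }

        /* Hypothetical, page-aligned physical offset far beyond real RAM. */
        off_t bogus = (off_t)1 << 46;

        void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, bogus);
        if (p == MAP_FAILED)
                perror("mmap");         /* expected to fail with this patch */
        else
                munmap(p, 4096);

        close(fd);
        return 0;
}
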
...@@ -30,7 +30,6 @@ ...@@ -30,7 +30,6 @@
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
u16 *new_asid, bool *need_flush) u16 *new_asid, bool *need_flush)
...@@ -147,7 +146,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, ...@@ -147,7 +146,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
this_cpu_write(cpu_tlbstate.is_lazy, false); this_cpu_write(cpu_tlbstate.is_lazy, false);
if (real_prev == next) { if (real_prev == next) {
VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
next->context.ctx_id); next->context.ctx_id);
/* /*
...@@ -213,6 +212,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, ...@@ -213,6 +212,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
} }
/* /*
* Please ignore the name of this function. It should be called
* switch_to_kernel_thread().
*
* enter_lazy_tlb() is a hint from the scheduler that we are entering a * enter_lazy_tlb() is a hint from the scheduler that we are entering a
* kernel thread or other context without an mm. Acceptable implementations * kernel thread or other context without an mm. Acceptable implementations
* include doing nothing whatsoever, switching to init_mm, or various clever * include doing nothing whatsoever, switching to init_mm, or various clever
...@@ -227,7 +229,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) ...@@ -227,7 +229,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return; return;
if (static_branch_unlikely(&tlb_use_lazy_mode)) { if (tlb_defer_switch_to_init_mm()) {
/* /*
* There's a significant optimization that may be possible * There's a significant optimization that may be possible
* here. We have accurate enough TLB flush tracking that we * here. We have accurate enough TLB flush tracking that we
...@@ -626,57 +628,3 @@ static int __init create_tlb_single_page_flush_ceiling(void) ...@@ -626,57 +628,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0; return 0;
} }
late_initcall(create_tlb_single_page_flush_ceiling); late_initcall(create_tlb_single_page_flush_ceiling);
static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
char buf[2];
buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
buf[1] = '\n';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t tlblazy_write_file(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
bool val;
if (kstrtobool_from_user(user_buf, count, &val))
return -EINVAL;
if (val)
static_branch_enable(&tlb_use_lazy_mode);
else
static_branch_disable(&tlb_use_lazy_mode);
return count;
}
static const struct file_operations fops_tlblazy = {
.read = tlblazy_read_file,
.write = tlblazy_write_file,
.llseek = default_llseek,
};
static int __init init_tlb_use_lazy_mode(void)
{
if (boot_cpu_has(X86_FEATURE_PCID)) {
/*
* Heuristic: with PCID on, switching to and from
* init_mm is reasonably fast, but remote flush IPIs
* as expensive as ever, so turn off lazy TLB mode.
*
* We can't do this in setup_pcid() because static keys
* haven't been initialized yet, and it would blow up
* badly.
*/
static_branch_disable(&tlb_use_lazy_mode);
}
debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
arch_debugfs_dir, NULL, &fops_tlblazy);
return 0;
}
late_initcall(init_tlb_use_lazy_mode);