Commit d517be5f authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "A bit on the largish side due to a series of fixes for a regression in
  the x86 vector management which was introduced in 4.3.  This work was
  started in December already, but it took some time to fix all corner
  cases and a couple of older bugs in that area which were detected
  while at it.

  Aside of that, a few platform updates for intel-mid, quark and UV, and
  two fixes in the mm code:
   - Use proper types for pgprot values to avoid truncation
   - Prevent a size truncation in the pageattr code when setting page
     attributes for large mappings"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  x86/mm/pat: Avoid truncation when converting cpa->numpages to address
  x86/mm: Fix types used in pgprot cacheability flags translations
  x86/platform/quark: Print boundaries correctly
  x86/platform/UV: Remove EFI memmap quirk for UV2+
  x86/platform/intel-mid: Join string and fix SoC name
  x86/platform/intel-mid: Enable 64-bit build
  x86/irq: Plug vector cleanup race
  x86/irq: Call irq_force_move_complete with irq descriptor
  x86/irq: Remove outgoing CPU from vector cleanup mask
  x86/irq: Remove the cpumask allocation from send_cleanup_vector()
  x86/irq: Clear move_in_progress before sending cleanup IPI
  x86/irq: Remove offline cpus from vector cleanup
  x86/irq: Get rid of code duplication
  x86/irq: Copy vectormask instead of an AND operation
  x86/irq: Check vector allocation early
  x86/irq: Reorganize the search in assign_irq_vector
  x86/irq: Reorganize the return path in assign_irq_vector
  x86/irq: Do not use apic_chip_data.old_domain as temporary buffer
  x86/irq: Validate that irq descriptor is still active
  x86/irq: Fix a race in x86_vector_free_irqs()
  ...
parents dc799d01 74256377
@@ -509,11 +509,10 @@ config X86_INTEL_CE
 config X86_INTEL_MID
 	bool "Intel MID platform support"
-	depends on X86_32
 	depends on X86_EXTENDED_PLATFORM
 	depends on X86_PLATFORM_DEVICES
 	depends on PCI
-	depends on PCI_GOANY
+	depends on X86_64 || (PCI_GOANY && X86_32)
 	depends on X86_IO_APIC
 	select SFI
 	select I2C
...
@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
 #define __ARCH_HAS_DO_SOFTIRQ
 
+struct irq_desc;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
...
@@ -366,20 +366,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
 }
 
 static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
 {
+	pgprotval_t val = pgprot_val(pgprot);
 	pgprot_t new;
-	unsigned long val;
 
-	val = pgprot_val(pgprot);
 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
 		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
 	return new;
 }
 
 static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
 {
+	pgprotval_t val = pgprot_val(pgprot);
 	pgprot_t new;
-	unsigned long val;
 
-	val = pgprot_val(pgprot);
 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
 		((val & _PAGE_PAT_LARGE) >>
 		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
...
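The type change above matters because pgprot_val() yields a pgprotval_t, which is 64 bits wide whenever 64-bit page table entries are in use (x86-64 and 32-bit PAE), while "unsigned long" is only 32 bits on a 32-bit kernel, so the old local variable silently dropped high protection bits such as the NX bit. A minimal standalone C sketch of that truncation (plain userspace code with illustrative macro values, not the kernel's definitions):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_NX  (1ULL << 63)	/* NX lives in the top bit of a 64-bit PTE */
	#define PAGE_PAT (1ULL << 7)

	int main(void)
	{
		uint64_t prot = PAGE_NX | PAGE_PAT;	/* what a 64-bit pgprotval_t holds */
		uint32_t truncated = (uint32_t)prot;	/* a 32-bit "unsigned long" on i386 */

		printf("full value:      %#llx\n", (unsigned long long)prot);
		printf("truncated value: %#x (NX bit lost)\n", truncated);
		return 0;
	}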
@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
 {
 	int pin, ioapic, irq, irq_entry;
 	const struct cpumask *mask;
+	struct irq_desc *desc;
 	struct irq_data *idata;
 	struct irq_chip *chip;
@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
 		if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
 			continue;
 
-		idata = irq_get_irq_data(irq);
+		desc = irq_to_desc(irq);
+		raw_spin_lock_irq(&desc->lock);
+		idata = irq_desc_get_irq_data(desc);
 
 		/*
 		 * Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
 		/* Might be lapic_chip for irq 0 */
 		if (chip->irq_set_affinity)
 			chip->irq_set_affinity(idata, mask, false);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 }
 #endif
...
This diff is collapsed.
@@ -889,7 +889,10 @@ void __init uv_system_init(void)
 		return;
 	}
 	pr_info("UV: Found %s hub\n", hub);
-	map_low_mmrs();
+
+	/* We now only need to map the MMRs on UV1 */
+	if (is_uv1_hub())
+		map_low_mmrs();
 
 	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
 	m_val = m_n_config.s.m_skt;
...
@@ -192,5 +192,13 @@ void __init x86_64_start_reservations(char *real_mode_data)
 	reserve_ebda_region();
 
+	switch (boot_params.hdr.hardware_subarch) {
+	case X86_SUBARCH_INTEL_MID:
+		x86_intel_mid_early_setup();
+		break;
+	default:
+		break;
+	}
+
 	start_kernel();
 }
...
@@ -462,7 +462,7 @@ void fixup_irqs(void)
 		 * non intr-remapping case, we can't wait till this interrupt
 		 * arrives at this cpu before completing the irq move.
 		 */
-		irq_force_complete_move(irq);
+		irq_force_complete_move(desc);
 
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			break_affinity = 1;
@@ -470,6 +470,15 @@ void fixup_irqs(void)
 		}
 
 		chip = irq_data_get_irq_chip(data);
+		/*
+		 * The interrupt descriptor might have been cleaned up
+		 * already, but it is not yet removed from the radix tree
+		 */
+		if (!chip) {
+			raw_spin_unlock(&desc->lock);
+			continue;
+		}
+
 		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
 			chip->irq_mask(data);
...
@@ -33,7 +33,7 @@ struct cpa_data {
 	pgd_t		*pgd;
 	pgprot_t	mask_set;
 	pgprot_t	mask_clr;
-	int		numpages;
+	unsigned long	numpages;
 	int		flags;
 	unsigned long	pfn;
 	unsigned	force_split : 1;
@@ -1350,7 +1350,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 		 * CPA operation. Either a large page has been
 		 * preserved or a single page update happened.
 		 */
-		BUG_ON(cpa->numpages > numpages);
+		BUG_ON(cpa->numpages > numpages || !cpa->numpages);
 		numpages -= cpa->numpages;
 		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
 			cpa->curpage++;
...
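Widening numpages to unsigned long matters for very large mappings: with 4 KiB pages a 32-bit count overflows at 8 TiB, and converting a 32-bit count back to a byte length truncates even sooner. The added !cpa->numpages check likewise guards against a count of zero, which would let the loop make no forward progress. A standalone sketch of the arithmetic (unsigned types are used here so the wraparound is well defined; the kernel field was a plain int):

	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned int numpages32 = 1u << 22;		/* 4M pages = a 16 GiB range */
		unsigned long numpages = numpages32;		/* 64-bit on x86-64 */

		unsigned long wrong = numpages32 << PAGE_SHIFT;	/* wraps in 32-bit math */
		unsigned long right = numpages << PAGE_SHIFT;	/* full 64-bit result */

		printf("32-bit arithmetic: %#lx bytes\n", wrong);
		printf("64-bit arithmetic: %#lx bytes\n", right);
		return 0;
	}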
@@ -8,6 +8,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
@@ -248,6 +249,16 @@ int __init efi_reuse_config(u64 tables, int nr_tables)
 	return ret;
 }
 
+static const struct dmi_system_id sgi_uv1_dmi[] = {
+	{ NULL, "SGI UV1",
+		{	DMI_MATCH(DMI_PRODUCT_NAME,	"Stoutland Platform"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,	"1.0"),
+			DMI_MATCH(DMI_BIOS_VENDOR,	"SGI.COM"),
+		}
+	},
+	{ } /* NULL entry stops DMI scanning */
+};
+
 void __init efi_apply_memmap_quirks(void)
 {
 	/*
@@ -260,10 +271,8 @@ void __init efi_apply_memmap_quirks(void)
 		efi_unmap_memmap();
 	}
 
-	/*
-	 * UV doesn't support the new EFI pagetable mapping yet.
-	 */
-	if (is_uv_system())
+	/* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */
+	if (dmi_check_system(sgi_uv1_dmi))
 		set_bit(EFI_OLD_MEMMAP, &efi.flags);
 }
...
@@ -138,7 +138,7 @@ static void intel_mid_arch_setup(void)
 		intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
 	else {
 		intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-		pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+		pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
 	}
 
 out:
@@ -214,12 +214,10 @@ static inline int __init setup_x86_intel_mid_timer(char *arg)
 	else if (strcmp("lapic_and_apbt", arg) == 0)
 		intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
 	else {
-		pr_warn("X86 INTEL_MID timer option %s not recognised"
-			   " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
-			   arg);
+		pr_warn("X86 INTEL_MID timer option %s not recognised use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+			arg);
 		return -EINVAL;
 	}
 
 	return 0;
 }
 __setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
...
@@ -220,11 +220,12 @@ static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
 		if (imr_is_enabled(&imr)) {
 			base = imr_to_phys(imr.addr_lo);
 			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+			size = end - base + 1;
 		} else {
 			base = 0;
 			end = 0;
+			size = 0;
 		}
-		size = end - base;
 		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
			   &base, &end, size, imr.rmask, imr.wmask,
@@ -579,6 +580,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 {
 	phys_addr_t base = virt_to_phys(&_text);
 	size_t size = virt_to_phys(&__end_rodata) - base;
+	unsigned long start, end;
 	int i;
 	int ret;
@@ -586,18 +588,24 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 	for (i = 0; i < idev->max_imr; i++)
 		imr_clear(i);
 
+	start = (unsigned long)_text;
+	end = (unsigned long)__end_rodata - 1;
+
 	/*
 	 * Setup a locked IMR around the physical extent of the kernel
 	 * from the beginning of the .text secton to the end of the
 	 * .rodata section as one physically contiguous block.
+	 *
+	 * We don't round up @size since it is already PAGE_SIZE aligned.
+	 * See vmlinux.lds.S for details.
 	 */
 	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
 	if (ret < 0) {
-		pr_err("unable to setup IMR for kernel: (%p - %p)\n",
-			&_text, &__end_rodata);
+		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
+			size / 1024, start, end);
 	} else {
-		pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
-			size / 1024, &_text, &__end_rodata);
+		pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
+			size / 1024, start, end);
 	}
 }
...
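The debugfs change above reflects that IMR end addresses are inclusive: imr_to_phys(imr.addr_hi) + IMR_MASK is the last byte covered, so the size of an enabled region is end - base + 1 rather than end - base. A small worked example with made-up addresses:

	#include <stdio.h>

	int main(void)
	{
		unsigned long base = 0x12340000UL;
		unsigned long end  = 0x12340fffUL;	/* last byte covered by the IMR */

		printf("old formula: size = %#lx\n", end - base);	/* 0xfff, one byte short */
		printf("new formula: size = %#lx\n", end - base + 1);	/* 0x1000 */
		return 0;
	}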