Commit 32f741b0 authored by Linus Torvalds

Merge tag 'powerpc-5.10-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "Some more powerpc fixes for 5.10:

   - Three commits fixing possible missed TLB invalidations for
     multi-threaded processes when CPUs are hotplugged in and out.

   - A fix for a host crash triggerable by host userspace (qemu) in KVM
     on Power9.

   - A fix for a host crash in machine check handling when running HPT
     guests on an HPT host.

   - One commit fixing potential missed TLB invalidations when using the
     hash MMU on Power9 or later.

   - A regression fix for machines with CPUs on node 0 but no memory.

  Thanks to Aneesh Kumar K.V, Cédric Le Goater, Greg Kurz, Milan
  Mohanty, Milton Miller, Nicholas Piggin, Paul Mackerras, and Srikar
  Dronamraju"

* tag 'powerpc-5.10-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64s/powernv: Fix memory corruption when saving SLB entries on MCE
  KVM: PPC: Book3S HV: XIVE: Fix vCPU id sanity check
  powerpc/numa: Fix a regression on memoryless node 0
  powerpc/64s: Trim offlined CPUs from mm_cpumasks
  kernel/cpu: add arch override for clear_tasks_mm_cpumask() mm handling
  powerpc/64s/pseries: Fix hash tlbiel_all_isa300 for guest kernels
  powerpc/64s: Fix hash ISA v3.0 TLBIEL instruction generation
parents d4e90419 a1ee2811
arch/powerpc/include/asm/book3s/64/mmu.h
@@ -242,6 +242,18 @@ extern void radix_init_pseries(void);
 static inline void radix_init_pseries(void) { };
 #endif

+#ifdef CONFIG_HOTPLUG_CPU
+#define arch_clear_mm_cpumask_cpu(cpu, mm)                             \
+       do {                                                            \
+               if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {            \
+                       atomic_dec(&(mm)->context.active_cpus);         \
+                       cpumask_clear_cpu(cpu, mm_cpumask(mm));         \
+               }                                                       \
+       } while (0)
+
+void cleanup_cpu_mmu_context(void);
+#endif
+
 static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 {
        int index = ea >> MAX_EA_BITS_PER_CONTEXT;
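A minimal user-space sketch of the override's semantics, assuming a simplified stand-in for the kernel's mm_struct and cpumask types (not the real API): the active-CPU count is only decremented when the CPU's bit was genuinely set, so the accounting stays balanced even if the hook runs for an already-cleared CPU.

    #include <stdatomic.h>
    #include <stdio.h>

    struct mm {
            unsigned long cpumask;  /* one bit per CPU, like mm_cpumask(mm) */
            atomic_int active_cpus; /* like mm->context.active_cpus */
    };

    /* Mirror of the powerpc override: decrement the active-CPU count only
     * if this CPU's bit was genuinely set, then clear the bit. */
    static void arch_clear_mm_cpumask_cpu(int cpu, struct mm *mm)
    {
            if (mm->cpumask & (1UL << cpu)) {
                    atomic_fetch_sub(&mm->active_cpus, 1);
                    mm->cpumask &= ~(1UL << cpu);
            }
    }

    int main(void)
    {
            struct mm mm = { .cpumask = 0x6, .active_cpus = 2 }; /* CPUs 1, 2 */

            arch_clear_mm_cpumask_cpu(1, &mm); /* bit set: count drops to 1 */
            arch_clear_mm_cpumask_cpu(3, &mm); /* bit clear: count unchanged */
            printf("mask=%lx active=%d\n", mm.cpumask,
                   atomic_load(&mm.active_cpus));
            return 0;
    }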
arch/powerpc/kvm/book3s_xive.c
@@ -1214,12 +1214,9 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)

 static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
 {
        /* We have a block of xive->nr_servers VPs. We just need to check
-        * raw vCPU ids are below the expected limit for this guest's
-        * core stride ; kvmppc_pack_vcpu_id() will pack them down to an
-        * index that can be safely used to compute a VP id that belongs
-        * to the VP block.
+        * packed vCPU ids are below that.
         */
-       return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
+       return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
 }

 int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
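The check now bounds the packed id instead of the raw id: with a non-default vSMT mode, raw vCPU ids are sparse, and kvmppc_pack_vcpu_id() compresses them into the dense VP block. A self-contained sketch of the idea, using a deliberately simplified packing function (raw = core * STRIDE + thread; the kernel's real mapping is block-interleaved and differs):

    #include <stdbool.h>
    #include <stdio.h>

    #define STRIDE 8        /* hardware threads per guest core (assumed) */

    /* Simplified packing: compress thread slots when the guest emulates
     * fewer threads per core than the stride leaves room for. */
    static unsigned int pack_vcpu_id(unsigned int raw, unsigned int emul_smt_mode)
    {
            return (raw / STRIDE) * emul_smt_mode + (raw % STRIDE);
    }

    static bool vcpu_id_valid(unsigned int raw, unsigned int emul_smt_mode,
                              unsigned int nr_servers)
    {
            /* Bound the *packed* id, mirroring the fixed check above. */
            return pack_vcpu_id(raw, emul_smt_mode) < nr_servers;
    }

    int main(void)
    {
            /* SMT1 guest, 4 servers: raw ids 0, 8, 16, 24 pack to 0..3. */
            printf("%d\n", vcpu_id_valid(24, 1, 4)); /* 1: packed id 3 fits */
            printf("%d\n", vcpu_id_valid(25, 1, 4)); /* 0: packed id 4 does not */
            return 0;
    }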
arch/powerpc/mm/book3s64/hash_native.c
@@ -68,7 +68,7 @@ static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned in
        rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

        asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
-                    : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
+                    : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
                     : "memory");
 }
@@ -92,16 +92,15 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
        asm volatile("ptesync": : :"memory");

        /*
-        * Flush the first set of the TLB, and any caching of partition table
-        * entries. Then flush the remaining sets of the TLB. Hash mode uses
-        * partition scoped TLB translations.
+        * Flush the partition table cache if this is HV mode.
         */
-       tlbiel_hash_set_isa300(0, is, 0, 2, 0);
-       for (set = 1; set < num_sets; set++)
-               tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+       if (early_cpu_has_feature(CPU_FTR_HVMODE))
+               tlbiel_hash_set_isa300(0, is, 0, 2, 0);

        /*
-        * Now invalidate the process table cache.
+        * Now invalidate the process table cache. UPRT=0 HPT modes (what
+        * current hardware implements) do not use the process table, but
+        * add the flushes anyway.
         *
         * From ISA v3.0B p. 1078:
         *     The following forms are invalid.
@@ -110,6 +109,14 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
         */
        tlbiel_hash_set_isa300(0, is, 0, 2, 1);

+       /*
+        * Then flush the sets of the TLB proper. Hash mode uses
+        * partition scoped TLB translations, which may be flushed
+        * in !HV mode.
+        */
+       for (set = 0; set < num_sets; set++)
+               tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+
        ppc_after_tlbiel_barrier();

        asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
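Two independent fixes land here. The @@ -68,7 change swaps a single constraint: PPC_TLBIEL() splices its last operand into the instruction word at assembly time, so the R field needs an assemble-time constant; with "r"(r), the number of whatever register the compiler picked, not the value of r, got encoded into the field. A portable toy demo of the "i" versus "r" distinction (x86-64 inline asm, purely illustrative, not the kernel's macro):

    #include <stdio.h>

    /*
     * "i" forces the operand to be an assemble-time immediate spliced
     * into the instruction text, like the R field of PPC_TLBIEL();
     * "r" would instead hand over a register, whose *number* is all
     * that could land in an instruction field.
     */
    static long add_forty(long x)
    {
    #if defined(__x86_64__)
            asm("add %1, %0" : "+r"(x) : "i"(40));
    #else
            x += 40;        /* keep the sketch buildable elsewhere */
    #endif
            return x;
    }

    int main(void)
    {
            printf("%ld\n", add_forty(2)); /* 42 */
            return 0;
    }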
arch/powerpc/mm/book3s64/mmu_context.c
@@ -17,6 +17,7 @@
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>

 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -307,3 +308,22 @@ void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
        isync();
 }
 #endif
+
+/**
+ * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
+ *
+ * This clears the CPU from mm_cpumask for all processes, and then flushes the
+ * local TLB to ensure TLB coherency in case the CPU is onlined again.
+ *
+ * KVM guest translations are not necessarily flushed here. If KVM started
+ * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+void cleanup_cpu_mmu_context(void)
+{
+       int cpu = smp_processor_id();
+
+       clear_tasks_mm_cpumask(cpu);
+       tlbiel_all();
+}
+#endif
arch/powerpc/mm/numa.c
@@ -742,8 +742,7 @@ static int __init parse_numa_properties(void)
                        of_node_put(cpu);
                }

-               if (likely(nid > 0))
-                       node_set_online(nid);
+               node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
arch/powerpc/platforms/powermac/smp.c
@@ -911,6 +911,8 @@ static int smp_core99_cpu_disable(void)

        mpic_cpu_set_priority(0xf);

+       cleanup_cpu_mmu_context();
+
        return 0;
 }
arch/powerpc/platforms/powernv/setup.c
@@ -211,11 +211,16 @@ static void __init pnv_init(void)
                add_preferred_console("hvc", 0, NULL);

        if (!radix_enabled()) {
+               size_t size = sizeof(struct slb_entry) * mmu_slb_size;
                int i;

                /* Allocate per cpu area to save old slb contents during MCE */
-               for_each_possible_cpu(i)
-                       paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
+               for_each_possible_cpu(i) {
+                       paca_ptrs[i]->mce_faulty_slbs =
+                                       memblock_alloc_node(size,
+                                               __alignof__(struct slb_entry),
+                                               cpu_to_node(i));
+               }
        }
 }
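The sizing fix: the save area must hold mmu_slb_size entries, so the byte count is the entry count times sizeof(struct slb_entry); the old call passed the bare entry count, under-allocating by a factor of the entry size. A toy illustration of the count-times-size rule (the two-dword slb_entry layout here is assumed for the demo, not taken from the diff):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for the kernel's SLB entry (two dwords). */
    struct slb_entry {
            unsigned long esid;
            unsigned long vsid;
    };

    /* The save area holds n entries, so the byte count must scale by the
     * entry size; calloc makes the count-times-size rule explicit. */
    static struct slb_entry *alloc_slb_save_area(unsigned int n)
    {
            return calloc(n, sizeof(struct slb_entry));
    }

    int main(void)
    {
            unsigned int slb_size = 32;     /* example mmu_slb_size value */
            struct slb_entry *save = alloc_slb_save_area(slb_size);

            printf("%u entries -> %zu bytes\n",
                   slb_size, slb_size * sizeof(struct slb_entry));
            free(save);
            return 0;
    }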
arch/powerpc/platforms/powernv/smp.c
@@ -143,6 +143,9 @@ static int pnv_smp_cpu_disable(void)
                xive_smp_disable_cpu();
        else
                xics_migrate_irqs_away();
+
+       cleanup_cpu_mmu_context();
+
        return 0;
 }
arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -90,6 +90,9 @@ static int pseries_cpu_disable(void)
                xive_smp_disable_cpu();
        else
                xics_migrate_irqs_away();
+
+       cleanup_cpu_mmu_context();
+
        return 0;
 }
kernel/cpu.c
@@ -815,6 +815,10 @@ void __init cpuhp_threads_init(void)
 }

 #ifdef CONFIG_HOTPLUG_CPU
+#ifndef arch_clear_mm_cpumask_cpu
+#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
+#endif
+
 /**
  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
  * @cpu: a CPU id
@@ -850,7 +854,7 @@ void clear_tasks_mm_cpumask(int cpu)
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
-               cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+               arch_clear_mm_cpumask_cpu(cpu, t->mm);
                task_unlock(t);
        }
        rcu_read_unlock();
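The #ifndef gives architectures a compile-time opt-in: if a header seen by kernel/cpu.c already defines arch_clear_mm_cpumask_cpu() (as the powerpc header in the first hunk does), that definition wins; otherwise the plain cpumask_clear_cpu() fallback applies. A user-space sketch of the dispatch pattern (the printf bodies are stand-ins for the real work):

    #include <stdio.h>

    /* "Arch" definition: in the kernel this would come from an arch
     * header included before the generic code. */
    #define arch_clear_mm_cpumask_cpu(cpu) \
            printf("arch override clears CPU %d\n", (cpu))

    /* Generic fallback: compiled out here because the override exists. */
    #ifndef arch_clear_mm_cpumask_cpu
    #define arch_clear_mm_cpumask_cpu(cpu) \
            printf("generic default clears CPU %d\n", (cpu))
    #endif

    int main(void)
    {
            arch_clear_mm_cpumask_cpu(3);   /* prints the arch override */
            return 0;
    }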