Commit 7e2d732b authored by Martin Schwidefsky, committed by Stefan Bader

UBUNTU: SAUCE: s390/mm: fix local TLB flushing vs. detach of an mm address space

BugLink: http://bugs.launchpad.net/bugs/1708399

The local TLB flushing code keeps an additional mask in the mm.context,
the cpu_attach_mask. At the time a global flush of an address space is
done the cpu_attach_mask is copied to the mm_cpumask in order to avoid
future global flushes in case the mm is used by a single CPU only after
the flush.

Trouble is that the reset of the mm_cpumask is racy against the detach
of an mm address space by switch_mm. The current order is first the
global TLB flush and then the copy of the cpu_attach_mask to the
mm_cpumask. The order needs to be the other way around.
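
To make the new ordering concrete, here is a small standalone sketch. The struct and helper names (fake_mm, flush_global, flush_mm_old, flush_mm_new) are hypothetical stand-ins for illustration only, not the kernel's types or API; the patch itself simply moves the cpumask_copy() of cpu_attach_mask into mm_cpumask ahead of the IDTE/global flush, with a barrier() between the two steps.

    /* Illustration only: simplified userspace stand-ins, not kernel code. */
    #include <stdio.h>

    struct fake_mm {
            unsigned long mm_cpumask;       /* mask consulted to decide if a local flush is enough */
            unsigned long cpu_attach_mask;  /* CPUs that currently have the mm attached */
    };

    static void flush_global(struct fake_mm *mm)
    {
            printf("global flush, mm_cpumask=%#lx\n", mm->mm_cpumask);
    }

    /* Old order: global flush first, then copy cpu_attach_mask over
     * mm_cpumask.  Per the message above, this reset of mm_cpumask is
     * racy against a concurrent detach of the mm in switch_mm. */
    static void flush_mm_old(struct fake_mm *mm)
    {
            flush_global(mm);
            mm->mm_cpumask = mm->cpu_attach_mask;
    }

    /* Fixed order: copy the mask first, then flush.  The real patch
     * places barrier() between the copy and the flush to keep this
     * ordering. */
    static void flush_mm_new(struct fake_mm *mm)
    {
            mm->mm_cpumask = mm->cpu_attach_mask;
            flush_global(mm);
    }

    int main(void)
    {
            struct fake_mm mm = { .mm_cpumask = 0xf, .cpu_attach_mask = 0x1 };

            flush_mm_old(&mm);      /* racy ordering (before the patch) */
            mm.mm_cpumask = 0xf;
            flush_mm_new(&mm);      /* fixed ordering (after the patch) */
            return 0;
    }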

Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
(backported from b3e5dc45 linux-next)
[merged with "s390/mm,kvm: flush gmap address space with IDTE"]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin King <colin.king@canonical.com>
Acked-by: Brad Figg <brad.figg@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent 64a6cde7
@@ -47,47 +47,31 @@ static inline void __tlb_flush_global(void)
 }
 
 /*
- * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
- * this implicates multiple ASCEs!).
+ * Flush TLB entries for a specific ASCE on all CPUs.
  */
-static inline void __tlb_flush_full(struct mm_struct *mm)
+static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
+        /*
+         * If the machine has IDTE we prefer to do a per mm flush
+         * on all cpus instead of doing a local flush if the mm
+         * only ran on the local cpu.
+         */
         preempt_disable();
         atomic_add(0x10000, &mm->context.attach_count);
-        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-                /* Local TLB flush */
-                __tlb_flush_local();
+        /* Reset TLB flush mask */
+        if (MACHINE_HAS_TLB_LC)
+                cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+        barrier();
+        if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) {
+                __tlb_flush_idte(mm->context.asce);
         } else {
                 /* Global TLB flush */
                 __tlb_flush_global();
-                /* Reset TLB flush mask */
-                if (MACHINE_HAS_TLB_LC)
-                        cpumask_copy(mm_cpumask(mm),
-                                     &mm->context.cpu_attach_mask);
         }
         atomic_sub(0x10000, &mm->context.attach_count);
         preempt_enable();
 }
-
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
-{
-        preempt_disable();
-        atomic_add(0x10000, &mm->context.attach_count);
-        if (MACHINE_HAS_IDTE)
-                __tlb_flush_idte(asce);
-        else
-                __tlb_flush_global();
-        /* Reset TLB flush mask */
-        if (MACHINE_HAS_TLB_LC)
-                cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
-        atomic_sub(0x10000, &mm->context.attach_count);
-        preempt_enable();
-}
 
 static inline void __tlb_flush_kernel(void)
 {
         if (MACHINE_HAS_IDTE)
@@ -97,7 +81,6 @@ static inline void __tlb_flush_kernel(void)
 }
 
 #else
 #define __tlb_flush_global()    __tlb_flush_local()
-#define __tlb_flush_full(mm)    __tlb_flush_local()
 
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
@@ -111,21 +94,14 @@ static inline void __tlb_flush_kernel(void)
 {
         __tlb_flush_local();
 }
-#endif
 
 static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
-        /*
-         * If the machine has IDTE we prefer to do a per mm flush
-         * on all cpus instead of doing a local flush if the mm
-         * only ran on the local cpu.
-         */
-        if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-                __tlb_flush_asce(mm, mm->context.asce);
-        else
-                __tlb_flush_full(mm);
+        __tlb_flush_local();
 }
+#endif
 
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
         if (mm->context.flush_mm) {