Commit 13da6ac1 authored by Linus Torvalds

Merge tag 'powerpc-5.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "One fix for a boot hang on some Freescale machines when PREEMPT is
  enabled.

  Two CVE fixes for bugs in our handling of FP registers and
  transactional memory, both of which can result in corrupted FP state,
  or FP state leaking between processes.

  Thanks to: Chris Packham, Christophe Leroy, Gustavo Romero, Michael
  Neuling"

* tag 'powerpc-5.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/tm: Fix restoring FP/VMX facility incorrectly on interrupts
  powerpc/tm: Fix FP/VMX unavailable exceptions inside a transaction
  powerpc/64e: Drop stale call to smp_processor_id() which hangs SMP startup
parents d41a3eff a8318c13
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 	}
 }
 
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
-	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-		(tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
-	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-		(tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
 #else
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 bool strict_msr_control;
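Both removed helpers answered the same question, "is this thread in an active transaction whose checkpointed MSR has FP/VEC enabled?". The fix drops them so the restore paths below key off the load_fp/load_vec counters alone; see the restore_fp() sketch after the next hunk.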
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk)
 {
-	if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+	if (tsk->thread.load_fp) {
 		load_fp_state(&current->thread.fp_state);
 		current->thread.load_fp++;
 		return 1;
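For reference, the resulting restore_fp() reads roughly as follows. This is a sketch reconstructed from the hunk above; the diff truncates the function, so the closing return 0 path is assumed from context:

static int restore_fp(struct task_struct *tsk)
{
	/* Reload FP state only when the lazy-restore counter asks for
	 * it; the TM-active special case is gone. */
	if (tsk->thread.load_fp) {
		load_fp_state(&current->thread.fp_state);
		current->thread.load_fp++;
		return 1;
	}
	return 0;	/* assumed: nothing was restored */
}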
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 
 static int restore_altivec(struct task_struct *tsk)
 {
-	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
-	    (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
 		load_vr_state(&tsk->thread.vr_state);
 		tsk->thread.used_vr = 1;
 		tsk->thread.load_vec++;
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
 	if (!tsk->thread.regs)
 		return;
 
+	check_if_tm_restore_required(tsk);
+
 	usermsr = tsk->thread.regs->msr;
 
 	if ((usermsr & msr_all_available) == 0)
 		return;
 
 	msr_check_and_set(msr_all_available);
-	check_if_tm_restore_required(tsk);
 
 	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
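The giveup_all() hunk is purely an ordering change: check_if_tm_restore_required() now runs before the early return taken when usermsr carries no math bits. A sketch of the resulting flow, per my reading of the hunk (the per-facility giveup calls after the WARN_ON are not shown in the diff and only summarized in a comment):

void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	/* Moved up: a transactional thread must be flagged for a TM
	 * restore even if usermsr currently has no FP/VEC/VSX bits set,
	 * otherwise the early return below would skip the check. */
	check_if_tm_restore_required(tsk);

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	/* ... the individual __giveup_*() calls follow here ... */
}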
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 		unsigned int num_cams;
-		int __maybe_unused cpu = smp_processor_id();
 		bool map = true;
 
 		/* use a quarter of the TLBCAM for bolted linear map */
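The dropped cpu local was otherwise unused (hence the __maybe_unused annotation); per the pull message above, the stale smp_processor_id() call itself is what hung SMP startup on PREEMPT-enabled Freescale machines, so deleting it is the entire fix.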