Commit 20edcec2 authored by Linus Torvalds

Merge tag 'powerpc-6.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Fix Speculation_Store_Bypass reporting in /proc/self/status on
   Power10

 - Fix HPT with 4K pages, broken by recent changes, by implementing
   pmd_same()
 - Fix 64-bit native_hpte_remove() to be irq-safe

Thanks to Aneesh Kumar K.V, Nageswara R Sastry, and Russell Currey.

* tag 'powerpc-6.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm/book3s64/hash/4k: Add pmd_same callback for 4K page size
  powerpc/64e: Fix objtool warnings in exceptions-64e.S
  powerpc/security: Fix Speculation_Store_Bypass reporting on Power10
  powerpc/64s: Fix native_hpte_remove() to be irq-safe
parents 6eede068 cf53564b
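
As context for the first fix, the user-visible symptom is the Speculation_Store_Bypass field of /proc/self/status. Below is a minimal userspace sketch (not part of this series) that prints that field; the parsing logic is illustrative only:

#include <stdio.h>
#include <string.h>

/* Prints the Speculation_Store_Bypass line from /proc/self/status.
 * On an affected Power10, a fixed kernel reports "vulnerable" here
 * rather than "not vulnerable". */
int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, "Speculation_Store_Bypass:", 25) == 0)
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}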
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -136,12 +136,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
 	return 0;
 }
 
-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	BUG();
-	return 0;
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
 	BUG();
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -263,11 +263,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
 		  (_PAGE_PTE | H_PAGE_THP_HUGE));
 }
 
-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
 	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -132,6 +132,11 @@ static inline int get_region_id(unsigned long ea)
 	return region_id;
 }
 
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
 #define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
 #define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
 static inline int hash__p4d_bad(p4d_t p4d)
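
The hash__pmd_same() comparison moved above masks out _PAGE_HPTEFLAGS, so two PMDs that differ only in hash-PTE bookkeeping bits still compare equal, which is what the generic THP code expects when it calls pmd_same(). A standalone sketch of that masking idea; the mask and PMD values below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's _PAGE_HPTEFLAGS mask;
 * the real value lives in the arch/powerpc headers. */
#define HPTE_FLAGS_MASK 0x0000000000000ff8ULL

/* Mirrors the logic of hash__pmd_same(): two entries are "the same"
 * if they differ only in the masked bookkeeping bits. */
static int pmd_same_sketch(uint64_t a, uint64_t b)
{
        return ((a ^ b) & ~HPTE_FLAGS_MASK) == 0;
}

int main(void)
{
        uint64_t pmd  = 0x8000000012340086ULL; /* made-up PMD value */
        uint64_t pmd2 = pmd | 0x8;             /* same entry, one HPTE flag set */

        printf("%d\n", pmd_same_sketch(pmd, pmd2));         /* prints 1 */
        printf("%d\n", pmd_same_sketch(pmd, pmd + 0x1000)); /* prints 0 */
        return 0;
}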
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -5,6 +5,7 @@
  * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
  */
 
+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <asm/reg.h>
 #include <asm/page.h>
@@ -66,7 +67,7 @@
 #define SPECIAL_EXC_LOAD(reg, name) \
 	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
 
-special_reg_save:
+SYM_CODE_START_LOCAL(special_reg_save)
 	/*
 	 * We only need (or have stack space) to save this stuff if
 	 * we interrupted the kernel.
@@ -131,8 +132,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	SPECIAL_EXC_STORE(r10,CSRR1)
 
 	blr
+SYM_CODE_END(special_reg_save)
 
-ret_from_level_except:
+SYM_CODE_START_LOCAL(ret_from_level_except)
 	ld	r3,_MSR(r1)
 	andi.	r3,r3,MSR_PR
 	beq	1f
@@ -206,6 +208,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mtxer	r11
 
 	blr
+SYM_CODE_END(ret_from_level_except)
 
 .macro ret_from_level srr0 srr1 paca_ex scratch
 	bl	ret_from_level_except
@@ -232,13 +235,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mfspr	r13,\scratch
 .endm
 
-ret_from_crit_except:
+SYM_CODE_START_LOCAL(ret_from_crit_except)
 	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
 	rfci
+SYM_CODE_END(ret_from_crit_except)
 
-ret_from_mc_except:
+SYM_CODE_START_LOCAL(ret_from_mc_except)
 	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
 	rfmci
+SYM_CODE_END(ret_from_mc_except)
 
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)	\
@@ -978,20 +983,22 @@ masked_interrupt_book3e_0x2c0:
  * r14 and r15 containing the fault address and error code, with the
  * original values stashed away in the PACA
  */
-storage_fault_common:
+SYM_CODE_START_LOCAL(storage_fault_common)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	bl	do_page_fault
 	b	interrupt_return
+SYM_CODE_END(storage_fault_common)
 
 /*
  * Alignment exception doesn't fit entirely in the 0x100 bytes so it
  * continues here.
  */
-alignment_more:
+SYM_CODE_START_LOCAL(alignment_more)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	bl	alignment_exception
 	REST_NVGPRS(r1)
 	b	interrupt_return
+SYM_CODE_END(alignment_more)
 
 /*
  * Trampolines used when spotting a bad kernel stack pointer in
@@ -1030,8 +1037,7 @@ BAD_STACK_TRAMPOLINE(0xe00)
 BAD_STACK_TRAMPOLINE(0xf00)
 BAD_STACK_TRAMPOLINE(0xf20)
 
-.globl	bad_stack_book3e
-bad_stack_book3e:
+_GLOBAL(bad_stack_book3e)
 	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
 	mfspr	r10,SPRN_SRR0;	/* read SRR0 before touching stack */
 	ld	r1,PACAEMERGSP(r13)
@@ -1285,8 +1291,7 @@ have_hes:
  * ever takes any parameters, the SCOM code must also be updated to
  * provide them.
  */
-	.globl a2_tlbinit_code_start
-a2_tlbinit_code_start:
+_GLOBAL(a2_tlbinit_code_start)
 
 	ori	r11,r3,MAS0_WQ_ALLWAYS
 	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
@@ -1479,8 +1484,7 @@ _GLOBAL(book3e_secondary_thread_init)
 	mflr	r28
 	b	3b
 
-.globl init_core_book3e
-init_core_book3e:
+_GLOBAL(init_core_book3e)
 	/* Establish the interrupt vector base */
 	tovirt(r2,r2)
 	LOAD_REG_ADDR(r3, interrupt_base_book3e)
@@ -1488,7 +1492,7 @@ init_core_book3e:
 	sync
 	blr
 
-init_thread_book3e:
+SYM_CODE_START_LOCAL(init_thread_book3e)
 	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
 	mtspr	SPRN_EPCR,r3
@@ -1502,6 +1506,7 @@ init_thread_book3e:
 	mtspr	SPRN_TSR,r3
 
 	blr
+SYM_CODE_END(init_thread_book3e)
 
 _GLOBAL(__setup_base_ivors)
 	SET_IVOR(0, 0x020)	/* Critical Input */
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
 
 static int ssb_prctl_get(struct task_struct *task)
 {
+	/*
+	 * The STF_BARRIER feature is on by default, so if it's off that means
+	 * firmware has explicitly said the CPU is not vulnerable via either
+	 * the hypercall or device tree.
+	 */
+	if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
+		return PR_SPEC_NOT_AFFECTED;
+
+	/*
+	 * If the system's CPU has no known barrier (see setup_stf_barrier())
+	 * then assume that the CPU is not vulnerable.
+	 */
 	if (stf_enabled_flush_types == STF_BARRIER_NONE)
-		/*
-		 * We don't have an explicit signal from firmware that we're
-		 * vulnerable or not, we only have certain CPU revisions that
-		 * are known to be vulnerable.
-		 *
-		 * We assume that if we're on another CPU, where the barrier is
-		 * NONE, then we are not vulnerable.
-		 */
 		return PR_SPEC_NOT_AFFECTED;
-	else
-		/*
-		 * If we do have a barrier type then we are vulnerable. The
-		 * barrier is not a global or per-process mitigation, so the
-		 * only value we can report here is PR_SPEC_ENABLE, which
-		 * appears as "vulnerable" in /proc.
-		 */
-		return PR_SPEC_ENABLE;
 
-	return -EINVAL;
+	/*
+	 * Otherwise the CPU is vulnerable. The barrier is not a global or
+	 * per-process mitigation, so the only value that can be reported here
+	 * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
+	 */
+	return PR_SPEC_ENABLE;
 }
 
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
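
The PR_SPEC_ENABLE value returned above is also what userspace sees directly through prctl(); the /proc status file simply translates it to "vulnerable". A small sketch querying it via the standard PR_GET_SPECULATION_CTRL interface from <linux/prctl.h>:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        /* Same kernel path that feeds Speculation_Store_Bypass in
         * /proc/self/status; on an affected Power10 a fixed kernel
         * returns PR_SPEC_ENABLE instead of "not affected". */
        int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

        if (ret < 0)
                perror("prctl");
        else if (ret == PR_SPEC_NOT_AFFECTED)
                puts("not affected");
        else if (ret & PR_SPEC_ENABLE)
                puts("vulnerable");
        else
                puts("mitigated or under per-task control");
        return 0;
}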
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
+	unsigned long hpte_v, flags;
 	struct hash_pte *hptep;
 	int i;
 	int slot_offset;
-	unsigned long hpte_v;
+
+	local_irq_save(flags);
 
 	DBG_LOW("    remove(group=%lx)\n", hpte_group);
 
@@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
 		slot_offset &= 0x7;
 	}
 
-	if (i == HPTES_PER_GROUP)
-		return -1;
+	if (i == HPTES_PER_GROUP) {
+		i = -1;
+		goto out;
+	}
 
 	/* Invalidate the hpte. NOTE: this also unlocks it */
 	release_hpte_lock();
 	hptep->v = 0;
-
+out:
+	local_irq_restore(flags);
 	return i;
 }
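
The shape of this fix is the save-once, single-exit pattern: interrupt state is saved on entry, and every return path, including the not-found path that previously returned -1 early, now funnels through the out: label so the state is always restored. A userspace sketch of that control flow, with made-up stand-ins for local_irq_save()/local_irq_restore():

#include <stdio.h>

/* Stand-ins for the kernel's local_irq_save()/local_irq_restore();
 * here they just save and restore a fake flag. */
static int irqs_enabled = 1;
static void irq_save(unsigned long *flags)  { *flags = irqs_enabled; irqs_enabled = 0; }
static void irq_restore(unsigned long flags) { irqs_enabled = (int)flags; }

#define SLOTS 8 /* analogous to HPTES_PER_GROUP */

/* Mirrors the fixed control flow of native_hpte_remove(): state is
 * saved on entry, and every exit, including "not found", goes through
 * the out: label so it is always restored. */
static long remove_sketch(const int *group)
{
        unsigned long flags;
        int i;

        irq_save(&flags);

        for (i = 0; i < SLOTS; i++)
                if (group[i])
                        break;

        if (i == SLOTS) {
                i = -1;        /* nothing to remove */
                goto out;
        }

        /* ... invalidate entry i here ... */
out:
        irq_restore(flags);
        return i;
}

int main(void)
{
        int group[SLOTS] = { 0, 0, 1, 0, 0, 0, 0, 0 };

        printf("removed slot %ld, irqs_enabled=%d\n", remove_sketch(group), irqs_enabled);
        return 0;
}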