Commit d45b832d authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Here's a sizeable batch of Friday the 13th arm64 fixes for -rc4. What
  could possibly go wrong?

  The obvious reason we have so much here is because of the holiday
  season right after the merge window, but we've also brought back an
  erratum workaround that was previously dropped at the last minute and
  there's an MTE coredumping fix that strays outside of the arch/arm64
  directory.

  Summary:

   - Fix PAGE_TABLE_CHECK failures on hugepage splitting path

   - Fix PSCI encoding of MEM_PROTECT_RANGE function in UAPI header

   - Fix NULL deref when accessing debugfs node if PSCI is not present

   - Fix MTE core dumping when VMA list is being updated concurrently

   - Fix SME signal frame handling when SVE is not implemented by the
     CPU

   - Fix asm constraints for cmpxchg_double() to hazard both words

   - Fix build failure with stack tracer and older versions of Clang

   - Bring back workaround for Cortex-A715 erratum 2645198"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix build with CC=clang, CONFIG_FTRACE=y and CONFIG_STACK_TRACER=y
  arm64/mm: Define dummy pud_user_exec() when using 2-level page-table
  arm64: errata: Workaround possible Cortex-A715 [ESR|FAR]_ELx corruption
  firmware/psci: Don't register with debugfs if PSCI isn't available
  firmware/psci: Fix MEM_PROTECT_RANGE function numbers
  arm64/signal: Always allocate SVE signal frames on SME only systems
  arm64/signal: Always accept SVE signal frames on SME only systems
  arm64/sme: Fix context switch for SME only systems
  arm64: cmpxchg_double*: hazard against entire exchange variable
  arm64/uprobes: change the uprobe_opcode_t typedef to fix the sparse warning
  arm64: mte: Avoid the racy walk of the vma list during core dump
  elfcore: Add a cprm parameter to elf_core_extra_{phdrs,data_size}
  arm64: mte: Fix double-freeing of the temporary tag storage during coredump
  arm64: ptrace: Use ARM64_SME to guard the SME register enumerations
  arm64/mm: add pud_user_exec() check in pud_user_accessible_page()
  arm64/mm: fix incorrect file_map_count for invalid pmd
parents d9fc1511 68a63a41
@@ -120,6 +120,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
...
@@ -184,8 +184,6 @@ config ARM64
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
-		if $(cc-option,-fpatchable-function-entry=2)
 	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
 		if DYNAMIC_FTRACE_WITH_ARGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -972,6 +970,22 @@ config ARM64_ERRATUM_2457168
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_2645198
+	bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption"
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A715 erratum 2645198.
+
+	  If a Cortex-A715 cpu sees a page mapping permissions change from executable
+	  to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the
+	  next instruction abort caused by permission fault.
+
+	  Only user-space does executable to non-executable permission transition via
+	  mprotect() system call. Workaround the problem by doing a break-before-make
+	  TLB invalidation, for all changes to executable user space mappings.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
...
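The transition the help text above describes originates in user space. A minimal, hypothetical illustration (not part of the patch) of the kind of call that flips a mapping from executable to non-executable, and therefore takes the break-before-make path on affected CPUs:

```c
#include <stddef.h>
#include <sys/mman.h>

/*
 * Hypothetical example: drop execute permission from a region that was
 * mapped PROT_READ | PROT_EXEC. On a CPU affected by erratum 2645198,
 * the kernel performs this PTE rewrite as a break-before-make sequence
 * (clear the entry and flush the TLB before writing the new value)
 * rather than an in-place update.
 */
static int drop_exec(void *code, size_t len)
{
	return mprotect(code, len, PROT_READ);
}
```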
@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
 "	cbnz	%w0, 1b\n" \
 "	" #mb "\n" \
 "2:" \
-	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
+	: "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \
 	: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
 	: cl); \
 \
...
@@ -311,7 +311,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
 "	eor	%[old2], %[old2], %[oldval2]\n" \
 "	orr	%[old1], %[old1], %[old2]" \
 	: [old1] "+&r" (x0), [old2] "+&r" (x1), \
-	  [v] "+Q" (*(unsigned long *)ptr) \
+	  [v] "+Q" (*(__uint128_t *)ptr) \
 	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
 	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
 	: cl); \
...
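Both cmpxchg_double() hunks above make the same change: the "+Q" output operand now covers the full 16-byte quantity rather than only the first long. A minimal sketch of why that matters for the compiler; this is not kernel code, and the store_pair() helper is hypothetical:

```c
/*
 * Sketch: "+Q" (*(__uint128_t *)p) tells the compiler that both 64-bit
 * words at p may be read and written by the asm, so it cannot keep a
 * stale copy of p[1] cached in a register across the statement. With
 * "+Q" (*(unsigned long *)p) only p[0] would be hazarded.
 */
static inline void store_pair(unsigned long *p, unsigned long a, unsigned long b)
{
	asm volatile(
	"	stp	%x[a], %x[b], %[v]\n"
	: [v] "+Q" (*(__uint128_t *)p)	/* hazards p[0] and p[1] */
	: [a] "r" (a), [b] "r" (b));
}
```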
@@ -49,6 +49,15 @@ extern pte_t huge_ptep_get(pte_t *ptep);
 
 void __init arm64_hugetlb_cma_reserve(void);
 
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep,
+					 pte_t old_pte, pte_t new_pte);
+
 #include <asm-generic/hugetlb.h>
 
 #endif /* __ASM_HUGETLB_H */
@@ -681,7 +681,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
 #define pud_valid(pud)		pte_valid(pud_pte(pud))
 #define pud_user(pud)		pte_user(pud_pte(pud))
-
+#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
@@ -730,6 +730,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 #else
 
 #define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
+#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */
 
 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
 #define pmd_set_fixmap(addr)	NULL
@@ -862,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
 static inline bool pmd_user_accessible_page(pmd_t pmd)
 {
-	return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
 }
 
 static inline bool pud_user_accessible_page(pud_t pud)
 {
-	return pud_leaf(pud) && pud_user(pud);
+	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
 }
 #endif
@@ -1093,6 +1094,15 @@ static inline bool pud_sect_supported(void)
 }
 
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+#define ptep_modify_prot_start ptep_modify_prot_start
+extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep);
+
+#define ptep_modify_prot_commit ptep_modify_prot_commit
+extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep,
+				    pte_t old_pte, pte_t new_pte);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
@@ -16,7 +16,7 @@
 #define UPROBE_SWBP_INSN_SIZE	AARCH64_INSN_SIZE
 #define UPROBE_XOL_SLOT_BYTES	MAX_UINSN_BYTES
 
-typedef u32 uprobe_opcode_t;
+typedef __le32 uprobe_opcode_t;
 
 struct arch_uprobe_task {
 };
...
@@ -661,6 +661,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2645198
+	{
+		.desc = "ARM erratum 2645198",
+		.capability = ARM64_WORKAROUND_2645198,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_2077057
 	{
 		.desc = "ARM erratum 2077057",
...
@@ -8,28 +8,27 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#define for_each_mte_vma(vmi, vma) \
+#define for_each_mte_vma(cprm, i, m) \
 	if (system_supports_mte()) \
-		for_each_vma(vmi, vma) \
-			if (vma->vm_flags & VM_MTE)
+		for (i = 0, m = cprm->vma_meta; \
+		     i < cprm->vma_count; \
+		     i++, m = cprm->vma_meta + i) \
+			if (m->flags & VM_MTE)
 
-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
+static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
 {
-	if (vma->vm_flags & VM_DONTDUMP)
-		return 0;
-
-	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
+	return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
 }
 
 /* Derived from dump_user_range(); start/end must be page-aligned */
 static int mte_dump_tag_range(struct coredump_params *cprm,
-			      unsigned long start, unsigned long end)
+			      unsigned long start, unsigned long len)
 {
 	int ret = 1;
 	unsigned long addr;
 	void *tags = NULL;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
 		struct page *page = get_dump_page(addr);
 
 		/*
@@ -65,7 +64,6 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
 		mte_save_page_tags(page_address(page), tags);
 		put_page(page);
 		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
-			mte_free_tag_storage(tags);
 			ret = 0;
 			break;
 		}
@@ -77,13 +75,13 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
 	return ret;
 }
 
-Elf_Half elf_core_extra_phdrs(void)
+Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
+	int i;
+	struct core_vma_metadata *m;
 	int vma_count = 0;
-	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(vmi, vma)
+	for_each_mte_vma(cprm, i, m)
 		vma_count++;
 
 	return vma_count;
@@ -91,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)
 
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, current->mm, 0);
+	int i;
+	struct core_vma_metadata *m;
 
-	for_each_mte_vma(vmi, vma) {
+	for_each_mte_vma(cprm, i, m) {
 		struct elf_phdr phdr;
 
 		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
 		phdr.p_offset = offset;
-		phdr.p_vaddr = vma->vm_start;
+		phdr.p_vaddr = m->start;
 		phdr.p_paddr = 0;
-		phdr.p_filesz = mte_vma_tag_dump_size(vma);
-		phdr.p_memsz = vma->vm_end - vma->vm_start;
+		phdr.p_filesz = mte_vma_tag_dump_size(m);
+		phdr.p_memsz = m->end - m->start;
 		offset += phdr.p_filesz;
 		phdr.p_flags = 0;
 		phdr.p_align = 0;
@@ -114,28 +112,25 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 	return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
+	int i;
+	struct core_vma_metadata *m;
 	size_t data_size = 0;
-	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(vmi, vma)
-		data_size += mte_vma_tag_dump_size(vma);
+	for_each_mte_vma(cprm, i, m)
+		data_size += mte_vma_tag_dump_size(m);
 
 	return data_size;
 }
 
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, current->mm, 0);
-
-	for_each_mte_vma(vmi, vma) {
-		if (vma->vm_flags & VM_DONTDUMP)
-			continue;
-
-		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
+	int i;
+	struct core_vma_metadata *m;
+
+	for_each_mte_vma(cprm, i, m) {
+		if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
 			return 0;
 	}
 
...
@@ -385,7 +385,7 @@ static void task_fpsimd_load(void)
 	WARN_ON(!system_supports_fpsimd());
 	WARN_ON(!have_cpu_fpsimd_context());
 
-	if (system_supports_sve()) {
+	if (system_supports_sve() || system_supports_sme()) {
 		switch (current->thread.fp_type) {
 		case FP_STATE_FPSIMD:
 			/* Stop tracking SVE for this task until next use. */
...
@@ -1357,7 +1357,7 @@ enum aarch64_regset {
 #ifdef CONFIG_ARM64_SVE
 	REGSET_SVE,
 #endif
-#ifdef CONFIG_ARM64_SVE
+#ifdef CONFIG_ARM64_SME
 	REGSET_SSVE,
 	REGSET_ZA,
 #endif
...
@@ -281,7 +281,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 		vl = task_get_sme_vl(current);
 	} else {
-		if (!system_supports_sve())
+		/*
+		 * A SME only system use SVE for streaming mode so can
+		 * have a SVE formatted context with a zero VL and no
+		 * payload data.
+		 */
+		if (!system_supports_sve() && !system_supports_sme())
 			return -EINVAL;
 
 		vl = task_get_sve_vl(current);
@@ -732,7 +737,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 		return err;
 	}
 
-	if (system_supports_sve()) {
+	if (system_supports_sve() || system_supports_sme()) {
 		unsigned int vq = 0;
 
 		if (add_all || test_thread_flag(TIF_SVE) ||
...
@@ -559,3 +559,24 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
 {
 	return __hugetlb_valid_size(size);
 }
+
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+		/*
+		 * Break-before-make (BBM) is required for all user space mappings
+		 * when the permission changes from executable to non-executable
+		 * in cases where cpu is affected with errata #2645198.
+		 */
+		if (pte_user_exec(READ_ONCE(*ptep)))
+			return huge_ptep_clear_flush(vma, addr, ptep);
+	}
+	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+				  pte_t old_pte, pte_t pte)
+{
+	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
@@ -1630,3 +1630,24 @@ static int __init prevent_bootmem_remove_init(void)
 }
 early_initcall(prevent_bootmem_remove_init);
 #endif
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+		/*
+		 * Break-before-make (BBM) is required for all user space mappings
+		 * when the permission changes from executable to non-executable
+		 * in cases where cpu is affected with errata #2645198.
+		 */
+		if (pte_user_exec(READ_ONCE(*ptep)))
+			return ptep_clear_flush(vma, addr, ptep);
+	}
+	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+			     pte_t old_pte, pte_t pte)
+{
+	set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
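The two hooks added above are the arch-side half of the generic ptep_modify_prot_start()/ptep_modify_prot_commit() transaction used on the mprotect path. A simplified sketch of how a caller strings them together; the change_one_pte() helper is hypothetical and abbreviated, not a verbatim copy of the generic code:

```c
#include <linux/mm.h>

/*
 * Simplified sketch of the protection-change transaction. On CPUs with
 * the 2645198 workaround, ptep_modify_prot_start() above becomes
 * ptep_clear_flush() for executable user PTEs, i.e. the entry is
 * invalidated and the TLB flushed before the new value is installed.
 */
static void change_one_pte(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep, pgprot_t newprot)
{
	pte_t oldpte, newpte;

	oldpte = ptep_modify_prot_start(vma, addr, ptep);	  /* clear (and maybe flush) */
	newpte = pte_modify(oldpte, newprot);			  /* build the new permissions */
	ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte); /* install the new entry */
}
```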
@@ -71,6 +71,7 @@ WORKAROUND_2038923
 WORKAROUND_2064142
 WORKAROUND_2077057
 WORKAROUND_2457168
+WORKAROUND_2645198
 WORKAROUND_2658417
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
...
@@ -7,7 +7,7 @@
 
 #include <asm/elf.h>
 
-Elf64_Half elf_core_extra_phdrs(void)
+Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
 	return GATE_EHDR->e_phnum;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
 	return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
 	const struct elf_phdr *const gate_phdrs =
 		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
...
@@ -7,7 +7,7 @@
 
 #include <asm/elf.h>
 
-Elf32_Half elf_core_extra_phdrs(void)
+Elf32_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
 	return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
 	return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
 	if ( vsyscall_ehdr ) {
 		const struct elfhdr *const ehdrp =
...
@@ -440,6 +440,9 @@ static const struct file_operations psci_debugfs_ops = {
 
 static int __init psci_debugfs_init(void)
 {
+	if (!invoke_psci_fn || !psci_ops.get_version)
+		return 0;
+
 	return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
 					       &psci_debugfs_ops));
 }
...
@@ -2034,7 +2034,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 	 * The number of segs are recored into ELF header as 16bit value.
 	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
 	 */
-	segs = cprm->vma_count + elf_core_extra_phdrs();
+	segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 
 	/* for notes section */
 	segs++;
@@ -2074,7 +2074,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 	offset += cprm->vma_data_size;
-	offset += elf_core_extra_data_size();
+	offset += elf_core_extra_data_size(cprm);
 	e_shoff = offset;
 
 	if (e_phnum == PN_XNUM) {
...
@@ -1509,7 +1509,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
 		tmp->next = thread_list;
 		thread_list = tmp;
 
-	segs = cprm->vma_count + elf_core_extra_phdrs();
+	segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 
 	/* for notes section */
 	segs++;
@@ -1555,7 +1555,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 	offset += cprm->vma_data_size;
-	offset += elf_core_extra_data_size();
+	offset += elf_core_extra_data_size(cprm);
 	e_shoff = offset;
 
 	if (e_phnum == PN_XNUM) {
...
@@ -105,14 +105,14 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
  * Dumping its extra ELF program headers includes all the other information
  * a debugger needs to easily find how the gate DSO was being used.
  */
-extern Elf_Half elf_core_extra_phdrs(void);
+extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
 extern int
 elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
 extern int
 elf_core_write_extra_data(struct coredump_params *cprm);
-extern size_t elf_core_extra_data_size(void);
+extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
 #else
-static inline Elf_Half elf_core_extra_phdrs(void)
+static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
 	return 0;
 }
@@ -127,7 +127,7 @@ static inline int elf_core_write_extra_data(struct coredump_params *cprm)
 	return 1;
 }
 
-static inline size_t elf_core_extra_data_size(void)
+static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
 	return 0;
 }
...
@@ -58,7 +58,7 @@
 
 #define PSCI_1_1_FN_SYSTEM_RESET2		PSCI_0_2_FN(18)
 #define PSCI_1_1_FN_MEM_PROTECT			PSCI_0_2_FN(19)
-#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN(19)
+#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN(20)
 
 #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND	PSCI_0_2_FN64(12)
 #define PSCI_1_0_FN64_NODE_HW_STATE		PSCI_0_2_FN64(13)
@@ -67,7 +67,7 @@
 #define PSCI_1_0_FN64_STAT_COUNT		PSCI_0_2_FN64(17)
 
 #define PSCI_1_1_FN64_SYSTEM_RESET2		PSCI_0_2_FN64(18)
-#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN64(19)
+#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN64(20)
 
 /* PSCI v0.2 power state encoding for CPU_SUSPEND function */
 #define PSCI_0_2_POWER_STATE_ID_MASK		0xffff
...
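For context on the fix above: PSCI function IDs are a base value plus the function number, so MEM_PROTECT_CHECK_RANGE (function 20 in the PSCI spec) must not reuse MEM_PROTECT's number 19. A small sanity-check sketch, assuming the usual PSCI_0_2_FN()/PSCI_0_2_FN64() encodings from the same header (0x84000000 base, plus 0x40000000 for the SMC64 range):

```c
#include <stdio.h>

/* Assumed to mirror the UAPI header's encoding macros. */
#define PSCI_0_2_FN_BASE	0x84000000u
#define PSCI_0_2_FN(n)		(PSCI_0_2_FN_BASE + (n))
#define PSCI_0_2_64BIT		0x40000000u
#define PSCI_0_2_FN64(n)	(PSCI_0_2_FN_BASE + PSCI_0_2_64BIT + (n))

int main(void)
{
	/* MEM_PROTECT is function 19, MEM_PROTECT_CHECK_RANGE is 20. */
	printf("MEM_PROTECT             = 0x%08x\n", PSCI_0_2_FN(19));   /* 0x84000013 */
	printf("MEM_PROTECT_CHECK_RANGE = 0x%08x\n", PSCI_0_2_FN(20));   /* 0x84000014 */
	printf("FN64 CHECK_RANGE        = 0x%08x\n", PSCI_0_2_FN64(20)); /* 0xc4000014 */
	return 0;
}
```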