Commit 6dd8b1a0 authored by Catalin Marinas, committed by Will Deacon

arm64: mte: Dump the MTE tags in the core file

For each vma mapped with PROT_MTE (the VM_MTE flag set), generate a
PT_ARM_MEMTAG_MTE segment in the core file and dump the corresponding
tags. The in-file size for such segments is 128 bytes per page.
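
The 128 bytes per page follow from the MTE tag layout, assuming 4KB pages: each
16-byte granule carries one 4-bit tag and two tags are packed per byte, i.e.

  tag storage per page = PAGE_SIZE / MTE_GRANULE_SIZE / 2
                       = 4096 / 16 / 2
                       = 128 bytes (MTE_PAGE_TAG_STORAGE)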

For pages in a VM_MTE vma which are not present in the user page tables
or don't have the PG_mte_tagged flag set (e.g. execute-only), just write
zeros in the core file.

An example of program headers for two vmas, one 2-page, the other 4-page
long:

  Type           Offset   VirtAddr           PhysAddr           FileSiz  MemSiz   Flg Align
  ...
  LOAD           0x030000 0x0000ffff80034000 0x0000000000000000 0x000000 0x002000 RW  0x1000
  LOAD           0x030000 0x0000ffff80036000 0x0000000000000000 0x004000 0x004000 RW  0x1000
  ...
  LOPROC+0x1     0x05b000 0x0000ffff80034000 0x0000000000000000 0x000100 0x002000     0
  LOPROC+0x1     0x05b100 0x0000ffff80036000 0x0000000000000000 0x000200 0x004000     0
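
Each tag segment mirrors a LOAD segment above it: p_vaddr matches, p_memsz is
the vma size and p_filesz is p_memsz / 32 (128 tag bytes per 4KB page). As a
rough illustration, not part of the patch, a consumer could turn an address
into the file offset of its tag byte along these lines (Elf64_Phdr from
<elf.h>); the helper name is made up and the nibble order within a tag byte is
an assumption, not something this patch defines:

  /* Hypothetical helper: core-file offset of the byte holding addr's MTE tag */
  static long example_tag_byte_offset(const Elf64_Phdr *tag_phdr, unsigned long addr)
  {
          if (addr < tag_phdr->p_vaddr ||
              addr >= tag_phdr->p_vaddr + tag_phdr->p_memsz)
                  return -1;
          /* one 4-bit tag per 16-byte granule, two tags packed per byte */
          return tag_phdr->p_offset + (addr - tag_phdr->p_vaddr) / 16 / 2;
  }

With the second tag segment above, for example, an address one page into the
vma (0xffff80037000) resolves to file offset 0x05b100 + 0x80 = 0x05b180.
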
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Luis Machado <luis.machado@linaro.org>
Reviewed-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20220131165456.2160675-5-catalin.marinas@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent ab1e435c

arch/arm64/Kconfig
@@ -10,6 +10,7 @@ config ARM64
 	select ACPI_SPCR_TABLE if ACPI
 	select ACPI_PPTT if ACPI
 	select ARCH_HAS_DEBUG_WX
+	select ARCH_BINFMT_ELF_EXTRA_PHDRS
 	select ARCH_BINFMT_ELF_STATE
 	select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
 	select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION

arch/arm64/kernel/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
 obj-$(CONFIG_PARAVIRT) += paravirt.o
 obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
+obj-$(CONFIG_ELF_CORE) += elfcore.o
 obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
 				cpu-reset.o
 obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o

arch/arm64/kernel/elfcore.c (new file)
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/coredump.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>

#define for_each_mte_vma(tsk, vma)					\
	if (system_supports_mte())					\
		for (vma = tsk->mm->mmap; vma; vma = vma->vm_next)	\
			if (vma->vm_flags & VM_MTE)

static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
}

/* Derived from dump_user_range(); start/end must be page-aligned */
static int mte_dump_tag_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		char tags[MTE_PAGE_TAG_STORAGE];
		struct page *page = get_dump_page(addr);

		/*
		 * get_dump_page() returns NULL when encountering an empty
		 * page table entry that would otherwise have been filled with
		 * the zero page. Skip the equivalent tag dump which would
		 * have been all zeros.
		 */
		if (!page) {
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		/*
		 * Pages mapped in user space as !pte_access_permitted() (e.g.
		 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
		 */
		if (!test_bit(PG_mte_tagged, &page->flags)) {
			put_page(page);
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		mte_save_page_tags(page_address(page), tags);
		put_page(page);
		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE))
			return 0;
	}

	return 1;
}

Elf_Half elf_core_extra_phdrs(void)
{
	struct vm_area_struct *vma;
	int vma_count = 0;

	for_each_mte_vma(current, vma)
		vma_count++;

	return vma_count;
}

int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
	struct vm_area_struct *vma;

	for_each_mte_vma(current, vma) {
		struct elf_phdr phdr;

		phdr.p_type = PT_ARM_MEMTAG_MTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = mte_vma_tag_dump_size(vma);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = 0;
		phdr.p_align = 0;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			return 0;
	}

	return 1;
}

size_t elf_core_extra_data_size(void)
{
	struct vm_area_struct *vma;
	size_t data_size = 0;

	for_each_mte_vma(current, vma)
		data_size += mte_vma_tag_dump_size(vma);

	return data_size;
}

int elf_core_write_extra_data(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;

	for_each_mte_vma(current, vma) {
		if (vma->vm_flags & VM_DONTDUMP)
			continue;

		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
			return 0;
	}

	return 1;
}
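
On the consumer side, a post-mortem tool can enumerate these segments by
walking the core file's program headers. A minimal userspace sketch, not part
of the patch, assuming a 64-bit core read on a host of the same endianness and
using PT_LOPROC + 0x1 as the segment type shown above (all other names are
illustrative):

#include <elf.h>
#include <stdio.h>

#define EXAMPLE_PT_ARM_MEMTAG_MTE (PT_LOPROC + 0x1)

/* List the MTE tag segments present in a core file's program headers. */
int main(int argc, char **argv)
{
	Elf64_Ehdr ehdr;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&ehdr, sizeof(ehdr), 1, f) != 1)
		return 1;

	for (int i = 0; i < ehdr.e_phnum; i++) {
		Elf64_Phdr phdr;

		if (fseek(f, (long)(ehdr.e_phoff + (Elf64_Off)i * ehdr.e_phentsize), SEEK_SET) ||
		    fread(&phdr, sizeof(phdr), 1, f) != 1)
			break;
		if (phdr.p_type != EXAMPLE_PT_ARM_MEMTAG_MTE)
			continue;
		/* p_filesz holds 128 bytes of packed tags per page of p_memsz */
		printf("tags for [%#llx, %#llx) at file offset %#llx (%llu bytes)\n",
		       (unsigned long long)phdr.p_vaddr,
		       (unsigned long long)(phdr.p_vaddr + phdr.p_memsz),
		       (unsigned long long)phdr.p_offset,
		       (unsigned long long)phdr.p_filesz);
	}
	fclose(f);
	return 0;
}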