Commit 5d868627 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - Build fix when !CONFIG_UID16 (the patch touches generic files but
   only affects arm64 builds; submitted by Arnd Bergmann)

 - EFI fixes to deal with early_memremap() returning NULL and correctly
   mapping run-time regions

 - Fix CPUID register extraction of unsigned fields, which must not be
   sign-extended (a worked sketch follows the commit message)

 - ASID allocator fix to deal with long-running tasks over multiple
   generation roll-overs

 - Revert support for marking page ranges as contiguous PTEs (it leads
   to TLB conflicts and requires additional non-trivial kernel changes)

 - Proper early_alloc() failure check

 - Disable KASan for 48-bit VA and 16KB page configuration (the pgd is
   larger than the KASan shadow memory)

 - Update the fault_info table (the original descriptions were based on
   an early engineering spec)

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: efi: fix initcall return values
  arm64: efi: deal with NULL return value of early_memremap()
  arm64: debug: Treat the BRPs/WRPs as unsigned
  arm64: cpufeature: Track unsigned fields
  arm64: cpufeature: Add helpers for extracting unsigned values
  Revert "arm64: Mark kernel page ranges contiguous"
  arm64: mm: keep reserved ASIDs in sync with mm after multiple rollovers
  arm64: KASAN depends on !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
  arm64: efi: correctly map runtime regions
  arm64: mm: fix fault_info table xFSC decoding
  arm64: fix building without CONFIG_UID16
  arm64: early_alloc: Fix check for allocation failure
parents 5a44ed0d 66362c9a
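
The CPUID extraction fix is easiest to see with concrete numbers. Below is a
minimal userspace sketch (not kernel code; the helper bodies mirror
cpuid_feature_extract_field_width() and the new unsigned variant in the diff).
A BRPs field reading 0xf means 16 breakpoints, but the signed extractor
sign-extends it to -1, so get_num_brps() would report 1 + (-1) = 0:

	#include <stdint.h>
	#include <stdio.h>

	/* signed extractor, as in the original cpuid_feature_extract_field_width() */
	static int64_t extract_signed(uint64_t features, int field, int width)
	{
		return (int64_t)(features << (64 - width - field)) >> (64 - width);
	}

	/* unsigned extractor, as added by this series */
	static uint64_t extract_unsigned(uint64_t features, int field, int width)
	{
		return (uint64_t)(features << (64 - width - field)) >> (64 - width);
	}

	int main(void)
	{
		/* pretend ID_AA64DFR0_EL1.BRPs (bits [15:12]) reads 0xf */
		uint64_t dfr0 = 0xfULL << 12;

		printf("signed:   %lld\n", (long long)extract_signed(dfr0, 12, 4));		/* -1 */
		printf("unsigned: %llu\n", (unsigned long long)extract_unsigned(dfr0, 12, 4));	/* 15 */
		return 0;
	}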
@@ -49,7 +49,7 @@ config ARM64
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
 	select HAVE_ARCH_JUMP_LABEL
-	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
+	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
...
@@ -47,8 +47,12 @@ enum ftr_type {
 #define FTR_STRICT	true	/* SANITY check strict matching required */
 #define FTR_NONSTRICT	false	/* SANITY check ignored */
 
+#define FTR_SIGNED	true	/* Value should be treated as signed */
+#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
+
 struct arm64_ftr_bits {
+	bool		sign;	/* Value is signed ? */
 	bool		strict;	/* CPU Sanity check: strict matching required ? */
 	enum ftr_type	type;
 	u8		shift;
 	u8		width;
@@ -124,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field)
 	return cpuid_feature_extract_field_width(features, field, 4);
 }
 
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
+{
+	return (u64)(features << (64 - width - field)) >> (64 - width);
+}
+
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field(u64 features, int field)
+{
+	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
+}
+
 static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
 {
 	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -131,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
 static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
 {
-	return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width);
+	return ftrp->sign ?
+		cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) :
+		cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width);
 }
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
...
@@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp;
 /* Determine number of BRP registers available. */
 static inline int get_num_brps(void)
 {
+	u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
-		cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+		cpuid_feature_extract_unsigned_field(dfr0,
 						ID_AA64DFR0_BRPS_SHIFT);
 }
 
 /* Determine number of WRP registers available. */
 static inline int get_num_wrps(void)
 {
+	u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
-		cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+		cpuid_feature_extract_unsigned_field(dfr0,
 						ID_AA64DFR0_WRPS_SHIFT);
 }
...
@@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
-#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 	{						\
+		.sign = SIGNED,				\
 		.strict = STRICT,			\
 		.type = TYPE,				\
 		.shift = SHIFT,				\
@@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 		.safe_val = SAFE_VAL,			\
 	}
 
+/* Define a feature with signed values */
+#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
+/* Define a feature with unsigned value */
+#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
 #define ARM64_FTR_END					\
 	{						\
 		.width = 0,				\
@@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 	 * Differing PARange is fine as long as all peripherals and memory are mapped
 	 * within the minimum PARange of all CPUs
 	 */
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static struct arm64_ftr_bits ftr_ctr[] = {
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
 	 * make use of *minLine
 	 */
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
+	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
 	ARM64_FTR_END,
 };
@@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = {
 static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
 	ARM64_FTR_END,
 };
...
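
For reference, the new wrappers only pre-fill the .sign field of the
initializer. A reduced userspace model of the expansion (the struct is trimmed
to the fields used here and the macro names are shortened, so this is a sketch
rather than the kernel's exact definitions):

	#include <stdbool.h>
	#include <stdio.h>

	/* reduced model of struct arm64_ftr_bits, enough to show the expansion */
	struct ftr_bits { bool sign, strict; int shift, width; long safe_val; };

	#define FTR_SIGNED	true
	#define FTR_UNSIGNED	false

	#define __FTR_BITS(SIGNED, STRICT, SHIFT, WIDTH, SAFE_VAL) \
		{ .sign = SIGNED, .strict = STRICT, .shift = SHIFT, \
		  .width = WIDTH, .safe_val = SAFE_VAL }
	#define U_FTR_BITS(STRICT, SHIFT, WIDTH, SAFE_VAL) \
		__FTR_BITS(FTR_UNSIGNED, STRICT, SHIFT, WIDTH, SAFE_VAL)

	int main(void)
	{
		/* IminLine, CTR_EL0 bits [3:0], as an unsigned LOWER_SAFE field */
		struct ftr_bits iminline = U_FTR_BITS(true, 0, 4, 0);

		printf("sign=%d shift=%d width=%d\n",
		       iminline.sign, iminline.shift, iminline.width);
		return 0;
	}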
@@ -127,7 +127,11 @@ static int __init uefi_init(void)
 	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
 	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
 				       table_size);
+	if (config_tables == NULL) {
+		pr_warn("Unable to map EFI config table array.\n");
+		retval = -ENOMEM;
+		goto out;
+	}
 
 	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
 					 sizeof(efi_config_table_64_t), NULL);
@@ -209,6 +213,14 @@ void __init efi_init(void)
 		PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
 	memmap.phys_map = params.mmap;
 	memmap.map = early_memremap(params.mmap, params.mmap_size);
+	if (memmap.map == NULL) {
+		/*
+		 * If we are booting via UEFI, the UEFI memory map is the only
+		 * description of memory we have, so there is little point in
+		 * proceeding if we cannot access it.
+		 */
+		panic("Unable to map EFI memory map.\n");
+	}
 	memmap.map_end = memmap.map + params.mmap_size;
 	memmap.desc_size = params.desc_size;
 	memmap.desc_version = params.desc_ver;
@@ -227,7 +239,6 @@ static bool __init efi_virtmap_init(void)
 	init_new_context(NULL, &efi_mm);
 
 	for_each_efi_memory_desc(&memmap, md) {
-		u64 paddr, npages, size;
 		pgprot_t prot;
 
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
@@ -235,11 +246,6 @@ static bool __init efi_virtmap_init(void)
 		if (md->virt_addr == 0)
 			return false;
 
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-
 		pr_info("  EFI remap 0x%016llx => %p\n",
 			md->phys_addr, (void *)md->virt_addr);
@@ -256,7 +262,8 @@ static bool __init efi_virtmap_init(void)
 		else
 			prot = PAGE_KERNEL;
 
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+		create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
+				   md->num_pages << EFI_PAGE_SHIFT,
 				   __pgprot(pgprot_val(prot) | PTE_NG));
 	}
 	return true;
@@ -273,12 +280,12 @@ static int __init arm64_enable_runtime_services(void)
 	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
-		return -1;
+		return 0;
 	}
 
 	if (efi_runtime_disabled()) {
 		pr_info("EFI runtime services will be disabled.\n");
-		return -1;
+		return 0;
 	}
 
 	pr_info("Remapping and enabling EFI services.\n");
@@ -288,7 +295,7 @@ static int __init arm64_enable_runtime_services(void)
 			   mapsize);
 	if (!memmap.map) {
 		pr_err("Failed to remap EFI memory map\n");
-		return -1;
+		return -ENOMEM;
 	}
 	memmap.map_end = memmap.map + mapsize;
 	efi.memmap = &memmap;
@@ -297,13 +304,13 @@ static int __init arm64_enable_runtime_services(void)
 				   sizeof(efi_system_table_t));
 	if (!efi.systab) {
 		pr_err("Failed to remap EFI System Table\n");
-		return -1;
+		return -ENOMEM;
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
 	if (!efi_virtmap_init()) {
 		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	/* Set up runtime services function pointers */
...
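
The return-value changes in arm64_enable_runtime_services() follow the usual
initcall convention: a negative return is treated as a genuine error, while
"EFI is simply not in use" is no longer reported as one. A minimal kernel-style
sketch of that convention, with hypothetical names (example_feature_present
stands in for efi_enabled(EFI_BOOT), example_map for the remapped memory map):

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/printk.h>
	#include <linux/types.h>

	static bool example_feature_present;	/* hypothetical; stand-in for efi_enabled(EFI_BOOT) */
	static void *example_map;		/* hypothetical; stand-in for the remapped map */

	static int __init example_enable_services(void)
	{
		if (!example_feature_present) {
			pr_info("example services will not be available.\n");
			return 0;		/* feature absent: not an initcall failure */
		}

		if (!example_map) {
			pr_err("failed to remap example tables\n");
			return -ENOMEM;		/* genuine failure: report a real errno */
		}

		return 0;
	}
	early_initcall(example_enable_services);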
@@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu)
 	__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
 
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
@@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 */
 		asid &= ~ASID_MASK;
 		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
 	}
 
 	/*
@@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-
-bump_gen:
-	asid |= generation;
-	return asid;
+	return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
...
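
The context.c fix hinges on how an ASID word is composed: the hardware ASID
lives in the low bits and the rollover generation in the high bits, so
generation | (asid & ~ASID_MASK) re-tags an old ASID with the current
generation. check_update_reserved_asid() now also rewrites every stale per-CPU
reserved copy to that new value, so a task that sleeps across several
rollovers is still recognized on the next one. A userspace sketch of the bit
layout (a 16-bit ASID is assumed here; the kernel derives asid_bits at boot):

	#include <stdint.h>
	#include <stdio.h>

	#define ASID_BITS	16			/* assumption for the sketch */
	#define ASID_MASK	(~0ULL << ASID_BITS)	/* generation lives above the ASID bits */

	int main(void)
	{
		uint64_t generation = 3ULL << ASID_BITS;	/* third rollover */
		uint64_t old = (1ULL << ASID_BITS) | 0x42;	/* ASID 0x42 tagged with gen 1 */

		/* same hardware ASID, re-tagged with the current generation */
		uint64_t newasid = generation | (old & ~ASID_MASK);

		printf("newasid = 0x%llx\n", (unsigned long long)newasid);	/* 0x30042 */
		return 0;
	}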
@@ -393,16 +393,16 @@ static struct fault_info {
 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
-	{ do_bad,		SIGBUS,  0,		"reserved access flag fault"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 8"			},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
-	{ do_bad,		SIGBUS,  0,		"reserved permission fault"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 12"			},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
 	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
-	{ do_bad,		SIGBUS,  0,		"asynchronous external abort"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 17"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
 	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
@@ -410,16 +410,16 @@ static struct fault_info {
 	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
-	{ do_bad,		SIGBUS,  0,		"asynchronous parity error"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 25"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 27"			},
-	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
-	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
-	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
-	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
 	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment fault"		},
-	{ do_bad,		SIGBUS,  0,		"debug event"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 34"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 37"			},
@@ -433,21 +433,21 @@ static struct fault_info {
 	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
-	{ do_bad,		SIGBUS,  0,		"unknown 48"			},
+	{ do_bad,		SIGBUS,  0,		"TLB conflict abort"		},
 	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
 	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
-	{ do_bad,		SIGBUS,  0,		"unknown 53"			},
+	{ do_bad,		SIGBUS,  0,		"implementation fault (unsupported exclusive)" },
 	{ do_bad,		SIGBUS,  0,		"unknown 54"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 55"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 56"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 57"			},
-	{ do_bad,		SIGBUS,  0,		"implementation fault (coprocessor abort)" },
+	{ do_bad,		SIGBUS,  0,		"unknown 58"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 59"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 60"			},
-	{ do_bad,		SIGBUS,  0,		"unknown 61"			},
-	{ do_bad,		SIGBUS,  0,		"unknown 62"			},
+	{ do_bad,		SIGBUS,  0,		"section domain fault"		},
+	{ do_bad,		SIGBUS,  0,		"page domain fault"		},
 	{ do_bad,		SIGBUS,  0,		"unknown 63"			},
 };
...
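
The table has 64 entries because the fault handler indexes it with the fault
status code held in the low six bits of ESR_EL1; the renamed entries track the
architected xFSC encodings. A userspace sketch of that decode step (the table
is reduced to the one entry this series adds, and the ESR value is made up for
illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define ESR_FSC_MASK	0x3f	/* fault status code: ESR_ELx[5:0] */

	static const char *fault_name(uint64_t esr)
	{
		static const char *const names[64] = {
			[0x30] = "TLB conflict abort",	/* entry 48, added by this fix */
			/* ... remaining entries as in the fault_info table ... */
		};
		const char *name = names[esr & ESR_FSC_MASK];

		return name ? name : "unknown";
	}

	int main(void)
	{
		printf("%s\n", fault_name(0x96000030ULL));	/* prints "TLB conflict abort" */
		return 0;
	}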
@@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 static void __init *early_alloc(unsigned long sz)
 {
-	void *ptr = __va(memblock_alloc(sz, sz));
-	BUG_ON(!ptr);
+	phys_addr_t phys;
+	void *ptr;
+
+	phys = memblock_alloc(sz, sz);
+	BUG_ON(!phys);
+	ptr = __va(phys);
 	memset(ptr, 0, sz);
 	return ptr;
 }
@@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
 	do {
 		/*
 		 * Need to have the least restrictive permissions available
-		 * permissions will be fixed up later. Default the new page
-		 * range as contiguous ptes.
+		 * permissions will be fixed up later
 		 */
-		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
+		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
 		pfn++;
 	} while (pte++, i++, i < PTRS_PER_PTE);
 }
 
-/*
- * Given a PTE with the CONT bit set, determine where the CONT range
- * starts, and clear the entire range of PTE CONT bits.
- */
-static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
-{
-	int i;
-
-	pte -= CONT_RANGE_OFFSET(addr);
-	for (i = 0; i < CONT_PTES; i++) {
-		set_pte(pte, pte_mknoncont(*pte));
-		pte++;
-	}
-	flush_tlb_all();
-}
-
-/*
- * Given a range of PTEs set the pfn and provided page protection flags
- */
-static void __populate_init_pte(pte_t *pte, unsigned long addr,
-				unsigned long end, phys_addr_t phys,
-				pgprot_t prot)
-{
-	unsigned long pfn = __phys_to_pfn(phys);
-
-	do {
-		/* clear all the bits except the pfn, then apply the prot */
-		set_pte(pte, pfn_pte(pfn, prot));
-		pte++;
-		pfn++;
-		addr += PAGE_SIZE;
-	} while (addr != end);
-}
-
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
-				  unsigned long end, phys_addr_t phys,
+				  unsigned long end, unsigned long pfn,
 				  pgprot_t prot,
 				  void *(*alloc)(unsigned long size))
 {
 	pte_t *pte;
-	unsigned long next;
 
 	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
 		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		next = min(end, (addr + CONT_SIZE) & CONT_MASK);
-		if (((addr | next | phys) & ~CONT_MASK) == 0) {
-			/* a block of CONT_PTES */
-			__populate_init_pte(pte, addr, next, phys,
-					    __pgprot(pgprot_val(prot) | PTE_CONT));
-		} else {
-			/*
-			 * If the range being split is already inside of a
-			 * contiguous range but this PTE isn't going to be
-			 * contiguous, then we want to unmark the adjacent
-			 * ranges, then update the portion of the range we
-			 * are interrested in.
-			 */
-			clear_cont_pte_range(pte, addr);
-			__populate_init_pte(pte, addr, next, phys, prot);
-		}
-
-		pte += (next - addr) >> PAGE_SHIFT;
-		phys += next - addr;
-		addr = next;
-	} while (addr != end);
+		set_pte(pte, pfn_pte(pfn, prot));
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
 static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 			}
 		}
 	} else {
-		alloc_init_pte(pmd, addr, next, phys, prot, alloc);
+		alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+			       prot, alloc);
 	}
 
 	phys += next - addr;
 } while (pmd++, addr = next, addr != end);
...
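
The early_alloc() fix corrects a check that could never fire: memblock_alloc()
returns a physical address and yields 0 on failure, but converting 0 through
__va() produces a perfectly non-NULL linear-map pointer, so BUG_ON(!ptr)
missed the failure. Testing the physical address before conversion catches it.
A userspace sketch of the pitfall (the PAGE_OFFSET value is illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_OFFSET	0xffff800000000000ULL	/* illustrative linear-map base */

	/* mock of the kernel's phys-to-virt conversion */
	static void *mock_va(uint64_t phys) { return (void *)(phys + PAGE_OFFSET); }

	int main(void)
	{
		uint64_t phys = 0;		/* memblock_alloc() failure */
		void *ptr = mock_va(phys);

		printf("ptr = %p (non-NULL, so BUG_ON(!ptr) misses the failure)\n", ptr);
		printf("check phys instead: %s\n", phys ? "ok" : "allocation failed");
		return 0;
	}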
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
 asmlinkage long sys_lchown(const char __user *filename,
 				uid_t user, gid_t group);
 asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 asmlinkage long sys_chown16(const char __user *filename,
 				old_uid_t user, old_gid_t group);
 asmlinkage long sys_lchown16(const char __user *filename,
...
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t        gid16_t;
 typedef unsigned long		uintptr_t;
 
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 /* This is defined by include/asm-{arch}/posix_types.h */
 typedef __kernel_old_uid_t	old_uid_t;
 typedef __kernel_old_gid_t	old_gid_t;
...