Commit 1bdb2d4e authored by Ard Biesheuvel, committed by ard

ARM: split off core mapping logic from create_mapping

In order to be able to reuse the core mapping logic of create_mapping()
for mapping the UEFI Runtime Services into a private set of page tables,
split it off from create_mapping() into a separate function,
__create_mapping(), which we will wire up in a subsequent patch.

Tested-by: Ryan Harkin <ryan.harkin@linaro.org>
Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
parent 2937367b
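For context, a minimal sketch of what this split enables, using only the __create_mapping() signature introduced below. The efi_mm instance and the efi_map_md() helper are hypothetical names for illustration; the actual UEFI wiring arrives in the subsequent patch referenced above.

	/*
	 * Illustrative sketch only (not part of this patch): a caller can
	 * now direct the core mapping logic at a private set of page
	 * tables by passing an mm_struct other than init_mm.
	 */
	static struct mm_struct efi_mm;	/* hypothetical: pgd allocated privately */

	static void __init efi_map_md(struct map_desc *md)
	{
		/*
		 * Same core logic as create_mapping(), but the walk starts
		 * from efi_mm.pgd via pgd_offset(mm, addr) instead of
		 * init_mm's swapper tables via pgd_offset_k(addr).
		 */
		__create_mapping(&efi_mm, md);
	}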
@@ -818,7 +818,8 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 }
 
 #ifndef CONFIG_ARM_LPAE
-static void __init create_36bit_mapping(struct map_desc *md,
+static void __init create_36bit_mapping(struct mm_struct *mm,
+					struct map_desc *md,
 					const struct mem_type *type)
 {
 	unsigned long addr, length, end;
@@ -859,7 +860,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	 */
 	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
 
-	pgd = pgd_offset_k(addr);
+	pgd = pgd_offset(mm, addr);
 	end = addr + length;
 	do {
 		pud_t *pud = pud_offset(pgd, addr);
@@ -876,33 +877,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
 }
 #endif	/* !CONFIG_ARM_LPAE */
 
-/*
- * Create the page directory entries and any necessary
- * page tables for the mapping specified by `md'.  We
- * are able to cope here with varying sizes and address
- * offsets, and we take full advantage of sections and
- * supersections.
- */
-static void __init create_mapping(struct map_desc *md)
+static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md)
 {
 	unsigned long addr, length, end;
 	phys_addr_t phys;
 	const struct mem_type *type;
 	pgd_t *pgd;
 
-	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
-			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
-		return;
-	}
-
-	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
-	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
-		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
-			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
-	}
-
 	type = &mem_types[md->type];
 
 #ifndef CONFIG_ARM_LPAE
@@ -910,7 +891,7 @@ static void __init create_mapping(struct map_desc *md)
 	 * Catch 36-bit addresses
 	 */
 	if (md->pfn >= 0x100000) {
-		create_36bit_mapping(md, type);
+		create_36bit_mapping(mm, md, type);
 		return;
 	}
 #endif
@@ -925,7 +906,7 @@ static void __init create_mapping(struct map_desc *md)
 		return;
 	}
 
-	pgd = pgd_offset_k(addr);
+	pgd = pgd_offset(mm, addr);
 	end = addr + length;
 	do {
 		unsigned long next = pgd_addr_end(addr, end);
@@ -937,6 +918,31 @@ static void __init create_mapping(struct map_desc *md)
 	} while (pgd++, addr != end);
 }
 
+/*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'.  We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections and
+ * supersections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		return;
+	}
+
+	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+	}
+
+	__create_mapping(&init_mm, md);
+}
+
 /*
  * Create the architecture specific mappings
  */
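Behavior for existing callers is unchanged: create_mapping() now performs the sanity checks and then calls __create_mapping(&init_mm, md), which walks the same swapper page tables as before, since pgd_offset_k() is defined in terms of init_mm in the kernel's pgtable headers (paraphrased):

	/* Paraphrased from the kernel's pgtable headers: */
	#define pgd_offset_k(addr)	pgd_offset(&init_mm, (addr))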