Commit d01447b3 authored by Paul Mundt

sh: Merge legacy and dynamic PMB modes.

This implements a bit of rework for the PMB code, which permits us to
kill off the legacy PMB mode completely. Rather than trusting the boot
loader to do the right thing, we do a quick verification of the PMB
contents to determine whether to have the kernel set up the initial
mappings or whether it needs to mangle them later on instead.
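For illustration only, the probe amounts to the following C rendition of the assembly added to head_32.S below (pmb_boot_mappings_present() is a hypothetical name; the real check runs in assembly before the MMU is fully configured):

	/*
	 * Hypothetical C sketch of the head_32.S probe: scan the PMB data
	 * array for a valid entry that already maps __MEMORY_START.
	 */
	static int pmb_boot_mappings_present(void)
	{
		int i;

		for (i = 0; i < NR_PMB_ENTRIES; i++) {
			unsigned long data = __raw_readl(mk_pmb_data(i));

			/* Valid entry whose data matches __MEMORY_START | PMB_V? */
			if ((data & (__MEMORY_START | PMB_V)) ==
			    (__MEMORY_START | PMB_V))
				return 1;	/* legacy mappings found, fix up in pmb_init() */
		}

		return 0;	/* none found, establish our own initial mappings */
	}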

If we're booting from legacy mappings, the kernel will now take control
of them and make them match the kernel's initial mapping configuration.
This is accomplished by breaking the initialization phase out into
multiple steps: synchronization, merging, and resizing. With the recent
rework, the synchronization code establishes page links for compound
mappings already, so we build on top of this for promoting mappings and
reclaiming unused slots.
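To make the promotion concrete, consider a hypothetical chain of four contiguous 16MB boot mappings left behind by the loader (the logic is pmb_merge()/pmb_size_valid() in the pmb.c hunks below):

	/*
	 * head(16MB) -> 16MB -> 16MB -> 16MB
	 *
	 * Accumulated spans while walking the chain: 16MB (valid),
	 * 32MB (invalid), 48MB (invalid), 64MB (valid). pmb_merge()
	 * settles on newsize = 64MB with depth = 3, rewrites the head
	 * as a single 64MB entry, and __pmb_unmap_entry(head->link, 3)
	 * reclaims the three now-redundant PMB slots.
	 */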

At the same time, the changes introduced for the uncached helpers also
permit us to dynamically resize the uncached mapping without any
particular headaches. The smallest page size is more than sufficient for
mapping all of kernel text, and as we're careful not to jump to any
far-off locations in the setup code, the mapping can safely be resized
regardless of whether we are executing from it or not.
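Condensed from the pmb_resize()/uncached_resize() hunks below, the resize amounts to rewriting a single PMB entry and updating the software bounds (sketch only; pmbe is the entry covering uncached_start):

	spin_lock_irqsave(&pmbe->lock, flags);

	pmbe->size   = SZ_16M;			/* smallest PMB page size */
	pmbe->flags &= ~PMB_SZ_MASK;
	pmbe->flags |= pmb_size_to_flags(pmbe->size);

	uncached_resize(pmbe->size);		/* update uncached_size/uncached_end */
	__set_pmb_entry(pmbe);			/* rewrite the hardware entry */

	spin_unlock_irqrestore(&pmbe->lock, flags);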
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 2e450643
@@ -117,7 +117,7 @@ void decompress_kernel(void)
 	output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
 	output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
-#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY)
+#if defined(CONFIG_29BIT)
 	output_addr |= P2SEG;
 #endif
 #endif
......
@@ -58,7 +58,7 @@ typedef struct {
 long pmb_remap(unsigned long virt, unsigned long phys,
	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
-int pmb_init(void);
+void pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
@@ -67,14 +67,8 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys,
 	return -EINVAL;
 }
 
-static inline void pmb_unmap(unsigned long addr)
-{
-}
-
-static inline int pmb_init(void)
-{
-	return -ENODEV;
-}
+#define pmb_unmap(addr)		do { } while (0)
+#define pmb_init(addr)		do { } while (0)
 
 #ifdef CONFIG_29BIT
 #define __in_29bit_mode()	(1)
......
@@ -45,21 +45,12 @@
 #endif
 
 #ifndef __ASSEMBLY__
+#include <asm/uncached.h>
+
 extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
-#ifdef CONFIG_UNCACHED_MAPPING
-extern unsigned long uncached_start, uncached_end;
-
-extern int virt_addr_uncached(unsigned long kaddr);
-extern void uncached_init(void);
-#else
-#define virt_addr_uncached(kaddr)	(0)
-#define uncached_init()			do { } while (0)
-#endif
-
 static inline unsigned long
 pages_do_alias(unsigned long addr1, unsigned long addr2)
 {
......
#ifndef __ASM_SH_UNCACHED_H
#define __ASM_SH_UNCACHED_H

#include <linux/bug.h>

#ifdef CONFIG_UNCACHED_MAPPING
extern unsigned long uncached_start, uncached_end;

extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
extern void uncached_resize(unsigned long size);
#else
#define virt_addr_uncached(kaddr)	(0)
#define uncached_init()			do { } while (0)
#define uncached_resize(size)		BUG()
#endif

#endif /* __ASM_SH_UNCACHED_H */
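For context, the lookup helper declared above reduces to a bounds check against the (now resizable) window; a sketch of the arch/sh/mm/uncached.c side, assuming it simply tests the exported bounds:

	int virt_addr_uncached(unsigned long kaddr)
	{
		/* Inside the uncached window? Bounds move via uncached_resize(). */
		return (kaddr >= uncached_start) && (kaddr < uncached_end);
	}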
@@ -85,7 +85,7 @@ ENTRY(_stext)
 	ldc	r0, r7_bank	! ... and initial thread_info
 #endif
 
-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+#ifdef CONFIG_PMB
 /*
  * Reconfigure the initial PMB mappings setup by the hardware.
  *
@@ -139,7 +139,6 @@ ENTRY(_stext)
 	mov.l	r0, @r1
 
 	mov.l	.LMEMORY_SIZE, r5
-	mov	r5, r7
 
 	mov	#PMB_E_SHIFT, r0
 	mov	#0x1, r4
@@ -150,6 +149,40 @@ ENTRY(_stext)
 	mov.l	.LFIRST_ADDR_ENTRY, r2
 	mov.l	.LPMB_ADDR, r3
 
+	/*
+	 * First we need to walk the PMB and figure out if there are any
+	 * existing mappings that match the initial mappings VPN/PPN.
+	 * If these have already been established by the bootloader, we
+	 * don't bother setting up new entries here, and let the late PMB
+	 * initialization take care of things instead.
+	 *
+	 * Note that we may need to coalesce and merge entries in order
+	 * to reclaim more available PMB slots, which is much more than
+	 * we want to do at this early stage.
+	 */
+	mov	#0, r10
+	mov	#NR_PMB_ENTRIES, r9
+
+	mov	r1, r7		/* temporary PMB_DATA iter */
+
+.Lvalidate_existing_mappings:
+
+	mov.l	@r7, r8
+	and	r0, r8
+	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
+	bt	.Lpmb_done
+
+	add	#1, r10		/* Increment the loop counter */
+	cmp/eq	r9, r10
+	bf/s	.Lvalidate_existing_mappings
+	 add	r4, r7		/* Increment to the next PMB_DATA entry */
+
+	/*
+	 * If we've fallen through, continue with setting up the initial
+	 * mappings.
+	 */
+	mov	r5, r7		/* cached_to_uncached */
 	mov	#0, r10
 
 #ifdef CONFIG_UNCACHED_MAPPING
@@ -252,7 +285,8 @@ ENTRY(_stext)
 	mov.l	6f, r0
 	icbi	@r0
 
-#endif /* !CONFIG_PMB_LEGACY */
+.Lpmb_done:
+#endif /* CONFIG_PMB */
 
 #ifndef CONFIG_SH_NO_BSS_INIT
 	/*
@@ -304,7 +338,7 @@ ENTRY(stack_start)
 6:	.long	sh_cpu_init
 7:	.long	init_thread_union
 
-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+#ifdef CONFIG_PMB
 .LPMB_ADDR:		.long	PMB_ADDR
 .LPMB_DATA:		.long	PMB_DATA
 .LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
......
@@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+	uncached_init();
+
 	plat_early_device_setup();
 
 	/* Let earlyprintk output early console messages */
......
@@ -91,16 +91,6 @@ config PMB
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
-config PMB_LEGACY
-	bool "Support legacy boot mappings for PMB"
-	depends on PMB
-	select 32BIT
-	help
-	  If this option is enabled, fixed PMB mappings are inherited
-	  from the boot loader, and the kernel does not attempt dynamic
-	  management. This is the closest to legacy 29-bit physical mode,
-	  and allows systems to support up to 512MiB of system memory.
-
 config X2TLB
 	def_bool y
 	depends on (CPU_SHX2 || CPU_SHX3) && MMU
......
@@ -245,7 +245,6 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	uncached_init();
 	vsyscall_init();
 
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
......
@@ -52,7 +52,7 @@ struct pmb_entry {
 	struct pmb_entry *link;
 };
 
-static void pmb_unmap_entry(struct pmb_entry *);
+static void pmb_unmap_entry(struct pmb_entry *, int depth);
 
 static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
@@ -115,13 +115,14 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	pmbe = &pmb_entry_list[pos];
 
+	memset(pmbe, 0, sizeof(struct pmb_entry));
+
 	spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
 	pmbe->flags	= flags;
 	pmbe->entry	= pos;
-	pmbe->size	= 0;
 
 	return pmbe;
@@ -133,7 +134,9 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 static void pmb_free(struct pmb_entry *pmbe)
 {
 	__clear_bit(pmbe->entry, pmb_map);
+
 	pmbe->entry	= PMB_NO_ENTRY;
+	pmbe->link	= NULL;
 }
 
 /*
@@ -161,9 +164,6 @@ static __always_inline unsigned long pmb_cache_flags(void)
  */
 static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-	pmbe->flags &= ~PMB_CACHE_MASK;
-	pmbe->flags |= pmb_cache_flags();
-
 	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
 	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
 			mk_pmb_data(pmbe->entry));
@@ -280,7 +280,7 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 	return wanted - size;
 
 out:
-	pmb_unmap_entry(pmbp);
+	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
 
 	return err;
 }
@@ -302,18 +302,40 @@ void pmb_unmap(unsigned long addr)
 	read_unlock(&pmb_rwlock);
 
-	pmb_unmap_entry(pmbe);
+	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
 }
 
-static void pmb_unmap_entry(struct pmb_entry *pmbe)
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
-	unsigned long flags;
+	return (b->vpn == (a->vpn + a->size)) &&
+	       (b->ppn == (a->ppn + a->size)) &&
+	       (b->flags == a->flags);
+}
 
-	if (unlikely(!pmbe))
-		return;
+static bool pmb_size_valid(unsigned long size)
+{
+	int i;
 
-	write_lock_irqsave(&pmb_rwlock, flags);
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return true;
+
+	return false;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return pmb_sizes[i].flag;
+
+	return 0;
+}
 
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
 	do {
 		struct pmb_entry *pmblink = pmbe;
@@ -332,8 +354,18 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
-	} while (pmbe);
+	} while (pmbe && --depth);
+}
+
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+	unsigned long flags;
+
+	if (unlikely(!pmbe))
+		return;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+	__pmb_unmap_entry(pmbe, depth);
 
 	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
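Both size helpers above walk the pre-existing pmb_sizes[] table, which sits outside this hunk; for reference, it is approximately:

	/* Valid SH-4A PMB page sizes (pre-existing table in pmb.c,
	 * reproduced here for reference; approximate). */
	static struct {
		unsigned long size;
		int flag;
	} pmb_sizes[] = {
		{ .size = SZ_512M, .flag = PMB_SZ_512M, },
		{ .size = SZ_128M, .flag = PMB_SZ_128M, },
		{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
		{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
	};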
@@ -342,14 +374,40 @@ static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
 
-static int pmb_synchronize_mappings(void)
+static void __init pmb_notify(void)
 {
-	unsigned int applied = 0;
-	struct pmb_entry *pmbp = NULL;
-	int i, j;
+	int i;
 
 	pr_info("PMB: boot mappings:\n");
 
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
+
 	/*
 	 * Run through the initial boot mappings, log the established
 	 * ones, and blow away anything that falls outside of the valid
@@ -432,10 +490,10 @@ static int pmb_synchronize_mappings(void)
 		/*
 		 * Compare the previous entry against the current one to
 		 * see if the entries span a contiguous mapping. If so,
-		 * setup the entry links accordingly.
+		 * setup the entry links accordingly. Compound mappings
+		 * are later coalesced.
 		 */
-		if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
-		    (pmbe->ppn == (pmbp->ppn + pmbp->size)))
+		if (pmb_can_merge(pmbp, pmbe))
 			pmbp->link = pmbe;
 
 		spin_unlock(&pmbp->lock);
@@ -444,37 +502,150 @@ static int pmb_synchronize_mappings(void)
 		pmbp = pmbe;
 
 		spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
+}
+
+static void __init pmb_merge(struct pmb_entry *head)
+{
+	unsigned long span, newsize;
+	struct pmb_entry *tail;
+	int i = 1, depth = 0;
+
+	span = newsize = head->size;
+
+	tail = head->link;
+	while (tail) {
+		span += tail->size;
+
+		if (pmb_size_valid(span)) {
+			newsize = span;
+			depth = i;
+		}
+
+		/* This is the end of the line.. */
+		if (!tail->link)
+			break;
+
+		tail = tail->link;
+		i++;
+	}
+
+	/*
+	 * The merged page size must be valid.
+	 */
+	if (!pmb_size_valid(newsize))
+		return;
+
+	head->flags &= ~PMB_SZ_MASK;
+	head->flags |= pmb_size_to_flags(newsize);
 
-		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
-			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
-			(data_val & PMB_C) ? "" : "un");
+	head->size = newsize;
+
+	__pmb_unmap_entry(head->link, depth);
+	__set_pmb_entry(head);
+}
+
+static void __init pmb_coalesce(void)
+{
+	unsigned long flags;
+	int i;
 
-		applied++;
+	write_lock_irqsave(&pmb_rwlock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		/*
+		 * We're only interested in compound mappings
+		 */
+		if (!pmbe->link)
+			continue;
+
+		/*
+		 * Nothing to do if it already uses the largest possible
+		 * page size.
+		 */
+		if (pmbe->size == SZ_512M)
+			continue;
+
+		pmb_merge(pmbe);
 	}
 
-	return (applied == 0);
+	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
 
-int pmb_init(void)
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
 {
-	int ret;
+	int i;
 
 	/*
-	 * Sync our software copy of the PMB mappings with those in
-	 * hardware. The mappings in the hardware PMB were either set up
-	 * by the bootloader or very early on by the kernel.
+	 * If the uncached mapping was constructed by the kernel, it will
+	 * already be a reasonable size.
 	 */
-	ret = pmb_synchronize_mappings();
-	if (unlikely(ret == 0))
-		return 0;
+	if (uncached_size == SZ_16M)
+		return;
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+		unsigned long flags;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		if (pmbe->vpn != uncached_start)
+			continue;
+
+		/*
+		 * Found it, now resize it.
+		 */
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		pmbe->size	= SZ_16M;
+		pmbe->flags	&= ~PMB_SZ_MASK;
+		pmbe->flags	|= pmb_size_to_flags(pmbe->size);
+
+		uncached_resize(pmbe->size);
+
+		__set_pmb_entry(pmbe);
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+#endif
 
+void __init pmb_init(void)
+{
+	/* Synchronize software state */
+	pmb_synchronize();
+
+	/* Attempt to combine compound mappings */
+	pmb_coalesce();
+
+#ifdef CONFIG_UNCACHED_MAPPING
+	/* Resize initial mappings, if necessary */
+	pmb_resize();
+#endif
+
+	/* Log them */
+	pmb_notify();
 
 	writel_uncached(0, PMB_IRMCR);
 
 	/* Flush out the TLB */
 	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
 	ctrl_barrier();
-
-	return 0;
 }
 
 bool __in_29bit_mode(void)
......
@@ -26,3 +26,9 @@ void __init uncached_init(void)
 	uncached_start = memory_end;
 	uncached_end = uncached_start + uncached_size;
 }
+
+void __init uncached_resize(unsigned long size)
+{
+	uncached_size = size;
+	uncached_end = uncached_start + uncached_size;
+}