Commit 1d5cfcdf authored by Paul Mundt

sh: Kill off some superfluous legacy PMB special casing.

The __va()/__pa() offsets and the boot memory offsets are consistent for
all PMB users, so there is no need to special case these for legacy PMB.
Kill the special casing off and depend on CONFIG_PMB across the board.
This also fixes up yet another addressing bug for sh64.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent efd54ea3
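As a minimal illustration of the translation this patch unifies, here is a userspace sketch of the single __pa()/__va() offset that now applies to every PMB configuration. The PAGE_OFFSET and __MEMORY_START values below are assumptions chosen for the example, not taken from the patch or from any particular board.

/* Sketch only: mirrors the unified __pa()/__va() arithmetic in the hunk below. */
#include <stdio.h>

#define PAGE_OFFSET    0x80000000UL  /* assumed kernel virtual base      */
#define __MEMORY_START 0x08000000UL  /* assumed physical start of memory */

#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + __MEMORY_START)
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - __MEMORY_START))

int main(void)
{
	unsigned long virt = PAGE_OFFSET + 0x1000;	/* some kernel virtual address */
	unsigned long phys = __pa(virt);		/* 0x08001000 with the values above */

	printf("virt 0x%08lx -> phys 0x%08lx\n", virt, phys);
	printf("phys 0x%08lx -> virt %p\n", phys, __va(phys));
	return 0;
}

Per the commit message, the same offset holds for legacy PMB as well, which is why the PMB_OFFSET special case removed below was redundant.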
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -127,12 +127,7 @@ typedef struct page *pgtable_t;
  * is not visible (it is part of the PMB mapping) and so needs to be
  * added or subtracted as required.
  */
-#if defined(CONFIG_PMB_LEGACY)
-/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
-#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
-#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET))
-#elif defined(CONFIG_32BIT)
+#ifdef CONFIG_PMB
 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
 #else
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -14,11 +14,10 @@ OUTPUT_ARCH(sh)
 #include <asm/cache.h>
 #include <asm/vmlinux.lds.h>
 
-#if defined(CONFIG_29BIT) || defined(CONFIG_SUPERH64) || \
-    defined(CONFIG_PMB_LEGACY)
-#define MEMORY_OFFSET __MEMORY_START
-#else
+#ifdef CONFIG_PMB
 #define MEMORY_OFFSET 0
+#else
+#define MEMORY_OFFSET __MEMORY_START
 #endif
 
 ENTRY(_start)
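For context, the vmlinux.lds.S hunk changes which configurations link with a memory offset. A minimal, illustrative sketch of that selection logic follows; the configuration options are modelled as plain ints and the __MEMORY_START value is an assumed example, so this is not kernel code.

/* Sketch only: the old vs. new MEMORY_OFFSET selection from the hunk above. */
#include <stdio.h>

#define MEMORY_START 0x08000000UL	/* assumed __MEMORY_START */

/* Old rule: legacy PMB was grouped with 29-bit and sh64 kernels. */
static unsigned long old_offset(int is_29bit, int is_sh64, int pmb_legacy)
{
	return (is_29bit || is_sh64 || pmb_legacy) ? MEMORY_START : 0;
}

/* New rule: any PMB kernel uses a zero offset; everything else keeps __MEMORY_START. */
static unsigned long new_offset(int has_pmb)
{
	return has_pmb ? 0 : MEMORY_START;
}

int main(void)
{
	/* A legacy PMB kernel used to be biased by __MEMORY_START... */
	printf("legacy PMB, old rule: 0x%08lx\n", old_offset(0, 0, 1));
	/* ...and is now linked like every other PMB kernel. */
	printf("legacy PMB, new rule: 0x%08lx\n", new_offset(1));
	return 0;
}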