Commit d3059cbc authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: cleanup lmb code

From: Anton Blanchard <anton@samba.org>

- remove LMB_MEMORY_AREA and LMB_IO_AREA; we only allocate/reserve memory
  areas now
- remove lmb_property->type, lmb_region->iosize and lmb_region->lcd_size,
  which are no longer used (a sketch of the resulting structures follows
  this list)
- bump the number of regions from 64 to 128; we will hit the old limit
  sooner or later on our big boxes (for example, the reserved array fills
  up once we have more than 64 PCI host bridges)
- make all the lmb stuff __init
- no need to explicitly zero the struct lmb lmb variable now that we zero
  the BSS early
- we had two functions that dump the lmb array; kill one of them
- move the inline helpers into lmb.c, since they are only ever called from
  there
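
For reference, here is a rough sketch of the trimmed-down structures this
patch leaves behind, pieced together from the include/asm-ppc64/lmb.h hunk
further down; the two scalar members of struct lmb are only inferred from
how lmb.debug and _lmb->rmo_size are used elsewhere in the patch, so treat
their names and ordering as an assumption:

	#define MAX_LMB_REGIONS 128	/* was 64 */

	struct lmb_property {
		unsigned long base;
		unsigned long physbase;
		unsigned long size;	/* ->type is gone */
	};

	struct lmb_region {
		unsigned long cnt;
		unsigned long size;	/* ->iosize and ->lcd_size are gone */
		struct lmb_property region[MAX_LMB_REGIONS+1];
	};

	struct lmb {			/* declared __initdata, no explicit initializer */
		unsigned long debug;	/* inferred from lmb.debug uses */
		unsigned long rmo_size;	/* inferred from _lmb->rmo_size uses */
		struct lmb_region memory;
		struct lmb_region reserved;
	};

With ->type gone there is only one kind of region, so lmb_add() and
lmb_reserve() become thin wrappers that pass their region array to a
three-argument lmb_add_region(), as the lmb.c hunks below show.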
parent a930a896
/*
 *
 * Procedures for interfacing to Open Firmware.
 *
 * Peter Bergner, IBM Corp.  June 2001.
@@ -13,46 +12,63 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/bitops.h>
#include <asm/udbg.h>
extern unsigned long klimit;
extern unsigned long reloc_offset(void);

static long lmb_add_region(struct lmb_region *, unsigned long, unsigned long, unsigned long);

struct lmb lmb = {
	0, 0,
	{0,0,0,0,{{0,0,0}}},
	{0,0,0,0,{{0,0,0}}}
};

struct lmb lmb __initdata;

static unsigned long __init
lmb_addrs_overlap(unsigned long base1, unsigned long size1,
		  unsigned long base2, unsigned long size2)
{
	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
}

static long __init
lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
		   unsigned long base2, unsigned long size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
static long __init
lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
unsigned long base1 = rgn->region[r1].base;
unsigned long size1 = rgn->region[r1].size;
unsigned long base2 = rgn->region[r2].base;
unsigned long size2 = rgn->region[r2].size;
return lmb_addrs_adjacent(base1, size1, base2, size2);
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void
static void __init
lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
unsigned long i;
rgn->region[r1].size += rgn->region[r2].size;
for (i=r2; i < rgn->cnt-1 ;i++) {
for (i=r2; i < rgn->cnt-1; i++) {
rgn->region[i].base = rgn->region[i+1].base;
rgn->region[i].physbase = rgn->region[i+1].physbase;
rgn->region[i].size = rgn->region[i+1].size;
rgn->region[i].type = rgn->region[i+1].type;
}
rgn->cnt--;
}
/* This routine called with relocation disabled. */
void
void __init
lmb_init(void)
{
unsigned long offset = reloc_offset();
@@ -63,38 +79,20 @@ lmb_init(void)
*/
_lmb->memory.region[0].base = 0;
_lmb->memory.region[0].size = 0;
_lmb->memory.region[0].type = LMB_MEMORY_AREA;
_lmb->memory.cnt = 1;
/* Ditto. */
_lmb->reserved.region[0].base = 0;
_lmb->reserved.region[0].size = 0;
_lmb->reserved.region[0].type = LMB_MEMORY_AREA;
_lmb->reserved.cnt = 1;
}
/* This is only used here, it doesn't deserve to be in bitops.h */
static __inline__ long cnt_trailing_zeros(unsigned long mask)
{
long cnt;
asm(
" addi %0,%1,-1 \n\
andc %0,%0,%1 \n\
cntlzd %0,%0 \n\
subfic %0,%0,64"
: "=r" (cnt)
: "r" (mask));
return cnt;
}
/* This routine called with relocation disabled. */
void
void __init
lmb_analyze(void)
{
unsigned long i;
unsigned long mem_size = 0;
unsigned long io_size = 0;
unsigned long size_mask = 0;
unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
@@ -102,13 +100,9 @@ lmb_analyze(void)
unsigned long physbase = 0;
#endif
for (i=0; i < _lmb->memory.cnt ;i++) {
unsigned long lmb_type = _lmb->memory.region[i].type;
for (i=0; i < _lmb->memory.cnt; i++) {
unsigned long lmb_size;
if ( lmb_type != LMB_MEMORY_AREA )
continue;
lmb_size = _lmb->memory.region[i].size;
#ifdef CONFIG_MSCHUNKS
@@ -121,84 +115,20 @@ lmb_analyze(void)
size_mask |= lmb_size;
}
#ifdef CONFIG_MSCHUNKS
for (i=0; i < _lmb->memory.cnt ;i++) {
unsigned long lmb_type = _lmb->memory.region[i].type;
unsigned long lmb_size;
if ( lmb_type != LMB_IO_AREA )
continue;
lmb_size = _lmb->memory.region[i].size;
_lmb->memory.region[i].physbase = physbase;
physbase += lmb_size;
io_size += lmb_size;
size_mask |= lmb_size;
}
#endif /* CONFIG_MSCHUNKS */
_lmb->memory.size = mem_size;
_lmb->memory.iosize = io_size;
_lmb->memory.lcd_size = (1UL << cnt_trailing_zeros(size_mask));
}
/* This routine called with relocation disabled. */
long
lmb_add(unsigned long base, unsigned long size)
{
unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
struct lmb_region *_rgn = &(_lmb->memory);
/* On pSeries LPAR systems, the first LMB is our RMO region. */
if ( base == 0 )
_lmb->rmo_size = size;
return lmb_add_region(_rgn, base, size, LMB_MEMORY_AREA);
}
#ifdef CONFIG_MSCHUNKS
/* This routine called with relocation disabled. */
long
lmb_add_io(unsigned long base, unsigned long size)
{
unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
struct lmb_region *_rgn = &(_lmb->memory);
return lmb_add_region(_rgn, base, size, LMB_IO_AREA);
}
#endif /* CONFIG_MSCHUNKS */
long
lmb_reserve(unsigned long base, unsigned long size)
{
unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
struct lmb_region *_rgn = &(_lmb->reserved);
return lmb_add_region(_rgn, base, size, LMB_MEMORY_AREA);
}
/* This routine called with relocation disabled. */
static long
lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size,
unsigned long type)
static long __init
lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
{
unsigned long i, coalesced = 0;
long adjacent;
/* First try and coalesce this LMB with another. */
for (i=0; i < rgn->cnt ;i++) {
for (i=0; i < rgn->cnt; i++) {
unsigned long rgnbase = rgn->region[i].base;
unsigned long rgnsize = rgn->region[i].size;
unsigned long rgntype = rgn->region[i].type;
if ( rgntype != type )
continue;
adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
if ( adjacent > 0 ) {
@@ -227,17 +157,15 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size,
}
/* Couldn't coalesce the LMB, so add it to the sorted table. */
for (i=rgn->cnt-1; i >= 0 ;i--) {
for (i=rgn->cnt-1; i >= 0; i--) {
if (base < rgn->region[i].base) {
rgn->region[i+1].base = rgn->region[i].base;
rgn->region[i+1].physbase = rgn->region[i].physbase;
rgn->region[i+1].size = rgn->region[i].size;
rgn->region[i+1].type = rgn->region[i].type;
} else {
rgn->region[i+1].base = base;
rgn->region[i+1].physbase = lmb_abs_to_phys(base);
rgn->region[i+1].size = size;
rgn->region[i+1].type = type;
break;
}
}
@@ -246,12 +174,38 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size,
return 0;
}
long
/* This routine called with relocation disabled. */
long __init
lmb_add(unsigned long base, unsigned long size)
{
unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
struct lmb_region *_rgn = &(_lmb->memory);
/* On pSeries LPAR systems, the first LMB is our RMO region. */
if ( base == 0 )
_lmb->rmo_size = size;
return lmb_add_region(_rgn, base, size);
}
long __init
lmb_reserve(unsigned long base, unsigned long size)
{
unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
struct lmb_region *_rgn = &(_lmb->reserved);
return lmb_add_region(_rgn, base, size);
}
long __init
lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
{
unsigned long i;
for (i=0; i < rgn->cnt ;i++) {
for (i=0; i < rgn->cnt; i++) {
unsigned long rgnbase = rgn->region[i].base;
unsigned long rgnsize = rgn->region[i].size;
if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
@@ -262,13 +216,13 @@ lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long si
return (i < rgn->cnt) ? i : -1;
}
unsigned long
unsigned long __init
lmb_alloc(unsigned long size, unsigned long align)
{
return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}
unsigned long
unsigned long __init
lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
{
long i, j;
@@ -278,13 +232,9 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
struct lmb_region *_mem = &(_lmb->memory);
struct lmb_region *_rsv = &(_lmb->reserved);
for (i=_mem->cnt-1; i >= 0 ;i--) {
for (i=_mem->cnt-1; i >= 0; i--) {
unsigned long lmbbase = _mem->region[i].base;
unsigned long lmbsize = _mem->region[i].size;
unsigned long lmbtype = _mem->region[i].type;
if ( lmbtype != LMB_MEMORY_AREA )
continue;
if ( max_addr == LMB_ALLOC_ANYWHERE )
base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
@@ -305,12 +255,12 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
if ( i < 0 )
return 0;
lmb_add_region(_rsv, base, size, LMB_MEMORY_AREA);
lmb_add_region(_rsv, base, size);
return base;
}
unsigned long
unsigned long __init
lmb_phys_mem_size(void)
{
unsigned long offset = reloc_offset();
@@ -327,7 +277,7 @@ lmb_phys_mem_size(void)
#endif /* CONFIG_MSCHUNKS */
}
unsigned long
unsigned long __init
lmb_end_of_DRAM(void)
{
unsigned long offset = reloc_offset();
@@ -335,9 +285,7 @@ lmb_end_of_DRAM(void)
struct lmb_region *_mem = &(_lmb->memory);
unsigned long idx;
for(idx=_mem->cnt-1; idx >= 0 ;idx--) {
if ( _mem->region[idx].type != LMB_MEMORY_AREA )
continue;
for(idx=_mem->cnt-1; idx >= 0; idx--) {
#ifdef CONFIG_MSCHUNKS
return (_mem->region[idx].physbase + _mem->region[idx].size);
#else
@@ -348,8 +296,7 @@ lmb_end_of_DRAM(void)
return 0;
}
unsigned long
unsigned long __init
lmb_abs_to_phys(unsigned long aa)
{
unsigned long i, pa = aa;
@@ -357,7 +304,7 @@ lmb_abs_to_phys(unsigned long aa)
struct lmb *_lmb = PTRRELOC(&lmb);
struct lmb_region *_mem = &(_lmb->memory);
for (i=0; i < _mem->cnt ;i++) {
for (i=0; i < _mem->cnt; i++) {
unsigned long lmbbase = _mem->region[i].base;
unsigned long lmbsize = _mem->region[i].size;
if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) {
@@ -368,47 +315,3 @@ lmb_abs_to_phys(unsigned long aa)
return pa;
}
void
lmb_dump(char *str)
{
unsigned long i;
udbg_printf("\nlmb_dump: %s\n", str);
udbg_printf(" debug = %s\n",
(lmb.debug) ? "TRUE" : "FALSE");
udbg_printf(" memory.cnt = %d\n",
lmb.memory.cnt);
udbg_printf(" memory.size = 0x%lx\n",
lmb.memory.size);
udbg_printf(" memory.lcd_size = 0x%lx\n",
lmb.memory.lcd_size);
for (i=0; i < lmb.memory.cnt ;i++) {
udbg_printf(" memory.region[%d].base = 0x%lx\n",
i, lmb.memory.region[i].base);
udbg_printf(" .physbase = 0x%lx\n",
lmb.memory.region[i].physbase);
udbg_printf(" .size = 0x%lx\n",
lmb.memory.region[i].size);
udbg_printf(" .type = 0x%lx\n",
lmb.memory.region[i].type);
}
udbg_printf("\n");
udbg_printf(" reserved.cnt = %d\n",
lmb.reserved.cnt);
udbg_printf(" reserved.size = 0x%lx\n",
lmb.reserved.size);
udbg_printf(" reserved.lcd_size = 0x%lx\n",
lmb.reserved.lcd_size);
for (i=0; i < lmb.reserved.cnt ;i++) {
udbg_printf(" reserved.region[%d].base = 0x%lx\n",
i, lmb.reserved.region[i].base);
udbg_printf(" .physbase = 0x%lx\n",
lmb.reserved.region[i].physbase);
udbg_printf(" .size = 0x%lx\n",
lmb.reserved.region[i].size);
udbg_printf(" .type = 0x%lx\n",
lmb.reserved.region[i].type);
}
}
@@ -699,9 +699,6 @@ prom_dump_lmb(void)
prom_print(RELOC(" memory.size = 0x"));
prom_print_hex(_lmb->memory.size);
prom_print_nl();
prom_print(RELOC(" memory.lcd_size = 0x"));
prom_print_hex(_lmb->memory.lcd_size);
prom_print_nl();
for (i=0; i < _lmb->memory.cnt ;i++) {
prom_print(RELOC(" memory.region[0x"));
prom_print_hex(i);
@@ -714,9 +711,6 @@ prom_dump_lmb(void)
prom_print(RELOC(" .size = 0x"));
prom_print_hex(_lmb->memory.region[i].size);
prom_print_nl();
prom_print(RELOC(" .type = 0x"));
prom_print_hex(_lmb->memory.region[i].type);
prom_print_nl();
}
prom_print_nl();
@@ -726,9 +720,6 @@ prom_dump_lmb(void)
prom_print(RELOC(" reserved.size = 0x"));
prom_print_hex(_lmb->reserved.size);
prom_print_nl();
prom_print(RELOC(" reserved.lcd_size = 0x"));
prom_print_hex(_lmb->reserved.lcd_size);
prom_print_nl();
for (i=0; i < _lmb->reserved.cnt ;i++) {
prom_print(RELOC(" reserved.region[0x"));
prom_print_hex(i);
@@ -741,9 +732,6 @@ prom_dump_lmb(void)
prom_print(RELOC(" .size = 0x"));
prom_print_hex(_lmb->reserved.region[i].size);
prom_print_nl();
prom_print(RELOC(" .type = 0x"));
prom_print_hex(_lmb->reserved.region[i].type);
prom_print_nl();
}
}
#endif /* DEBUG_PROM */
@@ -699,10 +699,6 @@ void __init do_init_bootmem(void)
/* add all physical memory to the bootmem map */
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long physbase, size;
unsigned long type = lmb.memory.region[i].type;
if ( type != LMB_MEMORY_AREA )
continue;
physbase = lmb.memory.region[i].physbase;
size = lmb.memory.region[i].size;
@@ -743,12 +739,8 @@ static int __init setup_kcore(void)
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long physbase, size;
unsigned long type = lmb.memory.region[i].type;
struct kcore_list *kcore_mem;
if (type != LMB_MEMORY_AREA)
continue;
physbase = lmb.memory.region[i].physbase;
size = lmb.memory.region[i].size;
@@ -257,10 +257,6 @@ void __init do_init_bootmem(void)
for (i = 0; i < lmb.memory.cnt; i++) {
unsigned long physbase, size;
unsigned long type = lmb.memory.region[i].type;
if (type != LMB_MEMORY_AREA)
continue;
physbase = lmb.memory.region[i].physbase;
size = lmb.memory.region[i].size;
@@ -13,12 +13,12 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/init.h>
#include <asm/prom.h>
extern unsigned long reloc_offset(void);
#define MAX_LMB_REGIONS 64
#define MAX_LMB_REGIONS 128
union lmb_reg_property {
struct reg_property32 addr32[MAX_LMB_REGIONS];
@@ -26,24 +26,17 @@ union lmb_reg_property {
struct reg_property_pmac addrPM[MAX_LMB_REGIONS];
};
#define LMB_MEMORY_AREA 1
#define LMB_IO_AREA 2
#define LMB_ALLOC_ANYWHERE 0
#define LMB_ALLOC_FIRST4GBYTE (1UL<<32)
struct lmb_property {
unsigned long base;
unsigned long physbase;
unsigned long size;
unsigned long type;
};
struct lmb_region {
unsigned long cnt;
unsigned long size;
unsigned long iosize;
unsigned long lcd_size; /* Least Common Denominator */
struct lmb_property region[MAX_LMB_REGIONS+1];
};
@@ -54,63 +47,17 @@ struct lmb {
struct lmb_region reserved;
};
extern struct lmb lmb;
extern void lmb_init(void);
extern void lmb_analyze(void);
extern long lmb_add(unsigned long, unsigned long);
#ifdef CONFIG_MSCHUNKS
extern long lmb_add_io(unsigned long base, unsigned long size);
#endif /* CONFIG_MSCHUNKS */
extern long lmb_reserve(unsigned long, unsigned long);
extern unsigned long lmb_alloc(unsigned long, unsigned long);
extern unsigned long lmb_alloc_base(unsigned long, unsigned long, unsigned long);
extern unsigned long lmb_phys_mem_size(void);
extern unsigned long lmb_end_of_DRAM(void);
extern unsigned long lmb_abs_to_phys(unsigned long);
extern void lmb_dump(char *);
static inline unsigned long
lmb_addrs_overlap(unsigned long base1, unsigned long size1,
unsigned long base2, unsigned long size2)
{
return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
}
static inline long
lmb_regions_overlap(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
unsigned long base1 = rgn->region[r1].base;
unsigned long size1 = rgn->region[r1].size;
unsigned long base2 = rgn->region[r2].base;
unsigned long size2 = rgn->region[r2].size;
return lmb_addrs_overlap(base1,size1,base2,size2);
}
static inline long
lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
unsigned long base2, unsigned long size2)
{
if ( base2 == base1 + size1 ) {
return 1;
} else if ( base1 == base2 + size2 ) {
return -1;
}
return 0;
}
static inline long
lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
unsigned long base1 = rgn->region[r1].base;
unsigned long size1 = rgn->region[r1].size;
unsigned long type1 = rgn->region[r1].type;
unsigned long base2 = rgn->region[r2].base;
unsigned long size2 = rgn->region[r2].size;
unsigned long type2 = rgn->region[r2].type;
return (type1 == type2) && lmb_addrs_adjacent(base1,size1,base2,size2);
}
extern struct lmb lmb __initdata;
extern void __init lmb_init(void);
extern void __init lmb_analyze(void);
extern long __init lmb_add(unsigned long, unsigned long);
extern long __init lmb_reserve(unsigned long, unsigned long);
extern unsigned long __init lmb_alloc(unsigned long, unsigned long);
extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
unsigned long);
extern unsigned long __init lmb_phys_mem_size(void);
extern unsigned long __init lmb_end_of_DRAM(void);
extern unsigned long __init lmb_abs_to_phys(unsigned long);
#endif /* _PPC64_LMB_H */