Commit ae959b6d authored by David S. Miller

Merge bk://kernel.bkbits.net/wesolows/sparc32-2.6

into nuts.davemloft.net:/disk1/BK/sparc-2.6
parents 3078adde c7dd2ca7
...@@ -42,6 +42,9 @@ int bit_map_string_get(struct bit_map *t, int len, int align) ...@@ -42,6 +42,9 @@ int bit_map_string_get(struct bit_map *t, int len, int align)
BUG(); BUG();
spin_lock(&t->lock); spin_lock(&t->lock);
if (len < t->last_size)
offset = t->first_free;
else
offset = t->last_off & ~align1; offset = t->last_off & ~align1;
count = 0; count = 0;
for (;;) { for (;;) {
...@@ -71,9 +74,14 @@ int bit_map_string_get(struct bit_map *t, int len, int align) ...@@ -71,9 +74,14 @@ int bit_map_string_get(struct bit_map *t, int len, int align)
if (i == len) { if (i == len) {
for (i = 0; i < len; i++) for (i = 0; i < len; i++)
__set_bit(offset + i, t->map); __set_bit(offset + i, t->map);
if (offset == t->first_free)
t->first_free = find_next_zero_bit
(t->map, t->size,
t->first_free + len);
if ((t->last_off = offset + len) >= t->size) if ((t->last_off = offset + len) >= t->size)
t->last_off = 0; t->last_off = 0;
t->used += len; t->used += len;
t->last_size = len;
spin_unlock(&t->lock); spin_unlock(&t->lock);
return offset; return offset;
} }
...@@ -96,6 +104,8 @@ void bit_map_clear(struct bit_map *t, int offset, int len) ...@@ -96,6 +104,8 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
BUG(); BUG();
__clear_bit(offset + i, t->map); __clear_bit(offset + i, t->map);
} }
if (offset < t->first_free)
t->first_free = offset;
t->used -= len; t->used -= len;
spin_unlock(&t->lock); spin_unlock(&t->lock);
} }
...@@ -111,4 +121,6 @@ void bit_map_init(struct bit_map *t, unsigned long *map, int size) ...@@ -111,4 +121,6 @@ void bit_map_init(struct bit_map *t, unsigned long *map, int size)
spin_lock_init(&t->lock); spin_lock_init(&t->lock);
t->map = map; t->map = map;
t->size = size; t->size = size;
t->last_size = 0;
t->first_free = 0;
} }
...@@ -369,8 +369,7 @@ static void __init taint_real_pages(void) ...@@ -369,8 +369,7 @@ static void __init taint_real_pages(void)
end = start + sp_banks[i].num_bytes; end = start + sp_banks[i].num_bytes;
while (start < end) { while (start < end) {
set_bit (start >> 20, set_bit(start >> 20, sparc_valid_addr_bitmap);
sparc_valid_addr_bitmap);
start += PAGE_SIZE; start += PAGE_SIZE;
} }
} }
...@@ -400,6 +399,7 @@ void __init mem_init(void) ...@@ -400,6 +399,7 @@ void __init mem_init(void)
int codepages = 0; int codepages = 0;
int datapages = 0; int datapages = 0;
int initpages = 0; int initpages = 0;
int reservedpages = 0;
int i; int i;
highmem_start_page = pfn_to_page(highstart_pfn); highmem_start_page = pfn_to_page(highstart_pfn);
...@@ -434,12 +434,14 @@ void __init mem_init(void) ...@@ -434,12 +434,14 @@ void __init mem_init(void)
max_mapnr = last_valid_pfn - pfn_base; max_mapnr = last_valid_pfn - pfn_base;
high_memory = __va(max_low_pfn << PAGE_SHIFT); high_memory = __va(max_low_pfn << PAGE_SHIFT);
num_physpages = totalram_pages = free_all_bootmem(); totalram_pages = free_all_bootmem();
for (i = 0; sp_banks[i].num_bytes != 0; i++) { for (i = 0; sp_banks[i].num_bytes != 0; i++) {
unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT;
if (end_pfn <= highstart_pfn) if (end_pfn <= highstart_pfn)
continue; continue;
...@@ -458,13 +460,20 @@ void __init mem_init(void) ...@@ -458,13 +460,20 @@ void __init mem_init(void)
initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin)); initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
printk(KERN_INFO "Memory: %dk available (%dk kernel code, %dk data, %dk init, %ldk highmem) [%08lx,%08lx]\n", /* Ignore memory holes for the purpose of counting reserved pages */
nr_free_pages() << (PAGE_SHIFT-10), for (i=0; i < max_low_pfn; i++)
if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap)
&& PageReserved(pfn_to_page(i)))
reservedpages++;
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT - 10),
codepages << (PAGE_SHIFT-10), codepages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT - 10),
datapages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10), initpages << (PAGE_SHIFT-10),
totalhigh_pages << (PAGE_SHIFT-10), totalhigh_pages << (PAGE_SHIFT-10));
(unsigned long)PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
} }
void free_initmem (void) void free_initmem (void)
......
...@@ -276,6 +276,23 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) ...@@ -276,6 +276,23 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE_SOFT - 1)); ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE_SOFT - 1));
} }
/* Extract the swap type (swapfile index) from a SRMMU swap entry. */
static unsigned long srmmu_swp_type(swp_entry_t entry)
{
	/* Type sits just above the reserved PTE status bits. */
	unsigned long bits = entry.val >> SRMMU_SWP_TYPE_SHIFT;

	return bits & SRMMU_SWP_TYPE_MASK;
}
/* Extract the page offset within the swapfile from a SRMMU swap entry. */
static unsigned long srmmu_swp_offset(swp_entry_t entry)
{
	/* Offset occupies the bits above the type field. */
	unsigned long bits = entry.val >> SRMMU_SWP_OFF_SHIFT;

	return bits & SRMMU_SWP_OFF_MASK;
}
static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
{
return (swp_entry_t) {
(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
/* /*
* size: bytes to allocate in the nocache area. * size: bytes to allocate in the nocache area.
* align: bytes, number to align at. * align: bytes, number to align at.
...@@ -2205,6 +2222,10 @@ void __init ld_mmu_srmmu(void) ...@@ -2205,6 +2222,10 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
......
...@@ -1863,6 +1863,23 @@ pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address) ...@@ -1863,6 +1863,23 @@ pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address)
((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1)); ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
} }
/* Extract the swap type (swapfile index) from a sun4c swap entry. */
static unsigned long sun4c_swp_type(swp_entry_t entry)
{
	unsigned long val = entry.val;

	/* On sun4c the type lives in the low bits of the entry. */
	return val & SUN4C_SWP_TYPE_MASK;
}
/* Extract the page offset within the swapfile from a sun4c swap entry. */
static unsigned long sun4c_swp_offset(swp_entry_t entry)
{
	/* Offset lives directly above the type field. */
	unsigned long bits = entry.val >> SUN4C_SWP_OFF_SHIFT;

	return bits & SUN4C_SWP_OFF_MASK;
}
static swp_entry_t sun4c_swp_entry(unsigned long type, unsigned long offset)
{
return (swp_entry_t) {
(offset & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT
| (type & SUN4C_SWP_TYPE_MASK) };
}
static void sun4c_free_pte_slow(pte_t *pte) static void sun4c_free_pte_slow(pte_t *pte)
{ {
free_page((unsigned long)pte); free_page((unsigned long)pte);
...@@ -2242,6 +2259,10 @@ void __init ld_mmu_sun4c(void) ...@@ -2242,6 +2259,10 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_type, sun4c_swp_type, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
......
...@@ -15,6 +15,8 @@ struct bit_map { ...@@ -15,6 +15,8 @@ struct bit_map {
int size; int size;
int used; int used;
int last_off; int last_off;
int last_size;
int first_free;
}; };
extern int bit_map_string_get(struct bit_map *t, int len, int align); extern int bit_map_string_get(struct bit_map *t, int len, int align);
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h> #include <asm/types.h>
#ifdef CONFIG_SUN4 #ifdef CONFIG_SUN4
#include <asm/pgtsun4.h> #include <asm/pgtsun4.h>
...@@ -401,9 +402,14 @@ BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int) ...@@ -401,9 +402,14 @@ BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
extern int invalid_segment; extern int invalid_segment;
/* Encode and de-code a swap entry */ /* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 2) & 0x7f) BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
#define __swp_offset(x) (((x).val >> 9) & 0x3ffff) BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
#define __swp_entry(type,offset) ((swp_entry_t) { (((type) & 0x7f) << 2) | (((offset) & 0x3ffff) << 9) }) BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
......
...@@ -90,6 +90,22 @@ ...@@ -90,6 +90,22 @@
#define SRMMU_CHG_MASK (0xffffff00 | SRMMU_REF | SRMMU_DIRTY) #define SRMMU_CHG_MASK (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
/* SRMMU swap entry encoding
*
* We use 5 bits for the type and 19 for the offset. This gives us
* 32 swapfiles of 4GB each. Encoding looks like:
*
* oooooooooooooooooootttttRRRRRRRR
* fedcba9876543210fedcba9876543210
*
* The bottom 8 bits are reserved for protection and status bits, especially
* FILE and PRESENT.
*/
#define SRMMU_SWP_TYPE_MASK 0x1f
#define SRMMU_SWP_TYPE_SHIFT SRMMU_PTE_FILE_SHIFT
#define SRMMU_SWP_OFF_MASK 0x7ffff
#define SRMMU_SWP_OFF_SHIFT (SRMMU_PTE_FILE_SHIFT + 5)
/* Some day I will implement true fine grained access bits for /* Some day I will implement true fine grained access bits for
* user pages because the SRMMU gives us the capabilities to * user pages because the SRMMU gives us the capabilities to
* enforce all the protection levels that vma's can have. * enforce all the protection levels that vma's can have.
......
...@@ -74,6 +74,21 @@ ...@@ -74,6 +74,21 @@
#define SUN4C_PAGE_KERNEL __pgprot(_SUN4C_READABLE|_SUN4C_WRITEABLE|\ #define SUN4C_PAGE_KERNEL __pgprot(_SUN4C_READABLE|_SUN4C_WRITEABLE|\
_SUN4C_PAGE_DIRTY|_SUN4C_PAGE_PRIV) _SUN4C_PAGE_DIRTY|_SUN4C_PAGE_PRIV)
/* SUN4C swap entry encoding
*
* We use 5 bits for the type and 19 for the offset. This gives us
* 32 swapfiles of 4GB each. Encoding looks like:
*
* RRRRRRRRooooooooooooooooooottttt
* fedcba9876543210fedcba9876543210
*
* The top 8 bits are reserved for protection and status bits, especially
* FILE and PRESENT.
*/
#define SUN4C_SWP_TYPE_MASK 0x1f
#define SUN4C_SWP_OFF_MASK 0x7ffff
#define SUN4C_SWP_OFF_SHIFT 5
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
static inline unsigned long sun4c_get_synchronous_error(void) static inline unsigned long sun4c_get_synchronous_error(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment