Commit ee3ff576 authored by Anton Blanchard, committed by Linus Torvalds

[PATCH] ppc64: replace mmu_context_queue with idr allocator

Replace the mmu_context_queue structure with the idr allocator.  The
mmu_context_queue allocation was quite large (~200kB), so on most machines
this is a net reduction in memory usage.
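For reference, this is the allocation idiom the new code relies on, condensed
from the arch hunk below; the wrapper name alloc_context_id() and the comments
are editorial additions for illustration, not part of the patch.

#include <linux/idr.h>
#include <linux/spinlock.h>

static spinlock_t mmu_context_lock = SPIN_LOCK_UNLOCKED;
static DEFINE_IDR(mmu_context_idr);

/* Condensed sketch of the allocation path used by init_new_context()
 * in the patch below. */
static int alloc_context_id(void)
{
	int index;
	int err;

again:
	/* Preload the idr's internal node cache.  GFP_KERNEL may sleep,
	 * so this has to happen before taking the spinlock. */
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new(&mmu_context_idr, NULL, &index);
	spin_unlock(&mmu_context_lock);

	/* -EAGAIN means a racing allocator consumed the preloaded nodes;
	 * preload again and retry. */
	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	return index;
}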

We might put a single-entry cache on the front of this so we are more
likely to reuse ppc64 MMU hashtable entries that are still hot in the caches.
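A minimal sketch of what such a single-entry cache might look like, building
on the sketch above; everything here (cached_context, alloc_context_id_cached(),
free_context_id()) is hypothetical and not part of this patch.

/* Hypothetical single-entry cache in front of the idr allocator.
 * -1 means the cache slot is empty. */
static int cached_context = -1;

static int alloc_context_id_cached(void)
{
	int index = -1;

	spin_lock(&mmu_context_lock);
	if (cached_context != -1) {
		/* Hand back the most recently freed context id, so MMU
		 * hashtable entries tagged with it are more likely to
		 * still be hot in the caches. */
		index = cached_context;
		cached_context = -1;
	}
	spin_unlock(&mmu_context_lock);

	if (index != -1)
		return index;

	return alloc_context_id();	/* slow path: the idr sketch above */
}

static void free_context_id(int index)
{
	spin_lock(&mmu_context_lock);
	if (cached_context == -1) {
		/* Park it for quick reuse; it stays allocated in the idr. */
		cached_context = index;
	} else {
		idr_remove(&mmu_context_idr, index);
	}
	spin_unlock(&mmu_context_lock);
}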
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 06a7e66d
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -62,8 +63,6 @@
#include <asm/iommu.h>
#include <asm/abs_addr.h>
struct mmu_context_queue_t mmu_context_queue;
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;
@@ -477,6 +476,59 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
static spinlock_t mmu_context_lock = SPIN_LOCK_UNLOCKED;
static DEFINE_IDR(mmu_context_idr);
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int index;
int err;
again:
if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
return -ENOMEM;
spin_lock(&mmu_context_lock);
err = idr_get_new(&mmu_context_idr, NULL, &index);
spin_unlock(&mmu_context_lock);
if (err == -EAGAIN)
goto again;
else if (err)
return err;
if (index > MAX_CONTEXT) {
idr_remove(&mmu_context_idr, index);
return -ENOMEM;
}
mm->context.id = index;
return 0;
}
void destroy_context(struct mm_struct *mm)
{
spin_lock(&mmu_context_lock);
idr_remove(&mmu_context_idr, mm->context.id);
spin_unlock(&mmu_context_lock);
mm->context.id = NO_CONTEXT;
}
static int __init mmu_context_init(void)
{
int index;
/* Reserve the first (invalid) context*/
idr_pre_get(&mmu_context_idr, GFP_KERNEL);
idr_get_new(&mmu_context_idr, NULL, &index);
BUG_ON(0 != index);
return 0;
}
arch_initcall(mmu_context_init);
/*
* Do very early mm setup.
*/
@@ -486,17 +538,6 @@ void __init mm_init_ppc64(void)
ppc64_boot_msg(0x100, "MM Init");
/* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
* The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
* are stored on a stack/queue for easy allocation and deallocation.
*/
mmu_context_queue.lock = SPIN_LOCK_UNLOCKED;
mmu_context_queue.head = 0;
mmu_context_queue.tail = NUM_USER_CONTEXT-1;
mmu_context_queue.size = NUM_USER_CONTEXT;
for (i = 0; i < NUM_USER_CONTEXT; i++)
mmu_context_queue.elements[i] = i + FIRST_USER_CONTEXT;
/* This is the story of the IO hole... please, keep seated,
* unfortunately, we are out of oxygen masks at the moment.
* So we need some rough way to tell where your big IO hole
......
@@ -2,11 +2,9 @@
#define __PPC64_MMU_CONTEXT_H
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/ppcdebug.h>
#include <asm/cputable.h>
/*
@@ -33,107 +31,15 @@ static inline int sched_find_first_bit(unsigned long *b)
return __ffs(b[2]) + 128;
}
#define NO_CONTEXT 0
#define FIRST_USER_CONTEXT 1
#define LAST_USER_CONTEXT 0x8000 /* Same as PID_MAX for now... */
#define NUM_USER_CONTEXT (LAST_USER_CONTEXT-FIRST_USER_CONTEXT)
/* Choose whether we want to implement our context
* number allocator as a LIFO or FIFO queue.
*/
#if 1
#define MMU_CONTEXT_LIFO
#else
#define MMU_CONTEXT_FIFO
#endif
struct mmu_context_queue_t {
spinlock_t lock;
long head;
long tail;
long size;
mm_context_id_t elements[LAST_USER_CONTEXT];
};
extern struct mmu_context_queue_t mmu_context_queue;
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
* The context number queue has underflowed.
* Meaning: we tried to push a context number that was freed
* back onto the context queue and the queue was already full.
*/
static inline void
mmu_context_underflow(void)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
printk(KERN_DEBUG "mmu_context_underflow\n");
panic("mmu_context_underflow");
}
/*
* Set up the context for a new address space.
*/
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
long head;
unsigned long flags;
/* This does the right thing across a fork (I hope) */
spin_lock_irqsave(&mmu_context_queue.lock, flags);
if (mmu_context_queue.size <= 0) {
spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
return -ENOMEM;
}
#define NO_CONTEXT 0
#define MAX_CONTEXT (0x100000-1)
head = mmu_context_queue.head;
mm->context.id = mmu_context_queue.elements[head];
head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
mmu_context_queue.head = head;
mmu_context_queue.size--;
spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
return 0;
}
/*
* We're finished using the context for an address space.
*/
static inline void
destroy_context(struct mm_struct *mm)
{
long index;
unsigned long flags;
spin_lock_irqsave(&mmu_context_queue.lock, flags);
if (mmu_context_queue.size >= NUM_USER_CONTEXT) {
spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
mmu_context_underflow();
}
#ifdef MMU_CONTEXT_LIFO
index = mmu_context_queue.head;
index = (index > 0) ? index-1 : LAST_USER_CONTEXT-1;
mmu_context_queue.head = index;
#else
index = mmu_context_queue.tail;
index = (index < LAST_USER_CONTEXT-1) ? index+1 : 0;
mmu_context_queue.tail = index;
#endif
mmu_context_queue.size++;
mmu_context_queue.elements[index] = mm->context.id;
spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
}
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
......