Commit 944b380e authored by Paul Mackerras
parents fff5f528 c7eb7347
@@ -345,18 +345,12 @@ EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
  * Enabling/disabling interrupts for the entire performance monitoring unit.
  */
 
-u32 cbe_query_pm_interrupts(u32 cpu)
-{
-	return cbe_read_pm(cpu, pm_status);
-}
-EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
-
-u32 cbe_clear_pm_interrupts(u32 cpu)
+u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
 {
 	/* Reading pm_status clears the interrupt bits. */
-	return cbe_query_pm_interrupts(cpu);
+	return cbe_read_pm(cpu, pm_status);
 }
-EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
+EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);
 
@@ -371,7 +365,7 @@ EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
 
 void cbe_disable_pm_interrupts(u32 cpu)
 {
-	cbe_clear_pm_interrupts(cpu);
+	cbe_get_and_clear_pm_interrupts(cpu);
 	cbe_write_pm(cpu, pm_status, 0);
 }
 EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
...
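Note on the rename above: reading pm_status is destructive, so any query of the interrupt status also acknowledges it; the old query/clear pair implied the two steps were separable. A minimal caller sketch — the handler name is hypothetical, and only cbe_get_and_clear_pm_interrupts() plus the CBE_PM_CTR_OVERFLOW_INTR() macro (declared further down in cell-pmu.h) come from this commit:

	/* Hypothetical consumer: one read both fetches and acks. */
	static void example_pmu_irq(u32 cpu)
	{
		u32 stat = cbe_get_and_clear_pm_interrupts(cpu);

		if (stat & CBE_PM_CTR_OVERFLOW_INTR(0))
			/* handle counter 0 overflow */;

		/* A second read would return 0: the first read already
		 * cleared the bits, hence the combined API. */
	}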
@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	}
 	spin_lock_init(&ctx->mmio_lock);
 	kref_init(&ctx->kref);
-	init_rwsem(&ctx->state_sema);
+	mutex_init(&ctx->state_mutex);
 	init_MUTEX(&ctx->run_sema);
 	init_waitqueue_head(&ctx->ibox_wq);
 	init_waitqueue_head(&ctx->wbox_wq);
@@ -53,6 +53,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	ctx->owner = get_task_mm(current);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
+	ctx->rt_priority = current->rt_priority;
+	ctx->policy = current->policy;
+	ctx->prio = current->prio;
+	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
 	goto out;
 
 out_free:
 	kfree(ctx);
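The four added lines snapshot the creating task's scheduling parameters into the context and wire up a per-context tick. spu_sched_tick() itself lands in the scheduler file whose diff is collapsed here; given the INIT_DELAYED_WORK() wiring, a sketch of the handler shape it presumably has (the body is an assumption):

	void spu_sched_tick(struct work_struct *work)
	{
		struct spu_context *ctx =
			container_of(work, struct spu_context, sched_work.work);

		/* presumably: time-slice accounting, then requeue or
		 * spu_yield(ctx) and rearm the delayed work */
	}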
@@ -65,9 +69,9 @@ void destroy_spu_context(struct kref *kref)
 {
 	struct spu_context *ctx;
 	ctx = container_of(kref, struct spu_context, kref);
-	down_write(&ctx->state_sema);
+	mutex_lock(&ctx->state_mutex);
 	spu_deactivate(ctx);
-	up_write(&ctx->state_sema);
+	mutex_unlock(&ctx->state_mutex);
 	spu_fini_csa(&ctx->csa);
 	if (ctx->gang)
 		spu_gang_remove_ctx(ctx->gang, ctx);
@@ -96,16 +100,6 @@ void spu_forget(struct spu_context *ctx)
 	spu_release(ctx);
 }
 
-void spu_acquire(struct spu_context *ctx)
-{
-	down_read(&ctx->state_sema);
-}
-
-void spu_release(struct spu_context *ctx)
-{
-	up_read(&ctx->state_sema);
-}
-
 void spu_unmap_mappings(struct spu_context *ctx)
 {
 	if (ctx->local_store)
@@ -124,83 +118,84 @@ void spu_unmap_mappings(struct spu_context *ctx)
 		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
 }
 
+/**
+ * spu_acquire_exclusive - lock spu context and protect against userspace access
+ * @ctx: spu context to lock
+ *
+ * Note:
+ *	Returns 0 and with the context locked on success
+ *	Returns negative error and with the context _unlocked_ on failure.
+ */
 int spu_acquire_exclusive(struct spu_context *ctx)
 {
-	int ret = 0;
+	int ret = -EINVAL;
 
-	down_write(&ctx->state_sema);
-	/* ctx is about to be freed, can't acquire any more */
-	if (!ctx->owner) {
-		ret = -EINVAL;
-		goto out;
-	}
+	spu_acquire(ctx);
+	/*
+	 * Context is about to be freed, so we can't acquire it anymore.
+	 */
+	if (!ctx->owner)
+		goto out_unlock;
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		ret = spu_activate(ctx, 0);
 		if (ret)
-			goto out;
-		ctx->state = SPU_STATE_RUNNABLE;
+			goto out_unlock;
 	} else {
-		/* We need to exclude userspace access to the context. */
+		/*
+		 * We need to exclude userspace access to the context.
+		 *
+		 * To protect against memory access we invalidate all ptes
+		 * and make sure the pagefault handlers block on the mutex.
+		 */
 		spu_unmap_mappings(ctx);
 	}
 
-out:
-	if (ret)
-		up_write(&ctx->state_sema);
+	return 0;
+
+out_unlock:
+	spu_release(ctx);
 	return ret;
 }
 
-int spu_acquire_runnable(struct spu_context *ctx)
+/**
+ * spu_acquire_runnable - lock spu context and make sure it is in runnable state
+ * @ctx: spu context to lock
+ *
+ * Note:
+ *	Returns 0 and with the context locked on success
+ *	Returns negative error and with the context _unlocked_ on failure.
+ */
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
 {
-	int ret = 0;
+	int ret = -EINVAL;
 
-	down_read(&ctx->state_sema);
-	if (ctx->state == SPU_STATE_RUNNABLE) {
-		ctx->spu->prio = current->prio;
-		return 0;
-	}
-	up_read(&ctx->state_sema);
-
-	down_write(&ctx->state_sema);
-	/* ctx is about to be freed, can't acquire any more */
-	if (!ctx->owner) {
-		ret = -EINVAL;
-		goto out;
-	}
-
+	spu_acquire(ctx);
 	if (ctx->state == SPU_STATE_SAVED) {
-		ret = spu_activate(ctx, 0);
+		/*
+		 * Context is about to be freed, so we can't acquire it anymore.
+		 */
+		if (!ctx->owner)
+			goto out_unlock;
+		ret = spu_activate(ctx, flags);
 		if (ret)
-			goto out;
-		ctx->state = SPU_STATE_RUNNABLE;
+			goto out_unlock;
 	}
 
-	downgrade_write(&ctx->state_sema);
-	/* On success, we return holding the lock */
-	return ret;
+	return 0;
 
-out:
-	/* Release here, to simplify calling code. */
-	up_write(&ctx->state_sema);
+out_unlock:
+	spu_release(ctx);
 	return ret;
 }
 
+/**
+ * spu_acquire_saved - lock spu context and make sure it is in saved state
+ * @ctx: spu context to lock
+ */
 void spu_acquire_saved(struct spu_context *ctx)
 {
-	down_read(&ctx->state_sema);
-
-	if (ctx->state == SPU_STATE_SAVED)
-		return;
-
-	up_read(&ctx->state_sema);
-	down_write(&ctx->state_sema);
-
-	if (ctx->state == SPU_STATE_RUNNABLE) {
+	spu_acquire(ctx);
+	if (ctx->state != SPU_STATE_SAVED)
 		spu_deactivate(ctx);
-		ctx->state = SPU_STATE_SAVED;
-	}
-
-	downgrade_write(&ctx->state_sema);
 }
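The rewrite above replaces the rwsem choreography (take the read lock, drop it, retake as writer, downgrade_write() on success) with a single mutex and one uniform contract, now spelled out in the kerneldoc: 0 means locked, an error means unlocked. A caller sketch of that contract; do_work() is a placeholder:

	static int example_caller(struct spu_context *ctx)
	{
		int ret = spu_acquire_runnable(ctx, 0);
		if (ret)
			return ret;	/* state_mutex is NOT held here */

		/* ctx is runnable and state_mutex is held */
		do_work(ctx);

		spu_release(ctx);	/* every success path must unlock */
		return 0;
	}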
@@ -103,6 +103,9 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
 	offset += vma->vm_pgoff << PAGE_SHIFT;
 
+	if (offset >= LS_SIZE)
+		return NOPFN_SIGBUS;
+
 	spu_acquire(ctx);
 
 	if (ctx->state == SPU_STATE_SAVED) {
@@ -164,7 +167,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	/* error here usually means a signal.. we might want to test
 	 * the error code more precisely though
 	 */
-	ret = spu_acquire_runnable(ctx);
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
 		return NOPFN_REFAULT;
@@ -1306,7 +1309,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 	if (ret)
 		goto out;
 
-	spu_acquire_runnable(ctx);
+	spu_acquire_runnable(ctx, 0);
 	if (file->f_flags & O_NONBLOCK) {
 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
 	} else {
...
@@ -133,7 +133,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
 out_unlock:
-	spu_release_exclusive(ctx);
+	spu_release(ctx);
 out:
 	return ret;
 }
@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 	int ret;
 	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-	ret = spu_acquire_runnable(ctx);
+	ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
 	if (ret)
 		return ret;
@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 		spu_release(ctx);
 		ret = spu_setup_isolated(ctx);
 		if (!ret)
-			ret = spu_acquire_runnable(ctx);
+			ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
 	}
 
 	/* if userspace has set the runcntrl register (eg, to issue an
@@ -164,8 +164,10 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
-	} else
+	} else {
+		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
+	}
 
 	ctx->ops->runcntl_write(ctx, runcntl);
 	return ret;
@@ -176,6 +178,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 {
 	int ret = 0;
 
+	spu_stop_tick(ctx);
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
 	spu_release(ctx);
@@ -329,8 +332,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 		}
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
-			if (ret)
+			if (ret) {
+				spu_stop_tick(ctx);
 				goto out2;
+			}
 			continue;
 		}
 		ret = spu_process_events(ctx);
@@ -361,4 +366,3 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 	up(&ctx->run_sema);
 	return ret;
 }
-
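Tick lifecycle in this file: spu_run_init() starts the tick unless user space preloaded the run-control register, spu_run_fini() stops it, and the spu_reacquire_runnable() error path above now stops it too, so no work item is left armed on a failed restart. The helpers are only declared in spufs.h below; their bodies live in the collapsed scheduler diff. A sketch of what the pairing presumably looks like — the SCHED_RR check and the SPU_TIMESLICE period are assumptions:

	void spu_start_tick(struct spu_context *ctx)
	{
		if (ctx->policy == SCHED_RR)
			schedule_delayed_work(&ctx->sched_work, SPU_TIMESLICE);
	}

	void spu_stop_tick(struct spu_context *ctx)
	{
		if (ctx->policy == SCHED_RR)
			cancel_delayed_work(&ctx->sched_work);
	}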
@@ -23,7 +23,7 @@
 #define SPUFS_H
 
 #include <linux/kref.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
@@ -37,11 +37,13 @@ enum {
 };
 
 struct spu_context_ops;
-
-#define SPU_CONTEXT_PREEMPT          0UL
-
 struct spu_gang;
 
+/* ctx->sched_flags */
+enum {
+	SPU_SCHED_WAKE = 0,
+};
+
 struct spu_context {
 	struct spu *spu;		  /* pointer to a physical SPU */
 	struct spu_state csa;		  /* SPU context save area. */
@@ -56,7 +58,7 @@ struct spu_context {
 	u64 object_id;		   /* user space pointer for oprofile */
 
 	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
-	struct rw_semaphore state_sema;
+	struct mutex state_mutex;
 	struct semaphore run_sema;
 
 	struct mm_struct *owner;
@@ -77,6 +79,14 @@ struct spu_context {
 
 	struct list_head gang_list;
 	struct spu_gang *gang;
+
+	/* scheduler fields */
+	struct list_head rq;
+	struct delayed_work sched_work;
+	unsigned long sched_flags;
+	unsigned long rt_priority;
+	int policy;
+	int prio;
 };
 
 struct spu_gang {
@@ -161,6 +171,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 
 /* context management */
+static inline void spu_acquire(struct spu_context *ctx)
+{
+	mutex_lock(&ctx->state_mutex);
+}
+
+static inline void spu_release(struct spu_context *ctx)
+{
+	mutex_unlock(&ctx->state_mutex);
+}
+
 struct spu_context * alloc_spu_context(struct spu_gang *gang);
 void destroy_spu_context(struct kref *kref);
 struct spu_context * get_spu_context(struct spu_context *ctx);
@@ -168,20 +188,18 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);
 void spu_forget(struct spu_context *ctx);
-void spu_acquire(struct spu_context *ctx);
-void spu_release(struct spu_context *ctx);
-int spu_acquire_runnable(struct spu_context *ctx);
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-
-static inline void spu_release_exclusive(struct spu_context *ctx)
-{
-	up_write(&ctx->state_sema);
-}
-
-int spu_activate(struct spu_context *ctx, u64 flags);
+enum {
+	SPU_ACTIVATE_NOWAKE = 1,
+};
+int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
+void spu_start_tick(struct spu_context *ctx);
+void spu_stop_tick(struct spu_context *ctx);
+void spu_sched_tick(struct work_struct *work);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
...
@@ -2811,7 +2811,6 @@ static void dump_spu_fields(struct spu *spu)
 	DUMP_FIELD(spu, "0x%lx", irqs[2]);
 	DUMP_FIELD(spu, "0x%x", slb_replace);
 	DUMP_FIELD(spu, "%d", pid);
-	DUMP_FIELD(spu, "%d", prio);
 	DUMP_FIELD(spu, "0x%p", mm);
 	DUMP_FIELD(spu, "0x%p", ctx);
 	DUMP_FIELD(spu, "0x%p", rq);
...
@@ -53,6 +53,11 @@
 #define CBE_PM_CTR_POLARITY		0x01000000
 #define CBE_PM_CTR_COUNT_CYCLES		0x00800000
 #define CBE_PM_CTR_ENABLE		0x00400000
+#define PM07_CTR_INPUT_MUX(x)		(((x) & 0x3F) << 26)
+#define PM07_CTR_INPUT_CONTROL(x)	(((x) & 1) << 25)
+#define PM07_CTR_POLARITY(x)		(((x) & 1) << 24)
+#define PM07_CTR_COUNT_CYCLES(x)	(((x) & 1) << 23)
+#define PM07_CTR_ENABLE(x)		(((x) & 1) << 22)
 
 /* Macros for the pm_status register. */
 #define CBE_PM_CTR_OVERFLOW_INTR(ctr)	(1 << (31 - ((ctr) & 7)))
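The PM07_CTR_* helpers moved up here each shift a field value into place in a pm07_control word: a 6-bit input-signal selector starting at bit 26, then single-bit controls at bits 25 down to 22. An illustrative composition — the signal number is arbitrary, and the write helper is assumed rather than shown in this hunk:

	/* count occurrences (not cycles) of input signal 42, enabled */
	u32 val = PM07_CTR_INPUT_MUX(42)
		| PM07_CTR_INPUT_CONTROL(1)
		| PM07_CTR_POLARITY(1)
		| PM07_CTR_COUNT_CYCLES(0)
		| PM07_CTR_ENABLE(1);

The resulting value would then be written to the counter's pm07_control register via cbe_write_pm() or a similar helper.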
@@ -89,8 +94,7 @@ extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
 extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
 extern void cbe_disable_pm_interrupts(u32 cpu);
-extern u32  cbe_query_pm_interrupts(u32 cpu);
-extern u32  cbe_clear_pm_interrupts(u32 cpu);
+extern u32  cbe_get_and_clear_pm_interrupts(u32 cpu);
 extern void cbe_sync_irq(int node);
 
 /* Utility functions, macros */
@@ -103,11 +107,4 @@ extern u32 cbe_get_hw_thread_id(int cpu);
 #define CBE_COUNT_PROBLEM_MODE		2
 #define CBE_COUNT_ALL_MODES		3
 
-/* Macros for the pm07_control registers. */
-#define PM07_CTR_INPUT_MUX(x)		(((x) & 0x3F) << 26)
-#define PM07_CTR_INPUT_CONTROL(x)	(((x) & 1) << 25)
-#define PM07_CTR_POLARITY(x)		(((x) & 1) << 24)
-#define PM07_CTR_COUNT_CYCLES(x)	(((x) & 1) << 23)
-#define PM07_CTR_ENABLE(x)		(((x) & 1) << 22)
-
 #endif /* __ASM_CELL_PMU_H__ */
@@ -129,7 +129,6 @@ struct spu {
 	struct spu_runqueue *rq;
 	unsigned long long timestamp;
 	pid_t pid;
-	int prio;
 	int class_0_pending;
 	spinlock_t register_lock;
...