Commit 944b380e authored by Paul Mackerras
parents fff5f528 c7eb7347
...@@ -39,10 +39,17 @@ ...@@ -39,10 +39,17 @@
#include "../platforms/cell/interrupt.h" #include "../platforms/cell/interrupt.h"
#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */ #define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying
* PPU_CYCLES event
*/
#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */ #define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
#define NUM_THREADS 2 #define NUM_THREADS 2 /* number of physical threads in
#define VIRT_CNTR_SW_TIME_NS 100000000 // 0.5 seconds * physical processor
*/
#define NUM_TRACE_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2
struct pmc_cntrl_data { struct pmc_cntrl_data {
unsigned long vcntr; unsigned long vcntr;
...@@ -58,7 +65,7 @@ struct pmc_cntrl_data { ...@@ -58,7 +65,7 @@ struct pmc_cntrl_data {
struct pm_signal { struct pm_signal {
u16 cpu; /* Processor to modify */ u16 cpu; /* Processor to modify */
u16 sub_unit; /* hw subunit this applies to (if applicable) */ u16 sub_unit; /* hw subunit this applies to (if applicable) */
u16 signal_group; /* Signal Group to Enable/Disable */ short int signal_group; /* Signal Group to Enable/Disable */
u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event
* Bus Word(s) (bitmask) * Bus Word(s) (bitmask)
*/ */
...@@ -93,7 +100,6 @@ static struct { ...@@ -93,7 +100,6 @@ static struct {
u32 pm07_cntrl[NR_PHYS_CTRS]; u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs; } pm_regs;
#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12) #define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4) #define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8) #define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
...@@ -101,7 +107,6 @@ static struct { ...@@ -101,7 +107,6 @@ static struct {
#define GET_COUNT_CYCLES(x) (x & 0x00000001) #define GET_COUNT_CYCLES(x) (x & 0x00000001)
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
...@@ -129,8 +134,8 @@ static spinlock_t virt_cntr_lock = SPIN_LOCK_UNLOCKED; ...@@ -129,8 +134,8 @@ static spinlock_t virt_cntr_lock = SPIN_LOCK_UNLOCKED;
static u32 ctr_enabled; static u32 ctr_enabled;
static unsigned char trace_bus[4]; static unsigned char trace_bus[NUM_TRACE_BUS_WORDS];
static unsigned char input_bus[2]; static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
/* /*
* Firmware interface functions * Firmware interface functions
...@@ -177,25 +182,40 @@ static void pm_rtas_reset_signals(u32 node) ...@@ -177,25 +182,40 @@ static void pm_rtas_reset_signals(u32 node)
static void pm_rtas_activate_signals(u32 node, u32 count) static void pm_rtas_activate_signals(u32 node, u32 count)
{ {
int ret; int ret;
int j; int i, j;
struct pm_signal pm_signal_local[NR_PHYS_CTRS]; struct pm_signal pm_signal_local[NR_PHYS_CTRS];
/* There is no debug setup required for the cycles event.
* Note that only events in the same group can be used.
* Otherwise, there will be conflicts in correctly routing
* the signals on the debug bus. It is the responsibility
* of the OProfile user tool to check the events are in
* the same group.
*/
i = 0;
for (j = 0; j < count; j++) { for (j = 0; j < count; j++) {
/* fw expects physical cpu # */ if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
pm_signal_local[j].cpu = node;
pm_signal_local[j].signal_group = pm_signal[j].signal_group; /* fw expects physical cpu # */
pm_signal_local[j].bus_word = pm_signal[j].bus_word; pm_signal_local[i].cpu = node;
pm_signal_local[j].sub_unit = pm_signal[j].sub_unit; pm_signal_local[i].signal_group
pm_signal_local[j].bit = pm_signal[j].bit; = pm_signal[j].signal_group;
pm_signal_local[i].bus_word = pm_signal[j].bus_word;
pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
pm_signal_local[i].bit = pm_signal[j].bit;
i++;
}
} }
ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE, if (i != 0) {
pm_signal_local, ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
count * sizeof(struct pm_signal)); pm_signal_local,
i * sizeof(struct pm_signal));
if (ret) if (ret)
printk(KERN_WARNING "%s: rtas returned: %d\n", printk(KERN_WARNING "%s: rtas returned: %d\n",
__FUNCTION__, ret); __FUNCTION__, ret);
}
} }
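The reworked loop above skips PPU_CYCLES entries and compacts the remaining signals before the RTAS call, passing i * sizeof(struct pm_signal) instead of the raw count. A minimal standalone sketch of that filter-and-compact idiom (struct and values are illustrative, not the kernel's):

#include <stdio.h>

#define PPU_CYCLES_GRP_NUM	1
#define NR_PHYS_CTRS		8

/* Copy only the entries whose signal_group is not the special PPU-cycles
 * group and hand the firmware call the compacted count. */
struct sig {
	int cpu;
	int signal_group;
};

static int compact_signals(const struct sig *in, int count,
			   struct sig *out, int node)
{
	int i = 0, j;

	for (j = 0; j < count; j++) {
		if (in[j].signal_group == PPU_CYCLES_GRP_NUM)
			continue;	/* cycles events need no debug-bus setup */
		out[i] = in[j];
		out[i].cpu = node;	/* firmware expects the physical cpu */
		i++;
	}
	return i;			/* number of entries actually activated */
}

int main(void)
{
	struct sig in[3] = { { 0, PPU_CYCLES_GRP_NUM }, { 0, 42 }, { 0, 43 } };
	struct sig out[NR_PHYS_CTRS];
	int n = compact_signals(in, 3, out, 1);

	printf("%d of 3 signals would be passed to the firmware\n", n);
	return 0;
}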
/* /*
...@@ -212,7 +232,7 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) ...@@ -212,7 +232,7 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
/* Special Event: Count all cpu cycles */ /* Special Event: Count all cpu cycles */
pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES; pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
p = &(pm_signal[ctr]); p = &(pm_signal[ctr]);
p->signal_group = 21; p->signal_group = PPU_CYCLES_GRP_NUM;
p->bus_word = 1; p->bus_word = 1;
p->sub_unit = 0; p->sub_unit = 0;
p->bit = 0; p->bit = 0;
...@@ -232,13 +252,21 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) ...@@ -232,13 +252,21 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
p->signal_group = event / 100; p->signal_group = event / 100;
p->bus_word = bus_word; p->bus_word = bus_word;
p->sub_unit = unit_mask & 0x0000f000; p->sub_unit = (unit_mask & 0x0000f000) >> 12;
pm_regs.pm07_cntrl[ctr] = 0; pm_regs.pm07_cntrl[ctr] = 0;
pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles); pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity); pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control); pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
/* Some of the islands signal selection is based on 64 bit words.
* The debug bus words are 32 bits, the input words to the performance
* counters are defined as 32 bits. Need to convert the 64 bit island
* specification to the appropriate 32 bit input and bus word for the * specification to the appropriate 32 bit input and bus word for the
* performance counter event selection. See the CELL Performance
* monitoring signals manual and the Perf cntr hardware descriptions
* for the details.
*/
if (input_control == 0) { if (input_control == 0) {
if (signal_bit > 31) { if (signal_bit > 31) {
signal_bit -= 32; signal_bit -= 32;
...@@ -259,12 +287,12 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) ...@@ -259,12 +287,12 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
p->bit = signal_bit; p->bit = signal_bit;
} }
for (i = 0; i < 4; i++) { for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) {
if (bus_word & (1 << i)) { if (bus_word & (1 << i)) {
pm_regs.debug_bus_control |= pm_regs.debug_bus_control |=
(bus_type << (31 - (2 * i) + 1)); (bus_type << (31 - (2 * i) + 1));
for (j = 0; j < 2; j++) { for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
if (input_bus[j] == 0xff) { if (input_bus[j] == 0xff) {
input_bus[j] = i; input_bus[j] = i;
pm_regs.group_control |= pm_regs.group_control |=
...@@ -278,52 +306,58 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) ...@@ -278,52 +306,58 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
; ;
} }
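The loop above walks the NUM_TRACE_BUS_WORDS trace-bus words; each word requested in the event's bus_word mask is routed to the first free entry of the NUM_INPUT_BUS_WORDS input_bus[] array, where 0xff marks a free slot (the arrays are initialised to 0xff in cell_reg_setup below). A standalone sketch of just that slot assignment, with the group/debug-bus register updates left out:

#include <stdio.h>

#define NUM_TRACE_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

int main(void)
{
	unsigned char input_bus[NUM_INPUT_BUS_WORDS] = { 0xff, 0xff };
	unsigned int bus_word_mask = 0x5;	/* example: words 0 and 2 requested */
	int i, j;

	for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) {
		if (!(bus_word_mask & (1 << i)))
			continue;
		for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
			if (input_bus[j] == 0xff) {	/* 0xff means "slot unused" */
				input_bus[j] = i;
				printf("trace bus word %d -> input bus slot %d\n",
				       i, j);
				break;
			}
		}
	}
	return 0;
}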
static void write_pm_cntrl(int cpu, struct pm_cntrl *pm_cntrl) static void write_pm_cntrl(int cpu)
{ {
/* Oprofile will use 32 bit counters, set bits 7:10 to 0 */ /* Oprofile will use 32 bit counters, set bits 7:10 to 0
* pmregs.pm_cntrl is a global
*/
u32 val = 0; u32 val = 0;
if (pm_cntrl->enable == 1) if (pm_regs.pm_cntrl.enable == 1)
val |= CBE_PM_ENABLE_PERF_MON; val |= CBE_PM_ENABLE_PERF_MON;
if (pm_cntrl->stop_at_max == 1) if (pm_regs.pm_cntrl.stop_at_max == 1)
val |= CBE_PM_STOP_AT_MAX; val |= CBE_PM_STOP_AT_MAX;
if (pm_cntrl->trace_mode == 1) if (pm_regs.pm_cntrl.trace_mode == 1)
val |= CBE_PM_TRACE_MODE_SET(pm_cntrl->trace_mode); val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
if (pm_cntrl->freeze == 1) if (pm_regs.pm_cntrl.freeze == 1)
val |= CBE_PM_FREEZE_ALL_CTRS; val |= CBE_PM_FREEZE_ALL_CTRS;
/* Routine set_count_mode must be called previously to set /* Routine set_count_mode must be called previously to set
* the count mode based on the user selection of user and kernel. * the count mode based on the user selection of user and kernel.
*/ */
val |= CBE_PM_COUNT_MODE_SET(pm_cntrl->count_mode); val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
cbe_write_pm(cpu, pm_control, val); cbe_write_pm(cpu, pm_control, val);
} }
static inline void static inline void
set_count_mode(u32 kernel, u32 user, struct pm_cntrl *pm_cntrl) set_count_mode(u32 kernel, u32 user)
{ {
/* The user must specify user and kernel if they want them. If /* The user must specify user and kernel if they want them. If
* neither is specified, OProfile will count in hypervisor mode * neither is specified, OProfile will count in hypervisor mode.
* pm_regs.pm_cntrl is a global
*/ */
if (kernel) { if (kernel) {
if (user) if (user)
pm_cntrl->count_mode = CBE_COUNT_ALL_MODES; pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
else else
pm_cntrl->count_mode = CBE_COUNT_SUPERVISOR_MODE; pm_regs.pm_cntrl.count_mode =
CBE_COUNT_SUPERVISOR_MODE;
} else { } else {
if (user) if (user)
pm_cntrl->count_mode = CBE_COUNT_PROBLEM_MODE; pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
else else
pm_cntrl->count_mode = CBE_COUNT_HYPERVISOR_MODE; pm_regs.pm_cntrl.count_mode =
CBE_COUNT_HYPERVISOR_MODE;
} }
} }
static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl) static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
{ {
pm07_cntrl[ctr] |= PM07_CTR_ENABLE(1); pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]); cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
} }
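enable_ctr() now sets CBE_PM_CTR_ENABLE directly instead of PM07_CTR_ENABLE(1); both spell the same bit, which this standalone check makes explicit (constants copied from the header change further down):

#include <assert.h>
#include <stdio.h>

#define CBE_PM_CTR_ENABLE	0x00400000
#define PM07_CTR_ENABLE(x)	(((x) & 1) << 22)

int main(void)
{
	/* 1 << 22 == 0x00400000, so the two spellings set the same bit */
	assert(PM07_CTR_ENABLE(1) == CBE_PM_CTR_ENABLE);
	printf("enable bit: 0x%08x\n", (unsigned)CBE_PM_CTR_ENABLE);
	return 0;
}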
...@@ -365,6 +399,14 @@ static void cell_virtual_cntr(unsigned long data) ...@@ -365,6 +399,14 @@ static void cell_virtual_cntr(unsigned long data)
hdw_thread = 1 ^ hdw_thread; hdw_thread = 1 ^ hdw_thread;
next_hdw_thread = hdw_thread; next_hdw_thread = hdw_thread;
for (i = 0; i < num_counters; i++)
/* There are some per thread events. Must do the
* set event, for the thread that is being started
*/
set_pm_event(i,
pmc_cntrl[next_hdw_thread][i].evnts,
pmc_cntrl[next_hdw_thread][i].masks);
/* The following is done only once per each node, but /* The following is done only once per each node, but
* we need cpu #, not node #, to pass to the cbe_xxx functions. * we need cpu #, not node #, to pass to the cbe_xxx functions.
*/ */
...@@ -385,12 +427,13 @@ static void cell_virtual_cntr(unsigned long data) ...@@ -385,12 +427,13 @@ static void cell_virtual_cntr(unsigned long data)
== 0xFFFFFFFF) == 0xFFFFFFFF)
/* If the cntr value is 0xffffffff, we must /* If the cntr value is 0xffffffff, we must
* reset that to 0xfffffff0 when the current * reset that to 0xfffffff0 when the current
* thread is restarted. This will generate a new * thread is restarted. This will generate a
* interrupt and make sure that we never restore * new interrupt and make sure that we never
* the counters to the max value. If the counters * restore the counters to the max value. If
* were restored to the max value, they do not * the counters were restored to the max value,
* increment and no interrupts are generated. Hence * they do not increment and no interrupts are
* no more samples will be collected on that cpu. * generated. Hence no more samples will be
* collected on that cpu.
*/ */
cbe_write_ctr(cpu, i, 0xFFFFFFF0); cbe_write_ctr(cpu, i, 0xFFFFFFF0);
else else
...@@ -410,9 +453,6 @@ static void cell_virtual_cntr(unsigned long data) ...@@ -410,9 +453,6 @@ static void cell_virtual_cntr(unsigned long data)
* Must do the set event, enable_cntr * Must do the set event, enable_cntr
* for each cpu. * for each cpu.
*/ */
set_pm_event(i,
pmc_cntrl[next_hdw_thread][i].evnts,
pmc_cntrl[next_hdw_thread][i].masks);
enable_ctr(cpu, i, enable_ctr(cpu, i,
pm_regs.pm07_cntrl); pm_regs.pm07_cntrl);
} else { } else {
...@@ -465,8 +505,7 @@ cell_reg_setup(struct op_counter_config *ctr, ...@@ -465,8 +505,7 @@ cell_reg_setup(struct op_counter_config *ctr,
pm_regs.pm_cntrl.trace_mode = 0; pm_regs.pm_cntrl.trace_mode = 0;
pm_regs.pm_cntrl.freeze = 1; pm_regs.pm_cntrl.freeze = 1;
set_count_mode(sys->enable_kernel, sys->enable_user, set_count_mode(sys->enable_kernel, sys->enable_user);
&pm_regs.pm_cntrl);
/* Setup the thread 0 events */ /* Setup the thread 0 events */
for (i = 0; i < num_ctrs; ++i) { for (i = 0; i < num_ctrs; ++i) {
...@@ -498,10 +537,10 @@ cell_reg_setup(struct op_counter_config *ctr, ...@@ -498,10 +537,10 @@ cell_reg_setup(struct op_counter_config *ctr,
pmc_cntrl[1][i].vcntr = i; pmc_cntrl[1][i].vcntr = i;
} }
for (i = 0; i < 4; i++) for (i = 0; i < NUM_TRACE_BUS_WORDS; i++)
trace_bus[i] = 0xff; trace_bus[i] = 0xff;
for (i = 0; i < 2; i++) for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
input_bus[i] = 0xff; input_bus[i] = 0xff;
/* Our counters count up, and "count" refers to /* Our counters count up, and "count" refers to
...@@ -560,7 +599,7 @@ static void cell_cpu_setup(struct op_counter_config *cntr) ...@@ -560,7 +599,7 @@ static void cell_cpu_setup(struct op_counter_config *cntr)
cbe_write_pm(cpu, pm_start_stop, 0); cbe_write_pm(cpu, pm_start_stop, 0);
cbe_write_pm(cpu, group_control, pm_regs.group_control); cbe_write_pm(cpu, group_control, pm_regs.group_control);
cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
write_pm_cntrl(cpu, &pm_regs.pm_cntrl); write_pm_cntrl(cpu);
for (i = 0; i < num_counters; ++i) { for (i = 0; i < num_counters; ++i) {
if (ctr_enabled & (1 << i)) { if (ctr_enabled & (1 << i)) {
...@@ -602,7 +641,7 @@ static void cell_global_start(struct op_counter_config *ctr) ...@@ -602,7 +641,7 @@ static void cell_global_start(struct op_counter_config *ctr)
} }
} }
cbe_clear_pm_interrupts(cpu); cbe_get_and_clear_pm_interrupts(cpu);
cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask); cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
cbe_enable_pm(cpu); cbe_enable_pm(cpu);
} }
...@@ -672,7 +711,7 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) ...@@ -672,7 +711,7 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
cbe_disable_pm(cpu); cbe_disable_pm(cpu);
interrupt_mask = cbe_clear_pm_interrupts(cpu); interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
/* If the interrupt mask has been cleared, then the virt cntr /* If the interrupt mask has been cleared, then the virt cntr
* has cleared the interrupt. When the thread that generated * has cleared the interrupt. When the thread that generated
......
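The reflowed comment in cell_virtual_cntr() above explains why a saturated counter is written back as 0xFFFFFFF0 rather than 0xFFFFFFFF. A toy model of that behaviour, with the hardware simplified to "interrupt at the maximum, then stop counting" (illustration only, not the kernel code):

#include <stdint.h>
#include <stdio.h>

struct toy_ctr {
	uint32_t value;
	int	 stopped;
};

static int count_event(struct toy_ctr *c)
{
	if (c->stopped)
		return 0;			/* saturated: no increment, no interrupt */
	if (++c->value == 0xFFFFFFFF) {
		c->stopped = 1;
		return 1;			/* overflow interrupt raised */
	}
	return 0;
}

static void run(uint32_t restore_value)
{
	struct toy_ctr c = { restore_value, restore_value == 0xFFFFFFFF };
	int i, fired = 0;

	for (i = 0; i < 32; i++)
		fired += count_event(&c);
	printf("restored to 0x%08x: %d interrupt(s) in 32 events\n",
	       (unsigned)restore_value, fired);
}

int main(void)
{
	run(0xFFFFFFF0);	/* what the patch writes back */
	run(0xFFFFFFFF);	/* what it deliberately avoids */
	return 0;
}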
...@@ -345,18 +345,12 @@ EXPORT_SYMBOL_GPL(cbe_read_trace_buffer); ...@@ -345,18 +345,12 @@ EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
* Enabling/disabling interrupts for the entire performance monitoring unit. * Enabling/disabling interrupts for the entire performance monitoring unit.
*/ */
u32 cbe_query_pm_interrupts(u32 cpu) u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
{
return cbe_read_pm(cpu, pm_status);
}
EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
u32 cbe_clear_pm_interrupts(u32 cpu)
{ {
/* Reading pm_status clears the interrupt bits. */ /* Reading pm_status clears the interrupt bits. */
return cbe_query_pm_interrupts(cpu); return cbe_read_pm(cpu, pm_status);
} }
EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts); EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);
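cbe_query_pm_interrupts() is folded into cbe_get_and_clear_pm_interrupts() because, as the comment says, reading pm_status clears the pending bits, so the old "query" name suggested a side-effect-free read that the hardware does not offer. A userspace model of a read-to-clear status word (purely illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_pm_status = 0x80000000;	/* pretend counter 0 overflowed */

static uint32_t get_and_clear_interrupts(void)
{
	uint32_t status = fake_pm_status;

	fake_pm_status = 0;		/* the hardware clears the bits on read */
	return status;
}

int main(void)
{
	printf("first read:  0x%08x\n", (unsigned)get_and_clear_interrupts());
	printf("second read: 0x%08x\n", (unsigned)get_and_clear_interrupts());
	return 0;
}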
void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask) void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
{ {
...@@ -371,7 +365,7 @@ EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts); ...@@ -371,7 +365,7 @@ EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
void cbe_disable_pm_interrupts(u32 cpu) void cbe_disable_pm_interrupts(u32 cpu)
{ {
cbe_clear_pm_interrupts(cpu); cbe_get_and_clear_pm_interrupts(cpu);
cbe_write_pm(cpu, pm_status, 0); cbe_write_pm(cpu, pm_status, 0);
} }
EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts); EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
......
...@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) ...@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
} }
spin_lock_init(&ctx->mmio_lock); spin_lock_init(&ctx->mmio_lock);
kref_init(&ctx->kref); kref_init(&ctx->kref);
init_rwsem(&ctx->state_sema); mutex_init(&ctx->state_mutex);
init_MUTEX(&ctx->run_sema); init_MUTEX(&ctx->run_sema);
init_waitqueue_head(&ctx->ibox_wq); init_waitqueue_head(&ctx->ibox_wq);
init_waitqueue_head(&ctx->wbox_wq); init_waitqueue_head(&ctx->wbox_wq);
...@@ -53,6 +53,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) ...@@ -53,6 +53,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
ctx->owner = get_task_mm(current); ctx->owner = get_task_mm(current);
if (gang) if (gang)
spu_gang_add_ctx(gang, ctx); spu_gang_add_ctx(gang, ctx);
ctx->rt_priority = current->rt_priority;
ctx->policy = current->policy;
ctx->prio = current->prio;
INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
goto out; goto out;
out_free: out_free:
kfree(ctx); kfree(ctx);
...@@ -65,9 +69,9 @@ void destroy_spu_context(struct kref *kref) ...@@ -65,9 +69,9 @@ void destroy_spu_context(struct kref *kref)
{ {
struct spu_context *ctx; struct spu_context *ctx;
ctx = container_of(kref, struct spu_context, kref); ctx = container_of(kref, struct spu_context, kref);
down_write(&ctx->state_sema); mutex_lock(&ctx->state_mutex);
spu_deactivate(ctx); spu_deactivate(ctx);
up_write(&ctx->state_sema); mutex_unlock(&ctx->state_mutex);
spu_fini_csa(&ctx->csa); spu_fini_csa(&ctx->csa);
if (ctx->gang) if (ctx->gang)
spu_gang_remove_ctx(ctx->gang, ctx); spu_gang_remove_ctx(ctx->gang, ctx);
...@@ -96,16 +100,6 @@ void spu_forget(struct spu_context *ctx) ...@@ -96,16 +100,6 @@ void spu_forget(struct spu_context *ctx)
spu_release(ctx); spu_release(ctx);
} }
void spu_acquire(struct spu_context *ctx)
{
down_read(&ctx->state_sema);
}
void spu_release(struct spu_context *ctx)
{
up_read(&ctx->state_sema);
}
void spu_unmap_mappings(struct spu_context *ctx) void spu_unmap_mappings(struct spu_context *ctx)
{ {
if (ctx->local_store) if (ctx->local_store)
...@@ -124,83 +118,84 @@ void spu_unmap_mappings(struct spu_context *ctx) ...@@ -124,83 +118,84 @@ void spu_unmap_mappings(struct spu_context *ctx)
unmap_mapping_range(ctx->psmap, 0, 0x20000, 1); unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
} }
/**
* spu_acquire_exclusive - lock spu context and protect against userspace access
* @ctx: spu context to lock
*
* Note:
* Returns 0 and with the context locked on success
* Returns negative error and with the context _unlocked_ on failure.
*/
int spu_acquire_exclusive(struct spu_context *ctx) int spu_acquire_exclusive(struct spu_context *ctx)
{ {
int ret = 0; int ret = -EINVAL;
down_write(&ctx->state_sema); spu_acquire(ctx);
/* ctx is about to be freed, can't acquire any more */ /*
if (!ctx->owner) { * Context is about to be freed, so we can't acquire it anymore.
ret = -EINVAL; */
goto out; if (!ctx->owner)
} goto out_unlock;
if (ctx->state == SPU_STATE_SAVED) { if (ctx->state == SPU_STATE_SAVED) {
ret = spu_activate(ctx, 0); ret = spu_activate(ctx, 0);
if (ret) if (ret)
goto out; goto out_unlock;
ctx->state = SPU_STATE_RUNNABLE;
} else { } else {
/* We need to exclude userspace access to the context. */ /*
* We need to exclude userspace access to the context.
*
* To protect against memory access we invalidate all ptes
* and make sure the pagefault handlers block on the mutex.
*/
spu_unmap_mappings(ctx); spu_unmap_mappings(ctx);
} }
out: return 0;
if (ret)
up_write(&ctx->state_sema); out_unlock:
spu_release(ctx);
return ret; return ret;
} }
int spu_acquire_runnable(struct spu_context *ctx) /**
* spu_acquire_runnable - lock spu context and make sure it is in runnable state
* @ctx: spu context to lock
*
* Note:
* Returns 0 and with the context locked on success
* Returns negative error and with the context _unlocked_ on failure.
*/
int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
{ {
int ret = 0; int ret = -EINVAL;
down_read(&ctx->state_sema);
if (ctx->state == SPU_STATE_RUNNABLE) {
ctx->spu->prio = current->prio;
return 0;
}
up_read(&ctx->state_sema);
down_write(&ctx->state_sema);
/* ctx is about to be freed, can't acquire any more */
if (!ctx->owner) {
ret = -EINVAL;
goto out;
}
spu_acquire(ctx);
if (ctx->state == SPU_STATE_SAVED) { if (ctx->state == SPU_STATE_SAVED) {
ret = spu_activate(ctx, 0); /*
* Context is about to be freed, so we can't acquire it anymore.
*/
if (!ctx->owner)
goto out_unlock;
ret = spu_activate(ctx, flags);
if (ret) if (ret)
goto out; goto out_unlock;
ctx->state = SPU_STATE_RUNNABLE;
} }
downgrade_write(&ctx->state_sema); return 0;
/* On success, we return holding the lock */
return ret;
out:
/* Release here, to simplify calling code. */
up_write(&ctx->state_sema);
out_unlock:
spu_release(ctx);
return ret; return ret;
} }
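The new kerneldoc above pins down the locking contract for spu_acquire_runnable(): return 0 with the context locked, or a negative error with it unlocked. A small pthread model of that "locked on success, unlocked on failure" convention (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

struct toy_ctx {
	pthread_mutex_t lock;
	int owner_gone;		/* stands in for ctx->owner == NULL */
};

static int toy_acquire_runnable(struct toy_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (ctx->owner_gone) {
		pthread_mutex_unlock(&ctx->lock);	/* error path unlocks */
		return -1;
	}
	return 0;					/* success path stays locked */
}

int main(void)
{
	struct toy_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 0 };

	if (toy_acquire_runnable(&ctx))
		return 1;		/* not locked here, nothing to release */

	/* ... run the context ... */
	pthread_mutex_unlock(&ctx.lock);	/* caller releases on success */
	return 0;
}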
/**
* spu_acquire_saved - lock spu context and make sure it is in saved state
* @ctx: spu context to lock
*/
void spu_acquire_saved(struct spu_context *ctx) void spu_acquire_saved(struct spu_context *ctx)
{ {
down_read(&ctx->state_sema); spu_acquire(ctx);
if (ctx->state != SPU_STATE_SAVED)
if (ctx->state == SPU_STATE_SAVED)
return;
up_read(&ctx->state_sema);
down_write(&ctx->state_sema);
if (ctx->state == SPU_STATE_RUNNABLE) {
spu_deactivate(ctx); spu_deactivate(ctx);
ctx->state = SPU_STATE_SAVED;
}
downgrade_write(&ctx->state_sema);
} }
...@@ -103,6 +103,9 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma, ...@@ -103,6 +103,9 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
offset += vma->vm_pgoff << PAGE_SHIFT; offset += vma->vm_pgoff << PAGE_SHIFT;
if (offset >= LS_SIZE)
return NOPFN_SIGBUS;
spu_acquire(ctx); spu_acquire(ctx);
if (ctx->state == SPU_STATE_SAVED) { if (ctx->state == SPU_STATE_SAVED) {
...@@ -164,7 +167,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, ...@@ -164,7 +167,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
/* error here usually means a signal.. we might want to test /* error here usually means a signal.. we might want to test
* the error code more precisely though * the error code more precisely though
*/ */
ret = spu_acquire_runnable(ctx); ret = spu_acquire_runnable(ctx, 0);
if (ret) if (ret)
return NOPFN_REFAULT; return NOPFN_REFAULT;
...@@ -1306,7 +1309,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, ...@@ -1306,7 +1309,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
if (ret) if (ret)
goto out; goto out;
spu_acquire_runnable(ctx); spu_acquire_runnable(ctx, 0);
if (file->f_flags & O_NONBLOCK) { if (file->f_flags & O_NONBLOCK) {
ret = ctx->ops->send_mfc_command(ctx, &cmd); ret = ctx->ops->send_mfc_command(ctx, &cmd);
} else { } else {
......
...@@ -133,7 +133,7 @@ static int spu_setup_isolated(struct spu_context *ctx) ...@@ -133,7 +133,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
spu_mfc_sr1_set(ctx->spu, sr1); spu_mfc_sr1_set(ctx->spu, sr1);
out_unlock: out_unlock:
spu_release_exclusive(ctx); spu_release(ctx);
out: out:
return ret; return ret;
} }
...@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc) ...@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
int ret; int ret;
unsigned long runcntl = SPU_RUNCNTL_RUNNABLE; unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
ret = spu_acquire_runnable(ctx); ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
if (ret) if (ret)
return ret; return ret;
...@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc) ...@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
spu_release(ctx); spu_release(ctx);
ret = spu_setup_isolated(ctx); ret = spu_setup_isolated(ctx);
if (!ret) if (!ret)
ret = spu_acquire_runnable(ctx); ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
} }
/* if userspace has set the runcntrl register (eg, to issue an /* if userspace has set the runcntrl register (eg, to issue an
...@@ -164,8 +164,10 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc) ...@@ -164,8 +164,10 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
if (runcntl == 0) if (runcntl == 0)
runcntl = SPU_RUNCNTL_RUNNABLE; runcntl = SPU_RUNCNTL_RUNNABLE;
} else } else {
spu_start_tick(ctx);
ctx->ops->npc_write(ctx, *npc); ctx->ops->npc_write(ctx, *npc);
}
ctx->ops->runcntl_write(ctx, runcntl); ctx->ops->runcntl_write(ctx, runcntl);
return ret; return ret;
...@@ -176,6 +178,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc, ...@@ -176,6 +178,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
{ {
int ret = 0; int ret = 0;
spu_stop_tick(ctx);
*status = ctx->ops->status_read(ctx); *status = ctx->ops->status_read(ctx);
*npc = ctx->ops->npc_read(ctx); *npc = ctx->ops->npc_read(ctx);
spu_release(ctx); spu_release(ctx);
...@@ -329,8 +332,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx, ...@@ -329,8 +332,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
} }
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, &status); ret = spu_reacquire_runnable(ctx, npc, &status);
if (ret) if (ret) {
spu_stop_tick(ctx);
goto out2; goto out2;
}
continue; continue;
} }
ret = spu_process_events(ctx); ret = spu_process_events(ctx);
...@@ -361,4 +366,3 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx, ...@@ -361,4 +366,3 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
up(&ctx->run_sema); up(&ctx->run_sema);
return ret; return ret;
} }
...@@ -44,17 +44,18 @@ ...@@ -44,17 +44,18 @@
#include <asm/spu_priv1.h> #include <asm/spu_priv1.h>
#include "spufs.h" #include "spufs.h"
#define SPU_MIN_TIMESLICE (100 * HZ / 1000) #define SPU_TIMESLICE (HZ)
#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array { struct spu_prio_array {
unsigned long bitmap[SPU_BITMAP_SIZE]; DECLARE_BITMAP(bitmap, MAX_PRIO);
wait_queue_head_t waitq[MAX_PRIO]; struct list_head runq[MAX_PRIO];
spinlock_t runq_lock;
struct list_head active_list[MAX_NUMNODES]; struct list_head active_list[MAX_NUMNODES];
struct mutex active_mutex[MAX_NUMNODES]; struct mutex active_mutex[MAX_NUMNODES];
}; };
static struct spu_prio_array *spu_prio; static struct spu_prio_array *spu_prio;
static struct workqueue_struct *spu_sched_wq;
static inline int node_allowed(int node) static inline int node_allowed(int node)
{ {
...@@ -68,6 +69,64 @@ static inline int node_allowed(int node) ...@@ -68,6 +69,64 @@ static inline int node_allowed(int node)
return 1; return 1;
} }
void spu_start_tick(struct spu_context *ctx)
{
if (ctx->policy == SCHED_RR)
queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
}
void spu_stop_tick(struct spu_context *ctx)
{
if (ctx->policy == SCHED_RR)
cancel_delayed_work(&ctx->sched_work);
}
void spu_sched_tick(struct work_struct *work)
{
struct spu_context *ctx =
container_of(work, struct spu_context, sched_work.work);
struct spu *spu;
int rearm = 1;
mutex_lock(&ctx->state_mutex);
spu = ctx->spu;
if (spu) {
int best = sched_find_first_bit(spu_prio->bitmap);
if (best <= ctx->prio) {
spu_deactivate(ctx);
rearm = 0;
}
}
mutex_unlock(&ctx->state_mutex);
if (rearm)
spu_start_tick(ctx);
}
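spu_sched_tick() implements the new SCHED_RR time slice: on each tick it checks whether the runqueue holds a context of equal or better priority (a numerically lower or equal value from sched_find_first_bit) and, if so, deactivates the running context and stops re-arming the tick. A standalone sketch of just that decision, where runq_best_prio() is a made-up stand-in for the bitmap scan:

#include <stdio.h>

static int best_waiting = 100;		/* pretend a prio-100 context is queued */

static int runq_best_prio(void)
{
	return best_waiting;		/* stands in for sched_find_first_bit() */
}

static int should_yield_on_tick(int running_prio)
{
	int best = runq_best_prio();

	return best <= running_prio;	/* equal or higher priority waiter */
}

int main(void)
{
	printf("prio 120 running, prio 100 waiting -> yield? %d\n",
	       should_yield_on_tick(120));		/* 1 */
	printf("prio 90 running,  prio 100 waiting -> yield? %d\n",
	       should_yield_on_tick(90));		/* 0 */
	return 0;
}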
/**
* spu_add_to_active_list - add spu to active list
* @spu: spu to add to the active list
*/
static void spu_add_to_active_list(struct spu *spu)
{
mutex_lock(&spu_prio->active_mutex[spu->node]);
list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
mutex_unlock(&spu_prio->active_mutex[spu->node]);
}
/**
* spu_remove_from_active_list - remove spu from active list
* @spu: spu to remove from the active list
*/
static void spu_remove_from_active_list(struct spu *spu)
{
int node = spu->node;
mutex_lock(&spu_prio->active_mutex[node]);
list_del_init(&spu->list);
mutex_unlock(&spu_prio->active_mutex[node]);
}
static inline void mm_needs_global_tlbie(struct mm_struct *mm) static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{ {
int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1; int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
...@@ -94,8 +153,12 @@ int spu_switch_event_unregister(struct notifier_block * n) ...@@ -94,8 +153,12 @@ int spu_switch_event_unregister(struct notifier_block * n)
return blocking_notifier_chain_unregister(&spu_switch_notifier, n); return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
} }
/**
static inline void bind_context(struct spu *spu, struct spu_context *ctx) * spu_bind_context - bind spu context to physical spu
* @spu: physical spu to bind to
* @ctx: context to bind
*/
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{ {
pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid, pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
spu->number, spu->node); spu->number, spu->node);
...@@ -104,7 +167,6 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx) ...@@ -104,7 +167,6 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
ctx->spu = spu; ctx->spu = spu;
ctx->ops = &spu_hw_ops; ctx->ops = &spu_hw_ops;
spu->pid = current->pid; spu->pid = current->pid;
spu->prio = current->prio;
spu->mm = ctx->owner; spu->mm = ctx->owner;
mm_needs_global_tlbie(spu->mm); mm_needs_global_tlbie(spu->mm);
spu->ibox_callback = spufs_ibox_callback; spu->ibox_callback = spufs_ibox_callback;
...@@ -118,12 +180,21 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx) ...@@ -118,12 +180,21 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
spu->timestamp = jiffies; spu->timestamp = jiffies;
spu_cpu_affinity_set(spu, raw_smp_processor_id()); spu_cpu_affinity_set(spu, raw_smp_processor_id());
spu_switch_notify(spu, ctx); spu_switch_notify(spu, ctx);
spu_add_to_active_list(spu);
ctx->state = SPU_STATE_RUNNABLE;
} }
static inline void unbind_context(struct spu *spu, struct spu_context *ctx) /**
* spu_unbind_context - unbind spu context from physical spu
* @spu: physical spu to unbind from
* @ctx: context to unbind
*/
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{ {
pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__, pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
spu->pid, spu->number, spu->node); spu->pid, spu->number, spu->node);
spu_remove_from_active_list(spu);
spu_switch_notify(spu, NULL); spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx); spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu); spu_save(&ctx->csa, spu);
...@@ -136,95 +207,98 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx) ...@@ -136,95 +207,98 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
spu->dma_callback = NULL; spu->dma_callback = NULL;
spu->mm = NULL; spu->mm = NULL;
spu->pid = 0; spu->pid = 0;
spu->prio = MAX_PRIO;
ctx->ops = &spu_backing_ops; ctx->ops = &spu_backing_ops;
ctx->spu = NULL; ctx->spu = NULL;
spu->flags = 0; spu->flags = 0;
spu->ctx = NULL; spu->ctx = NULL;
} }
static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait, /**
int prio) * spu_add_to_rq - add a context to the runqueue
* @ctx: context to add
*/
static void spu_add_to_rq(struct spu_context *ctx)
{ {
prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE); spin_lock(&spu_prio->runq_lock);
set_bit(prio, spu_prio->bitmap); list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
set_bit(ctx->prio, spu_prio->bitmap);
spin_unlock(&spu_prio->runq_lock);
} }
static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait, /**
int prio) * spu_del_from_rq - remove a context from the runqueue
* @ctx: context to remove
*/
static void spu_del_from_rq(struct spu_context *ctx)
{ {
u64 flags; spin_lock(&spu_prio->runq_lock);
list_del_init(&ctx->rq);
__set_current_state(TASK_RUNNING); if (list_empty(&spu_prio->runq[ctx->prio]))
clear_bit(ctx->prio, spu_prio->bitmap);
spin_lock_irqsave(&wq->lock, flags); spin_unlock(&spu_prio->runq_lock);
}
remove_wait_queue_locked(wq, wait); /**
if (list_empty(&wq->task_list)) * spu_grab_context - remove one context from the runqueue
clear_bit(prio, spu_prio->bitmap); * @prio: priority of the context to be removed
*
* This function removes one context from the runqueue for priority @prio.
* If there is more than one context with the given priority the first
* task on the runqueue will be taken.
*
* Returns the spu_context it just removed.
*
* Must be called with spu_prio->runq_lock held.
*/
static struct spu_context *spu_grab_context(int prio)
{
struct list_head *rq = &spu_prio->runq[prio];
spin_unlock_irqrestore(&wq->lock, flags); if (list_empty(rq))
return NULL;
return list_entry(rq->next, struct spu_context, rq);
} }
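spu_add_to_rq(), spu_del_from_rq() and spu_grab_context() together form a classic O(1) priority runqueue: one FIFO list per priority plus a bitmap of the non-empty priorities, so the best waiter is found with a single sched_find_first_bit(). A self-contained sketch of the same structure (no locking, and grabbing also dequeues here, which the kernel splits out into spu_del_from_rq):

#include <stdio.h>

#define MAX_PRIO 140

struct toy_ctx {
	int prio;			/* lower number = higher priority */
	struct toy_ctx *next;
};

static struct toy_ctx *runq_head[MAX_PRIO], *runq_tail[MAX_PRIO];
static unsigned char prio_nonempty[MAX_PRIO];	/* stands in for the bitmap */

static void add_to_rq(struct toy_ctx *ctx)
{
	ctx->next = NULL;
	if (runq_tail[ctx->prio])
		runq_tail[ctx->prio]->next = ctx;
	else
		runq_head[ctx->prio] = ctx;
	runq_tail[ctx->prio] = ctx;
	prio_nonempty[ctx->prio] = 1;		/* set_bit(prio, bitmap) */
}

static struct toy_ctx *grab_best(void)
{
	int best;

	for (best = 0; best < MAX_PRIO; best++)	/* sched_find_first_bit() */
		if (prio_nonempty[best])
			break;
	if (best == MAX_PRIO)
		return NULL;

	struct toy_ctx *ctx = runq_head[best];
	runq_head[best] = ctx->next;
	if (!runq_head[best]) {
		runq_tail[best] = NULL;
		prio_nonempty[best] = 0;	/* clear_bit(prio, bitmap) */
	}
	return ctx;
}

int main(void)
{
	struct toy_ctx a = { .prio = 120 }, b = { .prio = 100 };

	add_to_rq(&a);
	add_to_rq(&b);
	printf("best waiter has prio %d\n", grab_best()->prio);	/* 100 */
	return 0;
}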
static void spu_prio_wait(struct spu_context *ctx, u64 flags) static void spu_prio_wait(struct spu_context *ctx)
{ {
int prio = current->prio;
wait_queue_head_t *wq = &spu_prio->waitq[prio];
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
if (ctx->spu) set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
return; prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
spu_add_wq(wq, &wait, prio);
if (!signal_pending(current)) { if (!signal_pending(current)) {
up_write(&ctx->state_sema); mutex_unlock(&ctx->state_mutex);
pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
current->pid, current->prio);
schedule(); schedule();
down_write(&ctx->state_sema); mutex_lock(&ctx->state_mutex);
} }
__set_current_state(TASK_RUNNING);
spu_del_wq(wq, &wait, prio); remove_wait_queue(&ctx->stop_wq, &wait);
clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
} }
static void spu_prio_wakeup(void) /**
* spu_reschedule - try to find a runnable context for a spu
* @spu: spu available
*
* This function is called whenever a spu becomes idle. It looks for the
* most suitable runnable spu context and schedules it for execution.
*/
static void spu_reschedule(struct spu *spu)
{ {
int best = sched_find_first_bit(spu_prio->bitmap); int best;
if (best < MAX_PRIO) {
wait_queue_head_t *wq = &spu_prio->waitq[best];
wake_up_interruptible_nr(wq, 1);
}
}
static int get_active_spu(struct spu *spu) spu_free(spu);
{
int node = spu->node;
struct spu *tmp;
int rc = 0;
mutex_lock(&spu_prio->active_mutex[node]); spin_lock(&spu_prio->runq_lock);
list_for_each_entry(tmp, &spu_prio->active_list[node], list) { best = sched_find_first_bit(spu_prio->bitmap);
if (tmp == spu) { if (best < MAX_PRIO) {
list_del_init(&spu->list); struct spu_context *ctx = spu_grab_context(best);
rc = 1; if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
break; wake_up(&ctx->stop_wq);
}
} }
mutex_unlock(&spu_prio->active_mutex[node]); spin_unlock(&spu_prio->runq_lock);
return rc;
}
static void put_active_spu(struct spu *spu)
{
int node = spu->node;
mutex_lock(&spu_prio->active_mutex[node]);
list_add_tail(&spu->list, &spu_prio->active_list[node]);
mutex_unlock(&spu_prio->active_mutex[node]);
} }
static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags) static struct spu *spu_get_idle(struct spu_context *ctx)
{ {
struct spu *spu = NULL; struct spu *spu = NULL;
int node = cpu_to_node(raw_smp_processor_id()); int node = cpu_to_node(raw_smp_processor_id());
...@@ -241,87 +315,154 @@ static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags) ...@@ -241,87 +315,154 @@ static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
return spu; return spu;
} }
static inline struct spu *spu_get(struct spu_context *ctx, u64 flags) /**
* find_victim - find a lower priority context to preempt
* @ctx: candidate context for running
*
* Returns the freed physical spu to run the new context on.
*/
static struct spu *find_victim(struct spu_context *ctx)
{ {
/* Future: spu_get_idle() if possible, struct spu_context *victim = NULL;
* otherwise try to preempt an active struct spu *spu;
* context. int node, n;
/*
* Look for a possible preemption candidate on the local node first.
* If there is no candidate look at the other nodes. This isn't
* exactly fair, but so far the whole spu scheduler tries to keep
* a strong node affinity. We might want to fine-tune this in
* the future.
*/ */
return spu_get_idle(ctx, flags); restart:
node = cpu_to_node(raw_smp_processor_id());
for (n = 0; n < MAX_NUMNODES; n++, node++) {
node = (node < MAX_NUMNODES) ? node : 0;
if (!node_allowed(node))
continue;
mutex_lock(&spu_prio->active_mutex[node]);
list_for_each_entry(spu, &spu_prio->active_list[node], list) {
struct spu_context *tmp = spu->ctx;
if (tmp->rt_priority < ctx->rt_priority &&
(!victim || tmp->rt_priority < victim->rt_priority))
victim = spu->ctx;
}
mutex_unlock(&spu_prio->active_mutex[node]);
if (victim) {
/*
* This nests ctx->state_mutex, but we always lock
* higher priority contexts before lower priority
* ones, so this is safe until we introduce
* priority inheritance schemes.
*/
if (!mutex_trylock(&victim->state_mutex)) {
victim = NULL;
goto restart;
}
spu = victim->spu;
if (!spu) {
/*
* This race can happen because we've dropped
* the active list mutex. Not a problem, just
* restart the search.
*/
mutex_unlock(&victim->state_mutex);
victim = NULL;
goto restart;
}
spu_unbind_context(spu, victim);
mutex_unlock(&victim->state_mutex);
return spu;
}
}
return NULL;
} }
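find_victim() scans the active lists for the running context with the lowest rt_priority that is still strictly below the candidate's, so a realtime context can steal an spu without ever preempting an equal or higher priority peer. The selection rule in isolation (plain array instead of per-node active lists; names illustrative):

#include <stdio.h>

struct toy_ctx {
	const char *name;
	unsigned long rt_priority;	/* higher number = more important */
};

static struct toy_ctx *pick_victim(struct toy_ctx *running, int n,
				   unsigned long candidate_rt_prio)
{
	struct toy_ctx *victim = NULL;
	int i;

	for (i = 0; i < n; i++) {
		struct toy_ctx *tmp = &running[i];

		/* least important running context, and only if it is
		 * strictly less important than the candidate */
		if (tmp->rt_priority < candidate_rt_prio &&
		    (!victim || tmp->rt_priority < victim->rt_priority))
			victim = tmp;
	}
	return victim;
}

int main(void)
{
	struct toy_ctx running[] = {
		{ "batch",  0 }, { "rt-lo", 10 }, { "rt-hi", 50 },
	};
	struct toy_ctx *v = pick_victim(running, 3, 40);

	printf("victim: %s\n", v ? v->name : "none");	/* "batch" */
	return 0;
}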
/* The three externally callable interfaces /**
* for the scheduler begin here. * spu_activate - find a free spu for a context and execute it
* @ctx: spu context to schedule
* @flags: flags (currently ignored)
* *
* spu_activate - bind a context to SPU, waiting as needed. * Tries to find a free spu to run @ctx. If no free spu is available
* spu_deactivate - unbind a context from its SPU. * add the context to the runqueue so it gets woken up once an spu
* spu_yield - yield an SPU if others are waiting. * is available.
*/ */
int spu_activate(struct spu_context *ctx, unsigned long flags)
int spu_activate(struct spu_context *ctx, u64 flags)
{ {
struct spu *spu;
int ret = 0;
for (;;) { if (ctx->spu)
if (ctx->spu) return 0;
do {
struct spu *spu;
spu = spu_get_idle(ctx);
/*
* If this is a realtime thread we try to get it running by
* preempting a lower priority thread.
*/
if (!spu && ctx->rt_priority)
spu = find_victim(ctx);
if (spu) {
spu_bind_context(spu, ctx);
return 0; return 0;
spu = spu_get(ctx, flags);
if (spu != NULL) {
if (ctx->spu != NULL) {
spu_free(spu);
spu_prio_wakeup();
break;
}
bind_context(spu, ctx);
put_active_spu(spu);
break;
} }
spu_prio_wait(ctx, flags);
if (signal_pending(current)) { spu_add_to_rq(ctx);
ret = -ERESTARTSYS; if (!(flags & SPU_ACTIVATE_NOWAKE))
spu_prio_wakeup(); spu_prio_wait(ctx);
break; spu_del_from_rq(ctx);
} } while (!signal_pending(current));
}
return ret; return -ERESTARTSYS;
} }
/**
* spu_deactivate - unbind a context from its physical spu
* @ctx: spu context to unbind
*
* Unbind @ctx from the physical spu it is running on and schedule
* the highest priority context to run on the freed physical spu.
*/
void spu_deactivate(struct spu_context *ctx) void spu_deactivate(struct spu_context *ctx)
{ {
struct spu *spu; struct spu *spu = ctx->spu;
int needs_idle;
spu = ctx->spu; if (spu) {
if (!spu) spu_unbind_context(spu, ctx);
return; spu_reschedule(spu);
needs_idle = get_active_spu(spu);
unbind_context(spu, ctx);
if (needs_idle) {
spu_free(spu);
spu_prio_wakeup();
} }
} }
/**
* spu_yield - yield a physical spu if others are waiting
* @ctx: spu context to yield
*
* Check if there is a higher priority context waiting and if yes
* unbind @ctx from the physical spu and schedule the highest
* priority context to run on the freed physical spu instead.
*/
void spu_yield(struct spu_context *ctx) void spu_yield(struct spu_context *ctx)
{ {
struct spu *spu; struct spu *spu;
int need_yield = 0; int need_yield = 0;
if (down_write_trylock(&ctx->state_sema)) { if (mutex_trylock(&ctx->state_mutex)) {
if ((spu = ctx->spu) != NULL) { if ((spu = ctx->spu) != NULL) {
int best = sched_find_first_bit(spu_prio->bitmap); int best = sched_find_first_bit(spu_prio->bitmap);
if (best < MAX_PRIO) { if (best < MAX_PRIO) {
pr_debug("%s: yielding SPU %d NODE %d\n", pr_debug("%s: yielding SPU %d NODE %d\n",
__FUNCTION__, spu->number, spu->node); __FUNCTION__, spu->number, spu->node);
spu_deactivate(ctx); spu_deactivate(ctx);
ctx->state = SPU_STATE_SAVED;
need_yield = 1; need_yield = 1;
} else {
spu->prio = MAX_PRIO;
} }
} }
up_write(&ctx->state_sema); mutex_unlock(&ctx->state_mutex);
} }
if (unlikely(need_yield)) if (unlikely(need_yield))
yield(); yield();
...@@ -331,14 +472,19 @@ int __init spu_sched_init(void) ...@@ -331,14 +472,19 @@ int __init spu_sched_init(void)
{ {
int i; int i;
spu_sched_wq = create_singlethread_workqueue("spusched");
if (!spu_sched_wq)
return 1;
spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL); spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
if (!spu_prio) { if (!spu_prio) {
printk(KERN_WARNING "%s: Unable to allocate priority queue.\n", printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
__FUNCTION__); __FUNCTION__);
destroy_workqueue(spu_sched_wq);
return 1; return 1;
} }
for (i = 0; i < MAX_PRIO; i++) { for (i = 0; i < MAX_PRIO; i++) {
init_waitqueue_head(&spu_prio->waitq[i]); INIT_LIST_HEAD(&spu_prio->runq[i]);
__clear_bit(i, spu_prio->bitmap); __clear_bit(i, spu_prio->bitmap);
} }
__set_bit(MAX_PRIO, spu_prio->bitmap); __set_bit(MAX_PRIO, spu_prio->bitmap);
...@@ -346,6 +492,7 @@ int __init spu_sched_init(void) ...@@ -346,6 +492,7 @@ int __init spu_sched_init(void)
mutex_init(&spu_prio->active_mutex[i]); mutex_init(&spu_prio->active_mutex[i]);
INIT_LIST_HEAD(&spu_prio->active_list[i]); INIT_LIST_HEAD(&spu_prio->active_list[i]);
} }
spin_lock_init(&spu_prio->runq_lock);
return 0; return 0;
} }
...@@ -364,4 +511,5 @@ void __exit spu_sched_exit(void) ...@@ -364,4 +511,5 @@ void __exit spu_sched_exit(void)
mutex_unlock(&spu_prio->active_mutex[node]); mutex_unlock(&spu_prio->active_mutex[node]);
} }
kfree(spu_prio); kfree(spu_prio);
destroy_workqueue(spu_sched_wq);
} }
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#define SPUFS_H #define SPUFS_H
#include <linux/kref.h> #include <linux/kref.h>
#include <linux/rwsem.h> #include <linux/mutex.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/fs.h> #include <linux/fs.h>
...@@ -37,11 +37,13 @@ enum { ...@@ -37,11 +37,13 @@ enum {
}; };
struct spu_context_ops; struct spu_context_ops;
#define SPU_CONTEXT_PREEMPT 0UL
struct spu_gang; struct spu_gang;
/* ctx->sched_flags */
enum {
SPU_SCHED_WAKE = 0,
};
struct spu_context { struct spu_context {
struct spu *spu; /* pointer to a physical SPU */ struct spu *spu; /* pointer to a physical SPU */
struct spu_state csa; /* SPU context save area. */ struct spu_state csa; /* SPU context save area. */
...@@ -56,7 +58,7 @@ struct spu_context { ...@@ -56,7 +58,7 @@ struct spu_context {
u64 object_id; /* user space pointer for oprofile */ u64 object_id; /* user space pointer for oprofile */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
struct rw_semaphore state_sema; struct mutex state_mutex;
struct semaphore run_sema; struct semaphore run_sema;
struct mm_struct *owner; struct mm_struct *owner;
...@@ -77,6 +79,14 @@ struct spu_context { ...@@ -77,6 +79,14 @@ struct spu_context {
struct list_head gang_list; struct list_head gang_list;
struct spu_gang *gang; struct spu_gang *gang;
/* scheduler fields */
struct list_head rq;
struct delayed_work sched_work;
unsigned long sched_flags;
unsigned long rt_priority;
int policy;
int prio;
}; };
struct spu_gang { struct spu_gang {
...@@ -161,6 +171,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx); ...@@ -161,6 +171,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx); void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
/* context management */ /* context management */
static inline void spu_acquire(struct spu_context *ctx)
{
mutex_lock(&ctx->state_mutex);
}
static inline void spu_release(struct spu_context *ctx)
{
mutex_unlock(&ctx->state_mutex);
}
struct spu_context * alloc_spu_context(struct spu_gang *gang); struct spu_context * alloc_spu_context(struct spu_gang *gang);
void destroy_spu_context(struct kref *kref); void destroy_spu_context(struct kref *kref);
struct spu_context * get_spu_context(struct spu_context *ctx); struct spu_context * get_spu_context(struct spu_context *ctx);
...@@ -168,20 +188,18 @@ int put_spu_context(struct spu_context *ctx); ...@@ -168,20 +188,18 @@ int put_spu_context(struct spu_context *ctx);
void spu_unmap_mappings(struct spu_context *ctx); void spu_unmap_mappings(struct spu_context *ctx);
void spu_forget(struct spu_context *ctx); void spu_forget(struct spu_context *ctx);
void spu_acquire(struct spu_context *ctx); int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
void spu_release(struct spu_context *ctx);
int spu_acquire_runnable(struct spu_context *ctx);
void spu_acquire_saved(struct spu_context *ctx); void spu_acquire_saved(struct spu_context *ctx);
int spu_acquire_exclusive(struct spu_context *ctx); int spu_acquire_exclusive(struct spu_context *ctx);
enum {
static inline void spu_release_exclusive(struct spu_context *ctx) SPU_ACTIVATE_NOWAKE = 1,
{ };
up_write(&ctx->state_sema); int spu_activate(struct spu_context *ctx, unsigned long flags);
}
int spu_activate(struct spu_context *ctx, u64 flags);
void spu_deactivate(struct spu_context *ctx); void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx); void spu_yield(struct spu_context *ctx);
void spu_start_tick(struct spu_context *ctx);
void spu_stop_tick(struct spu_context *ctx);
void spu_sched_tick(struct work_struct *work);
int __init spu_sched_init(void); int __init spu_sched_init(void);
void __exit spu_sched_exit(void); void __exit spu_sched_exit(void);
......
...@@ -2811,7 +2811,6 @@ static void dump_spu_fields(struct spu *spu) ...@@ -2811,7 +2811,6 @@ static void dump_spu_fields(struct spu *spu)
DUMP_FIELD(spu, "0x%lx", irqs[2]); DUMP_FIELD(spu, "0x%lx", irqs[2]);
DUMP_FIELD(spu, "0x%x", slb_replace); DUMP_FIELD(spu, "0x%x", slb_replace);
DUMP_FIELD(spu, "%d", pid); DUMP_FIELD(spu, "%d", pid);
DUMP_FIELD(spu, "%d", prio);
DUMP_FIELD(spu, "0x%p", mm); DUMP_FIELD(spu, "0x%p", mm);
DUMP_FIELD(spu, "0x%p", ctx); DUMP_FIELD(spu, "0x%p", ctx);
DUMP_FIELD(spu, "0x%p", rq); DUMP_FIELD(spu, "0x%p", rq);
......
...@@ -53,6 +53,11 @@ ...@@ -53,6 +53,11 @@
#define CBE_PM_CTR_POLARITY 0x01000000 #define CBE_PM_CTR_POLARITY 0x01000000
#define CBE_PM_CTR_COUNT_CYCLES 0x00800000 #define CBE_PM_CTR_COUNT_CYCLES 0x00800000
#define CBE_PM_CTR_ENABLE 0x00400000 #define CBE_PM_CTR_ENABLE 0x00400000
#define PM07_CTR_INPUT_MUX(x) (((x) & 0x3F) << 26)
#define PM07_CTR_INPUT_CONTROL(x) (((x) & 1) << 25)
#define PM07_CTR_POLARITY(x) (((x) & 1) << 24)
#define PM07_CTR_COUNT_CYCLES(x) (((x) & 1) << 23)
#define PM07_CTR_ENABLE(x) (((x) & 1) << 22)
/* Macros for the pm_status register. */ /* Macros for the pm_status register. */
#define CBE_PM_CTR_OVERFLOW_INTR(ctr) (1 << (31 - ((ctr) & 7))) #define CBE_PM_CTR_OVERFLOW_INTR(ctr) (1 << (31 - ((ctr) & 7)))
...@@ -89,8 +94,7 @@ extern void cbe_read_trace_buffer(u32 cpu, u64 *buf); ...@@ -89,8 +94,7 @@ extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask); extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
extern void cbe_disable_pm_interrupts(u32 cpu); extern void cbe_disable_pm_interrupts(u32 cpu);
extern u32 cbe_query_pm_interrupts(u32 cpu); extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu);
extern u32 cbe_clear_pm_interrupts(u32 cpu);
extern void cbe_sync_irq(int node); extern void cbe_sync_irq(int node);
/* Utility functions, macros */ /* Utility functions, macros */
...@@ -103,11 +107,4 @@ extern u32 cbe_get_hw_thread_id(int cpu); ...@@ -103,11 +107,4 @@ extern u32 cbe_get_hw_thread_id(int cpu);
#define CBE_COUNT_PROBLEM_MODE 2 #define CBE_COUNT_PROBLEM_MODE 2
#define CBE_COUNT_ALL_MODES 3 #define CBE_COUNT_ALL_MODES 3
/* Macros for the pm07_control registers. */
#define PM07_CTR_INPUT_MUX(x) (((x) & 0x3F) << 26)
#define PM07_CTR_INPUT_CONTROL(x) (((x) & 1) << 25)
#define PM07_CTR_POLARITY(x) (((x) & 1) << 24)
#define PM07_CTR_COUNT_CYCLES(x) (((x) & 1) << 23)
#define PM07_CTR_ENABLE(x) (((x) & 1) << 22)
#endif /* __ASM_CELL_PMU_H__ */ #endif /* __ASM_CELL_PMU_H__ */
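The PM07_CTR_* field macros only move from the bottom of the header to sit next to the CBE_PM_CTR_* bit definitions; the layout itself is unchanged. Assembling a pm07_control word from them, the way set_pm_event() builds pm_regs.pm07_cntrl[] (sample field values are invented):

#include <stdint.h>
#include <stdio.h>

/* Field macros copied from the pm07_control definitions above. */
#define PM07_CTR_INPUT_MUX(x)		(((x) & 0x3F) << 26)
#define PM07_CTR_INPUT_CONTROL(x)	(((x) & 1) << 25)
#define PM07_CTR_POLARITY(x)		(((x) & 1) << 24)
#define PM07_CTR_COUNT_CYCLES(x)	(((x) & 1) << 23)
#define PM07_CTR_ENABLE(x)		(((x) & 1) << 22)

int main(void)
{
	uint32_t ctrl = 0;

	ctrl |= PM07_CTR_COUNT_CYCLES(1);	/* as in set_pm_event() */
	ctrl |= PM07_CTR_POLARITY(1);		/* as in set_pm_event() */
	ctrl |= PM07_CTR_INPUT_CONTROL(0);	/* as in set_pm_event() */
	ctrl |= PM07_CTR_INPUT_MUX(5);		/* mux selection; value invented */
	ctrl |= PM07_CTR_ENABLE(1);		/* same bit as CBE_PM_CTR_ENABLE */

	printf("pm07_control = 0x%08x\n", (unsigned)ctrl);
	return 0;
}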
...@@ -129,7 +129,6 @@ struct spu { ...@@ -129,7 +129,6 @@ struct spu {
struct spu_runqueue *rq; struct spu_runqueue *rq;
unsigned long long timestamp; unsigned long long timestamp;
pid_t pid; pid_t pid;
int prio;
int class_0_pending; int class_0_pending;
spinlock_t register_lock; spinlock_t register_lock;
......