Commit 4ef11014 authored by Jeremy Kerr

[POWERPC] spufs: fix scheduler starvation by idle contexts

2.6.25 has a regression where we can starve the scheduler by creating
(N_SPES+1) contexts, then running them one at a time.

The final context will never be run, as the other contexts are loaded on
the SPEs, none of which are reported as free (i.e., spu->alloc_state !=
SPU_FREE), so spu_get_idle() doesn't give us a spu to run on. Because
all of the contexts are stopped, none are descheduled by the scheduler
tick, as spusched_tick returns if spu_stopped(ctx).
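
For illustration, a small standalone C model of that pre-fix tick decision
(the struct and function names here are invented for this sketch; only the
early return for a stopped context mirrors the kernel code being removed
below):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a loaded-but-stopped context; not the kernel struct. */
struct toy_ctx {
	bool stopped;		/* loaded on an SPU but not inside spu_run() */
	int time_slice;
};

/*
 * Pre-fix behaviour: a stopped context returns before its time slice is
 * even looked at, so it is never unscheduled and its SPU never becomes
 * SPU_FREE for spu_get_idle() to hand out.
 */
static bool tick_wants_preempt(struct toy_ctx *ctx)
{
	if (ctx->stopped)
		return false;
	return --ctx->time_slice == 0;
}

int main(void)
{
	struct toy_ctx ctx = { .stopped = true, .time_slice = 1 };
	int i;

	for (i = 0; i < 10; i++)
		if (tick_wants_preempt(&ctx))
			return 0;
	puts("stopped context never preempted: the extra context starves");
	return 0;
}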

This change replaces the spu_stopped() check with checking for SCHED_IDLE
in ctx->policy. We set a context's policy to SCHED_IDLE when we're not
in spu_run(). We also favour SCHED_IDLE contexts when looking for contexts
to unbind, but leave their timeslice intact for later resumption.
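
As a rough standalone model of the new behaviour (again with invented
userspace types; the condition mirrors the
"--ctx->time_slice && ctx->policy != SCHED_IDLE" check in the diff below):

#include <stdbool.h>
#include <stdio.h>

#define SCHED_NORMAL	0
#define SCHED_IDLE	5	/* stand-in value for this sketch */

struct toy_ctx {
	int policy;		/* set to SCHED_IDLE on leaving spu_run() */
	int time_slice;
};

/*
 * Post-fix behaviour: an idle context is preempted even with time slice
 * remaining, and is not put back on the run queue; it simply waits until
 * spu_run() makes it runnable again.
 */
static bool tick_wants_preempt(struct toy_ctx *ctx)
{
	if (--ctx->time_slice && ctx->policy != SCHED_IDLE)
		return false;
	return true;
}

int main(void)
{
	struct toy_ctx idle = { .policy = SCHED_IDLE, .time_slice = 5 };
	struct toy_ctx busy = { .policy = SCHED_NORMAL, .time_slice = 5 };

	printf("idle context preempted on first tick: %d\n",
			tick_wants_preempt(&idle));	/* 1: frees its SPU */
	printf("busy context preempted on first tick: %d\n",
			tick_wants_preempt(&busy));	/* 0: keeps its slice */
	return 0;
}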

This patch fixes the following test in the spufs-testsuite:
  tests/20-scheduler/02-yield-starvation
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
parent 101142c3
@@ -234,6 +234,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
 	*npc = ctx->ops->npc_read(ctx);
 
 	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+	ctx->policy = SCHED_IDLE;
 	spu_release(ctx);
 
 	if (signal_pending(current))
@@ -856,21 +856,18 @@ static noinline void spusched_tick(struct spu_context *ctx)
 {
 	struct spu_context *new = NULL;
 	struct spu *spu = NULL;
-	u32 status;
 
 	if (spu_acquire(ctx))
 		BUG();	/* a kernel thread never has signals pending */
 
 	if (ctx->state != SPU_STATE_RUNNABLE)
 		goto out;
-	if (spu_stopped(ctx, &status))
-		goto out;
 	if (ctx->flags & SPU_CREATE_NOSCHED)
 		goto out;
 	if (ctx->policy == SCHED_FIFO)
 		goto out;
 
-	if (--ctx->time_slice)
+	if (--ctx->time_slice && ctx->policy != SCHED_IDLE)
 		goto out;
 
 	spu = ctx->spu;
@@ -880,6 +877,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
 		spu_unschedule(spu, ctx);
+		if (ctx->policy != SCHED_IDLE)
 			spu_add_to_rq(ctx);
 	} else {
 		spu_context_nospu_trace(spusched_tick__newslice, ctx);