Commit c9101bdb authored by Christoph Hellwig, committed by Paul Mackerras

[POWERPC] spufs: make state_mutex interruptible

Make most places that use spu_acquire/spu_acquire_saved interruptible;
this allows getting out of the spufs code when e.g. pressing ctrl+c.
There are a few places where we get called, e.g. from spufs teardown
routines, where we can't simply err out, so these are left with a comment.
For now I've also not touched the poll routines because it's an open
question what libspe would expect in terms of interrupted system calls.
Acked-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 197b1a82
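
For reference, the core of the change is switching from mutex_lock() to
mutex_lock_interruptible(), which returns -EINTR instead of blocking when a
signal is pending. A minimal sketch of the pattern, independent of spufs
(demo_lock and demo_op are hypothetical names, not from this patch):

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(demo_lock);

static int demo_op(void)
{
	int ret;

	/* Fails with -EINTR if a signal (e.g. ctrl+c) is pending,
	 * letting the caller bail out to userspace instead of
	 * sleeping uninterruptibly on the mutex. */
	ret = mutex_lock_interruptible(&demo_lock);
	if (ret)
		return ret;

	/* ... critical section ... */

	mutex_unlock(&demo_lock);
	return 0;
}

Callers must propagate the error, which is why the patch below also tags
spu_acquire/spu_acquire_saved with __must_check.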
@@ -106,7 +106,17 @@ int put_spu_context(struct spu_context *ctx)
 void spu_forget(struct spu_context *ctx)
 {
 	struct mm_struct *mm;
-	spu_acquire_saved(ctx);
+
+	/*
+	 * This is basically an open-coded spu_acquire_saved, except that
+	 * we don't acquire the state mutex interruptible.
+	 */
+	mutex_lock(&ctx->state_mutex);
+	if (ctx->state != SPU_STATE_SAVED) {
+		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+		spu_deactivate(ctx);
+	}
+
 	mm = ctx->owner;
 	ctx->owner = NULL;
 	mmput(mm);
@@ -137,13 +147,20 @@ void spu_unmap_mappings(struct spu_context *ctx)
  * spu_acquire_saved - lock spu contex and make sure it is in saved state
  * @ctx:	spu contex to lock
  */
-void spu_acquire_saved(struct spu_context *ctx)
+int spu_acquire_saved(struct spu_context *ctx)
 {
-	spu_acquire(ctx);
+	int ret;
+
+	ret = spu_acquire(ctx);
+	if (ret)
+		return ret;
+
 	if (ctx->state != SPU_STATE_SAVED) {
 		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
 		spu_deactivate(ctx);
 	}
+
+	return 0;
 }
 
 /**
......
@@ -148,7 +148,9 @@ int spufs_coredump_extra_notes_size(void)
 	fd = 0;
 	while ((ctx = coredump_next_context(&fd)) != NULL) {
-		spu_acquire_saved(ctx);
+		rc = spu_acquire_saved(ctx);
+		if (rc)
+			break;
 		rc = spufs_ctx_note_size(ctx, fd);
 		spu_release_saved(ctx);
 		if (rc < 0)
@@ -224,7 +226,9 @@ int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
 	fd = 0;
 	while ((ctx = coredump_next_context(&fd)) != NULL) {
-		spu_acquire_saved(ctx);
+		rc = spu_acquire_saved(ctx);
+		if (rc)
+			return rc;
 		for (j = 0; spufs_coredump_read[j].name != NULL; j++) {
 			rc = spufs_arch_write_note(ctx, j, file, fd, foffset);
......
@@ -107,7 +107,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	u64 ea, dsisr, access;
 	unsigned long flags;
 	unsigned flt = 0;
-	int ret;
+	int ret, ret2;
 
 	/*
 	 * dar and dsisr get passed from the registers
@@ -146,7 +146,14 @@ int spufs_handle_class1(struct spu_context *ctx)
 	if (ret)
 		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
 
-	spu_acquire(ctx);
+	/*
+	 * If spu_acquire fails due to a pending signal we just want to return
+	 * EINTR to userspace even if that means missing the dma restart or
+	 * updating the page fault statistics.
+	 */
+	ret2 = spu_acquire(ctx);
+	if (ret2)
+		goto out;
 
 	/*
 	 * Clear dsisr under ctxt lock after handling the fault, so that
@@ -177,6 +184,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	} else
 		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
 
+out:
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 	return ret;
 }
This diff is collapsed.
@@ -292,7 +292,7 @@ static int spu_process_callback(struct spu_context *ctx)
 	u32 ls_pointer, npc;
 	void __iomem *ls;
 	long spu_ret;
-	int ret;
+	int ret, ret2;
 
 	/* get syscall block from local store */
 	npc = ctx->ops->npc_read(ctx) & ~3;
@@ -314,9 +314,11 @@ static int spu_process_callback(struct spu_context *ctx)
 		if (spu_ret <= -ERESTARTSYS) {
 			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
 		}
-		spu_acquire(ctx);
+		ret2 = spu_acquire(ctx);
 		if (ret == -ERESTARTSYS)
 			return ret;
+		if (ret2)
+			return -EINTR;
 	}
 
 	/* write result, jump over indirect pointer */
@@ -338,7 +340,9 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	spu_enable_spu(ctx);
 	ctx->event_return = 0;
 
-	spu_acquire(ctx);
+	ret = spu_acquire(ctx);
+	if (ret)
+		goto out_unlock;
 
 	spu_update_sched_info(ctx);
@@ -414,6 +418,7 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 out:
 	*event = ctx->event_return;
+out_unlock:
 	mutex_unlock(&ctx->run_mutex);
 	return ret;
 }
@@ -705,7 +705,9 @@ static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
 static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 {
-	spu_acquire(ctx);
+	/* not a candidate for interruptible because it's called either
+	   from the scheduler thread or from spu_deactivate */
+	mutex_lock(&ctx->state_mutex);
 	__spu_schedule(spu, ctx);
 	spu_release(ctx);
 }
@@ -823,7 +825,9 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 		else {
 			spu_release(ctx);
 			spu_schedule(spu, new);
-			spu_acquire(ctx);
+			/* this one can't easily be made
+			   interruptible */
+			mutex_lock(&ctx->state_mutex);
 		}
 	}
 }
@@ -867,7 +871,8 @@ static noinline void spusched_tick(struct spu_context *ctx)
 	struct spu *spu = NULL;
 	u32 status;
 
-	spu_acquire(ctx);
+	if (spu_acquire(ctx))
+		BUG();	/* a kernel thread never has signals pending */
 
 	if (ctx->state != SPU_STATE_RUNNABLE)
 		goto out;
......
@@ -229,9 +229,9 @@ struct spu *affinity_check(struct spu_context *ctx);
 
 /* context management */
 extern atomic_t nr_spu_contexts;
-static inline void spu_acquire(struct spu_context *ctx)
+static inline int __must_check spu_acquire(struct spu_context *ctx)
 {
-	mutex_lock(&ctx->state_mutex);
+	return mutex_lock_interruptible(&ctx->state_mutex);
 }
 
 static inline void spu_release(struct spu_context *ctx)
@@ -246,7 +246,7 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);
 void spu_forget(struct spu_context *ctx);
-void spu_acquire_saved(struct spu_context *ctx);
+int __must_check spu_acquire_saved(struct spu_context *ctx);
 void spu_release_saved(struct spu_context *ctx);
 int spu_stopped(struct spu_context *ctx, u32 * stat);
@@ -284,7 +284,9 @@ extern char *isolated_loader;
 		}							\
 		spu_release(ctx);					\
 		schedule();						\
-		spu_acquire(ctx);					\
+		__ret = spu_acquire(ctx);				\
+		if (__ret)						\
+			break;						\
 	}								\
 	finish_wait(&(wq), &__wait);					\
 	__ret;								\
......
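
With spu_acquire interruptible, blocking spufs file operations can now fail
with EINTR, which userspace (e.g. libspe, per the open question in the commit
message) has to be prepared to handle. An illustrative retry loop using only
standard POSIX behavior (the function name and arguments are hypothetical,
not libspe API):

#include <errno.h>
#include <unistd.h>

/* Retry a read on a spufs file that may now be interrupted by a
 * signal and fail with EINTR; any other error is passed through. */
static ssize_t read_retry_eintr(int fd, void *buf, size_t len)
{
	ssize_t n;

	do {
		n = read(fd, buf, len);
	} while (n < 0 && errno == EINTR);

	return n;
}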