Commit f5ed0eb6 authored by Jeremy Kerr

powerpc/spufs: Use state_mutex for switch_log locking, and prevent multiple openers

Currently, we use ctx->mapping_lock and ctx->switch_log->lock for the
context switch log. The mapping lock only prevents concurrent open()s,
so we require the switch_log->lock for reads.

Since writes to the switch log buffer occur on context switches, we're
better off synchronising with the state_mutex, which is held during a
switch. Since we're serialised throughout the buffer reads and writes,
we can use the state mutex to protect open and release too, and
can now kfree() the log buffer on release. This allows us to perform
the switch log notify without taking any extra locks.

Because the buffer is only present while the file is open, we can use
it to prevent multiple simultaneous openers.
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
parent e869446b
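
The locking scheme described above reduces to three rules: the log buffer is
created and destroyed only under ctx->state_mutex, its mere presence marks the
file as open (so a second open() fails with -EBUSY), and the writer logs events
while already holding that mutex, so no additional lock is needed. The sketch
below is a stand-alone userspace analogue of that pattern, not spufs code:
every name in it (demo_ctx, demo_log_open() and friends, LOG_BUFSIZE) is made
up for illustration, with a pthread mutex standing in for the state mutex and a
condition variable standing in for the switch-log wait queue.

/* demo.c -- build with: cc -pthread demo.c
 * All names here (demo_ctx, demo_log_*, LOG_BUFSIZE) are illustrative
 * only; a pthread mutex stands in for ctx->state_mutex and a condition
 * variable for the switch_log wait queue. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define LOG_BUFSIZE 32

struct demo_log {
        pthread_cond_t wait;            /* analogue of switch_log->wait */
        unsigned long head, tail;       /* ring-buffer indices */
        unsigned int entries[LOG_BUFSIZE];
};

struct demo_ctx {
        pthread_mutex_t state_mutex;    /* analogue of ctx->state_mutex */
        struct demo_log *log;           /* non-NULL only while "open" */
};

/* open(): the buffer doubles as the "already open" flag, so a second
 * opener sees -EBUSY; the check and the allocation share one mutex. */
static int demo_log_open(struct demo_ctx *ctx)
{
        int rc = 0;

        pthread_mutex_lock(&ctx->state_mutex);
        if (ctx->log)
                rc = -EBUSY;
        else if (!(ctx->log = calloc(1, sizeof(*ctx->log))))
                rc = -ENOMEM;
        else
                pthread_cond_init(&ctx->log->wait, NULL);
        pthread_mutex_unlock(&ctx->state_mutex);
        return rc;
}

/* release(): free under the same mutex, so the writer never sees a
 * half-torn-down log. */
static void demo_log_release(struct demo_ctx *ctx)
{
        pthread_mutex_lock(&ctx->state_mutex);
        free(ctx->log);
        ctx->log = NULL;
        pthread_mutex_unlock(&ctx->state_mutex);
}

/* notify(): caller must hold state_mutex, mirroring the new
 * spu_switch_log_notify() contract; no extra lock is needed because
 * the reader consumes entries under the same mutex. */
static void demo_log_notify(struct demo_ctx *ctx, unsigned int val)
{
        struct demo_log *log = ctx->log;

        if (!log)
                return;                 /* nobody has the log open */
        if ((log->head + 1) % LOG_BUFSIZE == log->tail)
                return;                 /* ring full: drop the event */
        log->entries[log->head] = val;
        log->head = (log->head + 1) % LOG_BUFSIZE;
        pthread_cond_signal(&log->wait);
}

/* read(): pthread_cond_wait drops state_mutex while sleeping, so the
 * emptiness test is re-checked in a loop -- the same reason the new
 * spufs read path re-checks head/tail after spufs_wait() returns. */
static unsigned int demo_log_read(struct demo_ctx *ctx)
{
        struct demo_log *log;
        unsigned int val;

        pthread_mutex_lock(&ctx->state_mutex);
        log = ctx->log;                 /* stable while the "file" is open */
        while (log->head == log->tail)
                pthread_cond_wait(&log->wait, &ctx->state_mutex);
        val = log->entries[log->tail];
        log->tail = (log->tail + 1) % LOG_BUFSIZE;
        pthread_mutex_unlock(&ctx->state_mutex);
        return val;
}

int main(void)
{
        struct demo_ctx ctx = { .state_mutex = PTHREAD_MUTEX_INITIALIZER };

        if (demo_log_open(&ctx))
                return 1;
        printf("second open -> %d (EBUSY is %d)\n", demo_log_open(&ctx), -EBUSY);

        pthread_mutex_lock(&ctx.state_mutex);   /* the "context switch" path */
        demo_log_notify(&ctx, 42);
        pthread_mutex_unlock(&ctx.state_mutex);

        printf("log entry read back: %u\n", demo_log_read(&ctx));
        demo_log_release(&ctx);
        return 0;
}

The same reasoning explains why the kernel patch can kfree() the buffer in
release() and why the read path re-checks head/tail after sleeping: the wait
primitive drops the mutex, so any state observed before sleeping must be
revalidated once it is re-acquired.
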
@@ -2426,38 +2426,48 @@ static inline int spufs_switch_log_avail(struct spu_context *ctx)
 static int spufs_switch_log_open(struct inode *inode, struct file *file)
 {
 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+	int rc;
+
+	rc = spu_acquire(ctx);
+	if (rc)
+		return rc;
 
-	/*
-	 * We (ab-)use the mapping_lock here because it serves the similar
-	 * purpose for synchronizing open/close elsewhere. Maybe it should
-	 * be renamed eventually.
-	 */
-	mutex_lock(&ctx->mapping_lock);
 	if (ctx->switch_log) {
-		spin_lock(&ctx->switch_log->lock);
-		ctx->switch_log->head = 0;
-		ctx->switch_log->tail = 0;
-		spin_unlock(&ctx->switch_log->lock);
-	} else {
-		/*
-		 * We allocate the switch log data structures on first open.
-		 * They will never be free because we assume a context will
-		 * be traced until it goes away.
-		 */
-		ctx->switch_log = kzalloc(sizeof(struct switch_log) +
-			SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
-			GFP_KERNEL);
-		if (!ctx->switch_log)
-			goto out;
-		spin_lock_init(&ctx->switch_log->lock);
-		init_waitqueue_head(&ctx->switch_log->wait);
+		rc = -EBUSY;
+		goto out;
 	}
-	mutex_unlock(&ctx->mapping_lock);
+
+	ctx->switch_log = kzalloc(sizeof(struct switch_log) +
+		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+		GFP_KERNEL);
+
+	if (!ctx->switch_log) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	init_waitqueue_head(&ctx->switch_log->wait);
+	rc = 0;
+
+out:
+	spu_release(ctx);
+	return rc;
+}
+
+static int spufs_switch_log_release(struct inode *inode, struct file *file)
+{
+	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+	int rc;
+
+	rc = spu_acquire(ctx);
+	if (rc)
+		return rc;
+
+	kfree(ctx->switch_log);
+	ctx->switch_log = NULL;
+	spu_release(ctx);
 
 	return 0;
-out:
-	mutex_unlock(&ctx->mapping_lock);
-	return -ENOMEM;
 }
 
 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
@@ -2485,42 +2495,46 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
 	if (!buf || len < 0)
 		return -EINVAL;
 
+	error = spu_acquire(ctx);
+	if (error)
+		return error;
+
 	while (cnt < len) {
 		char tbuf[128];
 		int width;
 
 		if (file->f_flags & O_NONBLOCK) {
-			if (spufs_switch_log_used(ctx) <= 0)
-				return cnt ? cnt : -EAGAIN;
+			if (spufs_switch_log_used(ctx) == 0) {
+				error = -EAGAIN;
+				break;
+			}
 		} else {
-			/* Wait for data in buffer */
-			error = wait_event_interruptible(ctx->switch_log->wait,
+			/* spufs_wait will drop the mutex and re-acquire,
+			 * but since we're in read(), the file cannot be
+			 * _released (and so ctx->switch_log is stable).
+			 */
+			error = spufs_wait(ctx->switch_log->wait,
 					spufs_switch_log_used(ctx) > 0);
+
+			/* On error, spufs_wait returns without the
+			 * state mutex held */
 			if (error)
-				break;
+				return error;
 		}
 
-		spin_lock(&ctx->switch_log->lock);
-		if (ctx->switch_log->head == ctx->switch_log->tail) {
-			/* multiple readers race? */
-			spin_unlock(&ctx->switch_log->lock);
+		/* We may have had entries read from underneath us while we
+		 * dropped the mutex in spufs_wait, so re-check */
+		if (ctx->switch_log->head == ctx->switch_log->tail)
 			continue;
-		}
 
 		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
-		if (width < len) {
+		if (width < len)
 			ctx->switch_log->tail =
 				(ctx->switch_log->tail + 1) %
 				 SWITCH_LOG_BUFSIZE;
-		}
-
-		spin_unlock(&ctx->switch_log->lock);
-
-		/*
-		 * If the record is greater than space available return
-		 * partial buffer (so far)
-		 */
-		if (width >= len)
+		else
+			/* If the record is greater than space available return
			 * partial buffer (so far) */
 			break;
 
 		error = copy_to_user(buf + cnt, tbuf, width);
@@ -2529,6 +2543,8 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
 		cnt += width;
 	}
 
+	spu_release(ctx);
+
 	return cnt == 0 ? error : cnt;
 }
 
@@ -2537,12 +2553,19 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
 	struct inode *inode = file->f_path.dentry->d_inode;
 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
 	unsigned int mask = 0;
+	int rc;
 
 	poll_wait(file, &ctx->switch_log->wait, wait);
 
+	rc = spu_acquire(ctx);
+	if (rc)
+		return rc;
+
 	if (spufs_switch_log_used(ctx) > 0)
 		mask |= POLLIN;
 
+	spu_release(ctx);
+
 	return mask;
 }
 
@@ -2551,15 +2574,20 @@ static const struct file_operations spufs_switch_log_fops = {
 	.open		= spufs_switch_log_open,
 	.read		= spufs_switch_log_read,
 	.poll		= spufs_switch_log_poll,
+	.release	= spufs_switch_log_release,
 };
 
+/**
+ * Log a context switch event to a switch log reader.
+ *
+ * Must be called with ctx->state_mutex held.
+ */
 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
 		u32 type, u32 val)
 {
 	if (!ctx->switch_log)
 		return;
 
-	spin_lock(&ctx->switch_log->lock);
 	if (spufs_switch_log_avail(ctx) > 1) {
 		struct switch_log_entry *p;
@@ -2573,7 +2601,6 @@ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
 		ctx->switch_log->head =
 			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
 	}
-	spin_unlock(&ctx->switch_log->lock);
 
 	wake_up(&ctx->switch_log->wait);
 }
...
@@ -249,6 +249,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
 	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
+	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
 	spu_release(ctx);
 
 	if (signal_pending(current))
@@ -417,8 +418,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	ret = spu_run_fini(ctx, npc, &status);
 	spu_yield(ctx);
 
-	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
-
 	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
 	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
 		ctx->stats.libassist++;
...
@@ -65,7 +65,6 @@ enum {
 };
 
 struct switch_log {
-	spinlock_t		lock;
 	wait_queue_head_t	wait;
 	unsigned long		head;
 	unsigned long		tail;
...