Commit 5503ac56 authored by Eric Van Hensbergen, committed by Eric Van Hensbergen

9p: remove unnecessary prototypes

Clean up files by reordering functions in order to remove the need for
unnecessary function prototypes.

There are no code changes here, just functions being moved around and
prototypes being eliminated.
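
As a minimal sketch of the pattern, using hypothetical helper()/caller()
names rather than the 9p functions themselves:

	/* Hypothetical illustration (helper()/caller() are not 9p names).
	 * Before this kind of reordering, the file needed a forward
	 * declaration because caller() was defined above helper():
	 *
	 *	static void helper(void);
	 *
	 * Defining the callee first removes that need; neither function
	 * body changes.
	 */
	static void helper(void)
	{
	}

	static void caller(void)
	{
		helper();
	}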
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
parent bead27f0
net/9p/client.c
@@ -36,10 +36,6 @@
#include <net/9p/client.h>
#include <net/9p/transport.h>
static struct p9_fid *p9_fid_create(struct p9_client *clnt);
static void p9_fid_destroy(struct p9_fid *fid);
static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu);
/*
* Client Option Parsing (code inspired by NFS code)
* - a little lazy - parse all client options
@@ -124,6 +120,55 @@ static int parse_opts(char *opts, struct p9_client *clnt)
return ret;
}
static struct p9_fid *p9_fid_create(struct p9_client *clnt)
{
int err;
struct p9_fid *fid;
P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
fid->fid = p9_idpool_get(clnt->fidpool);
if (fid->fid < 0) {
err = -ENOSPC;
goto error;
}
memset(&fid->qid, 0, sizeof(struct p9_qid));
fid->mode = -1;
fid->rdir_fpos = 0;
fid->rdir_pos = 0;
fid->rdir_fcall = NULL;
fid->uid = current->fsuid;
fid->clnt = clnt;
fid->aux = NULL;
spin_lock(&clnt->lock);
list_add(&fid->flist, &clnt->fidlist);
spin_unlock(&clnt->lock);
return fid;
error:
kfree(fid);
return ERR_PTR(err);
}
static void p9_fid_destroy(struct p9_fid *fid)
{
struct p9_client *clnt;
P9_DPRINTK(P9_DEBUG_9P, "fid %d\n", fid->fid);
clnt = fid->clnt;
p9_idpool_put(fid->fid, clnt->fidpool);
spin_lock(&clnt->lock);
list_del(&fid->flist);
spin_unlock(&clnt->lock);
kfree(fid->rdir_fcall);
kfree(fid);
}
/**
* p9_client_rpc - sends 9P request and waits until a response is available.
@@ -815,6 +860,46 @@ int p9_client_readn(struct p9_fid *fid, char *data, u64 offset, u32 count)
}
EXPORT_SYMBOL(p9_client_readn);
static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu)
{
int n;
char *p;
struct p9_stat *ret;
n = sizeof(struct p9_stat) + st->name.len + st->uid.len + st->gid.len +
st->muid.len;
if (dotu)
n += st->extension.len;
ret = kmalloc(n, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
memmove(ret, st, sizeof(struct p9_stat));
p = ((char *) ret) + sizeof(struct p9_stat);
memmove(p, st->name.str, st->name.len);
ret->name.str = p;
p += st->name.len;
memmove(p, st->uid.str, st->uid.len);
ret->uid.str = p;
p += st->uid.len;
memmove(p, st->gid.str, st->gid.len);
ret->gid.str = p;
p += st->gid.len;
memmove(p, st->muid.str, st->muid.len);
ret->muid.str = p;
p += st->muid.len;
if (dotu) {
memmove(p, st->extension.str, st->extension.len);
ret->extension.str = p;
p += st->extension.len;
}
return ret;
}
struct p9_stat *p9_client_stat(struct p9_fid *fid)
{
int err;
@@ -986,93 +1071,3 @@ struct p9_stat *p9_client_dirread(struct p9_fid *fid, u64 offset)
return ERR_PTR(err);
}
EXPORT_SYMBOL(p9_client_dirread);
static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu)
{
int n;
char *p;
struct p9_stat *ret;
n = sizeof(struct p9_stat) + st->name.len + st->uid.len + st->gid.len +
st->muid.len;
if (dotu)
n += st->extension.len;
ret = kmalloc(n, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
memmove(ret, st, sizeof(struct p9_stat));
p = ((char *) ret) + sizeof(struct p9_stat);
memmove(p, st->name.str, st->name.len);
ret->name.str = p;
p += st->name.len;
memmove(p, st->uid.str, st->uid.len);
ret->uid.str = p;
p += st->uid.len;
memmove(p, st->gid.str, st->gid.len);
ret->gid.str = p;
p += st->gid.len;
memmove(p, st->muid.str, st->muid.len);
ret->muid.str = p;
p += st->muid.len;
if (dotu) {
memmove(p, st->extension.str, st->extension.len);
ret->extension.str = p;
p += st->extension.len;
}
return ret;
}
static struct p9_fid *p9_fid_create(struct p9_client *clnt)
{
int err;
struct p9_fid *fid;
P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
fid->fid = p9_idpool_get(clnt->fidpool);
if (fid->fid < 0) {
err = -ENOSPC;
goto error;
}
memset(&fid->qid, 0, sizeof(struct p9_qid));
fid->mode = -1;
fid->rdir_fpos = 0;
fid->rdir_pos = 0;
fid->rdir_fcall = NULL;
fid->uid = current->fsuid;
fid->clnt = clnt;
fid->aux = NULL;
spin_lock(&clnt->lock);
list_add(&fid->flist, &clnt->fidlist);
spin_unlock(&clnt->lock);
return fid;
error:
kfree(fid);
return ERR_PTR(err);
}
static void p9_fid_destroy(struct p9_fid *fid)
{
struct p9_client *clnt;
P9_DPRINTK(P9_DEBUG_9P, "fid %d\n", fid->fid);
clnt = fid->clnt;
p9_idpool_put(fid->fid, clnt->fidpool);
spin_lock(&clnt->lock);
list_del(&fid->flist);
spin_unlock(&clnt->lock);
kfree(fid->rdir_fcall);
kfree(fid);
}
net/9p/trans_fd.c
@@ -61,7 +61,6 @@ struct p9_fd_opts {
u16 port;
};
/**
* struct p9_trans_fd - transport state
* @rd: reference to file to read from
@@ -206,30 +205,11 @@ struct p9_mux_rpc {
wait_queue_head_t wqueue;
};
static int p9_poll_proc(void *);
static void p9_read_work(struct work_struct *work);
static void p9_write_work(struct work_struct *work);
static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
poll_table *p);
static int p9_fd_write(struct p9_client *client, void *v, int len);
static int p9_fd_read(struct p9_client *client, void *v, int len);
static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static struct workqueue_struct *p9_mux_wq;
static struct task_struct *p9_poll_task;
static void p9_conn_destroy(struct p9_conn *);
static unsigned int p9_fd_poll(struct p9_client *client,
struct poll_table_struct *pt);
#ifdef P9_NONBLOCK
static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
p9_conn_req_callback cb, void *a);
#endif /* P9_NONBLOCK */
static void p9_conn_cancel(struct p9_conn *m, int err);
static u16 p9_mux_get_tag(struct p9_conn *m)
{
int tag;
@@ -267,222 +247,314 @@ static void p9_mux_poll_stop(struct p9_conn *m)
}
/**
* p9_conn_create - allocate and initialize the per-session mux data
* @client: client instance
* p9_conn_cancel - cancel all pending requests with error
* @m: mux data
* @err: error code
*
* Note: Creates the polling task if this is the first session.
*/
static struct p9_conn *p9_conn_create(struct p9_client *client)
void p9_conn_cancel(struct p9_conn *m, int err)
{
int i, n;
struct p9_conn *m;
P9_DPRINTK(P9_DEBUG_MUX, "client %p msize %d\n", client, client->msize);
m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
struct p9_req *req, *rtmp;
LIST_HEAD(cancel_list);
spin_lock_init(&m->lock);
INIT_LIST_HEAD(&m->mux_list);
m->client = client;
m->tagpool = p9_idpool_create();
if (IS_ERR(m->tagpool)) {
kfree(m);
return ERR_PTR(-ENOMEM);
P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
m->err = err;
spin_lock(&m->lock);
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
}
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
}
spin_unlock(&m->lock);
INIT_LIST_HEAD(&m->req_list);
INIT_LIST_HEAD(&m->unsent_req_list);
INIT_WORK(&m->rq, p9_read_work);
INIT_WORK(&m->wq, p9_write_work);
INIT_LIST_HEAD(&m->poll_pending_link);
init_poll_funcptr(&m->pt, p9_pollwait);
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
list_del(&req->req_list);
if (!req->err)
req->err = err;
n = p9_fd_poll(client, &m->pt);
if (n & POLLIN) {
P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
set_bit(Rpending, &m->wsched);
if (req->cb)
(*req->cb) (req, req->cba);
else
kfree(req->rcall);
}
}
if (n & POLLOUT) {
P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
set_bit(Wpending, &m->wsched);
}
static void process_request(struct p9_conn *m, struct p9_req *req)
{
int ecode;
struct p9_str *ename;
for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
if (IS_ERR(m->poll_wait[i].wait_addr)) {
p9_mux_poll_stop(m);
kfree(m);
/* return the error code */
return (void *)m->poll_wait[i].wait_addr;
if (!req->err && req->rcall->id == P9_RERROR) {
ecode = req->rcall->params.rerror.errno;
ename = &req->rcall->params.rerror.error;
P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
ename->str);
if (m->client->dotu)
req->err = -ecode;
if (!req->err) {
req->err = p9_errstr2errno(ename->str, ename->len);
/* string match failed */
if (!req->err) {
PRINT_FCALL_ERROR("unknown error", req->rcall);
req->err = -ESERVERFAULT;
}
}
} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
P9_DPRINTK(P9_DEBUG_ERROR,
"fcall mismatch: expected %d, got %d\n",
req->tcall->id + 1, req->rcall->id);
if (!req->err)
req->err = -EIO;
}
return m;
}
/**
* p9_mux_destroy - cancels all pending requests and frees mux resources
* @m: mux to destroy
*
*/
static void p9_conn_destroy(struct p9_conn *m)
static unsigned int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
m->mux_list.prev, m->mux_list.next);
int ret, n;
struct p9_trans_fd *ts = NULL;
p9_mux_poll_stop(m);
cancel_work_sync(&m->rq);
cancel_work_sync(&m->wq);
if (client && client->status == Connected)
ts = client->trans;
p9_conn_cancel(m, -ECONNRESET);
if (!ts)
return -EREMOTEIO;
m->client = NULL;
p9_idpool_destroy(m->tagpool);
kfree(m);
}
if (!ts->rd->f_op || !ts->rd->f_op->poll)
return -EIO;
static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
struct p9_poll_wait *pwait =
container_of(wait, struct p9_poll_wait, wait);
struct p9_conn *m = pwait->conn;
unsigned long flags;
DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
if (!ts->wr->f_op || !ts->wr->f_op->poll)
return -EIO;
spin_lock_irqsave(&p9_poll_lock, flags);
if (list_empty(&m->poll_pending_link))
list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
spin_unlock_irqrestore(&p9_poll_lock, flags);
ret = ts->rd->f_op->poll(ts->rd, pt);
if (ret < 0)
return ret;
/* perform the default wake up operation */
return default_wake_function(&dummy_wait, mode, sync, key);
if (ts->rd != ts->wr) {
n = ts->wr->f_op->poll(ts->wr, pt);
if (n < 0)
return n;
ret = (ret & ~POLLOUT) | (n & ~POLLIN);
}
return ret;
}
/**
* p9_pollwait - add poll task to the wait queue
* @filp: file pointer being polled
* @wait_address: wait_q to block on
* @p: poll state
* p9_fd_read- read from a fd
* @client: client instance
* @v: buffer to receive data into
* @len: size of receive buffer
*
* called by files poll operation to add v9fs-poll task to files wait queue
*/
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
static int p9_fd_read(struct p9_client *client, void *v, int len)
{
struct p9_conn *m = container_of(p, struct p9_conn, pt);
struct p9_poll_wait *pwait = NULL;
int i;
int ret;
struct p9_trans_fd *ts = NULL;
for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
if (m->poll_wait[i].wait_addr == NULL) {
pwait = &m->poll_wait[i];
break;
}
}
if (client && client->status != Disconnected)
ts = client->trans;
if (!pwait) {
P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
return;
}
if (!ts)
return -EREMOTEIO;
if (!wait_address) {
P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
pwait->wait_addr = ERR_PTR(-EIO);
return;
}
if (!(ts->rd->f_flags & O_NONBLOCK))
P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
pwait->conn = m;
pwait->wait_addr = wait_address;
init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
add_wait_queue(wait_address, &pwait->wait);
ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
client->status = Disconnected;
return ret;
}
/**
* p9_poll_mux - polls a mux and schedules read or write works if necessary
* @m: connection to poll
* p9_read_work - called when there is some data to be read from a transport
* @work: container of work to be done
*
*/
static void p9_poll_mux(struct p9_conn *m)
static void p9_read_work(struct work_struct *work)
{
int n;
int n, err;
struct p9_conn *m;
struct p9_req *req, *rptr, *rreq;
struct p9_fcall *rcall;
char *rbuf;
m = container_of(work, struct p9_conn, rq);
if (m->err < 0)
return;
n = p9_fd_poll(m->client, NULL);
if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
if (n >= 0)
n = -ECONNRESET;
p9_conn_cancel(m, n);
}
rcall = NULL;
P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);
if (n & POLLIN) {
set_bit(Rpending, &m->wsched);
P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
if (!test_and_set_bit(Rworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
queue_work(p9_mux_wq, &m->rq);
if (!m->rcall) {
m->rcall =
kmalloc(sizeof(struct p9_fcall) + m->client->msize,
GFP_KERNEL);
if (!m->rcall) {
err = -ENOMEM;
goto error;
}
m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
m->rpos = 0;
}
if (n & POLLOUT) {
set_bit(Wpending, &m->wsched);
P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
if ((m->wsize || !list_empty(&m->unsent_req_list))
&& !test_and_set_bit(Wworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
queue_work(p9_mux_wq, &m->wq);
}
clear_bit(Rpending, &m->wsched);
err = p9_fd_read(m->client, m->rbuf + m->rpos,
m->client->msize - m->rpos);
P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
if (err == -EAGAIN) {
clear_bit(Rworksched, &m->wsched);
return;
}
}
/**
* p9_poll_proc - poll worker thread
* @a: thread state and arguments
*
* polls all v9fs transports for new events and queues the appropriate
* work to the work queue
if (err <= 0)
goto error;
m->rpos += err;
while (m->rpos > 4) {
n = le32_to_cpu(*(__le32 *) m->rbuf);
if (n >= m->client->msize) {
P9_DPRINTK(P9_DEBUG_ERROR,
"requested packet size too big: %d\n", n);
err = -EIO;
goto error;
}
if (m->rpos < n)
break;
err =
p9_deserialize_fcall(m->rbuf, n, m->rcall, m->client->dotu);
if (err < 0)
goto error;
#ifdef CONFIG_NET_9P_DEBUG
if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
char buf[150];
p9_printfcall(buf, sizeof(buf), m->rcall,
m->client->dotu);
printk(KERN_NOTICE ">>> %p %s\n", m, buf);
}
#endif
rcall = m->rcall;
rbuf = m->rbuf;
if (m->rpos > n) {
m->rcall = kmalloc(sizeof(struct p9_fcall) +
m->client->msize, GFP_KERNEL);
if (!m->rcall) {
err = -ENOMEM;
goto error;
}
m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
memmove(m->rbuf, rbuf + n, m->rpos - n);
m->rpos -= n;
} else {
m->rcall = NULL;
m->rbuf = NULL;
m->rpos = 0;
}
P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
rcall->id, rcall->tag);
req = NULL;
spin_lock(&m->lock);
list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
if (rreq->tag == rcall->tag) {
req = rreq;
if (req->flush != Flushing)
list_del(&req->req_list);
break;
}
}
spin_unlock(&m->lock);
if (req) {
req->rcall = rcall;
process_request(m, req);
if (req->flush != Flushing) {
if (req->cb)
(*req->cb) (req, req->cba);
else
kfree(req->rcall);
}
} else {
if (err >= 0 && rcall->id != P9_RFLUSH)
P9_DPRINTK(P9_DEBUG_ERROR,
"unexpected response mux %p id %d tag %d\n",
m, rcall->id, rcall->tag);
kfree(rcall);
}
}
if (!list_empty(&m->req_list)) {
if (test_and_clear_bit(Rpending, &m->wsched))
n = POLLIN;
else
n = p9_fd_poll(m->client, NULL);
if (n & POLLIN) {
P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
queue_work(p9_mux_wq, &m->rq);
} else
clear_bit(Rworksched, &m->wsched);
} else
clear_bit(Rworksched, &m->wsched);
return;
error:
p9_conn_cancel(m, err);
clear_bit(Rworksched, &m->wsched);
}
/**
* p9_fd_write - write to a socket
* @client: client instance
* @v: buffer to send data from
* @len: size of send buffer
*
*/
static int p9_poll_proc(void *a)
static int p9_fd_write(struct p9_client *client, void *v, int len)
{
unsigned long flags;
P9_DPRINTK(P9_DEBUG_MUX, "start %p\n", current);
repeat:
spin_lock_irqsave(&p9_poll_lock, flags);
while (!list_empty(&p9_poll_pending_list)) {
struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
struct p9_conn,
poll_pending_link);
list_del_init(&conn->poll_pending_link);
spin_unlock_irqrestore(&p9_poll_lock, flags);
int ret;
mm_segment_t oldfs;
struct p9_trans_fd *ts = NULL;
p9_poll_mux(conn);
if (client && client->status != Disconnected)
ts = client->trans;
spin_lock_irqsave(&p9_poll_lock, flags);
}
spin_unlock_irqrestore(&p9_poll_lock, flags);
if (!ts)
return -EREMOTEIO;
set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&p9_poll_pending_list)) {
P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
schedule();
}
__set_current_state(TASK_RUNNING);
if (!(ts->wr->f_flags & O_NONBLOCK))
P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
if (!kthread_should_stop())
goto repeat;
oldfs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
set_fs(oldfs);
P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
return 0;
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
client->status = Disconnected;
return ret;
}
/**
@@ -566,186 +638,158 @@ static void p9_write_work(struct work_struct *work)
clear_bit(Wworksched, &m->wsched);
}
static void process_request(struct p9_conn *m, struct p9_req *req)
static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
int ecode;
struct p9_str *ename;
if (!req->err && req->rcall->id == P9_RERROR) {
ecode = req->rcall->params.rerror.errno;
ename = &req->rcall->params.rerror.error;
P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
ename->str);
if (m->client->dotu)
req->err = -ecode;
struct p9_poll_wait *pwait =
container_of(wait, struct p9_poll_wait, wait);
struct p9_conn *m = pwait->conn;
unsigned long flags;
DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
if (!req->err) {
req->err = p9_errstr2errno(ename->str, ename->len);
spin_lock_irqsave(&p9_poll_lock, flags);
if (list_empty(&m->poll_pending_link))
list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
spin_unlock_irqrestore(&p9_poll_lock, flags);
/* string match failed */
if (!req->err) {
PRINT_FCALL_ERROR("unknown error", req->rcall);
req->err = -ESERVERFAULT;
}
}
} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
P9_DPRINTK(P9_DEBUG_ERROR,
"fcall mismatch: expected %d, got %d\n",
req->tcall->id + 1, req->rcall->id);
if (!req->err)
req->err = -EIO;
}
/* perform the default wake up operation */
return default_wake_function(&dummy_wait, mode, sync, key);
}
/**
* p9_read_work - called when there is some data to be read from a transport
* @work: container of work to be done
* p9_pollwait - add poll task to the wait queue
* @filp: file pointer being polled
* @wait_address: wait_q to block on
* @p: poll state
*
* called by files poll operation to add v9fs-poll task to files wait queue
*/
static void p9_read_work(struct work_struct *work)
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
int n, err;
struct p9_conn *m;
struct p9_req *req, *rptr, *rreq;
struct p9_fcall *rcall;
char *rbuf;
m = container_of(work, struct p9_conn, rq);
if (m->err < 0)
return;
rcall = NULL;
P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);
struct p9_conn *m = container_of(p, struct p9_conn, pt);
struct p9_poll_wait *pwait = NULL;
int i;
if (!m->rcall) {
m->rcall =
kmalloc(sizeof(struct p9_fcall) + m->client->msize,
GFP_KERNEL);
if (!m->rcall) {
err = -ENOMEM;
goto error;
for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
if (m->poll_wait[i].wait_addr == NULL) {
pwait = &m->poll_wait[i];
break;
}
m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
m->rpos = 0;
}
clear_bit(Rpending, &m->wsched);
err = p9_fd_read(m->client, m->rbuf + m->rpos,
m->client->msize - m->rpos);
P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
if (err == -EAGAIN) {
clear_bit(Rworksched, &m->wsched);
if (!pwait) {
P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
return;
}
if (err <= 0)
goto error;
m->rpos += err;
while (m->rpos > 4) {
n = le32_to_cpu(*(__le32 *) m->rbuf);
if (n >= m->client->msize) {
P9_DPRINTK(P9_DEBUG_ERROR,
"requested packet size too big: %d\n", n);
err = -EIO;
goto error;
}
if (m->rpos < n)
break;
err =
p9_deserialize_fcall(m->rbuf, n, m->rcall, m->client->dotu);
if (err < 0)
goto error;
if (!wait_address) {
P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
pwait->wait_addr = ERR_PTR(-EIO);
return;
}
#ifdef CONFIG_NET_9P_DEBUG
if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
char buf[150];
pwait->conn = m;
pwait->wait_addr = wait_address;
init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
add_wait_queue(wait_address, &pwait->wait);
}
p9_printfcall(buf, sizeof(buf), m->rcall,
m->client->dotu);
printk(KERN_NOTICE ">>> %p %s\n", m, buf);
}
#endif
/**
* p9_conn_create - allocate and initialize the per-session mux data
* @client: client instance
*
* Note: Creates the polling task if this is the first session.
*/
rcall = m->rcall;
rbuf = m->rbuf;
if (m->rpos > n) {
m->rcall = kmalloc(sizeof(struct p9_fcall) +
m->client->msize, GFP_KERNEL);
if (!m->rcall) {
err = -ENOMEM;
goto error;
}
static struct p9_conn *p9_conn_create(struct p9_client *client)
{
int i, n;
struct p9_conn *m;
m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
memmove(m->rbuf, rbuf + n, m->rpos - n);
m->rpos -= n;
} else {
m->rcall = NULL;
m->rbuf = NULL;
m->rpos = 0;
}
P9_DPRINTK(P9_DEBUG_MUX, "client %p msize %d\n", client, client->msize);
m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
rcall->id, rcall->tag);
spin_lock_init(&m->lock);
INIT_LIST_HEAD(&m->mux_list);
m->client = client;
m->tagpool = p9_idpool_create();
if (IS_ERR(m->tagpool)) {
kfree(m);
return ERR_PTR(-ENOMEM);
}
req = NULL;
spin_lock(&m->lock);
list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
if (rreq->tag == rcall->tag) {
req = rreq;
if (req->flush != Flushing)
list_del(&req->req_list);
break;
}
}
spin_unlock(&m->lock);
INIT_LIST_HEAD(&m->req_list);
INIT_LIST_HEAD(&m->unsent_req_list);
INIT_WORK(&m->rq, p9_read_work);
INIT_WORK(&m->wq, p9_write_work);
INIT_LIST_HEAD(&m->poll_pending_link);
init_poll_funcptr(&m->pt, p9_pollwait);
if (req) {
req->rcall = rcall;
process_request(m, req);
n = p9_fd_poll(client, &m->pt);
if (n & POLLIN) {
P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
set_bit(Rpending, &m->wsched);
}
if (req->flush != Flushing) {
if (req->cb)
(*req->cb) (req, req->cba);
else
kfree(req->rcall);
}
} else {
if (err >= 0 && rcall->id != P9_RFLUSH)
P9_DPRINTK(P9_DEBUG_ERROR,
"unexpected response mux %p id %d tag %d\n",
m, rcall->id, rcall->tag);
kfree(rcall);
if (n & POLLOUT) {
P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
set_bit(Wpending, &m->wsched);
}
for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
if (IS_ERR(m->poll_wait[i].wait_addr)) {
p9_mux_poll_stop(m);
kfree(m);
/* return the error code */
return (void *)m->poll_wait[i].wait_addr;
}
}
if (!list_empty(&m->req_list)) {
if (test_and_clear_bit(Rpending, &m->wsched))
n = POLLIN;
else
n = p9_fd_poll(m->client, NULL);
return m;
}
if (n & POLLIN) {
/**
* p9_poll_mux - polls a mux and schedules read or write works if necessary
* @m: connection to poll
*
*/
static void p9_poll_mux(struct p9_conn *m)
{
int n;
if (m->err < 0)
return;
n = p9_fd_poll(m->client, NULL);
if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
if (n >= 0)
n = -ECONNRESET;
p9_conn_cancel(m, n);
}
if (n & POLLIN) {
set_bit(Rpending, &m->wsched);
P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
if (!test_and_set_bit(Rworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
queue_work(p9_mux_wq, &m->rq);
} else
clear_bit(Rworksched, &m->wsched);
} else
clear_bit(Rworksched, &m->wsched);
return;
}
}
error:
p9_conn_cancel(m, err);
clear_bit(Rworksched, &m->wsched);
if (n & POLLOUT) {
set_bit(Wpending, &m->wsched);
P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
if ((m->wsize || !list_empty(&m->unsent_req_list))
&& !test_and_set_bit(Wworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
queue_work(p9_mux_wq, &m->wq);
}
}
}
/**
@@ -1005,69 +1049,6 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
return err;
}
#ifdef P9_NONBLOCK
/**
* p9_conn_rpcnb - sends 9P request without waiting for response.
* @m: mux data
* @tc: request to be sent
* @cb: callback function to be called when response arrives
* @a: value to pass to the callback function
*
*/
int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
p9_conn_req_callback cb, void *a)
{
int err;
struct p9_req *req;
req = p9_send_request(m, tc, cb, a);
if (IS_ERR(req)) {
err = PTR_ERR(req);
P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
return PTR_ERR(req);
}
P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
return 0;
}
#endif /* P9_NONBLOCK */
/**
* p9_conn_cancel - cancel all pending requests with error
* @m: mux data
* @err: error code
*
*/
void p9_conn_cancel(struct p9_conn *m, int err)
{
struct p9_req *req, *rtmp;
LIST_HEAD(cancel_list);
P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
m->err = err;
spin_lock(&m->lock);
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
}
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
}
spin_unlock(&m->lock);
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
list_del(&req->req_list);
if (!req->err)
req->err = err;
if (req->cb)
(*req->cb) (req, req->cba);
else
kfree(req->rcall);
}
}
/**
* parse_options - parse mount options into session structure
* @options: options string passed from mount
@@ -1177,97 +1158,25 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
}
/**
* p9_fd_read- read from a fd
* @client: client instance
* @v: buffer to receive data into
* @len: size of receive buffer
*
*/
static int p9_fd_read(struct p9_client *client, void *v, int len)
{
int ret;
struct p9_trans_fd *ts = NULL;
if (client && client->status != Disconnected)
ts = client->trans;
if (!ts)
return -EREMOTEIO;
if (!(ts->rd->f_flags & O_NONBLOCK))
P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
client->status = Disconnected;
return ret;
}
/**
* p9_fd_write - write to a socket
* @client: client instance
* @v: buffer to send data from
* @len: size of send buffer
* p9_mux_destroy - cancels all pending requests and frees mux resources
* @m: mux to destroy
*
*/
static int p9_fd_write(struct p9_client *client, void *v, int len)
{
int ret;
mm_segment_t oldfs;
struct p9_trans_fd *ts = NULL;
if (client && client->status != Disconnected)
ts = client->trans;
if (!ts)
return -EREMOTEIO;
if (!(ts->wr->f_flags & O_NONBLOCK))
P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
oldfs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
set_fs(oldfs);
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
client->status = Disconnected;
return ret;
}
static unsigned int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
static void p9_conn_destroy(struct p9_conn *m)
{
int ret, n;
struct p9_trans_fd *ts = NULL;
if (client && client->status == Connected)
ts = client->trans;
if (!ts)
return -EREMOTEIO;
if (!ts->rd->f_op || !ts->rd->f_op->poll)
return -EIO;
if (!ts->wr->f_op || !ts->wr->f_op->poll)
return -EIO;
P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
m->mux_list.prev, m->mux_list.next);
ret = ts->rd->f_op->poll(ts->rd, pt);
if (ret < 0)
return ret;
p9_mux_poll_stop(m);
cancel_work_sync(&m->rq);
cancel_work_sync(&m->wq);
if (ts->rd != ts->wr) {
n = ts->wr->f_op->poll(ts->wr, pt);
if (n < 0)
return n;
ret = (ret & ~POLLOUT) | (n & ~POLLIN);
}
p9_conn_cancel(m, -ECONNRESET);
return ret;
m->client = NULL;
p9_idpool_destroy(m->tagpool);
kfree(m);
}
/**
@@ -1492,6 +1401,49 @@ static struct p9_trans_module p9_fd_trans = {
.owner = THIS_MODULE,
};
/**
* p9_poll_proc - poll worker thread
* @a: thread state and arguments
*
* polls all v9fs transports for new events and queues the appropriate
* work to the work queue
*
*/
static int p9_poll_proc(void *a)
{
unsigned long flags;
P9_DPRINTK(P9_DEBUG_MUX, "start %p\n", current);
repeat:
spin_lock_irqsave(&p9_poll_lock, flags);
while (!list_empty(&p9_poll_pending_list)) {
struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
struct p9_conn,
poll_pending_link);
list_del_init(&conn->poll_pending_link);
spin_unlock_irqrestore(&p9_poll_lock, flags);
p9_poll_mux(conn);
spin_lock_irqsave(&p9_poll_lock, flags);
}
spin_unlock_irqrestore(&p9_poll_lock, flags);
set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&p9_poll_pending_list)) {
P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
schedule();
}
__set_current_state(TASK_RUNNING);
if (!kthread_should_stop())
goto repeat;
P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
return 0;
}
int p9_trans_fd_init(void)
{
p9_mux_wq = create_workqueue("v9fs");