Commit fd8aa909 authored by Juergen Gross, committed by Boris Ostrovsky

xen: optimize xenbus driver for multiple concurrent xenstore accesses

Handling of multiple concurrent Xenstore accesses through the xenbus
driver, whether from the kernel or from user land, is rather lame
today: xenbus is capable of having only one access active at any
point in time.

Rewrite xenbus to handle multiple requests concurrently by making use
of the request id of the Xenstore protocol. This requires the
following changes:

- Instead of blocking inside xb_read() when trying to read data from
  the xenstore ring buffer, block only in the main loop of
  xenbus_thread().

- Instead of doing writes to the xenstore ring buffer in the context of
  the caller, just queue the request and do the write in the dedicated
  xenbus thread.

- Instead of just forwarding the request id specified by the caller of
  xenbus to xenstore, use a xenbus-internal unique request id. This
  allows multiple outstanding requests (see the sketch after this
  list).

- Modify the locking scheme to allow multiple requests to be active in
  parallel.

- Instead of waiting for the reply to a user's xenstore request after
  writing the request to the xenstore ring buffer, return directly to
  the caller and do the waiting in the read path.
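
The request-id multiplexing can be illustrated with a minimal,
stand-alone C sketch. All names in it (struct req, submit(),
reply_arrived()) are invented for illustration and none of it is the
driver code itself; it only shows how replies are routed back to their
requests by a unique id, so several requests can be outstanding at
once and replies may arrive in any order:

/*
 * Toy model of the request-id multiplexing (illustration only, not
 * the xenbus driver code; names are made up).
 */
#include <stdio.h>
#include <stdlib.h>

struct req {
        struct req *next;
        unsigned int id;        /* unique, driver-internal request id */
        char reply[64];         /* filled in when the reply arrives */
        int done;
};

static struct req *outstanding; /* requests waiting for a reply */
static unsigned int next_id;

static struct req *submit(void)
{
        struct req *r = calloc(1, sizeof(*r));

        if (!r)
                return NULL;
        r->id = next_id++;      /* internal id, not the caller's */
        r->next = outstanding;  /* the real driver protects this list with a lock */
        outstanding = r;
        return r;
}

static void reply_arrived(unsigned int id, const char *body)
{
        struct req **pp;

        for (pp = &outstanding; *pp; pp = &(*pp)->next) {
                if ((*pp)->id == id) {
                        struct req *r = *pp;

                        *pp = r->next;  /* drop from the outstanding list */
                        snprintf(r->reply, sizeof(r->reply), "%s", body);
                        r->done = 1;    /* the real driver wakes the waiter here */
                        return;
                }
        }
}

int main(void)
{
        struct req *a = submit();
        struct req *b = submit();

        if (!a || !b)
                return 1;
        /* Replies can arrive out of order; the id routes them. */
        reply_arrived(b->id, "reply for b");
        reply_arrived(a->id, "reply for a");
        printf("a(done=%d): %s\nb(done=%d): %s\n",
               a->done, a->reply, b->done, b->reply);
        free(a);
        free(b);
        return 0;
}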

Additionally, signalling was optimized by avoiding waking up the
xenbus thread or sending an event to Xenstore when the addressed
entity is known to be running already.
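
A rough stand-alone model of this optimization (plain C, invented
names, memory barriers and real event channels deliberately omitted):
the producer only notifies the consumer when the consumer had already
caught up before the write, mirroring the "if (prod <= intf->req_cons)"
test visible in the diff below.

/*
 * Toy model of notification avoidance on a producer/consumer ring
 * (illustration only; barriers and event channels are left out).
 */
#include <stdio.h>

struct ring {
        unsigned int prod;      /* producer index */
        unsigned int cons;      /* consumer index, advanced by the other side */
};

static int notifications;

static void notify(void)        /* stand-in for an event-channel kick */
{
        notifications++;
}

static void produce(struct ring *r, unsigned int n)
{
        unsigned int old_prod = r->prod;

        r->prod += n;                   /* publish new data */
        if (old_prod <= r->cons)        /* ring looked empty before, so */
                notify();               /* the other side may be asleep */
}

int main(void)
{
        struct ring r = { 0, 0 };

        produce(&r, 16);        /* ring was empty: notify */
        produce(&r, 16);        /* consumer still busy: no notify */
        r.cons = 32;            /* consumer drains everything */
        produce(&r, 8);         /* empty again: notify */
        printf("notifications sent: %d\n", notifications);     /* prints 2 */
        return 0;
}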

As a result, communication with Xenstore is sped up by a factor of up
to 5: depending on the request type (read or write) and the amount of
data transferred, the gain was at least 20% (small reads) and went up
to a factor of 5 for large writes.

Along the way, some more rough edges of xenbus have been smoothed:

- Handling of memory shortage when reading from the xenstore ring
  buffer in the xenbus driver was not optimal: it busy-looped, issuing
  a warning on each iteration.

- In case xenstore is not running in dom0 but in a stubdom, we used to
  end up with two xenbus threads running, as the initialization of
  xenbus in dom0, which expects a local xenstored, is redone later when
  connecting to the xenstore domain. Up to now this was no problem, as
  locking prevented the two xenbus threads from interfering with each
  other, but it was a waste of kernel resources.

- An out-of-memory situation while writing to or reading from the
  xenstore ring buffer will no longer lead to a possible loss of
  synchronization with xenstore (the resumable-read sketch after this
  list shows the underlying idea).

- The user read and write paths are now interruptible by signals.
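
The key to not losing synchronization is that partial reads keep their
state across calls, as process_msg() in the diff below does with its
static state variable. Here is a toy, stand-alone C model of that
pattern (invented names; a little-endian header layout is assumed for
brevity): a short read simply returns to the main loop, and the next
call resumes where the previous one stopped.

/*
 * Toy model of a resumable message read (illustration only, not the
 * driver code): short reads return to the main loop and the next
 * call resumes where the previous one stopped.
 */
#include <stdio.h>
#include <string.h>

struct hdr {
        unsigned int len;       /* body length */
};

/* Fake ring delivering at most 'max' bytes per call (short reads). */
static const char stream[] = "\x05\x00\x00\x00hello"; /* little-endian len = 5 */
static unsigned int stream_pos;

static unsigned int ring_read(void *dst, unsigned int want, unsigned int max)
{
        unsigned int n = sizeof(stream) - 1 - stream_pos;

        if (n > want)
                n = want;
        if (n > max)
                n = max;
        memcpy(dst, stream + stream_pos, n);
        stream_pos += n;
        return n;
}

/* Returns 1 once a whole message is assembled, 0 to be called again. */
static int process_msg(void)
{
        static struct {
                struct hdr msg;
                char body[32];
                int in_hdr;
                unsigned int read;
        } state = { .in_hdr = 1 };

        if (state.in_hdr) {
                state.read += ring_read((char *)&state.msg + state.read,
                                        sizeof(state.msg) - state.read, 3);
                if (state.read != sizeof(state.msg))
                        return 0;       /* header incomplete: try again later */
                state.in_hdr = 0;
                state.read = 0;
        }
        state.read += ring_read(state.body + state.read,
                                state.msg.len - state.read, 3);
        if (state.read != state.msg.len)
                return 0;               /* body incomplete: try again later */
        state.body[state.msg.len] = '\0';
        printf("got message: %s\n", state.body);
        return 1;
}

int main(void)
{
        while (!process_msg())
                ;       /* the main loop retries as data trickles in */
        return 0;
}
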
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent 5584ea25
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -32,6 +32,10 @@
 #ifndef _XENBUS_XENBUS_H
 #define _XENBUS_XENBUS_H
 
+#include <linux/mutex.h>
+#include <linux/uio.h>
+#include <xen/xenbus.h>
+
 #define XEN_BUS_ID_SIZE 20
 
 struct xen_bus_type {
@@ -52,16 +56,49 @@ enum xenstore_init {
 	XS_LOCAL,
 };
 
+struct xs_watch_event {
+	struct list_head list;
+	unsigned int len;
+	struct xenbus_watch *handle;
+	const char *path;
+	const char *token;
+	char body[];
+};
+
+enum xb_req_state {
+	xb_req_state_queued,
+	xb_req_state_wait_reply,
+	xb_req_state_got_reply,
+	xb_req_state_aborted
+};
+
+struct xb_req_data {
+	struct list_head list;
+	wait_queue_head_t wq;
+	struct xsd_sockmsg msg;
+	enum xsd_sockmsg_type type;
+	char *body;
+	const struct kvec *vec;
+	int num_vecs;
+	int err;
+	enum xb_req_state state;
+	void (*cb)(struct xb_req_data *);
+	void *par;
+};
+
 extern enum xenstore_init xen_store_domain_type;
 extern const struct attribute_group *xenbus_dev_groups[];
+extern struct mutex xs_response_mutex;
+extern struct list_head xs_reply_list;
+extern struct list_head xb_write_list;
+extern wait_queue_head_t xb_waitq;
+extern struct mutex xb_write_mutex;
 
 int xs_init(void);
 int xb_init_comms(void);
 void xb_deinit_comms(void);
-int xb_write(const void *data, unsigned int len);
-int xb_read(void *data, unsigned int len);
-int xb_data_to_read(void);
-int xb_wait_for_data_to_read(void);
+int xs_watch_msg(struct xs_watch_event *event);
+void xs_request_exit(struct xb_req_data *req);
 
 int xenbus_match(struct device *_dev, struct device_driver *_drv);
 int xenbus_dev_probe(struct device *_dev);
@@ -92,6 +129,7 @@ int xenbus_read_otherend_details(struct xenbus_device *xendev,
 
 void xenbus_ring_ops_init(void);
 
-void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
+int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par);
+void xenbus_dev_queue_reply(struct xb_req_data *req);
 
 #endif
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -34,6 +34,7 @@
 #include <linux/wait.h>
 #include <linux/interrupt.h>
+#include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <xen/xenbus.h>
@@ -42,11 +43,22 @@
 #include <xen/page.h>
 #include "xenbus.h"
 
+/* A list of replies. Currently only one will ever be outstanding. */
+LIST_HEAD(xs_reply_list);
+
+/* A list of write requests. */
+LIST_HEAD(xb_write_list);
+DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
+DEFINE_MUTEX(xb_write_mutex);
+
+/* Protect xenbus reader thread against save/restore. */
+DEFINE_MUTEX(xs_response_mutex);
+
 static int xenbus_irq;
+static struct task_struct *xenbus_task;
 
 static DECLARE_WORK(probe_work, xenbus_probe);
 
-static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
-
 static irqreturn_t wake_waiting(int irq, void *unused)
 {
@@ -84,30 +96,31 @@ static const void *get_input_chunk(XENSTORE_RING_IDX cons,
 	return buf + MASK_XENSTORE_IDX(cons);
 }
 
+static int xb_data_to_write(void)
+{
+	struct xenstore_domain_interface *intf = xen_store_interface;
+
+	return (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE &&
+		!list_empty(&xb_write_list);
+}
+
 /**
  * xb_write - low level write
  * @data: buffer to send
  * @len: length of buffer
  *
- * Returns 0 on success, error otherwise.
+ * Returns number of bytes written or -err.
  */
-int xb_write(const void *data, unsigned len)
+static int xb_write(const void *data, unsigned int len)
 {
 	struct xenstore_domain_interface *intf = xen_store_interface;
 	XENSTORE_RING_IDX cons, prod;
-	int rc;
+	unsigned int bytes = 0;
 
 	while (len != 0) {
 		void *dst;
 		unsigned int avail;
 
-		rc = wait_event_interruptible(
-			xb_waitq,
-			(intf->req_prod - intf->req_cons) !=
-			XENSTORE_RING_SIZE);
-		if (rc < 0)
-			return rc;
-
 		/* Read indexes, then verify. */
 		cons = intf->req_cons;
 		prod = intf->req_prod;
@@ -115,6 +128,11 @@ int xb_write(const void *data, unsigned len)
 			intf->req_cons = intf->req_prod = 0;
 			return -EIO;
 		}
+		if (!xb_data_to_write())
+			return bytes;
+
+		/* Must write data /after/ reading the consumer index. */
+		virt_mb();
 
 		dst = get_output_chunk(cons, prod, intf->req, &avail);
 		if (avail == 0)
@@ -122,52 +140,45 @@ int xb_write(const void *data, unsigned len)
 		if (avail > len)
 			avail = len;
 
-		/* Must write data /after/ reading the consumer index. */
-		virt_mb();
-
 		memcpy(dst, data, avail);
 		data += avail;
 		len -= avail;
+		bytes += avail;
 
 		/* Other side must not see new producer until data is there. */
 		virt_wmb();
 		intf->req_prod += avail;
 
 		/* Implies mb(): other side will see the updated producer. */
-		notify_remote_via_evtchn(xen_store_evtchn);
+		if (prod <= intf->req_cons)
+			notify_remote_via_evtchn(xen_store_evtchn);
 	}
 
-	return 0;
+	return bytes;
 }
 
-int xb_data_to_read(void)
+static int xb_data_to_read(void)
 {
 	struct xenstore_domain_interface *intf = xen_store_interface;
 	return (intf->rsp_cons != intf->rsp_prod);
 }
 
-int xb_wait_for_data_to_read(void)
-{
-	return wait_event_interruptible(xb_waitq, xb_data_to_read());
-}
-
-int xb_read(void *data, unsigned len)
+static int xb_read(void *data, unsigned int len)
 {
 	struct xenstore_domain_interface *intf = xen_store_interface;
 	XENSTORE_RING_IDX cons, prod;
-	int rc;
+	unsigned int bytes = 0;
 
 	while (len != 0) {
 		unsigned int avail;
 		const char *src;
 
-		rc = xb_wait_for_data_to_read();
-		if (rc < 0)
-			return rc;
-
 		/* Read indexes, then verify. */
 		cons = intf->rsp_cons;
 		prod = intf->rsp_prod;
+		if (cons == prod)
+			return bytes;
 
 		if (!check_indexes(cons, prod)) {
 			intf->rsp_cons = intf->rsp_prod = 0;
 			return -EIO;
@@ -185,17 +196,243 @@ int xb_read(void *data, unsigned len)
 		memcpy(data, src, avail);
 		data += avail;
 		len -= avail;
+		bytes += avail;
 
 		/* Other side must not see free space until we've copied out */
 		virt_mb();
 		intf->rsp_cons += avail;
 
-		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
-
 		/* Implies mb(): other side will see the updated consumer. */
-		notify_remote_via_evtchn(xen_store_evtchn);
-	}
+		if (intf->rsp_prod - cons >= XENSTORE_RING_SIZE)
+			notify_remote_via_evtchn(xen_store_evtchn);
+	}
+
+	return bytes;
+}
+
+static int process_msg(void)
+{
+	static struct {
+		struct xsd_sockmsg msg;
+		char *body;
+		union {
+			void *alloc;
+			struct xs_watch_event *watch;
+		};
+		bool in_msg;
+		bool in_hdr;
+		unsigned int read;
+	} state;
+	struct xb_req_data *req;
+	int err;
+	unsigned int len;
+
+	if (!state.in_msg) {
+		state.in_msg = true;
+		state.in_hdr = true;
+		state.read = 0;
+		/*
+		 * We must disallow save/restore while reading a message.
+		 * A partial read across s/r leaves us out of sync with
+		 * xenstored.
+		 * xs_response_mutex is locked as long as we are processing one
+		 * message. state.in_msg will be true as long as we are holding
+		 * the lock here.
+		 */
+		mutex_lock(&xs_response_mutex);
+
+		if (!xb_data_to_read()) {
+			/* We raced with save/restore: pending data 'gone'. */
+			mutex_unlock(&xs_response_mutex);
+			state.in_msg = false;
+			return 0;
+		}
+	}
+
+	if (state.in_hdr) {
+		if (state.read != sizeof(state.msg)) {
+			err = xb_read((void *)&state.msg + state.read,
+				      sizeof(state.msg) - state.read);
+			if (err < 0)
+				goto out;
+			state.read += err;
+			if (state.read != sizeof(state.msg))
+				return 0;
+			if (state.msg.len > XENSTORE_PAYLOAD_MAX) {
+				err = -EINVAL;
+				goto out;
+			}
+		}
+
+		len = state.msg.len + 1;
+		if (state.msg.type == XS_WATCH_EVENT)
+			len += sizeof(*state.watch);
+
+		state.alloc = kmalloc(len, GFP_NOIO | __GFP_HIGH);
+		if (!state.alloc)
+			return -ENOMEM;
+
+		if (state.msg.type == XS_WATCH_EVENT)
+			state.body = state.watch->body;
+		else
+			state.body = state.alloc;
+		state.in_hdr = false;
+		state.read = 0;
+	}
+
+	err = xb_read(state.body + state.read, state.msg.len - state.read);
+	if (err < 0)
+		goto out;
+	state.read += err;
+	if (state.read != state.msg.len)
+		return 0;
+
+	state.body[state.msg.len] = '\0';
+
+	if (state.msg.type == XS_WATCH_EVENT) {
+		state.watch->len = state.msg.len;
+		err = xs_watch_msg(state.watch);
+	} else {
+		err = -ENOENT;
+		mutex_lock(&xb_write_mutex);
+		list_for_each_entry(req, &xs_reply_list, list) {
+			if (req->msg.req_id == state.msg.req_id) {
+				if (req->state == xb_req_state_wait_reply) {
+					req->msg.type = state.msg.type;
+					req->msg.len = state.msg.len;
+					req->body = state.body;
+					req->state = xb_req_state_got_reply;
+					list_del(&req->list);
+					req->cb(req);
+				} else {
+					list_del(&req->list);
+					kfree(req);
+				}
+				err = 0;
+				break;
+			}
+		}
+		mutex_unlock(&xb_write_mutex);
+		if (err)
+			goto out;
+	}
+
+	mutex_unlock(&xs_response_mutex);
+
+	state.in_msg = false;
+	state.alloc = NULL;
+	return err;
+
+ out:
+	mutex_unlock(&xs_response_mutex);
+	state.in_msg = false;
+	kfree(state.alloc);
+	state.alloc = NULL;
+	return err;
+}
+
+static int process_writes(void)
+{
+	static struct {
+		struct xb_req_data *req;
+		int idx;
+		unsigned int written;
+	} state;
+	void *base;
+	unsigned int len;
+	int err = 0;
+
+	if (!xb_data_to_write())
+		return 0;
+
+	mutex_lock(&xb_write_mutex);
+
+	if (!state.req) {
+		state.req = list_first_entry(&xb_write_list,
+					     struct xb_req_data, list);
+		state.idx = -1;
+		state.written = 0;
+	}
+
+	if (state.req->state == xb_req_state_aborted)
+		goto out_err;
+
+	while (state.idx < state.req->num_vecs) {
+		if (state.idx < 0) {
+			base = &state.req->msg;
+			len = sizeof(state.req->msg);
+		} else {
+			base = state.req->vec[state.idx].iov_base;
+			len = state.req->vec[state.idx].iov_len;
+		}
+		err = xb_write(base + state.written, len - state.written);
+		if (err < 0)
+			goto out_err;
+		state.written += err;
+		if (state.written != len)
+			goto out;
+		state.idx++;
+		state.written = 0;
+	}
+
+	list_del(&state.req->list);
+	state.req->state = xb_req_state_wait_reply;
+	list_add_tail(&state.req->list, &xs_reply_list);
+	state.req = NULL;
+
+ out:
+	mutex_unlock(&xb_write_mutex);
+
+	return 0;
+
+ out_err:
+	state.req->msg.type = XS_ERROR;
+	state.req->err = err;
+	list_del(&state.req->list);
+	if (state.req->state == xb_req_state_aborted)
+		kfree(state.req);
+	else {
+		state.req->state = xb_req_state_got_reply;
+		wake_up(&state.req->wq);
+	}
+
+	mutex_unlock(&xb_write_mutex);
+
+	state.req = NULL;
+
+	return err;
+}
+
+static int xb_thread_work(void)
+{
+	return xb_data_to_read() || xb_data_to_write();
+}
+
+static int xenbus_thread(void *unused)
+{
+	int err;
+
+	while (!kthread_should_stop()) {
+		if (wait_event_interruptible(xb_waitq, xb_thread_work()))
+			continue;
+
+		err = process_msg();
+		if (err == -ENOMEM)
+			schedule();
+		else if (err)
+			pr_warn_ratelimited("error %d while reading message\n",
+					    err);
+
+		err = process_writes();
+		if (err)
+			pr_warn_ratelimited("error %d while writing message\n",
+					    err);
+	}
+
+	xenbus_task = NULL;
 
 	return 0;
 }
@@ -223,6 +460,7 @@
 		rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
 	} else {
 		int err;
+
 		err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
 						0, "xenbus", &xb_waitq);
 		if (err < 0) {
@@ -231,6 +469,13 @@
 		}
 
 		xenbus_irq = err;
+
+		if (!xenbus_task) {
+			xenbus_task = kthread_run(xenbus_thread, NULL,
+						  "xenbus");
+			if (IS_ERR(xenbus_task))
+				return PTR_ERR(xenbus_task);
+		}
 	}
 
 	return 0;
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -113,6 +113,7 @@ struct xenbus_file_priv {
 	struct list_head read_buffers;
 	wait_queue_head_t read_waitq;
 
+	struct kref kref;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -297,6 +298,107 @@ static void watch_fired(struct xenbus_watch *watch,
 	mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+	struct xenbus_file_priv *u;
+	struct xenbus_transaction_holder *trans, *tmp;
+	struct watch_adapter *watch, *tmp_watch;
+	struct read_buffer *rb, *tmp_rb;
+
+	u = container_of(kref, struct xenbus_file_priv, kref);
+
+	/*
+	 * No need for locking here because there are no other users,
+	 * by definition.
+	 */
+	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
+		xenbus_transaction_end(trans->handle, 1);
+		list_del(&trans->list);
+		kfree(trans);
+	}
+
+	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+		unregister_xenbus_watch(&watch->watch);
+		list_del(&watch->list);
+		free_watch_adapter(watch);
+	}
+
+	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
+		list_del(&rb->list);
+		kfree(rb);
+	}
+	kfree(u);
+}
+
+static struct xenbus_transaction_holder *xenbus_get_transaction(
+	struct xenbus_file_priv *u, uint32_t tx_id)
+{
+	struct xenbus_transaction_holder *trans;
+
+	list_for_each_entry(trans, &u->transactions, list)
+		if (trans->handle.id == tx_id)
+			return trans;
+
+	return NULL;
+}
+
+void xenbus_dev_queue_reply(struct xb_req_data *req)
+{
+	struct xenbus_file_priv *u = req->par;
+	struct xenbus_transaction_holder *trans = NULL;
+	int rc;
+	LIST_HEAD(staging_q);
+
+	xs_request_exit(req);
+
+	mutex_lock(&u->msgbuffer_mutex);
+
+	if (req->type == XS_TRANSACTION_START) {
+		trans = xenbus_get_transaction(u, 0);
+		if (WARN_ON(!trans))
+			goto out;
+		if (req->msg.type == XS_ERROR) {
+			list_del(&trans->list);
+			kfree(trans);
+		} else {
+			rc = kstrtou32(req->body, 10, &trans->handle.id);
+			if (WARN_ON(rc))
+				goto out;
+		}
+	} else if (req->msg.type == XS_TRANSACTION_END) {
+		trans = xenbus_get_transaction(u, req->msg.tx_id);
+		if (WARN_ON(!trans))
+			goto out;
+		list_del(&trans->list);
+		kfree(trans);
+	}
+
+	mutex_unlock(&u->msgbuffer_mutex);
+
+	mutex_lock(&u->reply_mutex);
+
+	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
+	if (!rc)
+		rc = queue_reply(&staging_q, req->body, req->msg.len);
+	if (!rc) {
+		list_splice_tail(&staging_q, &u->read_buffers);
+		wake_up(&u->read_waitq);
+	} else {
+		queue_cleanup(&staging_q);
+	}
+
+	mutex_unlock(&u->reply_mutex);
+
+	kfree(req->body);
+	kfree(req);
+
+	kref_put(&u->kref, xenbus_file_free);
+
+	return;
+
+ out:
+	mutex_unlock(&u->msgbuffer_mutex);
+}
+
 static int xenbus_command_reply(struct xenbus_file_priv *u,
 				unsigned int msg_type, const char *reply)
 {
@@ -317,6 +419,9 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
 	wake_up(&u->read_waitq);
 	mutex_unlock(&u->reply_mutex);
 
+	if (!rc)
+		kref_put(&u->kref, xenbus_file_free);
+
 	return rc;
 }
 
@@ -324,57 +429,22 @@ static int xenbus_write_transaction(unsigned msg_type,
 				    struct xenbus_file_priv *u)
 {
 	int rc;
-	void *reply;
 	struct xenbus_transaction_holder *trans = NULL;
-	LIST_HEAD(staging_q);
 
 	if (msg_type == XS_TRANSACTION_START) {
-		trans = kmalloc(sizeof(*trans), GFP_KERNEL);
+		trans = kzalloc(sizeof(*trans), GFP_KERNEL);
 		if (!trans) {
 			rc = -ENOMEM;
 			goto out;
 		}
-	} else if (u->u.msg.tx_id != 0) {
-		list_for_each_entry(trans, &u->transactions, list)
-			if (trans->handle.id == u->u.msg.tx_id)
-				break;
-		if (&trans->list == &u->transactions)
-			return xenbus_command_reply(u, XS_ERROR, "ENOENT");
-	}
-
-	reply = xenbus_dev_request_and_reply(&u->u.msg);
-	if (IS_ERR(reply)) {
-		if (msg_type == XS_TRANSACTION_START)
-			kfree(trans);
-		rc = PTR_ERR(reply);
-		goto out;
-	}
+		list_add(&trans->list, &u->transactions);
+	} else if (u->u.msg.tx_id != 0 &&
+		   !xenbus_get_transaction(u, u->u.msg.tx_id))
+		return xenbus_command_reply(u, XS_ERROR, "ENOENT");
 
-	if (msg_type == XS_TRANSACTION_START) {
-		if (u->u.msg.type == XS_ERROR)
-			kfree(trans);
-		else {
-			trans->handle.id = simple_strtoul(reply, NULL, 0);
-			list_add(&trans->list, &u->transactions);
-		}
-	} else if (u->u.msg.type == XS_TRANSACTION_END) {
-		list_del(&trans->list);
+	rc = xenbus_dev_request_and_reply(&u->u.msg, u);
+	if (rc)
 		kfree(trans);
-	}
-
-	mutex_lock(&u->reply_mutex);
-	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
-	if (!rc)
-		rc = queue_reply(&staging_q, reply, u->u.msg.len);
-	if (!rc) {
-		list_splice_tail(&staging_q, &u->read_buffers);
-		wake_up(&u->read_waitq);
-	} else {
-		queue_cleanup(&staging_q);
-	}
-	mutex_unlock(&u->reply_mutex);
-
-	kfree(reply);
 
 out:
 	return rc;
@@ -506,6 +576,8 @@ static ssize_t xenbus_file_write(struct file *filp,
 	 * OK, now we have a complete message. Do something with it.
 	 */
 
+	kref_get(&u->kref);
+
 	msg_type = u->u.msg.type;
 
 	switch (msg_type) {
@@ -520,8 +592,10 @@ static ssize_t xenbus_file_write(struct file *filp,
 		ret = xenbus_write_transaction(msg_type, u);
 		break;
 	}
-	if (ret != 0)
+	if (ret != 0) {
 		rc = ret;
+		kref_put(&u->kref, xenbus_file_free);
+	}
 
 	/* Buffered message consumed */
 	u->len = 0;
@@ -546,6 +620,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 	if (u == NULL)
 		return -ENOMEM;
 
+	kref_init(&u->kref);
+
 	INIT_LIST_HEAD(&u->transactions);
 	INIT_LIST_HEAD(&u->watches);
 	INIT_LIST_HEAD(&u->read_buffers);
@@ -562,32 +638,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 static int xenbus_file_release(struct inode *inode, struct file *filp)
 {
 	struct xenbus_file_priv *u = filp->private_data;
-	struct xenbus_transaction_holder *trans, *tmp;
-	struct watch_adapter *watch, *tmp_watch;
-	struct read_buffer *rb, *tmp_rb;
 
-	/*
-	 * No need for locking here because there are no other users,
-	 * by definition.
-	 */
-	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
-		xenbus_transaction_end(trans->handle, 1);
-		list_del(&trans->list);
-		kfree(trans);
-	}
-
-	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
-		unregister_xenbus_watch(&watch->watch);
-		list_del(&watch->list);
-		free_watch_adapter(watch);
-	}
-
-	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
-		list_del(&rb->list);
-		kfree(rb);
-	}
-	kfree(u);
+	kref_put(&u->kref, xenbus_file_free);
 
 	return 0;
 }