Commit e19c0d23 authored by Jason Gunthorpe

RDMA/rdma_cm: Remove process_req and timer sorting

Now that the work queue is used directly to launch and track the work,
there is no need for a second processing function that handles 'all list
entries'. Just schedule all entries onto the main work queue directly.

We can also drop all of the useless list sorting now, as the workqueue
sorts by expiration time automatically.

This change requires switching the lock to a spinlock, as netdev
notifiers are called in an atomic context. This is now easy since the
lock does not need to be held across the lookup; that path is already
single threaded due to the work queue.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 60cc43fc
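
For orientation before the diff (the functions below live in
drivers/infiniband/core/addr.c): the pattern being adopted is one delayed
work item per request. set_timeout() now takes the request itself and calls
mod_delayed_work(), which (re)arms that request's own timer; the workqueue
core orders expirations, so req_list no longer needs to be kept sorted. A
minimal sketch of the resulting shape, assembled from the fragments in the
hunks below; this is illustrative kernel-style C, not the committed source,
and it elides fields and error handling:

	#include <linux/workqueue.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/jiffies.h>

	/* One delayed work item per request; the workqueue core tracks
	 * expiration, so req_list can stay a plain unsorted list.
	 */
	struct addr_req {
		struct list_head list;
		struct delayed_work work;	/* per-request timer */
		unsigned long timeout;		/* absolute deadline, in jiffies */
		int status;
	};

	static DEFINE_SPINLOCK(lock);	/* usable from atomic notifier context */
	static LIST_HEAD(req_list);
	static struct workqueue_struct *addr_wq;

	/* (Re)arm a single request; mod_delayed_work() replaces any pending
	 * timer, so callers never touch other entries on req_list.
	 */
	static void set_timeout(struct addr_req *req, unsigned long time)
	{
		unsigned long delay;

		delay = time - jiffies;
		if ((long)delay < 0)
			delay = 0;

		mod_delayed_work(addr_wq, &req->work, delay);
	}

	static void queue_req(struct addr_req *req)
	{
		spin_lock_bh(&lock);
		list_add_tail(&req->list, &req_list);	/* plain append, no sorting */
		set_timeout(req, req->timeout);
		spin_unlock_bh(&lock);
	}

Each request's work function (process_one_req() in the diff) runs on addr_wq
and either requeues itself with set_timeout() or unlinks the request from
req_list and frees it.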
@@ -68,11 +68,8 @@ struct addr_req {
 
 static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
 
-static void process_req(struct work_struct *work);
-
-static DEFINE_MUTEX(lock);
+static DEFINE_SPINLOCK(lock);
 static LIST_HEAD(req_list);
-static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
@@ -112,7 +109,7 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
 		memcpy(&gid, nla_data(curr), nla_len(curr));
 	}
 
-	mutex_lock(&lock);
+	spin_lock_bh(&lock);
 	list_for_each_entry(req, &req_list, list) {
 		if (nlh->nlmsg_seq != req->seq)
 			continue;
@@ -122,7 +119,7 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
 		found = 1;
 		break;
 	}
-	mutex_unlock(&lock);
+	spin_unlock_bh(&lock);
 
 	if (!found)
 		pr_info("Couldn't find request waiting for DGID: %pI6\n",
@@ -302,7 +299,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 }
 EXPORT_SYMBOL(rdma_translate_ip);
 
-static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
+static void set_timeout(struct addr_req *req, unsigned long time)
 {
 	unsigned long delay;
 
@@ -310,23 +307,15 @@ static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
 	if ((long)delay < 0)
 		delay = 0;
 
-	mod_delayed_work(addr_wq, delayed_work, delay);
+	mod_delayed_work(addr_wq, &req->work, delay);
 }
 
 static void queue_req(struct addr_req *req)
 {
-	struct addr_req *temp_req;
-
-	mutex_lock(&lock);
-	list_for_each_entry_reverse(temp_req, &req_list, list) {
-		if (time_after_eq(req->timeout, temp_req->timeout))
-			break;
-	}
-
-	list_add(&req->list, &temp_req->list);
-	set_timeout(&req->work, req->timeout);
-	mutex_unlock(&lock);
+	spin_lock_bh(&lock);
+	list_add_tail(&req->list, &req_list);
+	set_timeout(req, req->timeout);
+	spin_unlock_bh(&lock);
 }
 
 static int ib_nl_fetch_ha(const struct dst_entry *dst,
@@ -584,7 +573,6 @@ static void process_one_req(struct work_struct *_work)
 	struct addr_req *req;
 	struct sockaddr *src_in, *dst_in;
 
-	mutex_lock(&lock);
 	req = container_of(_work, struct addr_req, work.work);
 
 	if (req->status == -ENODATA) {
@@ -596,13 +584,15 @@ static void process_one_req(struct work_struct *_work)
 			req->status = -ETIMEDOUT;
 		} else if (req->status == -ENODATA) {
 			/* requeue the work for retrying again */
-			set_timeout(&req->work, req->timeout);
-			mutex_unlock(&lock);
+			spin_lock_bh(&lock);
+			set_timeout(req, req->timeout);
+			spin_unlock_bh(&lock);
 			return;
 		}
 	}
+	spin_lock_bh(&lock);
 	list_del(&req->list);
-	mutex_unlock(&lock);
+	spin_unlock_bh(&lock);
 
 	/*
 	 * Although the work will normally have been canceled by the
@@ -619,47 +609,6 @@ static void process_one_req(struct work_struct *_work)
 	kfree(req);
 }
 
-static void process_req(struct work_struct *work)
-{
-	struct addr_req *req, *temp_req;
-	struct sockaddr *src_in, *dst_in;
-	struct list_head done_list;
-
-	INIT_LIST_HEAD(&done_list);
-
-	mutex_lock(&lock);
-	list_for_each_entry_safe(req, temp_req, &req_list, list) {
-		if (req->status == -ENODATA) {
-			src_in = (struct sockaddr *) &req->src_addr;
-			dst_in = (struct sockaddr *) &req->dst_addr;
-			req->status = addr_resolve(src_in, dst_in, req->addr,
-						   true, req->seq);
-			if (req->status && time_after_eq(jiffies, req->timeout))
-				req->status = -ETIMEDOUT;
-			else if (req->status == -ENODATA) {
-				set_timeout(&req->work, req->timeout);
-				continue;
-			}
-		}
-		list_move_tail(&req->list, &done_list);
-	}
-
-	mutex_unlock(&lock);
-
-	list_for_each_entry_safe(req, temp_req, &done_list, list) {
-		list_del(&req->list);
-		/* It is safe to cancel other work items from this work item
-		 * because at a time there can be only one work item running
-		 * with this single threaded work queue.
-		 */
-		cancel_delayed_work(&req->work);
-		req->callback(req->status, (struct sockaddr *) &req->src_addr,
-			      req->addr, req->context);
-		put_client(req->client);
-		kfree(req);
-	}
-}
-
 int rdma_resolve_ip(struct rdma_addr_client *client,
 		    struct sockaddr *src_addr, struct sockaddr *dst_addr,
 		    struct rdma_dev_addr *addr, int timeout_ms,
@@ -743,17 +692,16 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 {
 	struct addr_req *req, *temp_req;
 
-	mutex_lock(&lock);
+	spin_lock_bh(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
 		if (req->addr == addr) {
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
-			list_move(&req->list, &req_list);
-			set_timeout(&req->work, req->timeout);
+			set_timeout(req, req->timeout);
 			break;
 		}
 	}
-	mutex_unlock(&lock);
+	spin_unlock_bh(&lock);
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
@@ -810,11 +758,17 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
 static int netevent_callback(struct notifier_block *self, unsigned long event,
 	void *ctx)
 {
+	struct addr_req *req;
+
 	if (event == NETEVENT_NEIGH_UPDATE) {
 		struct neighbour *neigh = ctx;
 
-		if (neigh->nud_state & NUD_VALID)
-			set_timeout(&work, jiffies);
+		if (neigh->nud_state & NUD_VALID) {
+			spin_lock_bh(&lock);
+			list_for_each_entry(req, &req_list, list)
+				set_timeout(req, jiffies);
+			spin_unlock_bh(&lock);
+		}
 	}
 	return 0;
 }