Commit 0b1e9738 authored by sjur.brandeland@stericsson.com, committed by David S. Miller

caif: Use rcu_read_lock in CAIF mux layer.

Replace spin_lock with rcu_read_lock when accessing the lists of layers
and the caches. While packets are in flight, rcu_read_lock should not be
held; instead, reference counters are used in combination with RCU.
Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1b1cb1f7
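
For orientation, the lifetime model the commit message describes can be sketched as follows: layers sit on an RCU-protected list, a reference count pins a layer while a packet is in flight, and the final free is deferred past a grace period. This is a simplified, hypothetical sketch, not the real CAIF structures; struct example_layer, its fields and example_layer_release() are illustrative names only.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical stand-in for a CAIF layer; all names are illustrative only. */
struct example_layer {
	struct list_head node;	/* linked on an RCU-protected list */
	u16 id;
	struct kref ref;	/* pins the layer while a packet is in flight */
	int (*receive)(struct example_layer *l, void *pkt);
	struct rcu_head rcu;
};

static void example_layer_release(struct kref *ref)
{
	struct example_layer *l = container_of(ref, struct example_layer, ref);

	/* Defer the free until concurrent RCU readers are done with the object. */
	kfree_rcu(l, rcu);
}
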
@@ -12,5 +12,7 @@ struct cffrml;
 struct cflayer *cffrml_create(u16 phyid, bool DoFCS);
 void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up);
 void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn);
+void cffrml_put(struct cflayer *layr);
+void cffrml_hold(struct cflayer *layr);
 
 #endif /* CFFRML_H_ */
@@ -145,3 +145,11 @@ static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 	if (layr->up->ctrlcmd)
 		layr->up->ctrlcmd(layr->up, ctrl, layr->id);
 }
+
+void cffrml_put(struct cflayer *layr)
+{
+}
+
+void cffrml_hold(struct cflayer *layr)
+{
+}
@@ -9,6 +9,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/rculist.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfmuxl.h>
 #include <net/caif/cfsrvl.h>
@@ -64,31 +65,31 @@ struct cflayer *cfmuxl_create(void)
 int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
 {
 	struct cfmuxl *muxl = container_obj(layr);
-	spin_lock(&muxl->receive_lock);
-	cfsrvl_get(up);
-	list_add(&up->node, &muxl->srvl_list);
-	spin_unlock(&muxl->receive_lock);
+
+	spin_lock_bh(&muxl->receive_lock);
+	list_add_rcu(&up->node, &muxl->srvl_list);
+	spin_unlock_bh(&muxl->receive_lock);
 	return 0;
 }
 
 int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
 {
 	struct cfmuxl *muxl = (struct cfmuxl *) layr;
-	spin_lock(&muxl->transmit_lock);
-	list_add(&dn->node, &muxl->frml_list);
-	spin_unlock(&muxl->transmit_lock);
+
+	spin_lock_bh(&muxl->transmit_lock);
+	list_add_rcu(&dn->node, &muxl->frml_list);
+	spin_unlock_bh(&muxl->transmit_lock);
+
 	return 0;
 }
 
 static struct cflayer *get_from_id(struct list_head *list, u16 id)
 {
-	struct list_head *node;
-	struct cflayer *layer;
-	list_for_each(node, list) {
-		layer = list_entry(node, struct cflayer, node);
-		if (layer->id == id)
-			return layer;
+	struct cflayer *lyr;
+	list_for_each_entry_rcu(lyr, list, node) {
+		if (lyr->id == id)
+			return lyr;
 	}
+
 	return NULL;
 }
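
The hunk above moves the writer side over to the _rcu list helpers under a _bh spinlock, so readers can walk srvl_list and frml_list without taking the lock. A minimal sketch of that writer-side pattern, reusing the hypothetical example_layer type from the earlier sketch (example_lock, example_list, example_add() and example_remove() are illustrative names):

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

static void example_add(struct example_layer *l)
{
	/* Publish the new entry; lockless readers may see it immediately. */
	spin_lock_bh(&example_lock);
	list_add_rcu(&l->node, &example_list);
	spin_unlock_bh(&example_lock);
}

static struct example_layer *example_remove(u16 id)
{
	struct example_layer *l, *found = NULL;

	spin_lock_bh(&example_lock);
	list_for_each_entry(l, &example_list, node) {
		if (l->id == id) {
			list_del_rcu(&l->node);
			found = l;
			break;
		}
	}
	spin_unlock_bh(&example_lock);

	/* Concurrent readers may still hold 'found' until a grace period elapses. */
	return found;
}
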
@@ -96,41 +97,45 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
 {
 	struct cfmuxl *muxl = container_obj(layr);
 	struct cflayer *dn;
-	spin_lock(&muxl->transmit_lock);
-	memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
+	int idx = phyid % DN_CACHE_SIZE;
+
+	spin_lock_bh(&muxl->transmit_lock);
+	rcu_assign_pointer(muxl->dn_cache[idx], NULL);
 	dn = get_from_id(&muxl->frml_list, phyid);
-	if (dn == NULL) {
-		spin_unlock(&muxl->transmit_lock);
-		return NULL;
-	}
-	list_del(&dn->node);
+	if (dn == NULL)
+		goto out;
+
+	list_del_rcu(&dn->node);
 	caif_assert(dn != NULL);
-	spin_unlock(&muxl->transmit_lock);
+out:
+	spin_unlock_bh(&muxl->transmit_lock);
 	return dn;
 }
 
-/* Invariant: lock is taken */
 static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
 {
 	struct cflayer *up;
 	int idx = id % UP_CACHE_SIZE;
-	up = muxl->up_cache[idx];
+
+	up = rcu_dereference(muxl->up_cache[idx]);
 	if (up == NULL || up->id != id) {
+		spin_lock_bh(&muxl->receive_lock);
 		up = get_from_id(&muxl->srvl_list, id);
-		muxl->up_cache[idx] = up;
+		rcu_assign_pointer(muxl->up_cache[idx], up);
+		spin_unlock_bh(&muxl->receive_lock);
 	}
 	return up;
 }
 
-/* Invariant: lock is taken */
 static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
 {
 	struct cflayer *dn;
 	int idx = dev_info->id % DN_CACHE_SIZE;
-	dn = muxl->dn_cache[idx];
+
+	dn = rcu_dereference(muxl->dn_cache[idx]);
 	if (dn == NULL || dn->id != dev_info->id) {
+		spin_lock_bh(&muxl->transmit_lock);
 		dn = get_from_id(&muxl->frml_list, dev_info->id);
-		muxl->dn_cache[idx] = dn;
+		rcu_assign_pointer(muxl->dn_cache[idx], dn);
+		spin_unlock_bh(&muxl->transmit_lock);
 	}
 	return dn;
 }
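
get_up() and get_dn() above keep small per-id pointer caches in front of the lists: the fast path reads the cache slot with rcu_dereference(), and on a miss the slot is re-resolved from the list under the lock and republished with rcu_assign_pointer(). A sketch of that pattern using the hypothetical names from the previous sketches (EXAMPLE_CACHE_SIZE and example_lookup() are illustrative; the caller is assumed to hold rcu_read_lock()):

#define EXAMPLE_CACHE_SIZE 8

static struct example_layer __rcu *example_cache[EXAMPLE_CACHE_SIZE];

static struct example_layer *example_find(u16 id)
{
	struct example_layer *l;

	list_for_each_entry_rcu(l, &example_list, node)
		if (l->id == id)
			return l;
	return NULL;
}

/* Caller must hold rcu_read_lock(). */
static struct example_layer *example_lookup(u16 id)
{
	struct example_layer *l;
	int idx = id % EXAMPLE_CACHE_SIZE;

	l = rcu_dereference(example_cache[idx]);
	if (l == NULL || l->id != id) {
		/* Cache miss: re-resolve from the list and republish the slot. */
		spin_lock_bh(&example_lock);
		l = example_find(id);
		rcu_assign_pointer(example_cache[idx], l);
		spin_unlock_bh(&example_lock);
	}
	return l;
}
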
@@ -139,15 +144,17 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
 {
 	struct cflayer *up;
 	struct cfmuxl *muxl = container_obj(layr);
-	spin_lock(&muxl->receive_lock);
-	up = get_up(muxl, id);
+	int idx = id % UP_CACHE_SIZE;
+
+	spin_lock_bh(&muxl->receive_lock);
+	up = get_from_id(&muxl->srvl_list, id);
 	if (up == NULL)
 		goto out;
-	memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
-	list_del(&up->node);
-	cfsrvl_put(up);
+
+	rcu_assign_pointer(muxl->up_cache[idx], NULL);
+	list_del_rcu(&up->node);
 out:
-	spin_unlock(&muxl->receive_lock);
+	spin_unlock_bh(&muxl->receive_lock);
 	return up;
 }
@@ -162,22 +169,28 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
 		cfpkt_destroy(pkt);
 		return -EPROTO;
 	}
-	spin_lock(&muxl->receive_lock);
+
+	rcu_read_lock();
 	up = get_up(muxl, id);
-	spin_unlock(&muxl->receive_lock);
+
 	if (up == NULL) {
-		pr_info("Received data on unknown link ID = %d (0x%x) up == NULL",
-			id, id);
+		pr_debug("Received data on unknown link ID = %d (0x%x)"
+			" up == NULL", id, id);
 		cfpkt_destroy(pkt);
 		/*
 		 * Don't return ERROR, since modem misbehaves and sends out
 		 * flow on before linksetup response.
 		 */
+
+		rcu_read_unlock();
 		return /* CFGLU_EPROT; */ 0;
 	}
+
+	/* We can't hold rcu_lock during receive, so take a ref count instead */
 	cfsrvl_get(up);
+	rcu_read_unlock();
+
 	ret = up->receive(up, pkt);
+
 	cfsrvl_put(up);
 	return ret;
 }
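
The receive path above is the core of the pattern from the commit message: the RCU read lock is held only across the lookup, and a reference count keeps the layer alive while the potentially long-running receive callback runs outside the read-side critical section. A simplified sketch using the hypothetical example_layer type and example_layer_release() from the earlier sketches:

/* Deliver a packet to the layer with the given id (illustrative sketch only). */
static int example_deliver(u16 id, void *pkt)
{
	struct example_layer *l;
	int err;

	rcu_read_lock();
	list_for_each_entry_rcu(l, &example_list, node) {
		if (l->id != id)
			continue;

		/* Pin the layer; rcu_read_lock() must not be held across receive. */
		kref_get(&l->ref);
		rcu_read_unlock();

		err = l->receive(l, pkt);
		kref_put(&l->ref, example_layer_release);
		return err;
	}
	rcu_read_unlock();
	return -ENOTCONN;
}
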
@@ -185,31 +198,49 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
 static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
 	struct cfmuxl *muxl = container_obj(layr);
+	int err;
 	u8 linkid;
 	struct cflayer *dn;
 	struct caif_payload_info *info = cfpkt_info(pkt);
 	BUG_ON(!info);
+
+	rcu_read_lock();
 	dn = get_dn(muxl, info->dev_info);
 	if (dn == NULL) {
-		pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
+		pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
 			info->dev_info->id, info->dev_info->id);
+		rcu_read_unlock();
+		cfpkt_destroy(pkt);
 		return -ENOTCONN;
 	}
+
 	info->hdr_len += 1;
 	linkid = info->channel_id;
 	cfpkt_add_head(pkt, &linkid, 1);
-	return dn->transmit(dn, pkt);
+
+	/* We can't hold rcu_lock during receive, so take a ref count instead */
+	cffrml_hold(dn);
+
+	rcu_read_unlock();
+
+	err = dn->transmit(dn, pkt);
+
+	cffrml_put(dn);
+
+	return err;
 }
 
 static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 			   int phyid)
 {
 	struct cfmuxl *muxl = container_obj(layr);
-	struct list_head *node, *next;
 	struct cflayer *layer;
-	list_for_each_safe(node, next, &muxl->srvl_list) {
-		layer = list_entry(node, struct cflayer, node);
-		if (cfsrvl_phyid_match(layer, phyid))
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
+		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd)
+
+			/* NOTE: ctrlcmd is not allowed to block */
 			layer->ctrlcmd(layer, ctrl, phyid);
 	}
+
+	rcu_read_unlock();
 }