Commit 1fa17d4b authored by Oliver Hartkopp, committed by David S. Miller

can: omit unneeded skb_clone() calls

The AF_CAN core always delivered cloned sk_buffs to the AF_CAN
protocols, although this was _only_ needed by the can-raw protocol.
With this (additionally documented) change, the AF_CAN core calls the
callback functions of the registered AF_CAN protocols with the original
(uncloned) sk_buff pointer and lets the can-raw protocol do the
skb_clone() itself, which omits all unneeded skb_clone() calls for other
AF_CAN protocols.
Signed-off-by: Oliver Hartkopp <oliver@hartkopp.net>
Signed-off-by: Urs Thuermann <urs.thuermann@volkswagen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 98658bc9
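For context, a minimal sketch of the receive-callback contract this commit documents for can_rx_register(): the sk_buff handed to a registered callback is only valid while the callback runs and must not be freed there; a protocol that needs the frame afterwards has to clone it itself, as can-raw now does in the diff below. The callback name my_proto_rcv and the queueing to a socket are hypothetical illustrations, not part of this commit.

#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical callback as passed to can_rx_register().  The skb argument
 * belongs to the AF_CAN core: it stays valid only while this function runs
 * and must not be freed here. */
static void my_proto_rcv(struct sk_buff *skb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct sk_buff *clone;

	/* keep a private copy, because the original skb may not be held
	 * past the end of the callback */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	if (sock_queue_rcv_skb(sk, clone) < 0)
		kfree_skb(clone);
}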
include/linux/can/core.h
@@ -19,7 +19,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 
-#define CAN_VERSION "20081130"
+#define CAN_VERSION "20090105"
 
 /* increment this number each time you change some user-space interface */
 #define CAN_ABI_VERSION "8"
net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
  * filter for error frames (CAN_ERR_FLAG bit set in mask).
  *
+ * The provided pointer to the sk_buff is guaranteed to be valid as long as
+ * the callback function is running. The callback function must *not* free
+ * the given sk_buff while processing it's task. When the given sk_buff is
+ * needed after the end of the callback function it must be cloned inside
+ * the callback function with skb_clone().
+ *
  * Return:
  *  0 on success
  *  -ENOMEM on missing cache mem to create subscription entry
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister);
 
 static inline void deliver(struct sk_buff *skb, struct receiver *r)
 {
-	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
-	if (clone) {
-		clone->sk = skb->sk;
-		r->func(clone, r->data);
-		r->matches++;
-	}
+	r->func(skb, r->data);
+	r->matches++;
 }
 
 static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
net/can/bcm.c
@@ -633,7 +633,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 	hrtimer_cancel(&op->timer);
 
 	if (op->can_id != rxframe->can_id)
-		goto rx_freeskb;
+		return;
 
 	/* save rx timestamp */
 	op->rx_stamp = skb->tstamp;
@@ -645,19 +645,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 	if (op->flags & RX_RTR_FRAME) {
 		/* send reply for RTR-request (placed in op->frames[0]) */
 		bcm_can_tx(op);
-		goto rx_freeskb;
+		return;
 	}
 
 	if (op->flags & RX_FILTER_ID) {
 		/* the easiest case */
 		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
-		goto rx_freeskb_starttimer;
+		goto rx_starttimer;
 	}
 
 	if (op->nframes == 1) {
 		/* simple compare with index 0 */
 		bcm_rx_cmp_to_index(op, 0, rxframe);
-		goto rx_freeskb_starttimer;
+		goto rx_starttimer;
 	}
 
 	if (op->nframes > 1) {
@@ -678,10 +678,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 		}
 	}
 
-rx_freeskb_starttimer:
+rx_starttimer:
 	bcm_rx_starttimer(op);
-rx_freeskb:
-	kfree_skb(skb);
 }
 
 /*
net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data)
 	struct raw_sock *ro = raw_sk(sk);
 	struct sockaddr_can *addr;
 
-	if (!ro->recv_own_msgs) {
-		/* check the received tx sock reference */
-		if (skb->sk == sk) {
-			kfree_skb(skb);
-			return;
-		}
-	}
+	/* check the received tx sock reference */
+	if (!ro->recv_own_msgs && skb->sk == sk)
+		return;
+
+	/* clone the given skb to be able to enqueue it into the rcv queue */
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (!skb)
+		return;
 
 	/*
 	 * Put the datagram to the queue so that raw_recvmsg() can