Commit 2d5e8071 authored by Vladimir Oltean, committed by David S. Miller

net/sched: taprio: split segmentation logic from qdisc_enqueue()

The majority of taprio_enqueue() is spent doing TCP segmentation, which
doesn't look right to me. Compilers shouldn't have a problem inlining code
no matter how we write it, so move the segmentation logic to a separate
function.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fed87cc6
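For context, the change below is a plain code-motion refactor: the body of a large branch is moved into its own static helper so the hot path stays short. The following is a minimal, self-contained sketch of that pattern using invented toy types and names (struct packet, enqueue_one, enqueue_segmented) rather than the kernel's actual code; a modern compiler remains free to inline the helper, so the split costs nothing at run time.

/*
 * Toy illustration of "split the bulky branch body into a helper".
 * None of these names come from the kernel; they exist only to show
 * the shape of the refactor in a program that compiles and runs.
 */
#include <stdbool.h>
#include <stdio.h>

struct packet {
        int len;
        bool is_gso;    /* oversized packet that must be segmented first */
};

static int enqueue_one(struct packet *p)
{
        printf("enqueued %d bytes\n", p->len);
        return 0;
}

/* All of the segmentation bookkeeping lives here, out of the fast path. */
static int enqueue_segmented(struct packet *p, int max_seg_len)
{
        int off, err;

        for (off = 0; off < p->len; off += max_seg_len) {
                struct packet seg = {
                        .len = (p->len - off < max_seg_len) ? p->len - off
                                                            : max_seg_len,
                        .is_gso = false,
                };

                err = enqueue_one(&seg);
                if (err)
                        return err;
        }

        return 0;
}

/* The common case reads as a simple dispatch. */
static int enqueue(struct packet *p)
{
        if (p->is_gso)
                return enqueue_segmented(p, 1500);

        return enqueue_one(p);
}

int main(void)
{
        struct packet small = { .len = 500, .is_gso = false };
        struct packet big = { .len = 4000, .is_gso = true };

        enqueue(&small);
        enqueue(&big);
        return 0;
}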
@@ -575,28 +575,10 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 	return qdisc_enqueue(skb, child, to_free);
 }
 
-/* Will not be called in the full offload case, since the TX queues are
- * attached to the Qdisc created using qdisc_create_dflt()
- */
-static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
-			  struct sk_buff **to_free)
+static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
+				    struct Qdisc *child,
+				    struct sk_buff **to_free)
 {
-	struct taprio_sched *q = qdisc_priv(sch);
-	struct Qdisc *child;
-	int queue;
-
-	queue = skb_get_queue_mapping(skb);
-
-	child = q->qdiscs[queue];
-	if (unlikely(!child))
-		return qdisc_drop(skb, sch, to_free);
-
-	/* Large packets might not be transmitted when the transmission duration
-	 * exceeds any configured interval. Therefore, segment the skb into
-	 * smaller chunks. Drivers with full offload are expected to handle
-	 * this in hardware.
-	 */
-	if (skb_is_gso(skb)) {
 	unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
 	netdev_features_t features = netif_skb_features(skb);
 	struct sk_buff *segs, *nskb;
@@ -625,7 +607,31 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	consume_skb(skb);
 
 	return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
 }
+
+/* Will not be called in the full offload case, since the TX queues are
+ * attached to the Qdisc created using qdisc_create_dflt()
+ */
+static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			  struct sk_buff **to_free)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct Qdisc *child;
+	int queue;
+
+	queue = skb_get_queue_mapping(skb);
+
+	child = q->qdiscs[queue];
+	if (unlikely(!child))
+		return qdisc_drop(skb, sch, to_free);
+
+	/* Large packets might not be transmitted when the transmission duration
+	 * exceeds any configured interval. Therefore, segment the skb into
+	 * smaller chunks. Drivers with full offload are expected to handle
+	 * this in hardware.
+	 */
+	if (skb_is_gso(skb))
+		return taprio_enqueue_segmented(skb, sch, child, to_free);
 
 	return taprio_enqueue_one(skb, sch, child, to_free);
 }