Commit 2d5e807

vladimiroltean authored and davem330 committed
net/sched: taprio: split segmentation logic from qdisc_enqueue()
net/sched: taprio: split segmentation logic from qdisc_enqueue()

The majority of taprio_enqueue() is spent doing TCP segmentation, which doesn't look right to me. Compilers shouldn't have a problem inlining code no matter how we write it, so move the segmentation logic to a separate function.

Signed-off-by: Vladimir Oltean <[email protected]>
Reviewed-by: Kurt Kanzenbach <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
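In outline, the GSO handling moves out of taprio_enqueue() into a new taprio_enqueue_segmented() helper, so the tail of the enqueue path reduces to roughly the following (a condensed paraphrase of the diff below, with the surrounding setup code elided):

static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	...
	/* Software-segment oversized GSO packets; each resulting segment
	 * is accounted and enqueued individually via taprio_enqueue_one().
	 */
	if (skb_is_gso(skb))
		return taprio_enqueue_segmented(skb, sch, child, to_free);

	return taprio_enqueue_one(skb, sch, child, to_free);
}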
1 parent fed87cc · commit 2d5e807

1 file changed: +36 -30 lines


net/sched/sch_taprio.c

Lines changed: 36 additions & 30 deletions
@@ -575,6 +575,40 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 	return qdisc_enqueue(skb, child, to_free);
 }
 
+static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
+				    struct Qdisc *child,
+				    struct sk_buff **to_free)
+{
+	unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
+	netdev_features_t features = netif_skb_features(skb);
+	struct sk_buff *segs, *nskb;
+	int ret;
+
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+	if (IS_ERR_OR_NULL(segs))
+		return qdisc_drop(skb, sch, to_free);
+
+	skb_list_walk_safe(segs, segs, nskb) {
+		skb_mark_not_on_list(segs);
+		qdisc_skb_cb(segs)->pkt_len = segs->len;
+		slen += segs->len;
+
+		ret = taprio_enqueue_one(segs, sch, child, to_free);
+		if (ret != NET_XMIT_SUCCESS) {
+			if (net_xmit_drop_count(ret))
+				qdisc_qstats_drop(sch);
+		} else {
+			numsegs++;
+		}
+	}
+
+	if (numsegs > 1)
+		qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
+	consume_skb(skb);
+
+	return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+}
+
 /* Will not be called in the full offload case, since the TX queues are
  * attached to the Qdisc created using qdisc_create_dflt()
  */
@@ -596,36 +630,8 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	 * smaller chunks. Drivers with full offload are expected to handle
 	 * this in hardware.
 	 */
-	if (skb_is_gso(skb)) {
-		unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
-		netdev_features_t features = netif_skb_features(skb);
-		struct sk_buff *segs, *nskb;
-		int ret;
-
-		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-		if (IS_ERR_OR_NULL(segs))
-			return qdisc_drop(skb, sch, to_free);
-
-		skb_list_walk_safe(segs, segs, nskb) {
-			skb_mark_not_on_list(segs);
-			qdisc_skb_cb(segs)->pkt_len = segs->len;
-			slen += segs->len;
-
-			ret = taprio_enqueue_one(segs, sch, child, to_free);
-			if (ret != NET_XMIT_SUCCESS) {
-				if (net_xmit_drop_count(ret))
-					qdisc_qstats_drop(sch);
-			} else {
-				numsegs++;
-			}
-		}
-
-		if (numsegs > 1)
-			qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
-		consume_skb(skb);
-
-		return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
-	}
+	if (skb_is_gso(skb))
+		return taprio_enqueue_segmented(skb, sch, child, to_free);
 
 	return taprio_enqueue_one(skb, sch, child, to_free);
 }
