|
@@ -80,6 +80,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */
 
 /* struct cobalt_params - contains codel and blue parameters
  * @interval: codel initial drop rate
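Note on the new constant (annotation, not part of the patch): q->rate_bps carries the shaper rate in bytes per second as passed over netlink, so the threshold works out to 125,000,000 B/s x 8 = 1,000,000,000 bit/s, the 1 Gbps the comment names. GSO splitting is therefore enabled automatically whenever the configured shaper rate is 1 Gbit/s or below.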
@@ -1650,36 +1651,75 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (unlikely(len > b->max_skblen))
 		b->max_skblen = len;
 
-	cobalt_set_enqueue_time(skb, now);
-	get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-	flow_queue_add(flow, skb);
-
-	if (q->ack_filter)
-		ack = cake_ack_filter(q, flow);
+	if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+		struct sk_buff *segs, *nskb;
+		netdev_features_t features = netif_skb_features(skb);
+		unsigned int slen = 0, numsegs = 0;
+
+		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+		if (IS_ERR_OR_NULL(segs))
+			return qdisc_drop(skb, sch, to_free);
+
+		while (segs) {
+			nskb = segs->next;
+			segs->next = NULL;
+			qdisc_skb_cb(segs)->pkt_len = segs->len;
+			cobalt_set_enqueue_time(segs, now);
+			get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+									  segs);
+			flow_queue_add(flow, segs);
+
+			sch->q.qlen++;
+			numsegs++;
+			slen += segs->len;
+			q->buffer_used += segs->truesize;
+			b->packets++;
+			segs = nskb;
+		}
 
-	if (ack) {
-		b->ack_drops++;
-		sch->qstats.drops++;
-		b->bytes += qdisc_pkt_len(ack);
-		len -= qdisc_pkt_len(ack);
-		q->buffer_used += skb->truesize - ack->truesize;
-		if (q->rate_flags & CAKE_FLAG_INGRESS)
-			cake_advance_shaper(q, b, ack, now, true);
+		/* stats */
+		b->bytes += slen;
+		b->backlogs[idx] += slen;
+		b->tin_backlog += slen;
+		sch->qstats.backlog += slen;
+		q->avg_window_bytes += slen;
 
-		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-		consume_skb(ack);
+		/* credit the parents with the extra segments and bytes */
+		qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
+		consume_skb(skb);
 	} else {
-		sch->q.qlen++;
-		q->buffer_used += skb->truesize;
-	}
+		/* not splitting */
+		cobalt_set_enqueue_time(skb, now);
+		get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+		flow_queue_add(flow, skb);
+
+		if (q->ack_filter)
+			ack = cake_ack_filter(q, flow);
+
+		if (ack) {
+			b->ack_drops++;
+			sch->qstats.drops++;
+			b->bytes += qdisc_pkt_len(ack);
+			len -= qdisc_pkt_len(ack);
+			q->buffer_used += skb->truesize - ack->truesize;
+			if (q->rate_flags & CAKE_FLAG_INGRESS)
+				cake_advance_shaper(q, b, ack, now, true);
+
+			qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+			consume_skb(ack);
+		} else {
+			sch->q.qlen++;
+			q->buffer_used += skb->truesize;
+		}
 
-	/* stats */
-	b->packets++;
-	b->bytes += len;
-	b->backlogs[idx] += len;
-	b->tin_backlog += len;
-	sch->qstats.backlog += len;
-	q->avg_window_bytes += len;
+		/* stats */
+		b->packets++;
+		b->bytes += len;
+		b->backlogs[idx] += len;
+		b->tin_backlog += len;
+		sch->qstats.backlog += len;
+		q->avg_window_bytes += len;
+	}
 
 	if (q->overflow_timeout)
 		cake_heapify_up(q, b->overflow_idx[idx]);
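Note on the accounting in the split path: the parent qdiscs have already counted this enqueue as one packet of len bytes, while after segmentation CAKE actually holds numsegs packets totalling slen bytes. Calling qdisc_tree_reduce_backlog() with the negative delta (1 - numsegs, len - slen) credits the ancestors with the difference; the (1, len) form in the submitted patch would instead cancel the parent's increment and leave the split segments invisible upstream, which is why the body above carries the correction mainline sch_cake settled on. Worked example, assuming a full-sized super-packet: a 64 KB GSO skb carrying 44 MSS-1448 segments enqueues 44 packets, so the reduction is 1 - 44 = -43 packets, i.e. the parents' qlen and backlog grow to cover all 44 segments.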
@@ -2531,6 +2569,11 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_CAKE_MEMORY])
 		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
 
+	if (q->rate_bps && q->rate_bps <= CAKE_SPLIT_GSO_THRESHOLD)
+		q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+	else
+		q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
+
 	if (q->tins) {
 		sch_tree_lock(sch);
 		cake_reconfigure(sch);
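As of this hunk the flag is derived purely from the configured rate and is recomputed on every change: e.g. "tc qdisc replace dev eth0 root cake bandwidth 100Mbit" lands under the threshold and enables splitting, while an unshaped or multi-gigabit configuration clears it. cake_change() does not read TCA_CAKE_SPLIT_GSO here, even though cake_dump() below emits it. A minimal sketch of what a userspace override could look like (hypothetical, not in this patch; it would have to run after, and take precedence over, the rate-derived default above):

	/* hypothetical: let userspace force the flag on or off,
	 * overriding the rate-derived default
	 */
	if (tb[TCA_CAKE_SPLIT_GSO]) {
		if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
			q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
		else
			q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
	}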
@@ -2686,6 +2729,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
+			!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
+		goto nla_put_failure;
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
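Dumping the flag unconditionally means a sufficiently new tc (or any netlink listener) can at least observe whether GSO splitting is in effect, even though this patch offers no way to set it from userspace; presumably that keeps the attribute's semantics stable if a configurable knob is added later.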
|