
Commit 7298de9

tohojo authored and davem330 committed
sch_cake: Add ingress mode
The ingress mode is meant to be enabled when CAKE runs on the downlink side of the actual bottleneck (such as on an IFB device). In this mode the shaper also accounts dropped packets towards the shaped rate, as these have already traversed the bottleneck.

Enabling ingress mode will also tune the AQM to always keep at least two packets queued *for each flow*. This is done by scaling the minimum queue occupancy level that will disable the AQM by the number of active bulk flows. The rationale is that retransmits are more expensive in ingress mode, since dropped packets have to traverse the bottleneck again when they are retransmitted; being more lenient and keeping a minimum number of packets queued therefore improves throughput when the number of active flows is so large that they saturate the bottleneck even at their minimum window size.

This commit also adds a separate switch to enable ingress-mode rate autoscaling. If enabled, the autoscaling code will observe the actual traffic rate and adjust the shaper rate to match it. This can help avoid latency increases when the actual bottleneck rate drops below the shaped rate. The scaling filters out spikes with an EWMA filter.

Signed-off-by: Toke Høiland-Jørgensen <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
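The EWMA filter referred to above is applied via the cake_ewma() helper, which the diff below calls but does not define (it lives elsewhere in sch_cake.c). As a rough user-space sketch of the shift-based averaging it is understood to perform (the shift values and the asymmetry mirror the patch; the exact kernel helper may differ):

#include <stdint.h>
#include <stdio.h>

/* Shift-based EWMA in the style of cake_ewma(): shift 2 weights a new
 * sample at 1/4, shift 8 at 1/256. (Sketch only; not the kernel helper.) */
static uint64_t ewma(uint64_t avg, uint64_t sample, unsigned int shift)
{
	avg -= avg >> shift;
	avg += sample >> shift;
	return avg;
}

int main(void)
{
	uint64_t avg = 500000;	/* assumed running average of packet intervals, ns */
	uint64_t samples[] = { 2000000, 100000, 100000 };	/* hypothetical intervals */

	for (int i = 0; i < 3; i++) {
		/* Same asymmetry as the patch: intervals above the average
		 * adapt fast (shift 2), shorter ones - e.g. wifi aggregation
		 * bursts - only slowly (shift 8). */
		avg = ewma(avg, samples[i], samples[i] > avg ? 2 : 8);
		printf("avg packet interval: %llu ns\n", (unsigned long long)avg);
	}
	return 0;
}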
1 parent 046f6fd commit 7298de9

File tree

1 file changed (+83, -4 lines)


net/sched/sch_cake.c

Lines changed: 83 additions & 4 deletions
@@ -435,7 +435,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
 			       struct cobalt_params *p,
 			       ktime_t now,
-			       struct sk_buff *skb)
+			       struct sk_buff *skb,
+			       u32 bulk_flows)
 {
 	bool next_due, over_target, drop = false;
 	ktime_t schedule;
@@ -459,6 +460,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
 	sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
 	schedule = ktime_sub(now, vars->drop_next);
 	over_target = sojourn > p->target &&
+		      sojourn > p->mtu_time * bulk_flows * 2 &&
 		      sojourn > p->mtu_time * 4;
 	next_due = vars->count && ktime_to_ns(schedule) >= 0;
 
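The added comparison above is where the per-flow minimum occupancy comes from: in ingress mode the sojourn time must exceed both the AQM target and two MTU serialization times per active bulk flow before the packet counts as over target (bulk_flows is passed as 0 when ingress mode is off, so the extra term is then a no-op). A stand-alone sketch of the check with made-up numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All values are assumed, purely for illustration. */
	uint64_t target_ns   = 5000000;		/* 5 ms AQM target */
	uint64_t mtu_time_ns = 1200000;		/* ~1.2 ms to serialize one MTU */
	uint32_t bulk_flows  = 8;		/* active bulk flows (0 if ingress mode is off) */
	uint64_t sojourn_ns  = 12000000;	/* observed packet sojourn time */

	/* Same shape as the patched test in cobalt_should_drop(). */
	bool over_target = sojourn_ns > target_ns &&
			   sojourn_ns > mtu_time_ns * bulk_flows * 2 &&
			   sojourn_ns > mtu_time_ns * 4;

	/* With 8 bulk flows the scaled threshold is 19.2 ms, so a 12 ms
	 * sojourn is still tolerated and the AQM stays inactive. */
	printf("over_target = %s\n", over_target ? "true" : "false");
	return 0;
}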
@@ -881,6 +883,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 	b->tin_dropped++;
 	sch->qstats.drops++;
 
+	if (q->rate_flags & CAKE_FLAG_INGRESS)
+		cake_advance_shaper(q, b, skb, now, true);
+
 	__qdisc_drop(skb, to_free);
 	sch->q.qlen--;
 
@@ -921,6 +926,8 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data *t,
 	return 0;
 }
 
+static void cake_reconfigure(struct Qdisc *sch);
+
 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			struct sk_buff **to_free)
 {
@@ -988,8 +995,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		cake_heapify_up(q, b->overflow_idx[idx]);
 
 	/* incoming bandwidth capacity estimate */
-	q->avg_window_bytes = 0;
-	q->last_packet_time = now;
+	if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+		u64 packet_interval = \
+			ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+		if (packet_interval > NSEC_PER_SEC)
+			packet_interval = NSEC_PER_SEC;
+
+		/* filter out short-term bursts, eg. wifi aggregation */
+		q->avg_packet_interval = \
+			cake_ewma(q->avg_packet_interval,
+				  packet_interval,
+				  (packet_interval > q->avg_packet_interval ?
+					  2 : 8));
+
+		q->last_packet_time = now;
+
+		if (packet_interval > q->avg_packet_interval) {
+			u64 window_interval = \
+				ktime_to_ns(ktime_sub(now,
+						      q->avg_window_begin));
+			u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+			do_div(b, window_interval);
+			q->avg_peak_bandwidth =
+				cake_ewma(q->avg_peak_bandwidth, b,
+					  b > q->avg_peak_bandwidth ? 2 : 8);
+			q->avg_window_bytes = 0;
+			q->avg_window_begin = now;
+
+			if (ktime_after(now,
+					ktime_add_ms(q->last_reconfig_time,
+						     250))) {
+				q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+				cake_reconfigure(sch);
+			}
+		}
+	} else {
+		q->avg_window_bytes = 0;
+		q->last_packet_time = now;
+	}
 
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
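The autorate branch above estimates the arrival rate from the bytes counted in the current measurement window, smooths it with the same EWMA, and (at most every 250 ms) sets the shaper to 15/16 of the smoothed peak, keeping a small margin below the observed bottleneck rate. A user-space sketch of just the arithmetic, with hypothetical numbers and plain division standing in for the kernel's do_div():

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Hypothetical window: 2.5 MB observed over one second. */
	uint64_t window_bytes = 2500000;
	uint64_t window_ns    = NSEC_PER_SEC;

	/* bytes per second over the window, as computed in the patch */
	uint64_t observed = window_bytes * NSEC_PER_SEC / window_ns;

	/* shaper rate = 15/16 (~94%) of the peak estimate */
	uint64_t rate = (observed * 15) >> 4;

	printf("observed %llu B/s -> shaper %llu B/s\n",
	       (unsigned long long)observed,
	       (unsigned long long)rate);
	return 0;
}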
@@ -1268,15 +1313,27 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 		}
 
 		/* Last packet in queue may be marked, shouldn't be dropped */
-		if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb) ||
+		if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
+					(b->bulk_flow_count *
+					 !!(q->rate_flags &
+					    CAKE_FLAG_INGRESS))) ||
 		    !flow->head)
 			break;
 
+		/* drop this packet, get another one */
+		if (q->rate_flags & CAKE_FLAG_INGRESS) {
+			len = cake_advance_shaper(q, b, skb,
+						  now, true);
+			flow->deficit -= len;
+			b->tin_deficit -= len;
+		}
 		flow->dropped++;
 		b->tin_dropped++;
 		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
 		qdisc_qstats_drop(sch);
 		kfree_skb(skb);
+		if (q->rate_flags & CAKE_FLAG_INGRESS)
+			goto retry;
 	}
 
 	b->tin_ecn_mark += !!flow->cvars.ecn_marked;
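Note that in ingress mode a packet dropped here has already crossed the bottleneck, so the dequeue path still charges it to the shaper and to the flow and tin deficits before retrying with the next packet. A minimal user-space sketch of that bookkeeping (the struct fields are hypothetical stand-ins for the counters touched in the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the per-flow and per-tin deficit counters. */
struct flow { int64_t deficit; };
struct tin  { int64_t deficit; };

/* In ingress mode a dropped packet is still debited, because its bytes
 * already consumed bottleneck capacity before reaching the qdisc. */
static void account_drop(struct flow *f, struct tin *t, uint32_t len,
			 bool ingress_mode)
{
	if (ingress_mode) {
		f->deficit -= len;
		t->deficit -= len;
	}
}

int main(void)
{
	struct flow f = { .deficit = 3000 };
	struct tin  t = { .deficit = 9000 };

	account_drop(&f, &t, 1500, true);	/* drop a 1500-byte packet */
	printf("flow deficit %lld, tin deficit %lld\n",
	       (long long)f.deficit, (long long)t.deficit);
	return 0;
}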
@@ -1459,6 +1516,20 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 		q->target = 1;
 	}
 
+	if (tb[TCA_CAKE_AUTORATE]) {
+		if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
+			q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+		else
+			q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+	}
+
+	if (tb[TCA_CAKE_INGRESS]) {
+		if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
+			q->rate_flags |= CAKE_FLAG_INGRESS;
+		else
+			q->rate_flags &= ~CAKE_FLAG_INGRESS;
+	}
+
 	if (tb[TCA_CAKE_MEMORY])
 		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
 
@@ -1582,6 +1653,14 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
+			!!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, TCA_CAKE_INGRESS,
+			!!(q->rate_flags & CAKE_FLAG_INGRESS)))
+		goto nla_put_failure;
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
