@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
175175static int fq_codel_enqueue (struct sk_buff * skb , struct Qdisc * sch )
176176{
177177 struct fq_codel_sched_data * q = qdisc_priv (sch );
178- unsigned int idx ;
178+ unsigned int idx , prev_backlog ;
179179 struct fq_codel_flow * flow ;
180180 int uninitialized_var (ret );
181181
@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
203203 if (++ sch -> q .qlen <= sch -> limit )
204204 return NET_XMIT_SUCCESS ;
205205
206+ prev_backlog = sch -> qstats .backlog ;
206207 q -> drop_overlimit ++ ;
207208 /* Return Congestion Notification only if we dropped a packet
208209 * from this flow.
@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
211212 return NET_XMIT_CN ;
212213
213214 /* As we dropped a packet, better let upper stack know this */
214- qdisc_tree_decrease_qlen (sch , 1 );
215+ qdisc_tree_reduce_backlog (sch , 1 , prev_backlog - sch -> qstats . backlog );
215216 return NET_XMIT_SUCCESS ;
216217}
217218
@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
241242 struct fq_codel_flow * flow ;
242243 struct list_head * head ;
243244 u32 prev_drop_count , prev_ecn_mark ;
245+ unsigned int prev_backlog ;
244246
245247begin :
246248 head = & q -> new_flows ;
@@ -259,6 +261,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
259261
260262 prev_drop_count = q -> cstats .drop_count ;
261263 prev_ecn_mark = q -> cstats .ecn_mark ;
264+ prev_backlog = sch -> qstats .backlog ;
262265
263266 skb = codel_dequeue (sch , & q -> cparams , & flow -> cvars , & q -> cstats ,
264267 dequeue );
@@ -276,12 +279,14 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
276279 }
277280 qdisc_bstats_update (sch , skb );
278281 flow -> deficit -= qdisc_pkt_len (skb );
279- /* We cant call qdisc_tree_decrease_qlen () if our qlen is 0,
282+ /* We cant call qdisc_tree_reduce_backlog () if our qlen is 0,
280283 * or HTB crashes. Defer it for next round.
281284 */
282285 if (q -> cstats .drop_count && sch -> q .qlen ) {
283- qdisc_tree_decrease_qlen (sch , q -> cstats .drop_count );
286+ qdisc_tree_reduce_backlog (sch , q -> cstats .drop_count ,
287+ q -> cstats .drop_len );
284288 q -> cstats .drop_count = 0 ;
289+ q -> cstats .drop_len = 0 ;
285290 }
286291 return skb ;
287292}
@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
372377 while (sch -> q .qlen > sch -> limit ) {
373378 struct sk_buff * skb = fq_codel_dequeue (sch );
374379
380+ q -> cstats .drop_len += qdisc_pkt_len (skb );
375381 kfree_skb (skb );
376382 q -> cstats .drop_count ++ ;
377383 }
378- qdisc_tree_decrease_qlen (sch , q -> cstats .drop_count );
384+ qdisc_tree_reduce_backlog (sch , q -> cstats .drop_count , q -> cstats . drop_len );
379385 q -> cstats .drop_count = 0 ;
386+ q -> cstats .drop_len = 0 ;
380387
381388 sch_tree_unlock (sch );
382389 return 0 ;