@@ -627,6 +627,63 @@ static bool cake_ddst(int flow_mode)
 	return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
 }
 
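+/* Bounds-checked helpers for the per-host bulk flow counters: the
+ * increments saturate at CAKE_QUEUES and the decrements stop at zero,
+ * so an unbalanced update can no longer wrap the u16 counters.
+ */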
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count))
+		q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
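+/* host_load is the bulk flow count of the busier of the flow's two hosts;
+ * quantum_div[] is a reciprocal table (~65536 / load), so the fixed-point
+ * multiply and shift below yield roughly flow_quantum / host_load.
+ */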
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+				 struct cake_flow *flow,
+				 int flow_mode)
+{
+	u16 host_load = 1;
+
+	if (cake_dsrc(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+	if (cake_ddst(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+	/* The get_random_u16() is a way to apply dithering to avoid
+	 * accumulating roundoff errors
+	 */
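+	/* e.g. with flow_quantum = 1514 and host_load = 3, quantum_div[3] is
+	 * 21845 and 1514 * 21845 = 33073330, so the shift returns 504 or 505
+	 * depending on the random term, averaging out to ~1514 / 3
+	 */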
+	return (q->flow_quantum * quantum_div[host_load] +
+		get_random_u16()) >> 16;
+}
+
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		     int flow_mode, u16 flow_override, u16 host_override)
 {
@@ -773,10 +830,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		allocate_dst = cake_ddst(flow_mode);
 
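+		/* Reusing a queue that is still in the bulk rotation: drop
+		 * the host references it holds before the host tags are
+		 * rewritten for the new flow below.
+		 */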
 		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
-			if (allocate_src)
-				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
-			if (allocate_dst)
-				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+			cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+			cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
 		}
 found:
 	/* reserve queue for future packets in same flow */
@@ -801,9 +856,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
 			srchost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[srchost_idx].srchost_bulk_flow_count++;
 			q->flows[reduced_hash].srchost = srchost_idx;
+
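+			/* flow->srchost is assigned above before the increment,
+			 * since the helper looks the counter up via flow->srchost
+			 */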
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 
 		if (allocate_dst) {
@@ -824,9 +880,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
 			dsthost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
 			q->flows[reduced_hash].dsthost = dsthost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 	}
 
@@ -1839,10 +1896,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-		u16 host_load = 1;
-
 		if (!flow->set) {
 			list_add_tail(&flow->flowchain, &b->new_flows);
 		} else {
@@ -1852,31 +1905,17 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		flow->set = CAKE_SET_SPARSE;
 		b->sparse_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		flow->deficit = (b->flow_quantum *
-				 quantum_div[host_load]) >> 16;
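+		/* a new sparse flow starts out with one full quantum of
+		 * deficit, scaled by its hosts' current bulk flow load
+		 */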
+		flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
 		/* this flow was empty, accounted as a sparse flow, but actually
 		 * in the bulk rotation.
 		 */
 		flow->set = CAKE_SET_BULK;
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			srchost->srchost_bulk_flow_count++;
-
-		if (cake_ddst(q->flow_mode))
-			dsthost->dsthost_bulk_flow_count++;
-
+		cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+		cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -1933,13 +1972,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct cake_tin_data *b = &q->tins[q->cur_tin];
-	struct cake_host *srchost, *dsthost;
 	ktime_t now = ktime_get();
 	struct cake_flow *flow;
 	struct list_head *head;
 	bool first_flow = true;
 	struct sk_buff *skb;
-	u16 host_load;
 	u64 delay;
 	u32 len;
 
@@ -2039,11 +2076,6 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 	q->cur_flow = flow - b->flows;
 	first_flow = false;
 
-	/* triple isolation (modified DRR++) */
-	srchost = &b->hosts[flow->srchost];
-	dsthost = &b->hosts[flow->dsthost];
-	host_load = 1;
-
 	/* flow isolation (DRR++) */
 	if (flow->deficit <= 0) {
 		/* Keep all flows with deficits out of the sparse and decaying
@@ -2055,11 +2087,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				b->sparse_flow_count--;
 				b->bulk_flow_count++;
 
-				if (cake_dsrc(q->flow_mode))
-					srchost->srchost_bulk_flow_count++;
-
-				if (cake_ddst(q->flow_mode))
-					dsthost->dsthost_bulk_flow_count++;
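+				/* the sparse flow used up its deficit, so it
+				 * joins the bulk rotation and must now count
+				 * toward its hosts
+				 */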
+				cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+				cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 				flow->set = CAKE_SET_BULK;
 			} else {
@@ -2071,19 +2100,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			}
 		}
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		WARN_ON(host_load > CAKE_QUEUES);
-
-		/* The get_random_u16() is a way to apply dithering to avoid
-		 * accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  get_random_u16()) >> 16;
+		flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
 		list_move_tail(&flow->flowchain, &b->old_flows);
 
 		goto retry;
@@ -2107,11 +2124,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			if (flow->set == CAKE_SET_BULK) {
 				b->bulk_flow_count--;
 
-				if (cake_dsrc(q->flow_mode))
-					srchost->srchost_bulk_flow_count--;
-
-				if (cake_ddst(q->flow_mode))
-					dsthost->dsthost_bulk_flow_count--;
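+				/* drop the host references taken when this
+				 * flow went bulk; the helpers' bounds checks
+				 * keep the counters from wrapping below zero
+				 */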
+				cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+				cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 				b->decaying_flow_count++;
 			} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2129,12 +2143,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			else if (flow->set == CAKE_SET_BULK) {
 				b->bulk_flow_count--;
 
-				if (cake_dsrc(q->flow_mode))
-					srchost->srchost_bulk_flow_count--;
-
-				if (cake_ddst(q->flow_mode))
-					dsthost->dsthost_bulk_flow_count--;
-
+				cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+				cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 			} else
 				b->decaying_flow_count--;
 