@@ -644,6 +644,63 @@ static bool cake_ddst(int flow_mode)
 	return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
 }
 
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count))
+		q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+				 struct cake_flow *flow,
+				 int flow_mode)
+{
+	u16 host_load = 1;
+
+	if (cake_dsrc(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+	if (cake_ddst(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+	/* The get_random_u16() is a way to apply dithering to avoid
+	 * accumulating roundoff errors
+	 */
+	return (q->flow_quantum * quantum_div[host_load] +
+		get_random_u16()) >> 16;
+}
+
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		     int flow_mode, u16 flow_override, u16 host_override)
 {
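
For reference: the dithering in cake_get_flow_quantum() is a 16.16 fixed-point division of flow_quantum by host_load. quantum_div[] holds precomputed reciprocals (65535 / i), and adding a uniform random u16 before the right shift removes the systematic round-down error, so the long-run average closely tracks the exact quotient. Below is a minimal userspace sketch of the effect; it is not part of the patch, rand() stands in for get_random_u16(), and the reciprocal table is rebuilt locally.

#include <stdio.h>
#include <stdlib.h>

#define CAKE_QUEUES 1024

static unsigned short quantum_div[CAKE_QUEUES + 1];

int main(void)
{
	const unsigned short flow_quantum = 1514;	/* MTU-sized quantum */
	const unsigned int host_load = 3;		/* bulk flows for this host */
	unsigned long long dithered = 0, truncated = 0;
	int i;

	/* reciprocal table, as sch_cake precomputes it */
	for (i = 1; i <= CAKE_QUEUES; i++)
		quantum_div[i] = 65535 / i;

	for (i = 0; i < 1000000; i++) {
		/* dithered: a random u16 is added before the shift */
		dithered += (flow_quantum * quantum_div[host_load] +
			     (rand() & 0xffff)) >> 16;
		/* truncated: always rounds down, so the error accumulates */
		truncated += (flow_quantum * quantum_div[host_load]) >> 16;
	}

	printf("exact:     %.3f\n", flow_quantum / (double)host_load);
	printf("dithered:  %.3f\n", dithered / 1e6);
	printf("truncated: %.3f\n", truncated / 1e6);
	return 0;
}

With plain truncation every round under-delivers by up to one byte of quantum; the dithered version trades that systematic bias for harmless per-round noise.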
@@ -790,10 +847,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		allocate_dst = cake_ddst(flow_mode);
 
 		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
-			if (allocate_src)
-				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
-			if (allocate_dst)
-				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+			cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+			cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
 		}
 found:
 		/* reserve queue for future packets in same flow */
@@ -818,9 +873,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
 			srchost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[srchost_idx].srchost_bulk_flow_count++;
 			q->flows[reduced_hash].srchost = srchost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 
 		if (allocate_dst) {
@@ -841,9 +897,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
 			dsthost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
 			q->flows[reduced_hash].dsthost = dsthost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 	}
 
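
Note the guard in the decrement helpers used above: the per-host counters are u16, so if the accounting ever becomes unbalanced (for example, if flow_mode changes while packets are still queued), an unguarded decrement of a zero count would wrap to 65535 and the host would be treated as carrying 65535 bulk flows, starving its traffic. A hypothetical standalone demo of the wraparound the guard prevents:

#include <stdio.h>

int main(void)
{
	unsigned short bulk_flow_count = 0;	/* same width as the per-host counters */

	/* unguarded: a stray decrement wraps to 65535, inflating host_load */
	bulk_flow_count--;
	printf("unguarded decrement on zero: %u\n", bulk_flow_count);

	/* guarded, as in cake_dec_srchost_bulk_flow_count(): stays at zero */
	bulk_flow_count = 0;
	if (bulk_flow_count)
		bulk_flow_count--;
	printf("guarded decrement on zero:   %u\n", bulk_flow_count);
	return 0;
}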
@@ -1856,10 +1913,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-		u16 host_load = 1;
-
 		if (!flow->set) {
 			list_add_tail(&flow->flowchain, &b->new_flows);
 		} else {
@@ -1869,31 +1922,17 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		flow->set = CAKE_SET_SPARSE;
 		b->sparse_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		flow->deficit = (b->flow_quantum *
-				 quantum_div[host_load]) >> 16;
+		flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
 		/* this flow was empty, accounted as a sparse flow, but actually
 		 * in the bulk rotation.
 		 */
 		flow->set = CAKE_SET_BULK;
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			srchost->srchost_bulk_flow_count++;
-
-		if (cake_ddst(q->flow_mode))
-			dsthost->dsthost_bulk_flow_count++;
-
+		cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+		cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -1950,13 +1989,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct cake_tin_data *b = &q->tins[q->cur_tin];
-	struct cake_host *srchost, *dsthost;
 	ktime_t now = ktime_get();
 	struct cake_flow *flow;
 	struct list_head *head;
 	bool first_flow = true;
 	struct sk_buff *skb;
-	u16 host_load;
 	u64 delay;
 	u32 len;
 
@@ -2056,11 +2093,6 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 	q->cur_flow = flow - b->flows;
 	first_flow = false;
 
-	/* triple isolation (modified DRR++) */
-	srchost = &b->hosts[flow->srchost];
-	dsthost = &b->hosts[flow->dsthost];
-	host_load = 1;
-
 	/* flow isolation (DRR++) */
 	if (flow->deficit <= 0) {
 		/* Keep all flows with deficits out of the sparse and decaying
@@ -2072,11 +2104,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				b->sparse_flow_count--;
 				b->bulk_flow_count++;
 
-				if (cake_dsrc(q->flow_mode))
-					srchost->srchost_bulk_flow_count++;
-
-				if (cake_ddst(q->flow_mode))
-					dsthost->dsthost_bulk_flow_count++;
+				cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+				cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 				flow->set = CAKE_SET_BULK;
 			} else {
@@ -2088,19 +2117,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			}
 		}
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		WARN_ON(host_load > CAKE_QUEUES);
-
-		/* The get_random_u16() is a way to apply dithering to avoid
-		 * accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  get_random_u16()) >> 16;
+		flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
 		list_move_tail(&flow->flowchain, &b->old_flows);
 
 		goto retry;
@@ -2124,11 +2141,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
 
-					if (cake_dsrc(q->flow_mode))
-						srchost->srchost_bulk_flow_count--;
-
-					if (cake_ddst(q->flow_mode))
-						dsthost->dsthost_bulk_flow_count--;
+					cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+					cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 					b->decaying_flow_count++;
 				} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2146,12 +2160,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				else if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
 
-					if (cake_dsrc(q->flow_mode))
-						srchost->srchost_bulk_flow_count--;
-
-					if (cake_ddst(q->flow_mode))
-						dsthost->dsthost_bulk_flow_count--;
-
+					cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+					cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 				} else
 					b->decaying_flow_count--;
 
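
Conversely, the increment helpers saturate at CAKE_QUEUES, which is presumably also why the old WARN_ON(host_load > CAKE_QUEUES) in the dequeue path could simply be dropped: quantum_div[] has CAKE_QUEUES + 1 entries, so a saturated counter can never index past the end of the table. A small sketch of the saturating behaviour, assuming CAKE_QUEUES = 1024 as in sch_cake.c:

#include <stdio.h>

#define CAKE_QUEUES 1024

/* mirrors the guard in cake_inc_srchost_bulk_flow_count() */
static void inc_capped(unsigned short *count)
{
	if (*count < CAKE_QUEUES)
		(*count)++;
}

int main(void)
{
	unsigned short count = 0;
	int i;

	/* even with many more increments than decrements, the count
	 * saturates, so quantum_div[count] stays inside its
	 * CAKE_QUEUES + 1 entries */
	for (i = 0; i < CAKE_QUEUES + 100; i++)
		inc_capped(&count);

	printf("count saturates at %u\n", count);
	return 0;
}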