@@ -644,6 +644,63 @@ static bool cake_ddst(int flow_mode)
 	return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
 }
 
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count))
+		q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+				 struct cake_flow *flow,
+				 int flow_mode)
+{
+	u16 host_load = 1;
+
+	if (cake_dsrc(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+	if (cake_ddst(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+	/* The get_random_u16() is a way to apply dithering to avoid
+	 * accumulating roundoff errors
+	 */
+	return (q->flow_quantum * quantum_div[host_load] +
+		get_random_u16()) >> 16;
+}
+
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		     int flow_mode, u16 flow_override, u16 host_override)
 {
@@ -790,10 +847,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		allocate_dst = cake_ddst(flow_mode);
 
 		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
-			if (allocate_src)
-				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
-			if (allocate_dst)
-				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+			cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+			cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
 		}
 found:
 		/* reserve queue for future packets in same flow */
@@ -818,9 +873,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
 			srchost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[srchost_idx].srchost_bulk_flow_count++;
 			q->flows[reduced_hash].srchost = srchost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 
 		if (allocate_dst) {
@@ -841,9 +897,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
 			dsthost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
 			q->flows[reduced_hash].dsthost = dsthost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 	}
 
@@ -1856,10 +1913,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-		u16 host_load = 1;
-
 		if (!flow->set) {
 			list_add_tail(&flow->flowchain, &b->new_flows);
 		} else {
@@ -1869,31 +1922,17 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		flow->set = CAKE_SET_SPARSE;
 		b->sparse_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		flow->deficit = (b->flow_quantum *
-				 quantum_div[host_load]) >> 16;
+		flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
 		/* this flow was empty, accounted as a sparse flow, but actually
 		 * in the bulk rotation.
 		 */
 		flow->set = CAKE_SET_BULK;
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			srchost->srchost_bulk_flow_count++;
-
-		if (cake_ddst(q->flow_mode))
-			dsthost->dsthost_bulk_flow_count++;
-
+		cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+		cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -1950,13 +1989,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct cake_tin_data *b = &q->tins[q->cur_tin];
-	struct cake_host *srchost, *dsthost;
 	ktime_t now = ktime_get();
 	struct cake_flow *flow;
 	struct list_head *head;
 	bool first_flow = true;
 	struct sk_buff *skb;
-	u16 host_load;
 	u64 delay;
 	u32 len;
 
@@ -2056,11 +2093,6 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 		q->cur_flow = flow - b->flows;
 		first_flow = false;
 
-		/* triple isolation (modified DRR++) */
-		srchost = &b->hosts[flow->srchost];
-		dsthost = &b->hosts[flow->dsthost];
-		host_load = 1;
-
 		/* flow isolation (DRR++) */
 		if (flow->deficit <= 0) {
 			/* Keep all flows with deficits out of the sparse and decaying
@@ -2072,11 +2104,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 					b->sparse_flow_count--;
 					b->bulk_flow_count++;
 
-					if (cake_dsrc(q->flow_mode))
-						srchost->srchost_bulk_flow_count++;
-
-					if (cake_ddst(q->flow_mode))
-						dsthost->dsthost_bulk_flow_count++;
+					cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+					cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 					flow->set = CAKE_SET_BULK;
 				} else {
@@ -2088,19 +2117,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			}
 		}
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		WARN_ON(host_load > CAKE_QUEUES);
-
-		/* The get_random_u16() is a way to apply dithering to avoid
-		 * accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  get_random_u16()) >> 16;
+		flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
 		list_move_tail(&flow->flowchain, &b->old_flows);
 
 		goto retry;
@@ -2124,11 +2141,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
 
-					if (cake_dsrc(q->flow_mode))
-						srchost->srchost_bulk_flow_count--;
-
-					if (cake_ddst(q->flow_mode))
-						dsthost->dsthost_bulk_flow_count--;
+					cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+					cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 					b->decaying_flow_count++;
 				} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2146,12 +2160,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				else if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
 
-					if (cake_dsrc(q->flow_mode))
-						srchost->srchost_bulk_flow_count--;
-
-					if (cake_ddst(q->flow_mode))
-						dsthost->dsthost_bulk_flow_count--;
-
+					cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+					cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 				} else
 					b->decaying_flow_count--;
 
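
For reference, the new cake_get_flow_quantum() helper keeps the 16.16 fixed-point arithmetic that previously sat inline in cake_enqueue() and cake_dequeue(): quantum_div[host_load] is roughly 65535 / host_load, so multiplying by it and shifting right by 16 divides the per-flow quantum by the bulk-flow count of the busiest host sharing the flow, and adding get_random_u16() before the shift dithers the truncation so rounding errors do not accumulate across dequeues. The standalone sketch below models that arithmetic in userspace; the quantum_div initialisation mirrored here and the rand()-based stand-in for get_random_u16() are illustrative assumptions, not kernel code.

/* Sketch only (not part of the patch): models the dithered quantum
 * computed by cake_get_flow_quantum(). Assumes quantum_div[i] ~= 65535 / i,
 * as sch_cake sets up at module init, and uses rand() in place of
 * get_random_u16().
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define CAKE_QUEUES 1024

static uint16_t quantum_div[CAKE_QUEUES + 1];

static uint16_t dithered_quantum(uint16_t flow_quantum, uint16_t host_load)
{
	/* Same shape as the kernel expression: scale by ~1/host_load in
	 * 16.16 fixed point, then add a random 16-bit value so the final
	 * truncating shift rounds up or down with the right probability.
	 */
	uint32_t scaled = (uint32_t)flow_quantum * quantum_div[host_load];

	return (scaled + (rand() & 0xffff)) >> 16;
}

int main(void)
{
	uint16_t flow_quantum = 1514;	/* MTU-sized quantum */
	uint16_t host_load = 3;		/* host has three bulk flows */
	uint64_t sum = 0;
	int i, rounds = 1000000;

	for (i = 1; i <= CAKE_QUEUES; i++)
		quantum_div[i] = 65535 / i;

	for (i = 0; i < rounds; i++)
		sum += dithered_quantum(flow_quantum, host_load);

	/* Averages to roughly flow_quantum / host_load (about 504.7);
	 * a plain truncating shift would systematically round down and
	 * the error would accumulate over many dequeues.
	 */
	printf("mean deficit increment: %.2f\n", (double)sum / rounds);
	return 0;
}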