/* Copyright (c) 2019-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file congestion_control_st.h
 * \brief Structure definitions for congestion control.
 **/

#ifndef CONGESTION_CONTROL_ST_H
#define CONGESTION_CONTROL_ST_H

#include "core/or/crypt_path_st.h"
#include "core/or/circuit_st.h"

/** Signifies which sendme algorithm to use */
typedef enum {
  /** OG Tor fixed-sized circ and stream windows. It sucks, but it is important
   * to make sure that the new algs can compete with the old garbage. */
  CC_ALG_SENDME = 0,

  /**
   * Prop#324 TOR_WESTWOOD - Deliberately aggressive. Westwood may not even
   * converge to fairness in some cases because max RTT will also increase
   * on congestion, which boosts the Westwood RTT congestion threshold. So it
   * can cause runaway queue bloat, which may or may not lead to a robot
   * uprising... Ok that's Westworld, not Westwood. Still, we need to test
   * Vegas and NOLA against something more aggressive to ensure they do not
   * starve in the presence of cheaters. We also need to make sure cheaters
   * trigger the oomkiller in those cases.
   */
  CC_ALG_WESTWOOD = 1,

  /**
   * Prop#324 TOR_VEGAS - TCP Vegas-style BDP tracker. Because Vegas backs off
   * whenever it detects queue delay, it can be beaten out by more aggressive
   * algs. However, in live network testing, it seems to do just fine against
   * current SENDMEs. It outperforms Westwood and does not stall. */
  CC_ALG_VEGAS = 2,

  /**
   * Prop#324: TOR_NOLA - NOLA looks the BDP right in the eye and uses it
   * immediately as CWND. No slow start, no other congestion signals, no delay,
   * no bullshit. Like TOR_VEGAS, it also uses aggressive BDP estimates, to
   * avoid out-competition. It seems to get a bit better throughput than
   * Vegas, but its aggressive BDP and rapid updates may lead to more queue
   * latency. */
  CC_ALG_NOLA = 3,
} cc_alg_t;

/* Total number of CC algs in cc_alg_t enum */
#define NUM_CC_ALGS (CC_ALG_NOLA+1)
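
/* Illustrative sketch (not part of the Tor source): per the TOR_NOLA
 * comment above, the window update amounts to using the BDP estimate
 * directly as the congestion window, clamped below by the minimum
 * window. The function name is hypothetical. */
static inline uint64_t
nola_cwnd_sketch(uint64_t bdp, uint64_t cwnd_min)
{
  /* No slow start, no other congestion signals: cwnd = max(bdp, cwnd_min) */
  return bdp > cwnd_min ? bdp : cwnd_min;
}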

/** Signifies how we estimate circuit BDP */
typedef enum {
  /* CWND-based BDP will respond to changes in RTT only, and is relative
   * to cwnd growth. So in slow-start, this will under-estimate BDP */
  BDP_ALG_CWND_RTT = 0,

  /* Sendme-based BDP will quickly measure BDP in less than
   * a cwnd worth of data when in use. So it should be good for slow-start.
   * But if the link goes idle, it will be vastly lower than true BDP. Thus,
   * this estimate gets reset when the cwnd is not fully utilized. */
  BDP_ALG_SENDME_RATE = 1,

  /* Inflight BDP is similar to the cwnd estimator, except it uses
   * packets inflight minus local circuit queues instead of current cwnd.
   * Because it is strictly less than or equal to the cwnd, it will cause
   * the cwnd to drift downward. It is only used if the local OR connection
   * is blocked. */
  BDP_ALG_INFLIGHT_RTT = 2,

  /* The Piecewise BDP estimator uses the CWND estimator before there
   * are sufficient SENDMEs to calculate the SENDME estimator. At that
   * point, it uses the SENDME estimator, unless the local OR connection
   * becomes blocked. In that case, it switches to the inflight estimator. */
  BDP_ALG_PIECEWISE = 3,

} bdp_alg_t;

/** Total number of BDP algs in bdp_alg_t enum */
#define NUM_BDP_ALGS (BDP_ALG_PIECEWISE+1)
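
/* Illustrative sketch (not part of the Tor source): the selection logic
 * that the BDP_ALG_PIECEWISE comment above describes, written out with
 * hypothetical function and parameter names. The real estimator
 * implementations live elsewhere in the congestion control code. */
static inline uint64_t
bdp_piecewise_sketch(uint64_t bdp_cwnd_rtt, uint64_t bdp_sendme_rate,
                     uint64_t bdp_inflight_rtt, bool have_enough_sendmes,
                     bool orconn_blocked)
{
  if (orconn_blocked)
    return bdp_inflight_rtt;  /* Blocked orconn: switch to inflight */
  if (!have_enough_sendmes)
    return bdp_cwnd_rtt;      /* Too few SENDMEs yet: use CWND estimator */
  return bdp_sendme_rate;     /* Otherwise: use the SENDME estimator */
}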

/** Vegas algorithm parameters. */
struct vegas_params_t {
  /** The slow-start cwnd cap for RFC3742 */
  uint32_t ss_cwnd_cap;
  /** The maximum slow-start cwnd */
  uint32_t ss_cwnd_max;
  /** The queue use allowed before we exit slow start */
  uint16_t gamma;
  /** The queue use below which we increment cwnd */
  uint16_t alpha;
  /** The queue use above which we decrement cwnd */
  uint16_t beta;
  /** The queue use at which we cap cwnd in steady state */
  uint16_t delta;
  /** Weighted average (percent) between cwnd estimator and
   * piecewise estimator. */
  uint8_t bdp_mix_pct;
};
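
/* Illustrative sketch (not part of the Tor source): how the alpha/beta/delta
 * thresholds above act on "queue use", i.e. the estimated number of cells
 * queued in the network, cwnd minus the BDP estimate. This is a hypothetical
 * simplification; the real update rule lives in congestion_control_vegas.c. */
static inline uint64_t
vegas_steady_state_sketch(uint64_t cwnd, uint64_t bdp, uint64_t cwnd_inc,
                          const struct vegas_params_t *p)
{
  uint64_t queue_use = cwnd > bdp ? cwnd - bdp : 0;

  if (queue_use > p->delta)
    return bdp + p->delta;    /* Cap cwnd in steady state */
  else if (queue_use > p->beta)
    return cwnd - cwnd_inc;   /* Too much queueing: decrement */
  else if (queue_use < p->alpha)
    return cwnd + cwnd_inc;   /* Queue use is light: increment */
  else
    return cwnd;              /* Between alpha and beta: hold */
}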

/** Fields common to all congestion control algorithms */
struct congestion_control_t {
  /**
   * Smartlist of uint64_t monotime usec timestamps of when we sent a data
   * cell that is pending a sendme. FIFO queue that is managed similar to
   * sendme_last_digests. */
  smartlist_t *sendme_pending_timestamps;

  /** RTT time data for congestion control. */
  uint64_t ewma_rtt_usec;
  uint64_t min_rtt_usec;
  uint64_t max_rtt_usec;

  /* Vegas BDP estimate */
  uint64_t bdp;

  /** Congestion window */
  uint64_t cwnd;

  /** Number of cells in-flight (sent but awaiting SENDME ack). */
  uint64_t inflight;

  /**
   * For steady-state: the number of sendme acks until we will acknowledge
   * a congestion event again. It starts out as the number of sendme acks
   * in a congestion window and is decremented each ack. When this reaches
   * 0, it means we should examine our congestion algorithm conditions.
   * In this way, we only react to one congestion event per congestion window.
   *
   * It is also reset to 0 immediately whenever the circuit's orconn is
   * blocked, and when a previously blocked orconn is unblocked.
   */
  uint64_t next_cc_event;

  /** Counts down until we process a cwnd worth of SENDME acks.
   * Used to track full cwnd status. */
  uint64_t next_cwnd_event;

  /** Are we in slow start? */
  bool in_slow_start;

  /** Has the cwnd become full since last cwnd update? */
  bool cwnd_full;

  /** Is the local channel blocked on us? That's a congestion signal */
  bool blocked_chan;

  /* The following parameters are cached from consensus values upon
   * circuit setup. */

  /** Percent of cwnd to increment by during slow start */
  uint16_t cwnd_inc_pct_ss;

  /** Number of cells to increment cwnd by during steady state */
  uint16_t cwnd_inc;

  /** Minimum congestion window (must be at least sendme_inc) */
  uint16_t cwnd_min;

  /**
   * Number of times per congestion window to update based on congestion
   * signals */
  uint16_t cwnd_inc_rate;

  /**
   * Number of cells to ack with every sendme. Taken from consensus parameter
   * and negotiation during circuit setup. */
  uint8_t sendme_inc;

  /** Which congestion control algorithm to use. Taken from
   * consensus parameter and negotiation during circuit setup. */
  cc_alg_t cc_alg;

  /** Which algorithm to estimate circuit bandwidth with. Taken from
   * consensus parameter during circuit setup. */
  bdp_alg_t bdp_alg;

  /** Vegas-specific parameters. These should not be accessed anywhere
   * other than the congestion_control_vegas.c file. */
  struct vegas_params_t vegas_params;
};
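
/* Illustrative sketch (not part of the Tor source): per-ack bookkeeping as
 * described by the inflight and next_cc_event comments above. The function
 * name is hypothetical. */
static inline void
sendme_ack_bookkeeping_sketch(struct congestion_control_t *cc)
{
  /* A SENDME acks sendme_inc cells; they are no longer in flight. */
  cc->inflight -= cc->sendme_inc;
  if (cc->next_cc_event)
    cc->next_cc_event--;
  /* When next_cc_event hits 0, the algorithm examines its congestion
   * signals and re-arms next_cc_event, so we react to at most one
   * congestion event per congestion window. */
}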

/**
 * Returns the number of sendme acks we will receive before we update cwnd.
 *
 * Congestion control literature recommends only one update of cwnd per
 * cwnd worth of acks. However, we can also tune this to be more frequent
 * by increasing the 'cc_cwnd_inc_rate' consensus parameter. This tuning
 * only applies after slow start.
 *
 * If this returns 0 due to high cwnd_inc_rate, the calling code will
 * update every sendme ack.
 */
static inline uint64_t CWND_UPDATE_RATE(const struct congestion_control_t *cc)
{
  /* We add cwnd_inc_rate*sendme_inc/2 to round to nearest integer number
   * of acks */

  if (cc->in_slow_start) {
    return 1;
  } else {
    return ((cc->cwnd + cc->cwnd_inc_rate*cc->sendme_inc/2)
            / (cc->cwnd_inc_rate*cc->sendme_inc));
  }
}
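
/* Worked example (illustrative): with cwnd=124 cells, sendme_inc=31, and
 * cwnd_inc_rate=1, steady state yields (124 + 1*31/2)/(1*31) = 139/31 = 4
 * in integer math: one cwnd update per four SENDME acks, i.e. once per
 * cwnd, since a 124-cell cwnd spans four 31-cell SENDME increments. */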

/**
 * Gives us the number of SENDMEs in a CWND, rounded.
 */
static inline uint64_t SENDME_PER_CWND(const struct congestion_control_t *cc)
{
  /* We add sendme_inc/2 to round to nearest integer number
   * of acks */
  return ((cc->cwnd + cc->sendme_inc/2)/cc->sendme_inc);
}
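
/* Worked example (illustrative): with cwnd=124 and sendme_inc=31, this
 * returns (124 + 15)/31 = 4 SENDMEs per congestion window. */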

/**
 * Returns the amount to increment the congestion window each update,
 * during slow start.
 *
 * Congestion control literature recommends either doubling the cwnd
 * every cwnd during slow start, or some similar exponential growth
 * (such as 50% more every cwnd, for Vegas).
 *
 * This is controlled by a consensus parameter 'cwnd_inc_pct_ss', which
 * allows us to specify the percent of the current congestion window
 * to update by.
 */
static inline uint64_t CWND_INC_SS(const struct congestion_control_t *cc)
{
  return (cc->cwnd_inc_pct_ss*cc->cwnd/100);
}
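
/* Worked example (illustrative): with cwnd_inc_pct_ss=50 and cwnd=200,
 * each slow-start increment is 50*200/100 = 100 cells, i.e. 50% of the
 * current window, giving the Vegas-style exponential growth described
 * above. */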

/**
 * Returns the amount to increment (and for Vegas, also decrement) the
 * congestion window by, every update period.
 *
 * This is controlled by the cc_cwnd_inc consensus parameter.
 */
#define CWND_INC(cc) ((cc)->cwnd_inc)

#endif /* !defined(CONGESTION_CONTROL_ST_H) */