fallbackdir: Update list generated on August 30, 2023
[tor.git] / src / core / or / congestion_control_vegas.c
blob97f1dc0a0efbb28f7175c3b179c2ee727a11ada3
1 /* Copyright (c) 2019-2021, The Tor Project, Inc. */
2 /* See LICENSE for licensing information */
4 /**
5 * \file congestion_control_vegas.c
6 * \brief Code that implements the TOR_VEGAS congestion control algorithm
7 * from Proposal #324.
8 */
10 #define TOR_CONGESTION_CONTROL_VEGAS_PRIVATE
12 #include "core/or/or.h"
14 #include "core/or/crypt_path.h"
15 #include "core/or/or_circuit_st.h"
16 #include "core/or/sendme.h"
17 #include "core/or/congestion_control_st.h"
18 #include "core/or/congestion_control_common.h"
19 #include "core/or/congestion_control_vegas.h"
20 #include "core/or/circuitlist.h"
21 #include "core/or/circuituse.h"
22 #include "core/or/origin_circuit_st.h"
23 #include "core/or/channel.h"
24 #include "feature/nodelist/networkstatus.h"
25 #include "feature/control/control_events.h"
26 #include "lib/math/stats.h"
28 #define OUTBUF_CELLS (2*TLS_RECORD_MAX_CELLS)
30 #define SS_CWND_MAX_DFLT (5000)
32 /* sbws circs are two hops, so params are based on 2 outbufs of cells */
33 #define VEGAS_ALPHA_SBWS_DFLT (2*OUTBUF_CELLS-TLS_RECORD_MAX_CELLS)
34 #define VEGAS_BETA_SBWS_DFLT (2*OUTBUF_CELLS+TLS_RECORD_MAX_CELLS)
35 #define VEGAS_GAMMA_SBWS_DFLT (2*OUTBUF_CELLS)
36 #define VEGAS_DELTA_SBWS_DFLT (4*OUTBUF_CELLS)
37 #define VEGAS_SSCAP_SBWS_DFLT (400)
39 /* Exits are three hops, so params are based on 3 outbufs of cells */
40 #define VEGAS_ALPHA_EXIT_DFLT (3*OUTBUF_CELLS)
41 #define VEGAS_BETA_EXIT_DFLT (4*OUTBUF_CELLS)
42 #define VEGAS_GAMMA_EXIT_DFLT (3*OUTBUF_CELLS)
43 #define VEGAS_DELTA_EXIT_DFLT (5*OUTBUF_CELLS)
44 #define VEGAS_SSCAP_EXIT_DFLT (600)
46 /* Onion rends are six hops, so params are based on 6 outbufs of cells */
47 #define VEGAS_ALPHA_ONION_DFLT (3*OUTBUF_CELLS)
48 #define VEGAS_BETA_ONION_DFLT (6*OUTBUF_CELLS)
49 #define VEGAS_GAMMA_ONION_DFLT (4*OUTBUF_CELLS)
50 #define VEGAS_DELTA_ONION_DFLT (7*OUTBUF_CELLS)
51 #define VEGAS_SSCAP_ONION_DFLT (475)
53 /**
54 * Number of sendme_incs between cwnd and inflight for cwnd to be
55 * still considered full */
56 #define VEGAS_CWND_FULL_GAP_DFLT (4)
57 static int cc_vegas_cwnd_full_gap = VEGAS_CWND_FULL_GAP_DFLT;
/**
 * If the cwnd becomes less than this percent full at any point,
 * we declare it not full immediately.
 */
#define VEGAS_CWND_FULL_MINPCT_DFLT (25)
64 static int cc_vegas_cwnd_full_minpct = VEGAS_CWND_FULL_MINPCT_DFLT;
/**
 * Param to decide when to reset the cwnd.
 */
#define VEGAS_CWND_FULL_PER_CWND_DFLT (1)
70 static int cc_cwnd_full_per_cwnd = VEGAS_CWND_FULL_PER_CWND_DFLT;
72 /** Moving average of the cc->cwnd from each circuit exiting slowstart. */
73 double cc_stats_vegas_exit_ss_cwnd_ma = 0;
74 double cc_stats_vegas_exit_ss_bdp_ma = 0;
75 double cc_stats_vegas_exit_ss_inc_ma = 0;
76 double cc_stats_vegas_gamma_drop_ma = 0;
77 double cc_stats_vegas_delta_drop_ma = 0;
78 double cc_stats_vegas_ss_csig_blocked_ma = 0;
79 double cc_stats_vegas_csig_blocked_ma = 0;
80 double cc_stats_vegas_csig_alpha_ma = 0;
81 double cc_stats_vegas_csig_beta_ma = 0;
82 double cc_stats_vegas_csig_delta_ma = 0;
84 double cc_stats_vegas_ss_queue_ma = 0;
85 double cc_stats_vegas_queue_ma = 0;
86 double cc_stats_vegas_bdp_ma = 0;
88 /** Stats on how many times we reached "delta" param. */
89 uint64_t cc_stats_vegas_above_delta = 0;
90 /** Stats on how many times we reached "ss_cwnd_max" param. */
91 uint64_t cc_stats_vegas_above_ss_cwnd_max = 0;
92 uint64_t cc_stats_vegas_below_ss_inc_floor = 0;
93 uint64_t cc_stats_vegas_circ_exited_ss = 0;
95 /**
96 * The original TCP Vegas congestion window BDP estimator.
98 static inline uint64_t
99 vegas_bdp(const congestion_control_t *cc)
101 return cc->bdp;
105 * Cache Vegas consensus parameters.
107 void
108 congestion_control_vegas_set_params(congestion_control_t *cc,
109 cc_path_t path)
111 tor_assert(cc->cc_alg == CC_ALG_VEGAS);
112 const char *alpha_str = NULL, *beta_str = NULL, *gamma_str = NULL;
113 const char *delta_str = NULL, *sscap_str = NULL;
114 int alpha, beta, gamma, delta, ss_cwnd_cap;
116 switch (path) {
117 case CC_PATH_SBWS:
118 alpha_str = "cc_vegas_alpha_sbws";
119 beta_str = "cc_vegas_beta_sbws";
120 gamma_str = "cc_vegas_gamma_sbws";
121 delta_str = "cc_vegas_delta_sbws";
122 sscap_str = "cc_sscap_sbws";
123 alpha = VEGAS_ALPHA_SBWS_DFLT;
124 beta = VEGAS_BETA_SBWS_DFLT;
125 gamma = VEGAS_GAMMA_SBWS_DFLT;
126 delta = VEGAS_DELTA_SBWS_DFLT;
127 ss_cwnd_cap = VEGAS_SSCAP_SBWS_DFLT;
128 break;
129 case CC_PATH_EXIT:
130 case CC_PATH_ONION_SOS:
131 alpha_str = "cc_vegas_alpha_exit";
132 beta_str = "cc_vegas_beta_exit";
133 gamma_str = "cc_vegas_gamma_exit";
134 delta_str = "cc_vegas_delta_exit";
135 sscap_str = "cc_sscap_exit";
136 alpha = VEGAS_ALPHA_EXIT_DFLT;
137 beta = VEGAS_BETA_EXIT_DFLT;
138 gamma = VEGAS_GAMMA_EXIT_DFLT;
139 delta = VEGAS_DELTA_EXIT_DFLT;
140 ss_cwnd_cap = VEGAS_SSCAP_EXIT_DFLT;
141 break;
142 case CC_PATH_ONION:
143 case CC_PATH_ONION_VG:
144 alpha_str = "cc_vegas_alpha_onion";
145 beta_str = "cc_vegas_beta_onion";
146 gamma_str = "cc_vegas_gamma_onion";
147 delta_str = "cc_vegas_delta_onion";
148 sscap_str = "cc_sscap_onion";
149 alpha = VEGAS_ALPHA_ONION_DFLT;
150 beta = VEGAS_BETA_ONION_DFLT;
151 gamma = VEGAS_GAMMA_ONION_DFLT;
152 delta = VEGAS_DELTA_ONION_DFLT;
153 ss_cwnd_cap = VEGAS_SSCAP_ONION_DFLT;
154 break;
155 default:
156 tor_assert(0);
157 break;
160 cc->vegas_params.ss_cwnd_cap =
161 networkstatus_get_param(NULL, sscap_str,
162 ss_cwnd_cap,
163 100,
164 INT32_MAX);
166 cc->vegas_params.ss_cwnd_max =
167 networkstatus_get_param(NULL, "cc_ss_max",
168 SS_CWND_MAX_DFLT,
169 500,
170 INT32_MAX);
172 cc->vegas_params.alpha =
173 networkstatus_get_param(NULL, alpha_str,
174 alpha,
176 1000);
178 cc->vegas_params.beta =
179 networkstatus_get_param(NULL, beta_str,
180 beta,
182 1000);
184 cc->vegas_params.gamma =
185 networkstatus_get_param(NULL, gamma_str,
186 gamma,
188 1000);
190 cc->vegas_params.delta =
191 networkstatus_get_param(NULL, delta_str,
192 delta,
194 INT32_MAX);
196 cc_vegas_cwnd_full_minpct =
197 networkstatus_get_param(NULL, "cc_cwnd_full_minpct",
198 VEGAS_CWND_FULL_MINPCT_DFLT,
200 100);
202 cc_vegas_cwnd_full_gap =
203 networkstatus_get_param(NULL, "cc_cwnd_full_gap",
204 VEGAS_CWND_FULL_GAP_DFLT,
206 INT16_MAX);
208 cc_cwnd_full_per_cwnd =
209 networkstatus_get_param(NULL, "cc_cwnd_full_per_cwnd",
210 VEGAS_CWND_FULL_PER_CWND_DFLT,
216 * Common log function for tracking all vegas state.
218 static void
219 congestion_control_vegas_log(const circuit_t *circ,
220 const congestion_control_t *cc)
222 uint64_t queue_use = cc->cwnd - vegas_bdp(cc);
224 if (CIRCUIT_IS_ORIGIN(circ) &&
225 circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED) {
226 log_info(LD_CIRC,
227 "CC: TOR_VEGAS Onion Circuit %d "
228 "RTT: %"PRIu64", %"PRIu64", %"PRIu64", "
229 "CWND: %"PRIu64", "
230 "INFL: %"PRIu64", "
231 "VBDP: %"PRIu64", "
232 "QUSE: %"PRIu64", "
233 "BWE: %"PRIu64", "
234 "SS: %d",
235 CONST_TO_ORIGIN_CIRCUIT(circ)->global_identifier,
236 cc->min_rtt_usec/1000,
237 cc->ewma_rtt_usec/1000,
238 cc->max_rtt_usec/1000,
239 cc->cwnd,
240 cc->inflight,
241 vegas_bdp(cc),
242 queue_use,
243 cc->cwnd*CELL_MAX_NETWORK_SIZE*1000/
244 MAX(cc->min_rtt_usec,cc->ewma_rtt_usec),
245 cc->in_slow_start
247 } else {
248 log_info(LD_CIRC,
249 "CC: TOR_VEGAS "
250 "RTT: %"PRIu64", %"PRIu64", %"PRIu64", "
251 "CWND: %"PRIu64", "
252 "INFL: %"PRIu64", "
253 "VBDP: %"PRIu64", "
254 "QUSE: %"PRIu64", "
255 "BWE: %"PRIu64", "
256 "SS: %d",
257 cc->min_rtt_usec/1000,
258 cc->ewma_rtt_usec/1000,
259 cc->max_rtt_usec/1000,
260 cc->cwnd,
261 cc->inflight,
262 vegas_bdp(cc),
263 queue_use,
264 cc->cwnd*CELL_MAX_NETWORK_SIZE*1000/
265 MAX(cc->min_rtt_usec,cc->ewma_rtt_usec),
266 cc->in_slow_start
272 * Implements RFC3742: Limited Slow Start.
273 * https://datatracker.ietf.org/doc/html/rfc3742#section-2
275 static inline uint64_t
276 rfc3742_ss_inc(const congestion_control_t *cc)
278 if (cc->cwnd <= cc->vegas_params.ss_cwnd_cap) {
279 /* If less than the cap, round and always grow by at least 1 sendme_inc. */
280 return ((uint64_t)cc->cwnd_inc_pct_ss*cc->sendme_inc + 50)/100;
281 } else {
282 // K = int(cwnd/(0.5 max_ssthresh));
283 // => K = 2*cwnd/max_ssthresh
284 // cwnd += int(MSS/K);
285 // => cwnd += MSS*max_ssthresh/(2*cwnd)
286 // Return at least 1 for inc.
287 return MAX(
288 ((uint64_t)cc->sendme_inc*cc->vegas_params.ss_cwnd_cap + cc->cwnd)/
289 (2*cc->cwnd),
295 * Exit Vegas slow start.
297 * This function sets our slow-start state to 0, and emits logs
298 * and control port information signifying end of slow start.
299 * It also schedules the next CWND update for steady-state.
301 static void
302 congestion_control_vegas_exit_slow_start(const circuit_t *circ,
303 congestion_control_t *cc)
305 congestion_control_vegas_log(circ, cc);
306 cc->in_slow_start = 0;
307 congestion_control_vegas_log(circ, cc);
309 /* Update metricsport metrics */
310 cc_stats_vegas_exit_ss_cwnd_ma =
311 stats_update_running_avg(cc_stats_vegas_exit_ss_cwnd_ma,
312 cc->cwnd);
313 cc_stats_vegas_exit_ss_bdp_ma =
314 stats_update_running_avg(cc_stats_vegas_exit_ss_bdp_ma,
315 vegas_bdp(cc));
316 cc_stats_vegas_exit_ss_inc_ma =
317 stats_update_running_avg(cc_stats_vegas_exit_ss_inc_ma,
318 rfc3742_ss_inc(cc));
319 cc_stats_vegas_circ_exited_ss++;
321 /* We need to report that slow start has exited ASAP,
322 * for sbws bandwidth measurement. */
323 if (CIRCUIT_IS_ORIGIN(circ)) {
324 /* We must discard const here because the event modifies fields :/ */
325 control_event_circ_bandwidth_used_for_circ(
326 TO_ORIGIN_CIRCUIT((circuit_t*)circ));
331 * Returns true if the congestion window is considered full.
333 * We allow a number of sendme_incs gap in case buffering issues
334 * with edge conns cause the window to occasionally be not quite
335 * full. This can happen if several SENDMEs arrive before we
336 * return to the eventloop to fill the inbuf on edge connections.
338 static inline bool
339 cwnd_became_full(const congestion_control_t *cc)
341 if (cc->inflight + cc_vegas_cwnd_full_gap*cc->sendme_inc >= cc->cwnd) {
342 return true;
343 } else {
344 return false;
349 * Returns true if the congestion window is no longer full.
351 * This functions as a low watermark, below which we stop
352 * allowing cwnd increments.
354 static inline bool
355 cwnd_became_nonfull(const congestion_control_t *cc)
357 /* Use multiply form to avoid division */
358 if (100*cc->inflight < cc_vegas_cwnd_full_minpct * cc->cwnd) {
359 return true;
360 } else {
361 return false;
366 * Decide if it is time to reset the cwnd_full status.
368 * If cc_cwnd_full_per_cwnd=1, we reset cwnd_full once per congestion
369 * window, ie:
370 * next_cwnd_event == SENDME_PER_CWND(cc)
372 * Otherwise, we reset cwnd_full whenever there is an update of
373 * the congestion window, ie:
374 * next_cc_event == CWND_UPDATE_RATE(cc)
376 static inline bool
377 cwnd_full_reset(const congestion_control_t *cc)
379 if (cc_cwnd_full_per_cwnd) {
380 return (cc->next_cwnd_event == SENDME_PER_CWND(cc));
381 } else {
382 return (cc->next_cc_event == CWND_UPDATE_RATE(cc));
387 * Process a SENDME and update the congestion window according to the
388 * rules specified in TOR_VEGAS of Proposal #324.
390 * Essentially, this algorithm attempts to measure queue lengths on
391 * the circuit by subtracting the bandwidth-delay-product estimate
392 * from the current congestion window.
394 * If the congestion window is larger than the bandwidth-delay-product,
395 * then data is assumed to be queuing. We reduce the congestion window
396 * in that case.
398 * If the congestion window is smaller than the bandwidth-delay-product,
399 * then there is spare bandwidth capacity on the circuit. We increase the
400 * congestion window in that case.
402 * The congestion window is updated only once every congestion window worth of
403 * packets, even if the signal persists. It is also updated whenever the
404 * upstream orcon blocks, or unblocks. This minimizes local client queues.
407 congestion_control_vegas_process_sendme(congestion_control_t *cc,
408 const circuit_t *circ)
410 uint64_t queue_use;
412 tor_assert(cc && cc->cc_alg == CC_ALG_VEGAS);
413 tor_assert(circ);
415 /* Update ack counter until next congestion signal event is allowed */
416 if (cc->next_cc_event)
417 cc->next_cc_event--;
419 /* Update ack counter until a full cwnd is processed */
420 if (cc->next_cwnd_event)
421 cc->next_cwnd_event--;
423 /* Compute BDP and RTT. If we did not update, don't run the alg */
424 if (!congestion_control_update_circuit_estimates(cc, circ)) {
425 cc->inflight = cc->inflight - cc->sendme_inc;
426 return 0;
429 /* The queue use is the amount in which our cwnd is above BDP;
430 * if it is below, then 0 queue use. */
431 if (vegas_bdp(cc) > cc->cwnd)
432 queue_use = 0; // This should not happen anymore..
433 else
434 queue_use = cc->cwnd - vegas_bdp(cc);
436 /* Update the full state */
437 if (cwnd_became_full(cc))
438 cc->cwnd_full = 1;
439 else if (cwnd_became_nonfull(cc))
440 cc->cwnd_full = 0;
442 if (cc->in_slow_start) {
443 if (queue_use < cc->vegas_params.gamma && !cc->blocked_chan) {
444 /* If the congestion window is not fully in use, skip any
445 * increment of cwnd in slow start */
446 if (cc->cwnd_full) {
447 /* Get the "Limited Slow Start" increment */
448 uint64_t inc = rfc3742_ss_inc(cc);
449 cc->cwnd += inc;
451 // Check if inc is less than what we would do in steady-state
452 // avoidance. Note that this is likely never to happen
453 // in practice, but we keep this block and the metrics to make
454 // sure.
455 if (inc*SENDME_PER_CWND(cc) <= CWND_INC(cc)*cc->cwnd_inc_rate) {
456 congestion_control_vegas_exit_slow_start(circ, cc);
458 cc_stats_vegas_below_ss_inc_floor++;
460 /* We exited slow start without being blocked */
461 cc_stats_vegas_ss_csig_blocked_ma =
462 stats_update_running_avg(cc_stats_vegas_ss_csig_blocked_ma,
466 } else {
467 uint64_t old_cwnd = cc->cwnd;
469 /* Congestion signal: Set cwnd to gamma threshold */
470 cc->cwnd = vegas_bdp(cc) + cc->vegas_params.gamma;
472 /* Compute the percentage we experience a blocked csig vs RTT sig */
473 if (cc->blocked_chan) {
474 cc_stats_vegas_ss_csig_blocked_ma =
475 stats_update_running_avg(cc_stats_vegas_ss_csig_blocked_ma,
476 100);
477 } else {
478 uint64_t cwnd_diff = (old_cwnd > cc->cwnd ? old_cwnd - cc->cwnd : 0);
480 cc_stats_vegas_ss_csig_blocked_ma =
481 stats_update_running_avg(cc_stats_vegas_ss_csig_blocked_ma,
484 /* Account the amount we reduced the cwnd by for the gamma cutoff */
485 cc_stats_vegas_gamma_drop_ma =
486 stats_update_running_avg(cc_stats_vegas_gamma_drop_ma,
487 cwnd_diff);
490 congestion_control_vegas_exit_slow_start(circ, cc);
493 if (cc->cwnd >= cc->vegas_params.ss_cwnd_max) {
494 cc->cwnd = cc->vegas_params.ss_cwnd_max;
495 congestion_control_vegas_exit_slow_start(circ, cc);
496 cc_stats_vegas_above_ss_cwnd_max++;
499 cc_stats_vegas_ss_queue_ma =
500 stats_update_running_avg(cc_stats_vegas_ss_queue_ma,
501 queue_use);
502 /* After slow start, We only update once per window */
503 } else if (cc->next_cc_event == 0) {
504 if (queue_use > cc->vegas_params.delta) {
505 uint64_t old_cwnd = cc->cwnd;
506 uint64_t cwnd_diff;
508 /* If we are above the delta threshold, drop cwnd down to the
509 * delta threshold. */
510 cc->cwnd = vegas_bdp(cc) + cc->vegas_params.delta - CWND_INC(cc);
512 /* Account the amount we reduced the cwnd by for the gamma cutoff */
513 cwnd_diff = (old_cwnd > cc->cwnd ? old_cwnd - cc->cwnd : 0);
514 cc_stats_vegas_delta_drop_ma =
515 stats_update_running_avg(cc_stats_vegas_delta_drop_ma,
516 cwnd_diff);
518 cc_stats_vegas_above_delta++;
520 /* Percentage metrics: Add 100% delta, 0 for other two */
521 cc_stats_vegas_csig_alpha_ma =
522 stats_update_running_avg(cc_stats_vegas_csig_alpha_ma,
524 cc_stats_vegas_csig_beta_ma =
525 stats_update_running_avg(cc_stats_vegas_csig_beta_ma,
527 cc_stats_vegas_csig_delta_ma =
528 stats_update_running_avg(cc_stats_vegas_csig_delta_ma,
529 100);
530 } else if (queue_use > cc->vegas_params.beta || cc->blocked_chan) {
531 cc->cwnd -= CWND_INC(cc);
533 /* Compute the percentage we experience a blocked csig vs RTT sig */
534 if (cc->blocked_chan) {
535 cc_stats_vegas_csig_blocked_ma =
536 stats_update_running_avg(cc_stats_vegas_csig_blocked_ma,
537 100);
538 } else {
539 cc_stats_vegas_csig_blocked_ma =
540 stats_update_running_avg(cc_stats_vegas_csig_blocked_ma,
544 /* Percentage counters: Add 100% beta, 0 for other two */
545 cc_stats_vegas_csig_alpha_ma =
546 stats_update_running_avg(cc_stats_vegas_csig_alpha_ma,
548 cc_stats_vegas_csig_beta_ma =
549 stats_update_running_avg(cc_stats_vegas_csig_beta_ma,
550 100);
551 cc_stats_vegas_csig_delta_ma =
552 stats_update_running_avg(cc_stats_vegas_csig_delta_ma,
554 } else if (cc->cwnd_full &&
555 queue_use < cc->vegas_params.alpha) {
556 cc->cwnd += CWND_INC(cc);
558 /* Percentage counters: Add 100% alpha, 0 for other two */
559 cc_stats_vegas_csig_alpha_ma =
560 stats_update_running_avg(cc_stats_vegas_csig_alpha_ma,
561 100);
562 cc_stats_vegas_csig_beta_ma =
563 stats_update_running_avg(cc_stats_vegas_csig_beta_ma,
565 cc_stats_vegas_csig_delta_ma =
566 stats_update_running_avg(cc_stats_vegas_csig_delta_ma,
568 } else {
569 /* Percentage counters: No signal this round. Add 0% to all */
570 cc_stats_vegas_csig_alpha_ma =
571 stats_update_running_avg(cc_stats_vegas_csig_alpha_ma,
573 cc_stats_vegas_csig_beta_ma =
574 stats_update_running_avg(cc_stats_vegas_csig_beta_ma,
576 cc_stats_vegas_csig_delta_ma =
577 stats_update_running_avg(cc_stats_vegas_csig_delta_ma,
581 /* cwnd can never fall below 1 increment */
582 cc->cwnd = MAX(cc->cwnd, cc->cwnd_min);
584 congestion_control_vegas_log(circ, cc);
586 /* Update metrics */
587 cc_stats_vegas_queue_ma =
588 stats_update_running_avg(cc_stats_vegas_queue_ma,
589 queue_use);
590 cc_stats_vegas_bdp_ma =
591 stats_update_running_avg(cc_stats_vegas_bdp_ma,
592 vegas_bdp(cc));
594 /* Log if we're above the ss_cap */
595 if (cc->cwnd >= cc->vegas_params.ss_cwnd_max) {
596 log_info(LD_CIRC,
597 "CC: TOR_VEGAS above ss_max in steady state for circ %d: %"PRIu64,
598 circ->purpose, cc->cwnd);
602 /* Reset event counters */
603 if (cc->next_cwnd_event == 0) {
604 cc->next_cwnd_event = SENDME_PER_CWND(cc);
606 if (cc->next_cc_event == 0) {
607 cc->next_cc_event = CWND_UPDATE_RATE(cc);
610 /* Decide if enough time has passed to reset the cwnd utilization */
611 if (cwnd_full_reset(cc))
612 cc->cwnd_full = 0;
614 /* Update inflight with ack */
615 cc->inflight = cc->inflight - cc->sendme_inc;
617 return 0;