Add "Heartbeat" to the start of several heartbeat messages.
[tor.git] / src / core / or / circuitpadding.c
blob6dfe94de016d55a9ede895e78db3b9c855ae5ea9
1 /* Copyright (c) 2017 The Tor Project, Inc. */
2 /* See LICENSE for licensing information */
4 /**
5 * \file circuitpadding.c
6 * \brief Circuit-level padding implementation
8 * \details
10 * This file implements Tor proposal 254 "Padding Negotiation" which is heavily
11 * inspired by the paper "Toward an Efficient Website Fingerprinting Defense"
12 * by M. Juarez, M. Imani, M. Perry, C. Diaz, M. Wright.
14 * In particular the code in this file describes mechanisms for clients to
15 * negotiate various types of circuit-level padding from relays.
17 * Each padding type is described by a state machine (circpad_machine_spec_t),
 18  * which is also referred to as a "padding machine" in this file. Currently,
19 * these state machines are hardcoded in the source code (e.g. see
20 * circpad_machines_init()), but in the future we will be able to
21 * serialize them in the torrc or the consensus.
23 * As specified by prop#254, clients can negotiate padding with relays by using
24 * PADDING_NEGOTIATE cells. After successful padding negotiation, padding
25 * machines are assigned to the circuit in their mutable form as a
26 * circpad_machine_runtime_t.
28 * Each state of a padding state machine can be either:
29 * - A histogram that specifies inter-arrival padding delays.
30 * - Or a parametrized probability distribution that specifies inter-arrival
31 * delays (see circpad_distribution_type_t).
33 * Padding machines start from the START state and finish with the END
34 * state. They can transition between states using the events in
35 * circpad_event_t.
37 * When a padding machine reaches the END state, it gets wiped from the circuit
38 * so that other padding machines can take over if needed (see
39 * circpad_machine_spec_transitioned_to_end()).
41 ****************************
42 * General notes:
44 * All used machines should be heap allocated and placed into
45 * origin_padding_machines/relay_padding_machines so that they get correctly
46 * cleaned up by the circpad_free_all() function.
47 **/
49 #define CIRCUITPADDING_PRIVATE
51 #include <math.h>
52 #include "lib/math/fp.h"
53 #include "lib/math/prob_distr.h"
54 #include "core/or/or.h"
55 #include "core/or/circuitpadding.h"
56 #include "core/or/circuitpadding_machines.h"
57 #include "core/or/circuitlist.h"
58 #include "core/or/circuituse.h"
59 #include "core/mainloop/netstatus.h"
60 #include "core/or/relay.h"
61 #include "feature/stats/rephist.h"
62 #include "feature/nodelist/networkstatus.h"
64 #include "core/or/channel.h"
66 #include "lib/time/compat_time.h"
67 #include "lib/defs/time.h"
68 #include "lib/crypt_ops/crypto_rand.h"
70 #include "core/or/crypt_path_st.h"
71 #include "core/or/circuit_st.h"
72 #include "core/or/origin_circuit_st.h"
73 #include "core/or/or_circuit_st.h"
74 #include "feature/nodelist/routerstatus_st.h"
75 #include "feature/nodelist/node_st.h"
76 #include "core/or/cell_st.h"
77 #include "core/or/extend_info_st.h"
78 #include "core/crypto/relay_crypto.h"
79 #include "feature/nodelist/nodelist.h"
81 #include "app/config/config.h"
83 static inline circpad_circuit_state_t circpad_circuit_state(
84 origin_circuit_t *circ);
85 static void circpad_setup_machine_on_circ(circuit_t *on_circ,
86 const circpad_machine_spec_t *machine);
87 static double circpad_distribution_sample(circpad_distribution_t dist);
89 static inline void circpad_machine_update_state_length_for_nonpadding(
90 circpad_machine_runtime_t *mi);
92 /** Cached consensus params */
93 static uint8_t circpad_padding_disabled;
94 static uint8_t circpad_padding_reduced;
95 static uint8_t circpad_global_max_padding_percent;
96 static uint16_t circpad_global_allowed_cells;
97 static uint16_t circpad_max_circ_queued_cells;
99 /** Global cell counts, for rate limiting */
100 static uint64_t circpad_global_padding_sent;
101 static uint64_t circpad_global_nonpadding_sent;
103 /** This is the list of circpad_machine_spec_t's parsed from consensus and
104 * torrc that have origin_side == 1 (ie: are for client side).
106 * The machines in this smartlist are considered immutable and they are used
 107  * as-is by circuits, so they should not change or get deallocated during
 108  * Tor's runtime for as long as circuits are alive. */
109 STATIC smartlist_t *origin_padding_machines = NULL;
111 /** This is the list of circpad_machine_spec_t's parsed from consensus and
112 * torrc that have origin_side == 0 (ie: are for relay side).
114 * The machines in this smartlist are considered immutable and they are used
 115  * as-is by circuits, so they should not change or get deallocated during
 116  * Tor's runtime for as long as circuits are alive. */
117 STATIC smartlist_t *relay_padding_machines = NULL;
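/* Illustrative sketch (not part of this file): per the "General notes" above,
 * circpad_machines_init() is expected to heap-allocate each machine spec and
 * append it to one of these lists so that circpad_free_all() can reclaim it.
 * The exact set of fields shown here is an assumption for illustration only;
 * machine_num assignment and condition setup are omitted. */
#if 0
  circpad_machine_spec_t *example = tor_malloc_zero(sizeof(*example));
  example->is_origin_side = 1;   /* client-side machine */
  example->target_hopnum = 2;    /* negotiate padding with the middle hop */
  example->machine_index = 0;    /* slot in circ->padding_machine[] */
  smartlist_add(origin_padding_machines, example);
#endif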
119 #ifndef COCCI
120 /** Loop over the current padding state machines using <b>loop_var</b> as the
121 * loop variable. */
122 #define FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
123 STMT_BEGIN \
124 for (int loop_var = 0; loop_var < CIRCPAD_MAX_MACHINES; loop_var++) {
125 #define FOR_EACH_CIRCUIT_MACHINE_END } STMT_END ;
127 /** Loop over the current active padding state machines using <b>loop_var</b>
128 * as the loop variable. If a machine is not active, skip it. */
129 #define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(loop_var, circ) \
130 FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
131 if (!(circ)->padding_info[loop_var]) \
132 continue;
133 #define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END } STMT_END ;
134 #endif /* !defined(COCCI) */
137 * Free the machineinfo at an index
139 static void
140 circpad_circuit_machineinfo_free_idx(circuit_t *circ, int idx)
142 if (circ->padding_info[idx]) {
143 log_fn(LOG_INFO,LD_CIRC, "Freeing padding info idx %d on circuit %u (%d)",
144 idx, CIRCUIT_IS_ORIGIN(circ) ?
145 TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
146 circ->purpose);
148 tor_free(circ->padding_info[idx]->histogram);
149 timer_free(circ->padding_info[idx]->padding_timer);
150 tor_free(circ->padding_info[idx]);
155 * Return true if circpad has decided to hold the circuit open for additional
156 * padding. This function is used to take and retain ownership of certain
157 * types of circuits that have padding machines on them, that have been passed
158 * to circuit_mark_for_close().
160 * circuit_mark_for_close() calls this function to ask circpad if any padding
161 * machines want to keep the circuit open longer to pad.
163 * Any non-measurement circuit that was closed for a normal, non-error reason
164 * code may be held open for up to CIRCPAD_DELAY_INFINITE microseconds between
165 * network-driven cell events.
167 * After CIRCPAD_DELAY_INFINITE microseconds of silence on a circuit, this
168 * function will no longer hold it open (it will return 0 regardless of
169 * what the machines ask for, and thus circuit_expire_old_circuits_clientside()
170 * will close the circuit after roughly 1.25hr of idle time, maximum,
 171  * regardless of the padding machine state.)
174 circpad_marked_circuit_for_padding(circuit_t *circ, int reason)
176 /* If the circuit purpose is measurement or path bias, don't
177 * hold it open */
178 if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING ||
179 circ->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
180 return 0;
183 /* If the circuit is closed for any reason other than these three valid,
184 * client-side close reasons, do not try to keep it open. It is probably
185 * damaged or unusable. Note this is OK with vanguards because
186 * controller-closed circuits have REASON=REQUESTED, so vanguards-closed
187 * circuits will not be held open (we want them to close ASAP). */
188 if (!(reason == END_CIRC_REASON_NONE ||
189 reason == END_CIRC_REASON_FINISHED ||
190 reason == END_CIRC_REASON_IP_NOW_REDUNDANT)) {
191 return 0;
194 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
195 circpad_machine_runtime_t *mi = circ->padding_info[i];
196 if (!mi) {
197 continue; // No padding runtime info; check next machine
200 const circpad_state_t *state = circpad_machine_current_state(mi);
202 /* If we're in END state (NULL here), then check next machine */
203 if (!state) {
204 continue; // check next machine
207 /* If the machine does not want to control the circuit close itself, then
208 * check the next machine */
209 if (!circ->padding_machine[i]->manage_circ_lifetime) {
210 continue; // check next machine
213 /* If the machine has reached the END state, we can close. Check next
214 * machine. */
215 if (mi->current_state == CIRCPAD_STATE_END) {
216 continue; // check next machine
219 log_info(LD_CIRC, "Circuit %d is not marked for close because of a "
220 "pending padding machine in index %d.",
221 CIRCUIT_IS_ORIGIN(circ) ?
222 TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0, i);
224 /* If the machine has had no network events at all within the
225 * last circpad_delay_t timespan, it's in some deadlock state.
226 * Tell circuit_mark_for_close() that we don't own it anymore.
227 * This will allow circuit_expire_old_circuits_clientside() to
228 * close it.
230 if (circ->padding_info[i]->last_cell_time_sec +
231 (time_t)CIRCPAD_DELAY_MAX_SECS < approx_time()) {
232 log_notice(LD_BUG, "Circuit %d was not marked for close because of a "
233 "pending padding machine in index %d for over an hour. "
234 "Circuit is a %s",
235 CIRCUIT_IS_ORIGIN(circ) ?
236 TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
237 i, circuit_purpose_to_string(circ->purpose));
239 return 0; // abort timer reached; mark the circuit for close now
242 /* If we weren't marked dirty yet, let's pretend we're dirty now.
243 * ("Dirty" means that a circuit has been used for application traffic
 244  * by Tor. Dirty circuits have different expiry times, and are not
245 * considered in counts of built circuits, etc. By claiming that we're
246 * dirty, the rest of Tor will make decisions as if we were actually
 247  * used by application data.)
249 * This is most important for circuit_expire_old_circuits_clientside(),
250 * where we want that function to expire us after the padding machine
251 * has shut down, but using the MaxCircuitDirtiness timer instead of
252 * the idle circuit timer (again, we want this because we're not
253 * supposed to look idle to Guard nodes that can see our lifespan). */
254 if (!circ->timestamp_dirty)
255 circ->timestamp_dirty = approx_time();
257 /* Take ownership of the circuit */
258 circuit_change_purpose(circ, CIRCUIT_PURPOSE_C_CIRCUIT_PADDING);
260 return 1;
261 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
263 return 0; // No machine wanted to keep the circuit open; mark for close
267 * Free all the machineinfos in <b>circ</b> that match <b>machine_num</b>.
269 * If machine_ctr is non-zero, also make sure it matches the padding_info's
270 * machine counter before freeing.
272 * Returns true if any machineinfos with that number were freed.
273 * False otherwise. */
274 static int
275 free_circ_machineinfos_with_machine_num(circuit_t *circ, int machine_num,
276 uint32_t machine_ctr)
278 int found = 0;
279 FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
280 if (circ->padding_machine[i] &&
281 circ->padding_machine[i]->machine_num == machine_num) {
282 /* If machine_ctr is non-zero, make sure it matches too. This
 283  * is to ensure that old STOP messages don't shut down newer machines. */
284 if (machine_ctr && circ->padding_info[i] &&
285 circ->padding_info[i]->machine_ctr != machine_ctr) {
286 log_info(LD_CIRC,
287 "Padding shutdown for wrong (old?) machine ctr: %u vs %u",
288 machine_ctr, circ->padding_info[i]->machine_ctr);
289 } else {
290 circpad_circuit_machineinfo_free_idx(circ, i);
291 circ->padding_machine[i] = NULL;
292 found = 1;
295 } FOR_EACH_CIRCUIT_MACHINE_END;
297 return found;
301 * Free all padding machines and mutable info associated with circuit
303 void
304 circpad_circuit_free_all_machineinfos(circuit_t *circ)
306 FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
307 circpad_circuit_machineinfo_free_idx(circ, i);
308 } FOR_EACH_CIRCUIT_MACHINE_END;
312 * Allocate a new mutable machineinfo structure.
314 STATIC circpad_machine_runtime_t *
315 circpad_circuit_machineinfo_new(circuit_t *on_circ, int machine_index)
317 circpad_machine_runtime_t *mi =
318 tor_malloc_zero(sizeof(circpad_machine_runtime_t));
319 mi->machine_index = machine_index;
320 mi->on_circ = on_circ;
321 mi->last_cell_time_sec = approx_time();
322 mi->machine_ctr = on_circ->padding_machine_ctr;
324 return mi;
328 * Return the circpad_state_t for the current state based on the
329 * mutable info.
331 * This function returns NULL when the machine is in the end state or in an
332 * invalid state.
334 STATIC const circpad_state_t *
335 circpad_machine_current_state(const circpad_machine_runtime_t *mi)
337 const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
339 if (mi->current_state == CIRCPAD_STATE_END) {
340 return NULL;
341 } else if (BUG(mi->current_state >= machine->num_states)) {
342 log_fn(LOG_WARN,LD_CIRC,
343 "Invalid circuit padding state %d",
344 mi->current_state);
346 return NULL;
349 return &machine->states[mi->current_state];
353 * Get the lower bound of a histogram bin.
355 * You can obtain the upper bound using histogram_get_bin_upper_bound().
 357  * This function can also be called with 'bin' set to a value equal to or
 358  * greater than histogram_len, in which case the infinity bin is chosen and
359 * CIRCPAD_DELAY_INFINITE is returned.
361 STATIC circpad_delay_t
362 circpad_histogram_bin_to_usec(const circpad_machine_runtime_t *mi,
363 circpad_hist_index_t bin)
365 const circpad_state_t *state = circpad_machine_current_state(mi);
366 circpad_delay_t rtt_add_usec = 0;
368 /* Our state should have been checked to be non-null by the caller
369 * (circpad_machine_remove_token()) */
370 if (BUG(state == NULL)) {
371 return CIRCPAD_DELAY_INFINITE;
374 /* The infinity bin has an upper bound of infinity, so make sure we return
375 * that if they ask for it. */
376 if (bin > CIRCPAD_INFINITY_BIN(state)) {
377 return CIRCPAD_DELAY_INFINITE;
380 /* If we are using an RTT estimate, consider it as well. */
381 if (state->use_rtt_estimate) {
382 rtt_add_usec = mi->rtt_estimate_usec;
385 return state->histogram_edges[bin] + rtt_add_usec;
389 * Like circpad_histogram_bin_to_usec() but return the upper bound of bin.
390 * (The upper bound is included in the bin.)
392 STATIC circpad_delay_t
393 histogram_get_bin_upper_bound(const circpad_machine_runtime_t *mi,
394 circpad_hist_index_t bin)
396 return circpad_histogram_bin_to_usec(mi, bin+1) - 1;
399 /** Return the midpoint of the histogram bin <b>bin_index</b>. */
400 static circpad_delay_t
401 circpad_get_histogram_bin_midpoint(const circpad_machine_runtime_t *mi,
402 int bin_index)
404 circpad_delay_t left_bound = circpad_histogram_bin_to_usec(mi, bin_index);
405 circpad_delay_t right_bound = histogram_get_bin_upper_bound(mi, bin_index);
407 return left_bound + (right_bound - left_bound)/2;
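/* Worked example (illustrative): assume a state with three bins whose lower
 * edges are 0, 100 and 1000 usec, the last bin acting as the infinity bin,
 * and no RTT estimate. Then bin 0 spans [0, 99] usec, bin 1 spans [100, 999]
 * usec, and circpad_get_histogram_bin_midpoint() returns
 * 100 + (999 - 100)/2 = 549 usec for bin 1. The infinity bin's upper bound is
 * CIRCPAD_DELAY_INFINITE. */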
411 * Return the bin that contains the usec argument.
412 * "Contains" is defined as us in [lower, upper).
414 * This function will never return the infinity bin (histogram_len-1), in order
415 * to simplify the rest of the code, so if a usec is provided that falls above
416 * the highest non-infinity bin, that bin index will be returned.
418 STATIC circpad_hist_index_t
419 circpad_histogram_usec_to_bin(const circpad_machine_runtime_t *mi,
420 circpad_delay_t usec)
422 const circpad_state_t *state = circpad_machine_current_state(mi);
423 circpad_delay_t rtt_add_usec = 0;
424 circpad_hist_index_t bin;
426 /* Our state should have been checked to be non-null by the caller
427 * (circpad_machine_remove_token()) */
428 if (BUG(state == NULL)) {
429 return 0;
432 /* If we are using an RTT estimate, consider it as well. */
433 if (state->use_rtt_estimate) {
434 rtt_add_usec = mi->rtt_estimate_usec;
437 /* Walk through the bins and check the upper bound of each bin, if 'usec' is
438 * less-or-equal to that, return that bin. If rtt_estimate is enabled then
439 * add that to the upper bound of each bin.
441 * We don't want to return the infinity bin here, so don't go there. */
442 for (bin = 0 ; bin < CIRCPAD_INFINITY_BIN(state) ; bin++) {
443 if (usec <= histogram_get_bin_upper_bound(mi, bin) + rtt_add_usec) {
444 return bin;
448 /* We don't want to return the infinity bin here, so if we still didn't find
449 * the right bin, return the highest non-infinity bin */
450 return CIRCPAD_INFINITY_BIN(state)-1;
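/* Continuing the example above (illustrative): circpad_histogram_usec_to_bin()
 * would map 550 usec to bin 1 (550 <= 999), and would also map 5000 usec to
 * bin 1, because this function never returns the infinity bin. */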
454 * Return true if the machine supports token removal.
456 * Token removal is equivalent to having a mutable histogram in the
457 * circpad_machine_runtime_t mutable info. So while we're at it,
458 * let's assert that everything is consistent between the mutable
459 * runtime and the readonly machine spec.
461 static inline int
462 circpad_is_token_removal_supported(circpad_machine_runtime_t *mi)
464 /* No runtime histogram == no token removal */
465 if (mi->histogram == NULL) {
466 /* Machines that don't want token removal are trying to avoid
467 * potentially expensive mallocs, extra memory accesses, and/or
468 * potentially expensive monotime calls. Let's minimize checks
469 * and keep this path fast. */
470 tor_assert_nonfatal(mi->histogram_len == 0);
471 return 0;
472 } else {
473 /* Machines that do want token removal are less sensitive to performance.
474 * Let's spend some time to check that our state is consistent and sane */
475 const circpad_state_t *state = circpad_machine_current_state(mi);
476 if (BUG(!state)) {
477 return 1;
479 tor_assert_nonfatal(state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE);
480 tor_assert_nonfatal(state->histogram_len == mi->histogram_len);
481 tor_assert_nonfatal(mi->histogram_len != 0);
482 return 1;
485 tor_assert_nonfatal_unreached();
486 return 0;
490 * This function frees any token bins allocated from a previous state
492 * Called after a state transition, or if the bins are empty.
494 STATIC void
495 circpad_machine_setup_tokens(circpad_machine_runtime_t *mi)
497 const circpad_state_t *state = circpad_machine_current_state(mi);
499 /* If this state doesn't exist, or doesn't have token removal,
500 * free any previous state's runtime histogram, and bail.
502 * If we don't have a token removal strategy, we also don't need a runtime
503 * histogram and we rely on the immutable one in machine_spec_t. */
504 if (!state || state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE) {
505 if (mi->histogram) {
506 tor_free(mi->histogram);
507 mi->histogram = NULL;
508 mi->histogram_len = 0;
510 return;
513 /* Try to avoid re-mallocing if we don't really need to */
514 if (!mi->histogram || (mi->histogram
515 && mi->histogram_len != state->histogram_len)) {
516 tor_free(mi->histogram); // null ok
517 mi->histogram = tor_malloc_zero(sizeof(circpad_hist_token_t)
518 *state->histogram_len);
520 mi->histogram_len = state->histogram_len;
522 memcpy(mi->histogram, state->histogram,
523 sizeof(circpad_hist_token_t)*state->histogram_len);
527 * Choose a length for this state (in cells), if specified.
529 static void
530 circpad_choose_state_length(circpad_machine_runtime_t *mi)
532 const circpad_state_t *state = circpad_machine_current_state(mi);
533 double length;
535 if (!state || state->length_dist.type == CIRCPAD_DIST_NONE) {
536 mi->state_length = CIRCPAD_STATE_LENGTH_INFINITE;
537 return;
540 length = circpad_distribution_sample(state->length_dist);
541 length = MAX(0, length);
542 length += state->start_length;
544 if (state->max_length) {
545 length = MIN(length, state->max_length);
548 mi->state_length = clamp_double_to_int64(length);
550 log_info(LD_CIRC, "State length sampled to %"PRIu64" for circuit %u",
551 mi->state_length, CIRCUIT_IS_ORIGIN(mi->on_circ) ?
552 TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
556 * Sample a value from our iat_dist, and clamp it safely
557 * to circpad_delay_t.
559 * Before returning, add <b>delay_shift</b> (can be zero) to the sampled value.
561 static circpad_delay_t
562 circpad_distribution_sample_iat_delay(const circpad_state_t *state,
563 circpad_delay_t delay_shift)
565 double val = circpad_distribution_sample(state->iat_dist);
566 /* These comparisons are safe, because the output is in the range
567 * [0, 2**32), and double has a precision of 53 bits. */
568 /* We want a positive sample value */
569 val = MAX(0, val);
570 /* Respect the maximum sample setting */
571 val = MIN(val, state->dist_max_sample_usec);
573 /* Now apply the shift:
574 * This addition is exact: val is at most 2**32-1, delay_shift is at most
575 * 2**32-1, and doubles have a precision of 53 bits. */
576 val += delay_shift;
578 /* Clamp the distribution at infinite delay val */
579 return (circpad_delay_t)MIN(tor_llround(val), CIRCPAD_DELAY_INFINITE);
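/* Worked example (illustrative): a raw sample of -5 is clamped to 0; a raw
 * sample of 3,000,000 with dist_max_sample_usec = 100,000 is clamped to
 * 100,000; adding a delay_shift of 500 then yields 100,500 usec. */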
583 * Sample an expected time-until-next-packet delay from the histogram or
584 * probability distribution.
586 * A bin of the histogram is chosen with probability proportional to the number
587 * of tokens in each bin, and then a time value is chosen uniformly from that
588 * bin's [start,end) time range.
590 STATIC circpad_delay_t
591 circpad_machine_sample_delay(circpad_machine_runtime_t *mi)
593 const circpad_state_t *state = circpad_machine_current_state(mi);
594 const circpad_hist_token_t *histogram = NULL;
595 circpad_hist_index_t curr_bin = 0;
596 circpad_delay_t bin_start, bin_end;
 597   /* These three must all be wider than circpad_hist_token_t, because
598 * we sum several circpad_hist_token_t values across the histogram */
599 uint64_t curr_weight = 0;
600 uint64_t histogram_total_tokens = 0;
601 uint64_t bin_choice;
603 tor_assert(state);
605 if (state->iat_dist.type != CIRCPAD_DIST_NONE) {
606 /* Sample from a fixed IAT distribution and return */
607 circpad_delay_t iat_delay_shift = state->use_rtt_estimate ?
608 mi->rtt_estimate_usec + state->dist_added_shift_usec :
609 state->dist_added_shift_usec;
610 return circpad_distribution_sample_iat_delay(state, iat_delay_shift);
611 } else if (circpad_is_token_removal_supported(mi)) {
612 histogram = mi->histogram;
613 for (circpad_hist_index_t b = 0; b < state->histogram_len; b++)
614 histogram_total_tokens += histogram[b];
615 } else {
616 /* We have a histogram, but it's immutable */
617 histogram = state->histogram;
618 histogram_total_tokens = state->histogram_total_tokens;
621 /* If we are out of tokens, don't schedule padding. */
622 if (!histogram_total_tokens) {
623 return CIRCPAD_DELAY_INFINITE;
626 bin_choice = crypto_fast_rng_get_uint64(get_thread_fast_rng(),
627 histogram_total_tokens);
629 /* Skip all the initial zero bins */
630 while (!histogram[curr_bin]) {
631 curr_bin++;
633 curr_weight = histogram[curr_bin];
635 // TODO: This is not constant-time. Pretty sure we don't
636 // really need it to be, though.
637 while (curr_weight < bin_choice) {
638 curr_bin++;
639 /* It should be impossible to run past the end of the histogram */
640 if (BUG(curr_bin >= state->histogram_len)) {
641 return CIRCPAD_DELAY_INFINITE;
643 curr_weight += histogram[curr_bin];
646 /* Do some basic checking of the current bin we are in */
647 if (BUG(curr_bin >= state->histogram_len) ||
648 BUG(histogram[curr_bin] == 0)) {
649 return CIRCPAD_DELAY_INFINITE;
652 // Store this index to remove the token upon callback.
653 if (state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE) {
654 mi->chosen_bin = curr_bin;
657 if (curr_bin >= CIRCPAD_INFINITY_BIN(state)) {
658 if (state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE &&
659 mi->histogram[curr_bin] > 0) {
660 mi->histogram[curr_bin]--;
663 // Infinity: Don't send a padding packet. Wait for a real packet
664 // and then see if our bins are empty or what else we should do.
665 return CIRCPAD_DELAY_INFINITE;
668 tor_assert(curr_bin < CIRCPAD_INFINITY_BIN(state));
670 bin_start = circpad_histogram_bin_to_usec(mi, curr_bin);
 671   /* We don't need to subtract 1 from the upper bound because the random range
672 * function below samples from [bin_start, bin_end) */
673 bin_end = circpad_histogram_bin_to_usec(mi, curr_bin+1);
675 /* Bin edges are monotonically increasing so this is a bug. Handle it. */
676 if (BUG(bin_start >= bin_end)) {
677 return bin_start;
680 return (circpad_delay_t)crypto_fast_rng_uint64_range(get_thread_fast_rng(),
681 bin_start, bin_end);
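/* Worked example (illustrative): with two non-infinity bins holding {2, 5}
 * tokens (7 total), bin_choice is drawn uniformly from [0, 7). Draws 0..2
 * leave us in bin 0 (3 of the 7 outcomes) and draws 3..6 advance to bin 1
 * (4 of 7), i.e. roughly in proportion to the token counts. The returned
 * delay is then sampled uniformly from the chosen bin's [start, end) range. */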
685 * Sample a value from the specified probability distribution.
687 * Uses functions from src/lib/math/prob_distr.c .
689 static double
690 circpad_distribution_sample(circpad_distribution_t dist)
692 log_fn(LOG_DEBUG,LD_CIRC, "Sampling delay with distribution %d",
693 dist.type);
695 switch (dist.type) {
696 case CIRCPAD_DIST_NONE:
698 /* We should not get in here like this */
699 tor_assert_nonfatal_unreached();
700 return 0;
702 case CIRCPAD_DIST_UNIFORM:
704 // param2 is upper bound, param1 is lower
705 const struct uniform_t my_uniform = {
706 .base = UNIFORM(my_uniform),
707 .a = dist.param1,
708 .b = dist.param2,
710 return dist_sample(&my_uniform.base);
712 case CIRCPAD_DIST_LOGISTIC:
714 /* param1 is Mu, param2 is sigma. */
715 const struct logistic_t my_logistic = {
716 .base = LOGISTIC(my_logistic),
717 .mu = dist.param1,
718 .sigma = dist.param2,
720 return dist_sample(&my_logistic.base);
722 case CIRCPAD_DIST_LOG_LOGISTIC:
724 /* param1 is Alpha, param2 is 1.0/Beta */
725 const struct log_logistic_t my_log_logistic = {
726 .base = LOG_LOGISTIC(my_log_logistic),
727 .alpha = dist.param1,
728 .beta = dist.param2,
730 return dist_sample(&my_log_logistic.base);
732 case CIRCPAD_DIST_GEOMETRIC:
734 /* param1 is 'p' (success probability) */
735 const struct geometric_t my_geometric = {
736 .base = GEOMETRIC(my_geometric),
737 .p = dist.param1,
739 return dist_sample(&my_geometric.base);
741 case CIRCPAD_DIST_WEIBULL:
743 /* param1 is k, param2 is Lambda */
744 const struct weibull_t my_weibull = {
745 .base = WEIBULL(my_weibull),
746 .k = dist.param1,
747 .lambda = dist.param2,
749 return dist_sample(&my_weibull.base);
751 case CIRCPAD_DIST_PARETO:
753 /* param1 is sigma, param2 is xi, no more params for mu so we use 0 */
754 const struct genpareto_t my_genpareto = {
755 .base = GENPARETO(my_genpareto),
756 .mu = 0,
757 .sigma = dist.param1,
758 .xi = dist.param2,
760 return dist_sample(&my_genpareto.base);
764 tor_assert_nonfatal_unreached();
765 return 0;
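/* Illustrative usage (not part of this file), using the field names handled
 * above: sample a value from a uniform distribution over [10, 100].
 *
 *   circpad_distribution_t d = { .type = CIRCPAD_DIST_UNIFORM,
 *                                .param1 = 10, .param2 = 100 };
 *   double sample = circpad_distribution_sample(d);
 */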
769 * Find the index of the first bin whose upper bound is
770 * greater than the target, and that has tokens remaining.
772 * Used for histograms with token removal.
774 static circpad_hist_index_t
775 circpad_machine_first_higher_index(const circpad_machine_runtime_t *mi,
776 circpad_delay_t target_bin_usec)
778 circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
779 target_bin_usec);
781 /* Don't remove from the infinity bin */
782 for (; bin < CIRCPAD_INFINITY_BIN(mi); bin++) {
783 if (mi->histogram[bin] &&
784 histogram_get_bin_upper_bound(mi, bin) >= target_bin_usec) {
785 return bin;
789 return mi->histogram_len;
793 * Find the index of the first bin whose lower bound is lower or equal to
794 * <b>target_bin_usec</b>, and that still has tokens remaining.
796 * Used for histograms with token removal.
798 static circpad_hist_index_t
799 circpad_machine_first_lower_index(const circpad_machine_runtime_t *mi,
800 circpad_delay_t target_bin_usec)
802 circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
803 target_bin_usec);
805 for (; bin >= 0; bin--) {
806 if (mi->histogram[bin] &&
807 circpad_histogram_bin_to_usec(mi, bin) <= target_bin_usec) {
808 return bin;
812 return -1;
816 * Remove a token from the first non-empty bin whose upper bound is
817 * greater than the target.
819 * Used for histograms with token removal.
821 STATIC void
822 circpad_machine_remove_higher_token(circpad_machine_runtime_t *mi,
823 circpad_delay_t target_bin_usec)
825 /* We need to remove the token from the first bin
826 * whose upper bound is greater than the target, and that
827 * has tokens remaining. */
828 circpad_hist_index_t bin = circpad_machine_first_higher_index(mi,
829 target_bin_usec);
831 if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
832 if (!BUG(mi->histogram[bin] == 0)) {
833 mi->histogram[bin]--;
 839  * Remove a token from the first non-empty bin, at or below the target,
 840  * whose lower bound is lower than or equal to the target.
842 * Used for histograms with token removal.
844 STATIC void
845 circpad_machine_remove_lower_token(circpad_machine_runtime_t *mi,
846 circpad_delay_t target_bin_usec)
848 circpad_hist_index_t bin = circpad_machine_first_lower_index(mi,
849 target_bin_usec);
851 if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
852 if (!BUG(mi->histogram[bin] == 0)) {
853 mi->histogram[bin]--;
858 /* Helper macro: Ensure that the bin has tokens available, and BUG out of the
859 * function if it's not the case. */
860 #define ENSURE_BIN_CAPACITY(bin_index) \
861 if (BUG(mi->histogram[bin_index] == 0)) { \
862 return; \
866 * Remove a token from the closest non-empty bin to the target.
868 * If use_usec is true, measure "closest" in terms of the next closest bin
869 * midpoint.
871 * If it is false, use bin index distance only.
873 * Used for histograms with token removal.
875 STATIC void
876 circpad_machine_remove_closest_token(circpad_machine_runtime_t *mi,
877 circpad_delay_t target_bin_usec,
878 bool use_usec)
880 circpad_hist_index_t lower, higher, current;
881 circpad_hist_index_t bin_to_remove = -1;
883 lower = circpad_machine_first_lower_index(mi, target_bin_usec);
884 higher = circpad_machine_first_higher_index(mi, target_bin_usec);
885 current = circpad_histogram_usec_to_bin(mi, target_bin_usec);
887 /* Sanity check the results */
888 if (BUG(lower > current) || BUG(higher < current)) {
889 return;
892 /* Take care of edge cases first */
893 if (higher == mi->histogram_len && lower == -1) {
894 /* All bins are empty */
895 return;
896 } else if (higher == mi->histogram_len) {
897 /* All higher bins are empty */
898 ENSURE_BIN_CAPACITY(lower);
899 mi->histogram[lower]--;
900 return;
901 } else if (lower == -1) {
902 /* All lower bins are empty */
903 ENSURE_BIN_CAPACITY(higher);
904 mi->histogram[higher]--;
905 return;
908 /* Now handle the intermediate cases */
909 if (use_usec) {
910 /* Find the closest bin midpoint to the target */
911 circpad_delay_t lower_usec = circpad_get_histogram_bin_midpoint(mi, lower);
912 circpad_delay_t higher_usec =
913 circpad_get_histogram_bin_midpoint(mi, higher);
915 if (target_bin_usec < lower_usec) {
916 // Lower bin is closer
917 ENSURE_BIN_CAPACITY(lower);
918 bin_to_remove = lower;
919 } else if (target_bin_usec > higher_usec) {
920 // Higher bin is closer
921 ENSURE_BIN_CAPACITY(higher);
922 bin_to_remove = higher;
923 } else if (target_bin_usec-lower_usec > higher_usec-target_bin_usec) {
924 // Higher bin is closer
925 ENSURE_BIN_CAPACITY(higher);
926 bin_to_remove = higher;
927 } else {
928 // Lower bin is closer
929 ENSURE_BIN_CAPACITY(lower);
930 bin_to_remove = lower;
932 mi->histogram[bin_to_remove]--;
933 log_debug(LD_CIRC, "Removing token from bin %d", bin_to_remove);
934 return;
935 } else {
936 if (current - lower > higher - current) {
937 // Higher bin is closer
938 ENSURE_BIN_CAPACITY(higher);
939 mi->histogram[higher]--;
940 return;
941 } else {
942 // Lower bin is closer
943 ENSURE_BIN_CAPACITY(lower);
944 mi->histogram[lower]--;
945 return;
950 #undef ENSURE_BIN_CAPACITY
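/* Worked example (illustrative): suppose the target falls between two
 * non-empty bins whose midpoints are 50 and 500 usec, at target_bin_usec =
 * 120. With use_usec set, 120 - 50 = 70 is not greater than 500 - 120 = 380,
 * so the lower bin is considered closer and loses the token. */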
953 * Remove a token from the exact bin corresponding to the target.
955 * If it is empty, do nothing.
957 * Used for histograms with token removal.
959 static void
960 circpad_machine_remove_exact(circpad_machine_runtime_t *mi,
961 circpad_delay_t target_bin_usec)
963 circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
964 target_bin_usec);
966 if (mi->histogram[bin] > 0)
967 mi->histogram[bin]--;
971 * Check our state's cell limit count and tokens.
 973  * Returns 1 if either limit is hit and we decide to change states,
974 * otherwise returns 0.
976 static circpad_decision_t
977 check_machine_token_supply(circpad_machine_runtime_t *mi)
979 uint32_t histogram_total_tokens = 0;
981 /* Check if bins empty. This requires summing up the current mutable
982 * machineinfo histogram token total and checking if it is zero.
983 * Machineinfo does not keep a running token count. We're assuming the
984 * extra space is not worth this short loop iteration.
986 * We also do not count infinity bin in histogram totals.
988 if (circpad_is_token_removal_supported(mi)) {
989 for (circpad_hist_index_t b = 0; b < CIRCPAD_INFINITY_BIN(mi); b++)
990 histogram_total_tokens += mi->histogram[b];
992 /* If we change state, we're done */
993 if (histogram_total_tokens == 0) {
994 if (circpad_internal_event_bins_empty(mi) == CIRCPAD_STATE_CHANGED)
995 return CIRCPAD_STATE_CHANGED;
999 if (mi->state_length == 0) {
1000 return circpad_internal_event_state_length_up(mi);
1003 return CIRCPAD_STATE_UNCHANGED;
1007 * Count that a padding packet was sent.
1009 * This updates our state length count, our machine rate limit counts,
1010 * and if token removal is used, decrements the histogram.
1012 static inline void
1013 circpad_machine_count_padding_sent(circpad_machine_runtime_t *mi)
1015 /* If we have a valid state length bound, consider it */
1016 if (mi->state_length != CIRCPAD_STATE_LENGTH_INFINITE &&
1017 !BUG(mi->state_length <= 0)) {
1018 mi->state_length--;
 1022    * Update padding counts for rate limiting: We scale at UINT16_MAX
 1023    * because we only use this for a percent limit of 2 sig figs, and
 1024    * space is scarce in the machineinfo struct.
1026 mi->padding_sent++;
1027 if (mi->padding_sent == UINT16_MAX) {
1028 mi->padding_sent /= 2;
1029 mi->nonpadding_sent /= 2;
1032 circpad_global_padding_sent++;
1034 /* If we have a mutable histogram, reduce the token count from
1035 * the chosen padding bin (this assumes we always send padding
1036 * when we intended to). */
1037 if (circpad_is_token_removal_supported(mi)) {
1038 /* Check array bounds and token count before removing */
1039 if (!BUG(mi->chosen_bin >= mi->histogram_len) &&
1040 !BUG(mi->histogram[mi->chosen_bin] == 0)) {
1041 mi->histogram[mi->chosen_bin]--;
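/* Worked example (illustrative): when padding_sent reaches UINT16_MAX (65535),
 * both counters are halved, e.g. padding_sent 65535 and nonpadding_sent 40000
 * become 32767 and 20000. The padding/total ratio used for the percent limit
 * stays roughly the same while keeping both counters within 16 bits. */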
1047 * Count a nonpadding packet as being sent.
1049 * This function updates our overhead accounting variables, as well
1050 * as decrements the state limit packet counter, if the latter was
1051 * flagged as applying to non-padding as well.
1053 static inline void
1054 circpad_machine_count_nonpadding_sent(circpad_machine_runtime_t *mi)
 1056   /* Update non-padding counts for rate limiting: We scale at UINT16_MAX
 1057    * because we only use this for a percent limit of 2 sig figs, and
 1058    * space is scarce in the machineinfo struct. */
1059 mi->nonpadding_sent++;
1060 if (mi->nonpadding_sent == UINT16_MAX) {
1061 mi->padding_sent /= 2;
1062 mi->nonpadding_sent /= 2;
1065 /* Update any state packet length limits that apply */
1066 circpad_machine_update_state_length_for_nonpadding(mi);
1068 /* Remove a token from the histogram, if applicable */
1069 circpad_machine_remove_token(mi);
1073 * Decrement the state length counter for a non-padding packet.
1075 * Only updates the state length if we're using that feature, we
1076 * have a state, and the machine wants to count non-padding packets
1077 * towards the state length.
1079 static inline void
1080 circpad_machine_update_state_length_for_nonpadding(
1081 circpad_machine_runtime_t *mi)
1083 const circpad_state_t *state = NULL;
1085 if (mi->state_length == CIRCPAD_STATE_LENGTH_INFINITE)
1086 return;
1088 state = circpad_machine_current_state(mi);
1090 /* If we are not in a padding state (like start or end), we're done */
1091 if (!state)
1092 return;
1094 /* If we're enforcing a state length on non-padding packets,
1095 * decrement it */
1096 if (state->length_includes_nonpadding &&
1097 mi->state_length > 0) {
1098 mi->state_length--;
1103 * When a non-padding packet arrives, remove a token from the bin
1104 * corresponding to the delta since last sent packet. If that bin
1105 * is empty, choose a token based on the specified removal strategy
1106 * in the state machine.
1108 STATIC void
1109 circpad_machine_remove_token(circpad_machine_runtime_t *mi)
1111 const circpad_state_t *state = NULL;
1112 circpad_time_t current_time;
1113 circpad_delay_t target_bin_usec;
 1115   /* Don't remove any tokens if there was no padding scheduled */
1116 if (!mi->padding_scheduled_at_usec) {
1117 return;
1120 state = circpad_machine_current_state(mi);
1122 /* If we are not in a padding state (like start or end), we're done */
1123 if (!state)
1124 return;
1125 /* Don't remove any tokens if we're not doing token removal */
1126 if (state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE)
1127 return;
1129 current_time = monotime_absolute_usec();
1131 /* If we have scheduled padding some time in the future, we want to see what
1132 bin we are in at the current time */
1133 target_bin_usec = (circpad_delay_t)
1134 MIN((current_time - mi->padding_scheduled_at_usec),
1135 CIRCPAD_DELAY_INFINITE-1);
 1137   /* We are treating this non-padding cell as a padding cell, so we cancel
 1138    * the padding timer, if present. */
1139 mi->padding_scheduled_at_usec = 0;
1140 if (mi->is_padding_timer_scheduled) {
1141 mi->is_padding_timer_scheduled = 0;
1142 timer_disable(mi->padding_timer);
1145 /* Perform the specified token removal strategy */
1146 switch (state->token_removal) {
1147 case CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC:
1148 circpad_machine_remove_closest_token(mi, target_bin_usec, 1);
1149 break;
1150 case CIRCPAD_TOKEN_REMOVAL_CLOSEST:
1151 circpad_machine_remove_closest_token(mi, target_bin_usec, 0);
1152 break;
1153 case CIRCPAD_TOKEN_REMOVAL_LOWER:
1154 circpad_machine_remove_lower_token(mi, target_bin_usec);
1155 break;
1156 case CIRCPAD_TOKEN_REMOVAL_HIGHER:
1157 circpad_machine_remove_higher_token(mi, target_bin_usec);
1158 break;
1159 case CIRCPAD_TOKEN_REMOVAL_EXACT:
1160 circpad_machine_remove_exact(mi, target_bin_usec);
1161 break;
1162 case CIRCPAD_TOKEN_REMOVAL_NONE:
1163 default:
1164 tor_assert_nonfatal_unreached();
1165 log_warn(LD_BUG, "Circpad: Unknown token removal strategy %d",
1166 state->token_removal);
1167 break;
1172 * Send a relay command with a relay cell payload on a circuit to
1173 * the particular hopnum.
1175 * Hopnum starts at 1 (1=guard, 2=middle, 3=exit, etc).
1177 * Payload may be null.
1179 * Returns negative on error, 0 on success.
1181 MOCK_IMPL(STATIC signed_error_t,
1182 circpad_send_command_to_hop,(origin_circuit_t *circ, uint8_t hopnum,
1183 uint8_t relay_command, const uint8_t *payload,
1184 ssize_t payload_len))
1186 crypt_path_t *target_hop = circuit_get_cpath_hop(circ, hopnum);
1187 signed_error_t ret;
1189 /* Check that the cpath has the target hop */
1190 if (!target_hop) {
1191 log_fn(LOG_WARN, LD_BUG, "Padding circuit %u has %d hops, not %d",
1192 circ->global_identifier, circuit_get_cpath_len(circ), hopnum);
1193 return -1;
1196 /* Check that the target hop is opened */
1197 if (target_hop->state != CPATH_STATE_OPEN) {
1198 log_fn(LOG_WARN,LD_CIRC,
1199 "Padding circuit %u has %d hops, not %d",
1200 circ->global_identifier,
1201 circuit_get_cpath_opened_len(circ), hopnum);
1202 return -1;
 1205   /* Send the relay command to the target hop */
1206 ret = relay_send_command_from_edge(0, TO_CIRCUIT(circ), relay_command,
1207 (const char*)payload, payload_len,
1208 target_hop);
1209 return ret;
1213 * Callback helper to send a padding cell.
1215 * This helper is called after our histogram-sampled delay period passes
1216 * without another packet being sent first. If a packet is sent before this
1217 * callback happens, it is canceled. So when we're called here, send padding
1218 * right away.
1220 * If sending this padding cell forced us to transition states return
1221 * CIRCPAD_STATE_CHANGED. Otherwise return CIRCPAD_STATE_UNCHANGED.
1223 circpad_decision_t
1224 circpad_send_padding_cell_for_callback(circpad_machine_runtime_t *mi)
1226 circuit_t *circ = mi->on_circ;
1227 int machine_idx = mi->machine_index;
1228 mi->padding_scheduled_at_usec = 0;
1229 mi->is_padding_timer_scheduled = 0;
1230 circpad_statenum_t state = mi->current_state;
1232 /* Make sure circuit didn't close on us */
1233 if (mi->on_circ->marked_for_close) {
1234 log_fn(LOG_INFO,LD_CIRC,
1235 "Padding callback on circuit marked for close (%u). Ignoring.",
1236 CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1237 TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
1238 return CIRCPAD_STATE_CHANGED;
1241 circpad_machine_count_padding_sent(mi);
1243 if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
1244 circpad_send_command_to_hop(TO_ORIGIN_CIRCUIT(mi->on_circ),
1245 CIRCPAD_GET_MACHINE(mi)->target_hopnum,
1246 RELAY_COMMAND_DROP, NULL, 0);
1247 log_info(LD_CIRC, "Callback: Sending padding to origin circuit %u"
1248 " (%d) [length: %"PRIu64"]",
1249 TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier,
1250 mi->on_circ->purpose, mi->state_length);
1251 } else {
1252 // If we're a non-origin circ, we can just send from here as if we're the
1253 // edge.
1254 if (TO_OR_CIRCUIT(circ)->p_chan_cells.n <= circpad_max_circ_queued_cells) {
1255 log_info(LD_CIRC, "Callback: Sending padding to circuit (%d)"
1256 " [length: %"PRIu64"]", mi->on_circ->purpose, mi->state_length);
1257 relay_send_command_from_edge(0, mi->on_circ, RELAY_COMMAND_DROP, NULL,
1258 0, NULL);
1259 rep_hist_padding_count_write(PADDING_TYPE_DROP);
1260 } else {
1261 static ratelim_t cell_lim = RATELIM_INIT(600);
1262 log_fn_ratelim(&cell_lim,LOG_NOTICE,LD_CIRC,
1263 "Too many cells (%d) in circ queue to send padding.",
1264 TO_OR_CIRCUIT(circ)->p_chan_cells.n);
1268 /* This is a padding cell sent from the client or from the middle node,
1269 * (because it's invoked from circuitpadding.c) */
1270 circpad_cell_event_padding_sent(circ);
1272 /* The circpad_cell_event_padding_sent() could cause us to transition.
1273 * Check that we still have a padding machineinfo, and then check our token
1274 * supply. */
1275 if (circ->padding_info[machine_idx] != NULL) {
1276 if (state != circ->padding_info[machine_idx]->current_state)
1277 return CIRCPAD_STATE_CHANGED;
1278 else
1279 return check_machine_token_supply(circ->padding_info[machine_idx]);
1280 } else {
1281 return CIRCPAD_STATE_CHANGED;
1286 * Tor-timer compatible callback that tells us to send a padding cell.
1288 * Timers are associated with circpad_machine_runtime_t's. When the machineinfo
1289 * is freed on a circuit, the timers are cancelled. Since the lifetime
1290 * of machineinfo is always longer than the timers, handles are not
1291 * needed.
1293 static void
1294 circpad_send_padding_callback(tor_timer_t *timer, void *args,
1295 const struct monotime_t *time)
1297 circpad_machine_runtime_t *mi = ((circpad_machine_runtime_t*)args);
1298 (void)timer; (void)time;
1300 if (mi && mi->on_circ) {
1301 assert_circuit_ok(mi->on_circ);
1302 circpad_send_padding_cell_for_callback(mi);
1303 } else {
1304 // This shouldn't happen (represents a timer leak)
1305 log_fn(LOG_WARN,LD_CIRC,
1306 "Circuit closed while waiting for padding timer.");
1307 tor_fragile_assert();
1310 // TODO-MP-AP: Unify this counter with channelpadding for rephist stats
1311 //total_timers_pending--;
1315 * Cache our consensus parameters upon consensus update.
1317 void
1318 circpad_new_consensus_params(const networkstatus_t *ns)
1320 circpad_padding_disabled =
1321 networkstatus_get_param(ns, "circpad_padding_disabled",
1322 0, 0, 1);
1324 circpad_padding_reduced =
1325 networkstatus_get_param(ns, "circpad_padding_reduced",
1326 0, 0, 1);
1328 circpad_global_allowed_cells =
1329 networkstatus_get_param(ns, "circpad_global_allowed_cells",
1330 0, 0, UINT16_MAX-1);
1332 circpad_global_max_padding_percent =
1333 networkstatus_get_param(ns, "circpad_global_max_padding_pct",
1334 0, 0, 100);
1336 circpad_max_circ_queued_cells =
1337 networkstatus_get_param(ns, "circpad_max_circ_queued_cells",
1338 CIRCWINDOW_START_MAX, 0, 50*CIRCWINDOW_START_MAX);
1342 * Return true if padding is allowed by torrc and consensus.
1344 static bool
1345 circpad_is_padding_allowed(void)
1347 /* If padding has been disabled in the consensus, don't send any more
1348 * padding. Technically the machine should be shut down when the next
1349 * machine condition check happens, but machine checks only happen on
1350 * certain circuit events, and if padding is disabled due to some
1351 * network overload or DoS condition, we really want to stop ASAP. */
1352 if (circpad_padding_disabled || !get_options()->CircuitPadding) {
1353 return 0;
1356 return 1;
1360 * Check this machine against its padding limits, as well as global
1361 * consensus limits.
1363 * We have two limits: a percent and a cell count. The cell count
1364 * limit must be reached before the percent is enforced (this is to
1365 * optionally allow very light padding of things like circuit setup
1366 * while there is no other traffic on the circuit).
 1368  * TODO: Don't apply limits to machines from torrc.
1370 * Returns 1 if limits are set and we've hit them. Otherwise returns 0.
1372 STATIC bool
1373 circpad_machine_reached_padding_limit(circpad_machine_runtime_t *mi)
1375 const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
 1377   /* If the machine's max_padding_percent is non-zero, and we've sent more
1378 * than the allowed count of padding cells, then check our
1379 * percent limits for this machine. */
1380 if (machine->max_padding_percent &&
1381 mi->padding_sent >= machine->allowed_padding_count) {
1382 uint32_t total_cells = mi->padding_sent + mi->nonpadding_sent;
1384 /* Check the percent */
1385 if ((100*(uint32_t)mi->padding_sent) / total_cells >
1386 machine->max_padding_percent) {
1387 return 1; // limit is reached. Stop.
 1391   /* If circpad_global_max_padding_percent is non-zero, and we've
1392 * sent more than the global padding cell limit, then check our
1393 * global tor process percentage limit on padding. */
1394 if (circpad_global_max_padding_percent &&
1395 circpad_global_padding_sent >= circpad_global_allowed_cells) {
1396 uint64_t total_cells = circpad_global_padding_sent +
1397 circpad_global_nonpadding_sent;
1399 /* Check the percent */
1400 if ((100*circpad_global_padding_sent) / total_cells >
1401 circpad_global_max_padding_percent) {
1402 return 1; // global limit reached. Stop.
1406 return 0; // All good!
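/* Worked example (illustrative): with allowed_padding_count = 100 and
 * max_padding_percent = 50, a machine that has sent 150 padding and 100
 * non-padding cells is over both thresholds: 150 >= 100, and
 * (100*150)/250 = 60 > 50, so this function returns 1 and padding stops. */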
1410 * Schedule the next padding time according to the machineinfo on a
1411 * circuit.
 1413  * The histograms represent inter-packet-delay. Whenever you get a packet
1414 * event you should be scheduling your next timer (after cancelling any old
1415 * ones and updating tokens accordingly).
1417 * Returns 1 if we decide to transition states (due to infinity bin),
1418 * 0 otherwise.
1420 MOCK_IMPL(circpad_decision_t,
1421 circpad_machine_schedule_padding,(circpad_machine_runtime_t *mi))
1423 circpad_delay_t in_usec = 0;
1424 struct timeval timeout;
1425 tor_assert(mi);
1427 /* Don't schedule padding if it is disabled */
1428 if (!circpad_is_padding_allowed()) {
1429 static ratelim_t padding_lim = RATELIM_INIT(600);
1430 log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
1431 "Padding has been disabled, but machine still on circuit %"PRIu64
1432 ", %d",
1433 mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
1434 mi->on_circ->n_circ_id);
1436 return CIRCPAD_STATE_UNCHANGED;
1439 /* Don't schedule padding if we are currently in dormant mode. */
1440 if (!is_participating_on_network()) {
1441 log_info(LD_CIRC, "Not scheduling padding because we are dormant.");
1442 return CIRCPAD_STATE_UNCHANGED;
1445 // Don't pad in end (but also don't cancel any previously
1446 // scheduled padding either).
1447 if (mi->current_state == CIRCPAD_STATE_END) {
1448 log_fn(LOG_INFO, LD_CIRC, "Padding end state on circuit %u",
1449 CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1450 TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
1451 return CIRCPAD_STATE_UNCHANGED;
1454 /* Check our padding limits */
1455 if (circpad_machine_reached_padding_limit(mi)) {
1456 if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
1457 log_fn(LOG_INFO, LD_CIRC,
1458 "Padding machine has reached padding limit on circuit %u",
1459 TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier);
1460 } else {
1461 static ratelim_t padding_lim = RATELIM_INIT(600);
1462 log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
1463 "Padding machine has reached padding limit on circuit %"PRIu64
1464 ", %d",
1465 mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
1466 mi->on_circ->n_circ_id);
1468 return CIRCPAD_STATE_UNCHANGED;
1471 if (mi->is_padding_timer_scheduled) {
1472 /* Cancel current timer (if any) */
1473 timer_disable(mi->padding_timer);
1474 mi->is_padding_timer_scheduled = 0;
1477 /* in_usec = in microseconds */
1478 in_usec = circpad_machine_sample_delay(mi);
1479 /* If we're using token removal, we need to know when the padding
1480 * was scheduled at, so we can remove the appropriate token if
1481 * a non-padding cell is sent before the padding timer expires.
1483 * However, since monotime is unpredictably expensive, let's avoid
1484 * using it for machines that don't need token removal. */
1485 if (circpad_is_token_removal_supported(mi)) {
1486 mi->padding_scheduled_at_usec = monotime_absolute_usec();
1487 } else {
1488 mi->padding_scheduled_at_usec = 1;
1490 log_fn(LOG_INFO,LD_CIRC,"\tPadding in %u usec on circuit %u", in_usec,
1491 CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1492 TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
1494 // Don't schedule if we have infinite delay.
1495 if (in_usec == CIRCPAD_DELAY_INFINITE) {
1496 return circpad_internal_event_infinity(mi);
1499 if (mi->state_length == 0) {
1500 /* If we're at length 0, that means we hit 0 after sending
1501 * a cell earlier, and emitted an event for it, but
1502 * for whatever reason we did not decide to change states then.
1503 * So maybe the machine is waiting for bins empty, or for an
1504 * infinity event later? That would be a strange machine,
1505 * but there's no reason to make it impossible. */
1506 return CIRCPAD_STATE_UNCHANGED;
1509 if (in_usec <= 0) {
1510 return circpad_send_padding_cell_for_callback(mi);
1513 timeout.tv_sec = in_usec/TOR_USEC_PER_SEC;
1514 timeout.tv_usec = (in_usec%TOR_USEC_PER_SEC);
1516 log_fn(LOG_INFO, LD_CIRC, "\tPadding circuit %u in %u sec, %u usec",
1517 CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1518 TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0,
1519 (unsigned)timeout.tv_sec, (unsigned)timeout.tv_usec);
1521 if (mi->padding_timer) {
1522 timer_set_cb(mi->padding_timer, circpad_send_padding_callback, mi);
1523 } else {
1524 mi->padding_timer =
1525 timer_new(circpad_send_padding_callback, mi);
1527 timer_schedule(mi->padding_timer, &timeout);
1528 mi->is_padding_timer_scheduled = 1;
1530 // TODO-MP-AP: Unify with channelpadding counter
1531 //rep_hist_padding_count_timers(++total_timers_pending);
1533 return CIRCPAD_STATE_UNCHANGED;
1537 * If the machine transitioned to the END state, we need
1538 * to check to see if it wants us to shut it down immediately.
1539 * If it does, then we need to send the appropriate negotiation commands
1540 * depending on which side it is.
1542 * After this function is called, mi may point to freed memory. Do
1543 * not access it.
1545 static void
1546 circpad_machine_spec_transitioned_to_end(circpad_machine_runtime_t *mi)
1548 const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
1549 circuit_t *on_circ = mi->on_circ;
1551 log_fn(LOG_INFO,LD_CIRC, "Padding machine in end state on circuit %u (%d)",
1552 CIRCUIT_IS_ORIGIN(on_circ) ?
1553 TO_ORIGIN_CIRCUIT(on_circ)->global_identifier : 0,
1554 on_circ->purpose);
1557 * We allow machines to shut down and delete themselves as opposed
1558 * to just going back to START or waiting forever in END so that
1559 * we can handle the case where this machine started while it was
1560 * the only machine that matched conditions, but *since* then more
1561 * "higher ranking" machines now match the conditions, and would
1562 * be given a chance to take precedence over this one in
1563 * circpad_add_matching_machines().
1565 * Returning to START or waiting forever in END would not give those
 1566  * other machines a chance to be launched, whereas shutting down
1567 * here does.
1569 if (machine->should_negotiate_end) {
1570 if (machine->is_origin_side) {
1571 /* We free the machine info here so that we can be replaced
1572 * by a different machine. But we must leave the padding_machine
1573 * in place to wait for the negotiated response */
1574 uint32_t machine_ctr = mi->machine_ctr;
1575 circpad_circuit_machineinfo_free_idx(on_circ,
1576 machine->machine_index);
1577 circpad_negotiate_padding(TO_ORIGIN_CIRCUIT(on_circ),
1578 machine->machine_num,
1579 machine->target_hopnum,
1580 CIRCPAD_COMMAND_STOP,
1581 machine_ctr);
1582 } else {
1583 uint32_t machine_ctr = mi->machine_ctr;
1584 circpad_circuit_machineinfo_free_idx(on_circ,
1585 machine->machine_index);
1586 circpad_padding_negotiated(on_circ,
1587 machine->machine_num,
1588 CIRCPAD_COMMAND_STOP,
1589 CIRCPAD_RESPONSE_OK,
1590 machine_ctr);
1591 on_circ->padding_machine[machine->machine_index] = NULL;
1597 * Generic state transition function for padding state machines.
1599 * Given an event and our mutable machine info, decide if/how to
1600 * transition to a different state, and perform actions accordingly.
1602 * Returns 1 if we transition states, 0 otherwise.
1604 MOCK_IMPL(circpad_decision_t,
1605 circpad_machine_spec_transition,(circpad_machine_runtime_t *mi,
1606 circpad_event_t event))
1608 const circpad_state_t *state =
1609 circpad_machine_current_state(mi);
1611 /* If state is null we are in the end state. */
1612 if (!state) {
 1613     /* If we are in the end state, we don't pad no matter what. */
1614 return CIRCPAD_STATE_UNCHANGED;
1617 /* Check if this event is ignored or causes a cancel */
1618 if (state->next_state[event] == CIRCPAD_STATE_IGNORE) {
1619 return CIRCPAD_STATE_UNCHANGED;
1620 } else if (state->next_state[event] == CIRCPAD_STATE_CANCEL) {
1621 /* Check cancel events and cancel any pending padding */
1622 mi->padding_scheduled_at_usec = 0;
1623 if (mi->is_padding_timer_scheduled) {
1624 mi->is_padding_timer_scheduled = 0;
1625 /* Cancel current timer (if any) */
1626 timer_disable(mi->padding_timer);
1628 return CIRCPAD_STATE_UNCHANGED;
1629 } else {
1630 circpad_statenum_t s = state->next_state[event];
1631 /* See if we need to transition to any other states based on this event.
1632 * Whenever a transition happens, even to our own state, we schedule
1633 * padding.
1635 * So if a state only wants to schedule padding for an event, it specifies
1636 * a transition to itself. All non-specified events are ignored.
1638 log_fn(LOG_INFO, LD_CIRC,
1639 "Circuit %u circpad machine %d transitioning from %u to %u",
1640 CIRCUIT_IS_ORIGIN(mi->on_circ) ?
1641 TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0,
1642 mi->machine_index, mi->current_state, s);
1644 /* If this is not the same state, switch and init tokens,
1645 * otherwise just reschedule padding. */
1646 if (mi->current_state != s) {
1647 mi->current_state = s;
1648 circpad_machine_setup_tokens(mi);
1649 circpad_choose_state_length(mi);
1651 /* If we transition to the end state, check to see
1652 * if this machine wants to be shut down at end */
1653 if (s == CIRCPAD_STATE_END) {
1654 circpad_machine_spec_transitioned_to_end(mi);
1655 /* We transitioned but we don't pad in end. Also, mi
1656 * may be freed. Returning STATE_CHANGED prevents us
1657 * from accessing it in any callers of this function. */
1658 return CIRCPAD_STATE_CHANGED;
1661 /* We transitioned to a new state, schedule padding */
1662 circpad_machine_schedule_padding(mi);
1663 return CIRCPAD_STATE_CHANGED;
1666 /* We transitioned back to the same state. Schedule padding,
1667 * and inform if that causes a state transition. */
1668 return circpad_machine_schedule_padding(mi);
1671 return CIRCPAD_STATE_UNCHANGED;
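/* Illustrative sketch (not part of this file): a machine that only wants to
 * schedule padding on a particular event maps that event back to the current
 * state, as described above. The state and event names below are assumptions
 * used for illustration:
 *
 *   states[CIRCPAD_STATE_BURST].next_state[CIRCPAD_EVENT_NONPADDING_RECV] =
 *     CIRCPAD_STATE_BURST;
 */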
1675 * Estimate the circuit RTT from the current middle hop out to the
1676 * end of the circuit.
1678 * We estimate RTT by calculating the time between "receive" and
1679 * "send" at a middle hop. This is because we "receive" a cell
1680 * from the origin, and then relay it towards the exit before a
1681 * response comes back. It is that response time from the exit side
1682 * that we want to measure, so that we can make use of it for synthetic
1683 * response delays.
1685 static void
1686 circpad_estimate_circ_rtt_on_received(circuit_t *circ,
1687 circpad_machine_runtime_t *mi)
1689 /* Origin circuits don't estimate RTT. They could do it easily enough,
1690 * but they have no reason to use it in any delay calculations. */
1691 if (CIRCUIT_IS_ORIGIN(circ) || mi->stop_rtt_update)
1692 return;
1694 /* If we already have a last received packet time, that means we
1695 * did not get a response before this packet. The RTT estimate
1696 * only makes sense if we do not have multiple packets on the
1697 * wire, so stop estimating if this is the second packet
1698 * back to back. However, for the first set of back-to-back
1699 * packets, we can wait until the very first response comes back
1700 * to us, to measure that RTT (for the response to optimistic
1701 * data, for example). Hence stop_rtt_update is only checked
1702 * in this received side function, and not in send side below.
1704 if (mi->last_received_time_usec) {
1705 /* We also allow multiple back-to-back packets if the circuit is not
1706 * opened, to handle var cells.
1707 * XXX: Will this work with our var cell plans? Maybe not,
1708 * since we're opened at the middle hop as soon as we process
1709 * one var extend2 :/ */
1710 if (circ->state == CIRCUIT_STATE_OPEN) {
1711 log_fn(LOG_INFO, LD_CIRC,
1712 "Stopping padding RTT estimation on circuit (%"PRIu64
1713 ", %d) after two back to back packets. Current RTT: %d",
1714 circ->n_chan ? circ->n_chan->global_identifier : 0,
1715 circ->n_circ_id, mi->rtt_estimate_usec);
1716 mi->stop_rtt_update = 1;
1718 if (!mi->rtt_estimate_usec) {
1719 static ratelim_t rtt_lim = RATELIM_INIT(600);
1720 log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG,
1721 "Circuit got two cells back to back before estimating RTT.");
1724 } else {
1725 const circpad_state_t *state = circpad_machine_current_state(mi);
1726 if (BUG(!state)) {
1727 return;
1730 /* Since monotime is unpredictably expensive, only update this field
1731 * if rtt estimates are needed. Otherwise, stop the rtt update. */
1732 if (state->use_rtt_estimate) {
1733 mi->last_received_time_usec = monotime_absolute_usec();
1734 } else {
1735 /* Let's fast-path future decisions not to update rtt if the
1736 * feature is not in use. */
1737 mi->stop_rtt_update = 1;
1743 * Handles the "send" side of RTT calculation at middle nodes.
1745 * This function calculates the RTT from the middle to the end
1746 * of the circuit by subtracting the last received cell timestamp
1747 * from the current time. It allows back-to-back cells until
1748 * the circuit is opened, to allow for var cell handshakes.
1749 * XXX: Check our var cell plans to make sure this will work.
1751 static void
1752 circpad_estimate_circ_rtt_on_send(circuit_t *circ,
1753 circpad_machine_runtime_t *mi)
1755 /* Origin circuits don't estimate RTT. They could do it easily enough,
1756 * but they have no reason to use it in any delay calculations. */
1757 if (CIRCUIT_IS_ORIGIN(circ))
1758 return;
1760 /* If last_received_time_usec is non-zero, we are waiting for a response
1761 * from the exit side. Calculate the time delta and use it as RTT. */
1762 if (mi->last_received_time_usec) {
1763 circpad_time_t rtt_time = monotime_absolute_usec() -
1764 mi->last_received_time_usec;
1766 /* Reset the last RTT packet time, so we can tell if two cells
1767 * arrive back to back */
1768 mi->last_received_time_usec = 0;
1770 /* Use INT32_MAX to ensure the addition doesn't overflow */
1771 if (rtt_time >= INT32_MAX) {
1772 log_fn(LOG_WARN,LD_CIRC,
1773 "Circuit padding RTT estimate overflowed: %"PRIu64
1774 " vs %"PRIu64, monotime_absolute_usec(),
1775 mi->last_received_time_usec);
1776 return;
1779 /* If the old RTT estimate is lower than this one, use this one, because
1780 * the circuit is getting longer. If this estimate is somehow
1781 * faster than the previous, then maybe that was network jitter, or a
1782 * bad monotonic clock source (so our ratchet returned a zero delta).
1783 * In that case, average them. */
1784 if (mi->rtt_estimate_usec < (circpad_delay_t)rtt_time) {
1785 mi->rtt_estimate_usec = (circpad_delay_t)rtt_time;
1786 } else {
1787 mi->rtt_estimate_usec += (circpad_delay_t)rtt_time;
1788 mi->rtt_estimate_usec /= 2;
1790 } else if (circ->state == CIRCUIT_STATE_OPEN) {
1791 /* If last_received_time_usec is zero, then we have gotten two cells back
1792 * to back. Stop estimating RTT in this case. Note that we only
1793 * stop RTT update if the circuit is opened, to allow for RTT estimates
1794 * of var cells during circ setup. */
1795 if (!mi->rtt_estimate_usec && !mi->stop_rtt_update) {
1796 static ratelim_t rtt_lim = RATELIM_INIT(600);
1797 log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG,
1798 "Circuit sent two cells back to back before estimating RTT.");
1800 mi->stop_rtt_update = 1;
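/* Worked example of the ratchet/average rule above (numbers purely
 * illustrative): with a stored rtt_estimate_usec of 5000 and a new sample
 * of 8000 usec, the estimate ratchets up to 8000. If the following sample
 * is only 6000 usec, we average instead: (8000 + 6000) / 2 == 7000. */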
1805 * A "non-padding" cell has been sent from this endpoint. React
1806 * according to any padding state machines on the circuit.
1808 * For origin circuits, this means we sent a cell into the network.
1809 * For middle relay circuits, this means we sent a cell towards the
1810 * origin.
1812 void
1813 circpad_cell_event_nonpadding_sent(circuit_t *on_circ)
1815 /* Update global cell count */
1816 circpad_global_nonpadding_sent++;
1818 /* If there are no machines then this loop should not iterate */
1819 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
1820 /* First, update any timestamps */
1821 on_circ->padding_info[i]->last_cell_time_sec = approx_time();
1822 circpad_estimate_circ_rtt_on_send(on_circ, on_circ->padding_info[i]);
1824 /* Then, do accounting */
1825 circpad_machine_count_nonpadding_sent(on_circ->padding_info[i]);
1827 /* Check to see if we've run out of tokens for this state already,
1828 * and if not, check for other state transitions */
1829 if (check_machine_token_supply(on_circ->padding_info[i])
1830 == CIRCPAD_STATE_UNCHANGED) {
1831 /* If removing a token did not cause a transition, check whether
1832 * the non-padding-sent event should cause one. */
1833 circpad_machine_spec_transition(on_circ->padding_info[i],
1834 CIRCPAD_EVENT_NONPADDING_SENT);
1836 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
1839 /** Check whether this cell or circuit is related to circuit padding, and
1840 * handle it if so. Return 0 if the cell was handled in this subsystem and
1841 * does not need any other consideration, otherwise return 1.
1844 circpad_check_received_cell(cell_t *cell, circuit_t *circ,
1845 crypt_path_t *layer_hint,
1846 const relay_header_t *rh)
1848 /* First handle the padding commands, since we want to ignore any other
1849 * commands if this circuit is padding-specific. */
1850 switch (rh->command) {
1851 case RELAY_COMMAND_DROP:
1852 /* Already examined in circpad_deliver_recognized_relay_cell_events */
1853 return 0;
1854 case RELAY_COMMAND_PADDING_NEGOTIATE:
1855 circpad_handle_padding_negotiate(circ, cell);
1856 return 0;
1857 case RELAY_COMMAND_PADDING_NEGOTIATED:
1858 if (circpad_handle_padding_negotiated(circ, cell, layer_hint) == 0)
1859 circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh->length);
1860 return 0;
1863 /* If this is a padding circuit we don't need to parse any other commands
1864 * than the padding ones. Just drop them to the floor.
1866 * Note: we deliberately do not call circuit_read_valid_data() here. The
1867 * vanguards addon (specifically the 'bandguards' component's dropped cell
1868 * detection) will thus close this circuit, as it would for any other
1869 * unexpected cell. However, default tor will *not* close the circuit.
1871 * This is intentional. We are not yet certain that it is optimal to keep
1872 * padding circuits open in cases like these, rather than closing them.
1873 * We suspect that continuing to pad is optimal against a passive classifier,
1874 * but as soon as the adversary is active (even as a client adversary) this
1875 * might change.
1877 * So as a way forward, we log the cell command and circuit number, to
1878 * help us enumerate the most common instances of this in testing with
1879 * vanguards, to see which are common enough to verify and handle
1880 * properly.
1881 * - Mike
1883 if (circ->purpose == CIRCUIT_PURPOSE_C_CIRCUIT_PADDING) {
1884 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
1885 "Ignored cell (%d) that arrived in padding circuit "
1886 " %u.", rh->command, CIRCUIT_IS_ORIGIN(circ) ?
1887 TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
1888 return 0;
1891 return 1;
1895 * A "non-padding" cell has been received by this endpoint. React
1896 * according to any padding state machines on the circuit.
1898 * For origin circuits, this means we read a cell from the network.
1899 * For middle relay circuits, this means we received a cell from the
1900 * origin.
1902 void
1903 circpad_cell_event_nonpadding_received(circuit_t *on_circ)
1905 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
1906 /* First, update any timestamps */
1907 on_circ->padding_info[i]->last_cell_time_sec = approx_time();
1908 circpad_estimate_circ_rtt_on_received(on_circ, on_circ->padding_info[i]);
1910 circpad_machine_spec_transition(on_circ->padding_info[i],
1911 CIRCPAD_EVENT_NONPADDING_RECV);
1912 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
1916 * A padding cell has been sent from this endpoint. React
1917 * according to any padding state machines on the circuit.
1919 * For origin circuits, this means we sent a cell into the network.
1920 * For middle relay circuits, this means we sent a cell towards the
1921 * origin.
1923 void
1924 circpad_cell_event_padding_sent(circuit_t *on_circ)
1926 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
1927 /* Check to see if we've run out of tokens for this state already,
1928 * and if not, check for other state transitions */
1929 if (check_machine_token_supply(on_circ->padding_info[i])
1930 == CIRCPAD_STATE_UNCHANGED) {
1931 /* If removing a token did not cause a transition, check whether
1932 * the padding-sent event should cause one. */
1934 on_circ->padding_info[i]->last_cell_time_sec = approx_time();
1935 circpad_machine_spec_transition(on_circ->padding_info[i],
1936 CIRCPAD_EVENT_PADDING_SENT);
1938 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
1942 * A padding cell has been received by this endpoint. React
1943 * according to any padding state machines on the circuit.
1945 * For origin circuits, this means we read a cell from the network.
1946 * For middle relay circuits, this means we received a cell from the
1947 * origin.
1949 void
1950 circpad_cell_event_padding_received(circuit_t *on_circ)
1952 /* identical to padding sent */
1953 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
1954 on_circ->padding_info[i]->last_cell_time_sec = approx_time();
1955 circpad_machine_spec_transition(on_circ->padding_info[i],
1956 CIRCPAD_EVENT_PADDING_RECV);
1957 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
1961 * An "infinite" delay has ben chosen from one of our histograms.
1963 * "Infinite" delays mean don't send padding -- but they can also
1964 * mean transition to another state depending on the state machine
1965 * definitions. Check the rules and react accordingly.
1967 * Return 1 if we decide to transition, 0 otherwise.
1969 circpad_decision_t
1970 circpad_internal_event_infinity(circpad_machine_runtime_t *mi)
1972 return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_INFINITY);
1976 * All of the bins of our current state's histogram are empty.
1978 * Check to see if this means transition to another state, and if
1979 * not, refill the tokens.
1981 * Return 1 if we decide to transition, 0 otherwise.
1983 circpad_decision_t
1984 circpad_internal_event_bins_empty(circpad_machine_runtime_t *mi)
1986 if (circpad_machine_spec_transition(mi, CIRCPAD_EVENT_BINS_EMPTY)
1987 == CIRCPAD_STATE_CHANGED) {
1988 return CIRCPAD_STATE_CHANGED;
1989 } else {
1990 /* If we don't transition, then we refill the tokens */
1991 circpad_machine_setup_tokens(mi);
1992 return CIRCPAD_STATE_UNCHANGED;
1997 * This state has used up its cell count. Emit the event and
1998 * see if we transition.
2000 * Return 1 if we decide to transition, 0 otherwise.
2002 circpad_decision_t
2003 circpad_internal_event_state_length_up(circpad_machine_runtime_t *mi)
2005 return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_LENGTH_COUNT);
2009 * Returns true if the circuit matches the conditions.
2011 static inline bool
2012 circpad_machine_conditions_apply(origin_circuit_t *circ,
2013 const circpad_machine_spec_t *machine)
2015 /* If padding is disabled, no machines should match/apply. This has
2016 * the effect of shutting down all machines, and not adding any more. */
2017 if (circpad_padding_disabled || !get_options()->CircuitPadding)
2018 return 0;
2020 /* If the consensus or our torrc has selected reduced connection padding,
2021 * then only allow this machine if it is flagged as acceptable under
2022 * reduced padding conditions */
2023 if (circpad_padding_reduced || get_options()->ReducedCircuitPadding) {
2024 if (!machine->conditions.reduced_padding_ok)
2025 return 0;
2028 if (!(circpad_circ_purpose_to_mask(TO_CIRCUIT(circ)->purpose)
2029 & machine->conditions.apply_purpose_mask))
2030 return 0;
2032 if (machine->conditions.requires_vanguards) {
2033 const or_options_t *options = get_options();
2035 /* Pinned middles are effectively vanguards */
2036 if (!(options->HSLayer2Nodes || options->HSLayer3Nodes))
2037 return 0;
2040 /* We check for any bits set in the circuit state mask so that machines
2041 * can say any of the following through their state bitmask:
2042 * "I want to apply to circuits with either streams or no streams"; OR
2043 * "I only want to apply to circuits with streams"; OR
2044 * "I only want to apply to circuits without streams". */
2045 if (!(circpad_circuit_state(circ) & machine->conditions.apply_state_mask))
2046 return 0;
2048 if (circuit_get_cpath_opened_len(circ) < machine->conditions.min_hops)
2049 return 0;
2051 return 1;
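/* Hypothetical example of how a machine's conditions interact with the
 * checks above (this machine is not defined anywhere in tor): a spec with
 *
 *   machine->conditions.apply_purpose_mask = CIRCPAD_PURPOSE_ALL;
 *   machine->conditions.apply_state_mask = CIRCPAD_CIRC_BUILDING;
 *   machine->conditions.min_hops = 2;
 *
 * would match circuits of any client purpose, but only while they are
 * still being built and only once at least two hops are open. */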
2055 * Check to see if any of the keep conditions still apply to this circuit.
2057 * These conditions keep the machines active if they match, but do not
2058 * cause new machines to start up.
2060 static inline bool
2061 circpad_machine_conditions_keep(origin_circuit_t *circ,
2062 const circpad_machine_spec_t *machine)
2064 if ((circpad_circ_purpose_to_mask(TO_CIRCUIT(circ)->purpose)
2065 & machine->conditions.keep_purpose_mask))
2066 return 1;
2068 if ((circpad_circuit_state(circ) & machine->conditions.keep_state_mask))
2069 return 1;
2071 return 0;
2075 * Returns a minimized representation of the circuit state.
2077 * The padding code only cares if the circuit is building,
2078 * opened, used for streams, and/or still has relay early cells.
2079 * This returns a bitmask of all state properties that apply to
2080 * this circuit.
2082 static inline
2083 circpad_circuit_state_t
2084 circpad_circuit_state(origin_circuit_t *circ)
2086 circpad_circuit_state_t retmask = 0;
2088 if (circ->p_streams)
2089 retmask |= CIRCPAD_CIRC_STREAMS;
2090 else
2091 retmask |= CIRCPAD_CIRC_NO_STREAMS;
2093 /* We use has_opened to prevent cannibalized circs from flapping. */
2094 if (circ->has_opened)
2095 retmask |= CIRCPAD_CIRC_OPENED;
2096 else
2097 retmask |= CIRCPAD_CIRC_BUILDING;
2099 if (circ->remaining_relay_early_cells > 0)
2100 retmask |= CIRCPAD_CIRC_HAS_RELAY_EARLY;
2101 else
2102 retmask |= CIRCPAD_CIRC_HAS_NO_RELAY_EARLY;
2104 return retmask;
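/* For example, an opened circuit with no attached streams and remaining
 * RELAY_EARLY cells yields the mask
 * CIRCPAD_CIRC_OPENED|CIRCPAD_CIRC_NO_STREAMS|CIRCPAD_CIRC_HAS_RELAY_EARLY. */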
2108 * Convert a normal circuit purpose into a bitmask that we can
2109 * use for determining matching circuits.
2111 circpad_purpose_mask_t
2112 circpad_circ_purpose_to_mask(uint8_t circ_purpose)
2114 /* Treat OR circ purposes as ignored. They should not be passed here. */
2115 if (BUG(circ_purpose <= CIRCUIT_PURPOSE_OR_MAX_)) {
2116 return 0;
2119 /* Treat new client circuit purposes as "OMG ITS EVERYTHING".
2120 * This also should not happen */
2121 if (BUG(circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1 > 32)) {
2122 return CIRCPAD_PURPOSE_ALL;
2125 /* Convert the purpose to a bit position */
2126 return 1 << (circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1);
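/* For example, the first client purpose after CIRCUIT_PURPOSE_OR_MAX_ maps
 * to bit 0 (mask 0x1), the next one to bit 1 (mask 0x2), and so on. OR-ing
 * several such masks together builds the purpose masks checked in
 * circpad_machine_conditions_apply() and circpad_machine_conditions_keep(). */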
2130 * Shut down any machines whose conditions no longer match
2131 * the current circuit.
2133 static void
2134 circpad_shutdown_old_machines(origin_circuit_t *on_circ)
2136 circuit_t *circ = TO_CIRCUIT(on_circ);
2138 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
2139 /* We shut down a machine if neither the apply conditions
2140 * nor the keep conditions match. If either set of conditions match,
2141 * keep it around. */
2142 if (!circpad_machine_conditions_apply(on_circ,
2143 circ->padding_machine[i]) &&
2144 !circpad_machine_conditions_keep(on_circ,
2145 circ->padding_machine[i])) {
2146 uint32_t machine_ctr = circ->padding_info[i]->machine_ctr;
2147 // Clear machineinfo (frees timers)
2148 circpad_circuit_machineinfo_free_idx(circ, i);
2149 // Send padding negotiate stop
2150 circpad_negotiate_padding(on_circ,
2151 circ->padding_machine[i]->machine_num,
2152 circ->padding_machine[i]->target_hopnum,
2153 CIRCPAD_COMMAND_STOP,
2154 machine_ctr);
2156 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
2160 * Negotiate new machines that would apply to this circuit, given the machines
2161 * inside <b>machines_sl</b>.
2163 * This function checks to see if we have any free machine indexes,
2164 * and for each free machine index, it initializes the most recently
2165 * added origin-side padding machine that matches the target machine
2166 * index and circuit conditions, and negotiates it with the appropriate
2167 * middle relay.
2169 STATIC void
2170 circpad_add_matching_machines(origin_circuit_t *on_circ,
2171 smartlist_t *machines_sl)
2173 circuit_t *circ = TO_CIRCUIT(on_circ);
2175 #ifdef TOR_UNIT_TESTS
2176 /* Tests don't have to init our padding machines */
2177 if (!machines_sl)
2178 return;
2179 #endif
2181 /* If padding negotiation failed before, do not try again */
2182 if (on_circ->padding_negotiation_failed)
2183 return;
2185 FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
2186 /* If there is a padding machine info, this index is occupied.
2187 * No need to check conditions for this index. */
2188 if (circ->padding_info[i])
2189 continue;
2191 /* We have a free machine index. Check the origin padding
2192 * machines in reverse order, so that more recently added
2193 * machines take priority over older ones. */
2194 SMARTLIST_FOREACH_REVERSE_BEGIN(machines_sl,
2195 circpad_machine_spec_t *,
2196 machine) {
2197 /* Machine definitions have a specific target machine index.
2198 * This is so event ordering is deterministic with respect
2199 * to which machine gets events first when there are two
2200 * machines installed on a circuit. Make sure we only
2201 * add this machine if its target machine index is free. */
2202 if (machine->machine_index == i &&
2203 circpad_machine_conditions_apply(on_circ, machine)) {
2205 // We can only replace this machine if the target hopnum
2206 // is the same, otherwise we'll get invalid data
2207 if (circ->padding_machine[i]) {
2208 if (circ->padding_machine[i]->target_hopnum !=
2209 machine->target_hopnum)
2210 continue;
2211 /* Replace it. (Don't free - is global). */
2212 circ->padding_machine[i] = NULL;
2215 /* Set up the machine immediately so that the slot is occupied.
2216 * We will tear it down on error return, or if there is an error
2217 * response from the relay. */
2218 circpad_setup_machine_on_circ(circ, machine);
2219 if (circpad_negotiate_padding(on_circ, machine->machine_num,
2220 machine->target_hopnum,
2221 CIRCPAD_COMMAND_START,
2222 circ->padding_machine_ctr) < 0) {
2223 log_info(LD_CIRC,
2224 "Padding not negotiated. Cleaning machine from circuit %u",
2225 CIRCUIT_IS_ORIGIN(circ) ?
2226 TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
2227 circpad_circuit_machineinfo_free_idx(circ, i);
2228 circ->padding_machine[i] = NULL;
2229 on_circ->padding_negotiation_failed = 1;
2230 } else {
2231 /* Success. Don't try any more machines on this index */
2232 break;
2235 } SMARTLIST_FOREACH_END(machine);
2236 } FOR_EACH_CIRCUIT_MACHINE_END;
2240 * Event that tells us we added a hop to an origin circuit.
2242 * This event is used to decide if we should create a padding machine
2243 * on a circuit.
2245 void
2246 circpad_machine_event_circ_added_hop(origin_circuit_t *on_circ)
2248 /* Since our padding conditions do not specify a max_hops,
2249 * all we can do is add machines here */
2250 circpad_add_matching_machines(on_circ, origin_padding_machines);
2254 * Event that tells us that an origin circuit is now built.
2256 * Shut down any machines that only applied to un-built circuits.
2257 * Activate any new ones.
2259 void
2260 circpad_machine_event_circ_built(origin_circuit_t *circ)
2262 circpad_shutdown_old_machines(circ);
2263 circpad_add_matching_machines(circ, origin_padding_machines);
2267 * Circpad purpose changed event.
2269 * Shut down any machines that don't apply to our circ purpose.
2270 * Activate any new ones that do.
2272 void
2273 circpad_machine_event_circ_purpose_changed(origin_circuit_t *circ)
2275 circpad_shutdown_old_machines(circ);
2276 circpad_add_matching_machines(circ, origin_padding_machines);
2280 * Event that tells us that an origin circuit is out of RELAY_EARLY
2281 * cells.
2283 * Shut down any machines that only applied to RELAY_EARLY circuits.
2284 * Activate any new ones.
2286 void
2287 circpad_machine_event_circ_has_no_relay_early(origin_circuit_t *circ)
2289 circpad_shutdown_old_machines(circ);
2290 circpad_add_matching_machines(circ, origin_padding_machines);
2294 * Streams attached event.
2296 * Called from link_apconn_to_circ() and handle_hs_exit_conn()
2298 * Shut down any machines that only applied to circuits without
2299 * streams. Activate any new ones.
2301 void
2302 circpad_machine_event_circ_has_streams(origin_circuit_t *circ)
2304 circpad_shutdown_old_machines(circ);
2305 circpad_add_matching_machines(circ, origin_padding_machines);
2309 * Streams detached event.
2311 * Called from circuit_detach_stream()
2313 * Shut down any machines that only applied to circuits with
2314 * streams. Activate any new ones.
2316 void
2317 circpad_machine_event_circ_has_no_streams(origin_circuit_t *circ)
2319 circpad_shutdown_old_machines(circ);
2320 circpad_add_matching_machines(circ, origin_padding_machines);
2324 * Verify that padding is coming from the expected hop.
2326 * Returns true if from_hop matches the target hop from
2327 * one of our padding machines.
2329 * Returns false if we're not an origin circuit, or if from_hop
2330 * does not match one of the padding machines.
2332 bool
2333 circpad_padding_is_from_expected_hop(circuit_t *circ,
2334 crypt_path_t *from_hop)
2336 crypt_path_t *target_hop = NULL;
2337 if (!CIRCUIT_IS_ORIGIN(circ))
2338 return 0;
2340 FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
2341 /* We have to check padding_machine and not padding_info/active
2342 * machines here because padding may arrive after we shut down a
2343 * machine. The info is gone, but the padding_machine waits
2344 * for the padding_negotiated response to come back. */
2345 if (!circ->padding_machine[i])
2346 continue;
2348 target_hop = circuit_get_cpath_hop(TO_ORIGIN_CIRCUIT(circ),
2349 circ->padding_machine[i]->target_hopnum);
2351 if (target_hop == from_hop)
2352 return 1;
2353 } FOR_EACH_CIRCUIT_MACHINE_END;
2355 return 0;
2359 * Deliver circpad events for an "unrecognized cell".
2361 * Unrecognized cells are sent to relays and are forwarded
2362 * onto the next hop of their circuits. Unrecognized cells
2363 * are by definition not padding. We need to tell relay-side
2364 * state machines that a non-padding cell was sent or received,
2365 * depending on the direction, so they can update their histograms
2366 * and decide to pad or not.
2368 void
2369 circpad_deliver_unrecognized_cell_events(circuit_t *circ,
2370 cell_direction_t dir)
2372 // We should never see unrecognized cells at origin.
2373 // Our caller emits a warn when this happens.
2374 if (CIRCUIT_IS_ORIGIN(circ)) {
2375 return;
2378 if (dir == CELL_DIRECTION_OUT) {
2379 /* When direction is out (away from origin), we received a non-padding
2380 cell coming from the origin to us. */
2381 circpad_cell_event_nonpadding_received(circ);
2382 } else if (dir == CELL_DIRECTION_IN) {
2383 /* It's in and not origin, so the cell is going away from us.
2384 * So we are relaying a non-padding cell towards the origin. */
2385 circpad_cell_event_nonpadding_sent(circ);
2390 * Deliver circpad events for "recognized" relay cells.
2392 * Recognized cells are destined for this hop, either client or middle.
2393 * Check if this is a padding cell or not, and send the appropriate
2394 * received event.
2396 void
2397 circpad_deliver_recognized_relay_cell_events(circuit_t *circ,
2398 uint8_t relay_command,
2399 crypt_path_t *layer_hint)
2401 if (relay_command == RELAY_COMMAND_DROP) {
2402 rep_hist_padding_count_read(PADDING_TYPE_DROP);
2404 if (CIRCUIT_IS_ORIGIN(circ)) {
2405 if (circpad_padding_is_from_expected_hop(circ, layer_hint)) {
2406 circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), 0);
2407 } else {
2408 /* This is unexpected padding. Ignore it for now. */
2409 return;
2413 /* The cell should be recognized by now, which means that we are at the
2414 destination, which means that we received a padding cell. We might be
2415 the client or the middle node, still, because of leaky-pipe topology. */
2416 circpad_cell_event_padding_received(circ);
2417 log_fn(LOG_INFO, LD_CIRC, "Got padding cell on %s circuit %u.",
2418 CIRCUIT_IS_ORIGIN(circ) ? "origin" : "non-origin",
2419 CIRCUIT_IS_ORIGIN(circ) ?
2420 TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
2421 } else {
2422 /* We received a non-padding cell on the edge */
2423 circpad_cell_event_nonpadding_received(circ);
2428 * Deliver circpad events for relay cells sent from us.
2430 * If this is a padding cell, update our padding stats
2431 * and deliver the event. Otherwise just deliver the event.
2433 void
2434 circpad_deliver_sent_relay_cell_events(circuit_t *circ,
2435 uint8_t relay_command)
2437 /* RELAY_COMMAND_DROP is the multi-hop (aka circuit-level) padding cell in
2438 * tor. (CELL_PADDING is a channel-level padding cell, which is not relayed
2439 * or processed here).
2441 * We do generate events for PADDING_NEGOTIATE and PADDING_NEGOTIATED cells.
2443 if (relay_command == RELAY_COMMAND_DROP) {
2444 /* Optimization: The event for RELAY_COMMAND_DROP is sent directly
2445 * from circpad_send_padding_cell_for_callback(). This is to avoid
2446 * putting a cell_t and a relay_header_t on the stack repeatedly
2447 * if we decide to send a long train of padding cells back-to-back
2448 * with 0 delay. So we do nothing here. */
2449 return;
2450 } else {
2451 /* This is a non-padding cell sent from the client or from
2452 * this node. */
2453 circpad_cell_event_nonpadding_sent(circ);
2458 * Initialize the states array for a circpad machine.
2460 void
2461 circpad_machine_states_init(circpad_machine_spec_t *machine,
2462 circpad_statenum_t num_states)
2464 if (BUG(num_states > CIRCPAD_MAX_MACHINE_STATES)) {
2465 num_states = CIRCPAD_MAX_MACHINE_STATES;
2468 machine->num_states = num_states;
2469 machine->states = tor_malloc_zero(sizeof(circpad_state_t)*num_states);
2471 /* Initialize the default next state for all events to
2472 * "ignore" -- if events aren't specified, they are ignored. */
2473 for (circpad_statenum_t s = 0; s < num_states; s++) {
2474 for (int e = 0; e < CIRCPAD_NUM_EVENTS; e++) {
2475 machine->states[s].next_state[e] = CIRCPAD_STATE_IGNORE;
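/* For a complete example of building a machine on top of
 * circpad_machine_states_init() -- allocating the spec, setting state
 * transitions and histograms, and registering it -- see the unit-test-only
 * circpad_circ_client_machine_init() below. */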
2480 static void
2481 circpad_setup_machine_on_circ(circuit_t *on_circ,
2482 const circpad_machine_spec_t *machine)
2484 if (CIRCUIT_IS_ORIGIN(on_circ) && !machine->is_origin_side) {
2485 log_fn(LOG_WARN, LD_BUG,
2486 "Can't set up non-origin machine on origin circuit!");
2487 return;
2490 if (!CIRCUIT_IS_ORIGIN(on_circ) && machine->is_origin_side) {
2491 log_fn(LOG_WARN, LD_BUG,
2492 "Can't set up origin machine on non-origin circuit!");
2493 return;
2496 IF_BUG_ONCE(on_circ->padding_machine[machine->machine_index] != NULL) {
2497 return;
2499 IF_BUG_ONCE(on_circ->padding_info[machine->machine_index] != NULL) {
2500 return;
2503 /* Log message */
2504 if (CIRCUIT_IS_ORIGIN(on_circ)) {
2505 log_info(LD_CIRC, "Registering machine %s to origin circ %u (%d)",
2506 machine->name,
2507 TO_ORIGIN_CIRCUIT(on_circ)->global_identifier, on_circ->purpose);
2508 } else {
2509 log_info(LD_CIRC, "Registering machine %s to non-origin circ (%d)",
2510 machine->name, on_circ->purpose);
2513 /* Padding machine ctr starts at 1, so we increment this ctr first.
2514 * (machine ctr of 0 means "any machine").
2516 * See https://bugs.torproject.org/30992. */
2517 on_circ->padding_machine_ctr++;
2519 /* uint32 wraparound check: 0 is special, just wrap to 1 */
2520 if (on_circ->padding_machine_ctr == 0) {
2521 on_circ->padding_machine_ctr = 1;
2524 on_circ->padding_info[machine->machine_index] =
2525 circpad_circuit_machineinfo_new(on_circ, machine->machine_index);
2526 on_circ->padding_machine[machine->machine_index] = machine;
2529 /** Validate a single state of a padding machine */
2530 static bool
2531 padding_machine_state_is_valid(const circpad_state_t *state)
2533 int b;
2534 uint32_t tokens_count = 0;
2535 circpad_delay_t prev_bin_edge = 0;
2537 /* We only validate histograms */
2538 if (!state->histogram_len) {
2539 return true;
2542 /* We need at least two bins in a histogram */
2543 if (state->histogram_len < 2) {
2544 log_warn(LD_CIRC, "You can't have a histogram with less than 2 bins");
2545 return false;
2548 /* For each machine state, if it's a histogram, make sure all the
2549 * histogram edges are well defined (i.e. are strictly monotonic). */
2550 for (b = 0 ; b < state->histogram_len ; b++) {
2551 /* Check that histogram edges are strictly increasing. Ignore the first
2552 * edge since it can be zero. */
2553 if (prev_bin_edge >= state->histogram_edges[b] && b > 0) {
2554 log_warn(LD_CIRC, "Histogram edges are not increasing [%u/%u]",
2555 prev_bin_edge, state->histogram_edges[b]);
2556 return false;
2559 prev_bin_edge = state->histogram_edges[b];
2561 /* Also count the number of tokens as we go through the histogram bins */
2562 tokens_count += state->histogram[b];
2564 /* Verify that the total number of tokens is correct */
2565 if (tokens_count != state->histogram_total_tokens) {
2566 log_warn(LD_CIRC, "Histogram token count is wrong [%u/%u]",
2567 tokens_count, state->histogram_total_tokens);
2568 return false;
2571 return true;
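/* As a concrete example of a histogram that passes these checks, the
 * test-only client machine below uses two bins with strictly increasing
 * edges {500, 1000000} usec, token counts {5, 0} (the second bin is left
 * at zero), and histogram_total_tokens == 5. */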
2574 /** Basic validation of padding machine */
2575 static bool
2576 padding_machine_is_valid(const circpad_machine_spec_t *machine)
2578 int i;
2580 /* Validate the histograms of the padding machine */
2581 for (i = 0 ; i < machine->num_states ; i++) {
2582 if (!padding_machine_state_is_valid(&machine->states[i])) {
2583 return false;
2587 return true;
2590 /* Validate and register <b>machine</b> into <b>machine_list</b>. If
2591 * <b>machine_list</b> is NULL, then just validate. */
2592 void
2593 circpad_register_padding_machine(circpad_machine_spec_t *machine,
2594 smartlist_t *machine_list)
2596 if (!padding_machine_is_valid(machine)) {
2597 log_warn(LD_CIRC, "Machine #%u is invalid. Ignoring.",
2598 machine->machine_num);
2599 return;
2602 if (machine_list) {
2603 smartlist_add(machine_list, machine);
2607 #ifdef TOR_UNIT_TESTS
2608 /* These padding machines are only used for tests pending #28634. */
2609 static void
2610 circpad_circ_client_machine_init(void)
2612 circpad_machine_spec_t *circ_client_machine
2613 = tor_malloc_zero(sizeof(circpad_machine_spec_t));
2615 circ_client_machine->conditions.min_hops = 2;
2616 circ_client_machine->conditions.apply_state_mask =
2617 CIRCPAD_CIRC_BUILDING|CIRCPAD_CIRC_OPENED|CIRCPAD_CIRC_HAS_RELAY_EARLY;
2618 circ_client_machine->conditions.apply_purpose_mask = CIRCPAD_PURPOSE_ALL;
2619 circ_client_machine->conditions.reduced_padding_ok = 1;
2621 circ_client_machine->target_hopnum = 2;
2622 circ_client_machine->is_origin_side = 1;
2624 /* Start, gap, burst */
2625 circpad_machine_states_init(circ_client_machine, 3);
2627 circ_client_machine->states[CIRCPAD_STATE_START].
2628 next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
2630 circ_client_machine->states[CIRCPAD_STATE_BURST].
2631 next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
2632 circ_client_machine->states[CIRCPAD_STATE_BURST].
2633 next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;
2635 /* If we are in burst state, and we send a non-padding cell, then we cancel
2636 the timer for the next padding cell:
2637 We don't want to send fake extends when actual extends are going on */
2638 circ_client_machine->states[CIRCPAD_STATE_BURST].
2639 next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_CANCEL;
2641 circ_client_machine->states[CIRCPAD_STATE_BURST].
2642 next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
2644 circ_client_machine->states[CIRCPAD_STATE_BURST].token_removal =
2645 CIRCPAD_TOKEN_REMOVAL_CLOSEST;
2647 circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
2648 circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0]= 500;
2649 circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1]= 1000000;
2651 /* We have 5 tokens in the histogram, which means that all circuits will look
2652 * like they have 7 hops (since we start this machine after the second hop,
2653 * and tokens are decremented for any valid hops, and fake extends are
2654 * used after that -- 2+5==7). */
2655 circ_client_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 5;
2657 circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_total_tokens = 5;
2659 circ_client_machine->machine_num = smartlist_len(origin_padding_machines);
2660 circpad_register_padding_machine(circ_client_machine,
2661 origin_padding_machines);
2664 static void
2665 circpad_circ_responder_machine_init(void)
2667 circpad_machine_spec_t *circ_responder_machine
2668 = tor_malloc_zero(sizeof(circpad_machine_spec_t));
2670 /* Shut down the machine after we've sent enough packets */
2671 circ_responder_machine->should_negotiate_end = 1;
2673 /* The relay-side doesn't care what hopnum it is, but for consistency,
2674 * let's match the client */
2675 circ_responder_machine->target_hopnum = 2;
2676 circ_responder_machine->is_origin_side = 0;
2678 /* Start, gap, burst */
2679 circpad_machine_states_init(circ_responder_machine, 3);
2681 /* These are the settings of the state machine. In the future we will
2682 serialize them into the consensus or the torrc. */
2684 /* We transition to the burst state on padding receive and on non-padding
2685 * receive */
2686 circ_responder_machine->states[CIRCPAD_STATE_START].
2687 next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;
2688 circ_responder_machine->states[CIRCPAD_STATE_START].
2689 next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
2691 /* Inside the burst state we _stay_ in the burst state when a non-padding
2692 * is sent */
2693 circ_responder_machine->states[CIRCPAD_STATE_BURST].
2694 next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_BURST;
2696 /* Inside the burst state we transition to the gap state when we receive a
2697 * padding cell */
2698 circ_responder_machine->states[CIRCPAD_STATE_BURST].
2699 next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;
2701 /* These describe the padding characteristics when in burst state */
2703 /* use_rtt_estimate tries to estimate how long padding cells take to go from
2704 C->M, and uses that as the base of the histogram */
2705 circ_responder_machine->states[CIRCPAD_STATE_BURST].use_rtt_estimate = 1;
2706 /* The histogram is 2 bins: an empty one, and infinity */
2707 circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
2708 circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0]= 500;
2709 circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1] =
2710 1000000;
2711 /* During burst state we wait forever for padding to arrive.
2713 We are waiting for a padding cell from the client to come in, so that we
2714 respond, and imitate what an extend looks like */
2715 circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 0;
2716 // Only infinity bin:
2717 circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[1] = 1;
2718 circ_responder_machine->states[CIRCPAD_STATE_BURST].
2719 histogram_total_tokens = 1;
2721 /* From the gap state, we _stay_ in the gap state, when we receive padding
2722 * or non padding */
2723 circ_responder_machine->states[CIRCPAD_STATE_GAP].
2724 next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;
2725 circ_responder_machine->states[CIRCPAD_STATE_GAP].
2726 next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_GAP;
2728 /* And from the gap state, we go to the end, when the bins are empty or a
2729 * non-padding cell is sent */
2730 circ_responder_machine->states[CIRCPAD_STATE_GAP].
2731 next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
2732 circ_responder_machine->states[CIRCPAD_STATE_GAP].
2733 next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_END;
2735 // FIXME: Tune this histogram
2737 /* The gap state describes the delay we wait after receiving a padding cell
2738 before sending a padding response */
2739 circ_responder_machine->states[CIRCPAD_STATE_GAP].use_rtt_estimate = 1;
2740 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_len = 6;
2741 /* Specify histogram bins */
2742 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[0]= 500;
2743 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[1]= 1000;
2744 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[2]= 2000;
2745 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[3]= 4000;
2746 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[4]= 8000;
2747 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[5]= 16000;
2748 /* Specify histogram tokens */
2749 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[0] = 0;
2750 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[1] = 1;
2751 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[2] = 2;
2752 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[3] = 2;
2753 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[4] = 1;
2754 /* Total number of tokens */
2755 circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_total_tokens = 6;
2757 circ_responder_machine->states[CIRCPAD_STATE_GAP].token_removal =
2758 CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC;
2760 circ_responder_machine->machine_num = smartlist_len(relay_padding_machines);
2761 circpad_register_padding_machine(circ_responder_machine,
2762 relay_padding_machines);
2764 #endif /* defined(TOR_UNIT_TESTS) */
2767 * Initialize all of our padding machines.
2769 * This is called at startup. It sets up some global machines, and then
2770 * loads some from torrc, and from the tor consensus.
2772 void
2773 circpad_machines_init(void)
2775 tor_assert_nonfatal(origin_padding_machines == NULL);
2776 tor_assert_nonfatal(relay_padding_machines == NULL);
2778 origin_padding_machines = smartlist_new();
2779 relay_padding_machines = smartlist_new();
2781 /* Register machines for hiding client-side intro circuits */
2782 circpad_machine_client_hide_intro_circuits(origin_padding_machines);
2783 circpad_machine_relay_hide_intro_circuits(relay_padding_machines);
2785 /* Register machines for hiding client-side rendezvous circuits */
2786 circpad_machine_client_hide_rend_circuits(origin_padding_machines);
2787 circpad_machine_relay_hide_rend_circuits(relay_padding_machines);
2789 // TODO: Parse machines from consensus and torrc
2790 #ifdef TOR_UNIT_TESTS
2791 circpad_circ_client_machine_init();
2792 circpad_circ_responder_machine_init();
2793 #endif
2797 * Free our padding machines
2799 void
2800 circpad_machines_free(void)
2802 if (origin_padding_machines) {
2803 SMARTLIST_FOREACH(origin_padding_machines,
2804 circpad_machine_spec_t *,
2805 m, tor_free(m->states); tor_free(m));
2806 smartlist_free(origin_padding_machines);
2809 if (relay_padding_machines) {
2810 SMARTLIST_FOREACH(relay_padding_machines,
2811 circpad_machine_spec_t *,
2812 m, tor_free(m->states); tor_free(m));
2813 smartlist_free(relay_padding_machines);
2818 * Check the Protover info to see if a node supports padding.
2820 static bool
2821 circpad_node_supports_padding(const node_t *node)
2823 if (node->rs) {
2824 log_fn(LOG_INFO, LD_CIRC, "Checking padding: %s",
2825 node->rs->pv.supports_hs_setup_padding ?
2826 "supported" : "unsupported");
2827 return node->rs->pv.supports_hs_setup_padding;
2830 log_fn(LOG_INFO, LD_CIRC, "Empty routerstatus in padding check");
2831 return 0;
2835 * Get a node_t for the nth hop in our circuit, starting from 1.
2837 * Returns the node_t from the consensus for that hop, if that hop is open.
2838 * Otherwise returns NULL.
2840 MOCK_IMPL(STATIC const node_t *,
2841 circuit_get_nth_node,(origin_circuit_t *circ, int hop))
2843 crypt_path_t *iter = circuit_get_cpath_hop(circ, hop);
2845 if (!iter || iter->state != CPATH_STATE_OPEN)
2846 return NULL;
2848 return node_get_by_id(iter->extend_info->identity_digest);
2852 * Return true if a particular circuit supports padding
2853 * at the desired hop.
2855 static bool
2856 circpad_circuit_supports_padding(origin_circuit_t *circ,
2857 int target_hopnum)
2859 const node_t *hop;
2861 if (!(hop = circuit_get_nth_node(circ, target_hopnum))) {
2862 return 0;
2865 return circpad_node_supports_padding(hop);
2869 * Try to negotiate padding.
2871 * Returns -1 on error, 0 on success.
2873 signed_error_t
2874 circpad_negotiate_padding(origin_circuit_t *circ,
2875 circpad_machine_num_t machine,
2876 uint8_t target_hopnum,
2877 uint8_t command,
2878 uint32_t machine_ctr)
2880 circpad_negotiate_t type;
2881 cell_t cell;
2882 ssize_t len;
2884 /* Check that the target hop lists support for padding in
2885 * its ProtoVer fields */
2886 if (!circpad_circuit_supports_padding(circ, target_hopnum)) {
2887 return -1;
2890 memset(&cell, 0, sizeof(cell_t));
2891 memset(&type, 0, sizeof(circpad_negotiate_t));
2892 // This gets reset to RELAY_EARLY appropriately by
2893 // relay_send_command_from_edge_. At least, it looks that way.
2894 // QQQ-MP-AP: Verify that.
2895 cell.command = CELL_RELAY;
2897 circpad_negotiate_set_command(&type, command);
2898 circpad_negotiate_set_version(&type, 0);
2899 circpad_negotiate_set_machine_type(&type, machine);
2900 circpad_negotiate_set_machine_ctr(&type, machine_ctr);
2902 if ((len = circpad_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
2903 &type)) < 0)
2904 return -1;
2906 log_fn(LOG_INFO,LD_CIRC,
2907 "Negotiating padding on circuit %u (%d), command %d, for ctr %u",
2908 circ->global_identifier, TO_CIRCUIT(circ)->purpose, command,
2909 machine_ctr);
2911 return circpad_send_command_to_hop(circ, target_hopnum,
2912 RELAY_COMMAND_PADDING_NEGOTIATE,
2913 cell.payload, len);
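/* Negotiation round trip, for reference: the client sends its request with
 * circpad_negotiate_padding() (this function); the middle hop processes it
 * in circpad_handle_padding_negotiate() and answers via
 * circpad_padding_negotiated(); the client then handles that answer in
 * circpad_handle_padding_negotiated(). The same exchange is used for
 * CIRCPAD_COMMAND_STOP when a machine is torn down. */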
2917 * Send a PADDING_NEGOTIATED cell to the origin with the result of its
2919 * padding request. Returns 1 if the cell was sent successfully, 0 otherwise.
2921 bool
2922 circpad_padding_negotiated(circuit_t *circ,
2923 circpad_machine_num_t machine,
2924 uint8_t command,
2925 uint8_t response,
2926 uint32_t machine_ctr)
2928 circpad_negotiated_t type;
2929 cell_t cell;
2930 ssize_t len;
2932 memset(&cell, 0, sizeof(cell_t));
2933 memset(&type, 0, sizeof(circpad_negotiated_t));
2934 // This gets reset to RELAY_EARLY appropriately by
2935 // relay_send_command_from_edge_. At least, it looks that way.
2936 // QQQ-MP-AP: Verify that.
2937 cell.command = CELL_RELAY;
2939 circpad_negotiated_set_command(&type, command);
2940 circpad_negotiated_set_response(&type, response);
2941 circpad_negotiated_set_version(&type, 0);
2942 circpad_negotiated_set_machine_type(&type, machine);
2943 circpad_negotiated_set_machine_ctr(&type, machine_ctr);
2945 if ((len = circpad_negotiated_encode(cell.payload, CELL_PAYLOAD_SIZE,
2946 &type)) < 0)
2947 return 0;
2949 /* Use relay_send because we're from the middle to the origin. We don't
2950 * need to specify a target hop or layer_hint. */
2951 return relay_send_command_from_edge(0, circ,
2952 RELAY_COMMAND_PADDING_NEGOTIATED,
2953 (void*)cell.payload,
2954 (size_t)len, NULL) == 0;
2958 * Parse and react to a padding_negotiate cell.
2960 * This is called at the middle node upon receipt of the client's choice of
2961 * state machine, so that it can use the requested state machine index, if
2962 * it is available.
2964 * Returns -1 on error, 0 on success.
2966 signed_error_t
2967 circpad_handle_padding_negotiate(circuit_t *circ, cell_t *cell)
2969 int retval = 0;
2970 circpad_negotiate_t *negotiate;
2972 if (CIRCUIT_IS_ORIGIN(circ)) {
2973 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
2974 "Padding negotiate cell unsupported at origin (circuit %u)",
2975 TO_ORIGIN_CIRCUIT(circ)->global_identifier);
2976 return -1;
2979 if (circpad_negotiate_parse(&negotiate, cell->payload+RELAY_HEADER_SIZE,
2980 CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE) < 0) {
2981 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
2982 "Received malformed PADDING_NEGOTIATE cell; dropping.");
2983 return -1;
2986 if (negotiate->command == CIRCPAD_COMMAND_STOP) {
2987 /* Free the machine corresponding to this machine type */
2988 if (free_circ_machineinfos_with_machine_num(circ,
2989 negotiate->machine_type,
2990 negotiate->machine_ctr)) {
2991 log_info(LD_CIRC, "Received STOP command for machine %u, ctr %u",
2992 negotiate->machine_type, negotiate->machine_ctr);
2993 goto done;
2995 if (negotiate->machine_ctr <= circ->padding_machine_ctr) {
2996 log_info(LD_CIRC, "Received STOP command for old machine %u, ctr %u",
2997 negotiate->machine_type, negotiate->machine_ctr);
2998 goto done;
3000 } else {
3001 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
3002 "Received circuit padding stop command for unknown machine.");
3003 goto err;
3005 } else if (negotiate->command == CIRCPAD_COMMAND_START) {
3006 SMARTLIST_FOREACH_BEGIN(relay_padding_machines,
3007 const circpad_machine_spec_t *, m) {
3008 if (m->machine_num == negotiate->machine_type) {
3009 circpad_setup_machine_on_circ(circ, m);
3010 if (negotiate->machine_ctr &&
3011 circ->padding_machine_ctr != negotiate->machine_ctr) {
3012 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
3013 "Client and relay have different counts for padding machines: "
3014 "%u vs %u", circ->padding_machine_ctr, negotiate->machine_ctr);
3016 circpad_cell_event_nonpadding_received(circ);
3017 goto done;
3019 } SMARTLIST_FOREACH_END(m);
3022 err:
3023 retval = -1;
3025 done:
3026 circpad_padding_negotiated(circ, negotiate->machine_type,
3027 negotiate->command,
3028 (retval == 0) ? CIRCPAD_RESPONSE_OK : CIRCPAD_RESPONSE_ERR,
3029 negotiate->machine_ctr);
3030 circpad_negotiate_free(negotiate);
3032 return retval;
3036 * Parse and react to a padding_negotiated cell.
3038 * This is called at the origin upon receipt of the middle's response
3039 * to our choice of state machine.
3041 * Returns -1 on error, 0 on success.
3043 signed_error_t
3044 circpad_handle_padding_negotiated(circuit_t *circ, cell_t *cell,
3045 crypt_path_t *layer_hint)
3047 circpad_negotiated_t *negotiated;
3049 if (!CIRCUIT_IS_ORIGIN(circ)) {
3050 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
3051 "Padding negotiated cell unsupported at non-origin.");
3052 return -1;
3055 /* Verify this came from the expected hop */
3056 if (!circpad_padding_is_from_expected_hop(circ, layer_hint)) {
3057 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
3058 "Padding negotiated cell from wrong hop on circuit %u",
3059 TO_ORIGIN_CIRCUIT(circ)->global_identifier);
3060 return -1;
3063 if (circpad_negotiated_parse(&negotiated, cell->payload+RELAY_HEADER_SIZE,
3064 CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE) < 0) {
3065 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
3066 "Received malformed PADDING_NEGOTIATED cell on circuit %u; "
3067 "dropping.", TO_ORIGIN_CIRCUIT(circ)->global_identifier);
3068 return -1;
3071 if (negotiated->command == CIRCPAD_COMMAND_STOP) {
3072 log_info(LD_CIRC,
3073 "Received STOP command on PADDING_NEGOTIATED for circuit %u",
3074 TO_ORIGIN_CIRCUIT(circ)->global_identifier);
3075 /* There may not be a padding_info here if we shut down the
3076 * machine in circpad_shutdown_old_machines(). Or, if
3077 * circpad_add_matching_machines() added a new machine,
3078 * there may be a padding_machine for a different machine num
3079 * than this response. */
3080 free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type,
3081 negotiated->machine_ctr);
3082 } else if (negotiated->command == CIRCPAD_COMMAND_START &&
3083 negotiated->response == CIRCPAD_RESPONSE_ERR) {
3084 // This can still happen due to consensus drift.. free the machines
3085 // and be sad
3086 if (free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type,
3087 negotiated->machine_ctr)) {
3088 // Only fail if a machine was there and matched the error cell
3089 TO_ORIGIN_CIRCUIT(circ)->padding_negotiation_failed = 1;
3090 log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
3091 "Middle node did not accept our padding request on circuit "
3092 "%u (%d)",
3093 TO_ORIGIN_CIRCUIT(circ)->global_identifier,
3094 circ->purpose);
3098 circpad_negotiated_free(negotiated);
3099 return 0;
3102 /** Free memory allocated by this machine spec. */
3103 STATIC void
3104 machine_spec_free_(circpad_machine_spec_t *m)
3106 if (!m) return;
3108 tor_free(m->states);
3109 tor_free(m);
3112 /** Free all memory allocated by the circuitpadding subsystem. */
3113 void
3114 circpad_free_all(void)
3116 if (origin_padding_machines) {
3117 SMARTLIST_FOREACH_BEGIN(origin_padding_machines,
3118 circpad_machine_spec_t *, m) {
3119 machine_spec_free(m);
3120 } SMARTLIST_FOREACH_END(m);
3121 smartlist_free(origin_padding_machines);
3123 if (relay_padding_machines) {
3124 SMARTLIST_FOREACH_BEGIN(relay_padding_machines,
3125 circpad_machine_spec_t *, m) {
3126 machine_spec_free(m);
3127 } SMARTLIST_FOREACH_END(m);
3128 smartlist_free(relay_padding_machines);
3132 /* Serialization */
3133 // TODO: Should we use keyword=value here? Are there helpers for that?
3134 #if 0
3135 static void
3136 circpad_state_serialize(const circpad_state_t *state,
3137 smartlist_t *chunks)
3139 smartlist_add_asprintf(chunks, " %u", state->histogram[0]);
3140 for (int i = 1; i < state->histogram_len; i++) {
3141 smartlist_add_asprintf(chunks, ",%u",
3142 state->histogram[i]);
3145 smartlist_add_asprintf(chunks, " 0x%x",
3146 state->transition_cancel_events);
3148 for (int i = 0; i < CIRCPAD_NUM_STATES; i++) {
3149 smartlist_add_asprintf(chunks, ",0x%x",
3150 state->transition_events[i]);
3153 smartlist_add_asprintf(chunks, " %u %u",
3154 state->use_rtt_estimate,
3155 state->token_removal);
3158 char *
3159 circpad_machine_spec_to_string(const circpad_machine_spec_t *machine)
3161 smartlist_t *chunks = smartlist_new();
3162 char *out;
3163 (void)machine;
3165 circpad_state_serialize(&machine->start, chunks);
3166 circpad_state_serialize(&machine->gap, chunks);
3167 circpad_state_serialize(&machine->burst, chunks);
3169 out = smartlist_join_strings(chunks, "", 0, NULL);
3171 SMARTLIST_FOREACH(chunks, char *, cp, tor_free(cp));
3172 smartlist_free(chunks);
3173 return out;
3176 // XXX: Writeme
3177 const circpad_machine_spec_t *
3178 circpad_string_to_machine(const char *str)
3180 (void)str;
3181 return NULL;
3184 #endif /* 0 */