Report only the top 10 ports in exit-port stats.
[tor/rransom.git] / src / or / relay.c
blob467f8847c874343ea168228225f947c1b7109a30
/* Copyright (c) 2001 Matej Pfajfar.
 * Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2010, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file relay.c
 * \brief Handle relay cell encryption/decryption, plus packaging and
 * receiving from circuits, plus queuing on circuits.
 **/
#include <math.h>
#include "or.h"
#include "buffers.h"
#include "circuitbuild.h"
#include "circuitlist.h"
#include "config.h"
#include "connection.h"
#include "connection_edge.h"
#include "connection_or.h"
#include "control.h"
#include "geoip.h"
#include "main.h"
#include "mempool.h"
#include "networkstatus.h"
#include "policies.h"
#include "reasons.h"
#include "relay.h"
#include "rendcommon.h"
#include "routerlist.h"
#include "routerparse.h"
34 static int relay_crypt(circuit_t *circ, cell_t *cell,
35 cell_direction_t cell_direction,
36 crypt_path_t **layer_hint, char *recognized);
37 static edge_connection_t *relay_lookup_conn(circuit_t *circ, cell_t *cell,
38 cell_direction_t cell_direction,
39 crypt_path_t *layer_hint);
41 static int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
42 edge_connection_t *conn,
43 crypt_path_t *layer_hint);
44 static void circuit_consider_sending_sendme(circuit_t *circ,
45 crypt_path_t *layer_hint);
46 static void circuit_resume_edge_reading(circuit_t *circ,
47 crypt_path_t *layer_hint);
48 static int circuit_resume_edge_reading_helper(edge_connection_t *conn,
49 circuit_t *circ,
50 crypt_path_t *layer_hint);
51 static int circuit_consider_stop_edge_reading(circuit_t *circ,
52 crypt_path_t *layer_hint);
53 static int circuit_queue_streams_are_blocked(circuit_t *circ);
/** Cache the current hi-res time; the cache gets reset when libevent
 * calls us. */
static struct timeval cached_time_hires = {0, 0};

/** Stop reading on edge connections when we have this many cells
 * waiting on the appropriate queue. */
#define CELL_QUEUE_HIGHWATER_SIZE 256
/** Start reading from edge connections again when we get down to this many
 * cells. */
#define CELL_QUEUE_LOWWATER_SIZE 64
67 static void
68 tor_gettimeofday_cached(struct timeval *tv)
70 if (cached_time_hires.tv_sec == 0) {
71 tor_gettimeofday(&cached_time_hires);
73 *tv = cached_time_hires;
76 void
77 tor_gettimeofday_cache_clear(void)
79 cached_time_hires.tv_sec = 0;
/** Stats: how many relay cells have originated at this hop, or have
 * been relayed onward (not recognized at this hop)? */
uint64_t stats_n_relay_cells_relayed = 0;
/** Stats: how many relay cells have been delivered to streams at this
 * hop? */
uint64_t stats_n_relay_cells_delivered = 0;
91 /** Update digest from the payload of cell. Assign integrity part to
92 * cell.
94 static void
95 relay_set_digest(crypto_digest_env_t *digest, cell_t *cell)
97 char integrity[4];
98 relay_header_t rh;
100 crypto_digest_add_bytes(digest, cell->payload, CELL_PAYLOAD_SIZE);
101 crypto_digest_get_digest(digest, integrity, 4);
102 // log_fn(LOG_DEBUG,"Putting digest of %u %u %u %u into relay cell.",
103 // integrity[0], integrity[1], integrity[2], integrity[3]);
104 relay_header_unpack(&rh, cell->payload);
105 memcpy(rh.integrity, integrity, 4);
106 relay_header_pack(cell->payload, &rh);
109 /** Does the digest for this circuit indicate that this cell is for us?
111 * Update digest from the payload of cell (with the integrity part set
112 * to 0). If the integrity part is valid, return 1, else restore digest
113 * and cell to their original state and return 0.
115 static int
116 relay_digest_matches(crypto_digest_env_t *digest, cell_t *cell)
118 char received_integrity[4], calculated_integrity[4];
119 relay_header_t rh;
120 crypto_digest_env_t *backup_digest=NULL;
122 backup_digest = crypto_digest_dup(digest);
124 relay_header_unpack(&rh, cell->payload);
125 memcpy(received_integrity, rh.integrity, 4);
126 memset(rh.integrity, 0, 4);
127 relay_header_pack(cell->payload, &rh);
129 // log_fn(LOG_DEBUG,"Reading digest of %u %u %u %u from relay cell.",
130 // received_integrity[0], received_integrity[1],
131 // received_integrity[2], received_integrity[3]);
133 crypto_digest_add_bytes(digest, cell->payload, CELL_PAYLOAD_SIZE);
134 crypto_digest_get_digest(digest, calculated_integrity, 4);
136 if (memcmp(received_integrity, calculated_integrity, 4)) {
137 // log_fn(LOG_INFO,"Recognized=0 but bad digest. Not recognizing.");
138 // (%d vs %d).", received_integrity, calculated_integrity);
139 /* restore digest to its old form */
140 crypto_digest_assign(digest, backup_digest);
141 /* restore the relay header */
142 memcpy(rh.integrity, received_integrity, 4);
143 relay_header_pack(cell->payload, &rh);
144 crypto_free_digest_env(backup_digest);
145 return 0;
147 crypto_free_digest_env(backup_digest);
148 return 1;
151 /** Apply <b>cipher</b> to CELL_PAYLOAD_SIZE bytes of <b>in</b>
152 * (in place).
154 * If <b>encrypt_mode</b> is 1 then encrypt, else decrypt.
156 * Return -1 if the crypto fails, else return 0.
158 static int
159 relay_crypt_one_payload(crypto_cipher_env_t *cipher, char *in,
160 int encrypt_mode)
162 int r;
163 (void)encrypt_mode;
164 r = crypto_cipher_crypt_inplace(cipher, in, CELL_PAYLOAD_SIZE);
166 if (r) {
167 log_warn(LD_BUG,"Error during relay encryption");
168 return -1;
170 return 0;
173 /** Receive a relay cell:
174 * - Crypt it (encrypt if headed toward the origin or if we <b>are</b> the
175 * origin; decrypt if we're headed toward the exit).
176 * - Check if recognized (if exitward).
177 * - If recognized and the digest checks out, then find if there's a stream
178 * that the cell is intended for, and deliver it to the right
179 * connection_edge.
180 * - If not recognized, then we need to relay it: append it to the appropriate
181 * cell_queue on <b>circ</b>.
183 * Return -<b>reason</b> on failure.
186 circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
187 cell_direction_t cell_direction)
189 or_connection_t *or_conn=NULL;
190 crypt_path_t *layer_hint=NULL;
191 char recognized=0;
192 int reason;
194 tor_assert(cell);
195 tor_assert(circ);
196 tor_assert(cell_direction == CELL_DIRECTION_OUT ||
197 cell_direction == CELL_DIRECTION_IN);
198 if (circ->marked_for_close)
199 return 0;
201 if (relay_crypt(circ, cell, cell_direction, &layer_hint, &recognized) < 0) {
202 log_warn(LD_BUG,"relay crypt failed. Dropping connection.");
203 return -END_CIRC_REASON_INTERNAL;
206 if (recognized) {
207 edge_connection_t *conn = relay_lookup_conn(circ, cell, cell_direction,
208 layer_hint);
209 if (cell_direction == CELL_DIRECTION_OUT) {
210 ++stats_n_relay_cells_delivered;
211 log_debug(LD_OR,"Sending away from origin.");
212 if ((reason=connection_edge_process_relay_cell(cell, circ, conn, NULL))
213 < 0) {
214 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
215 "connection_edge_process_relay_cell (away from origin) "
216 "failed.");
217 return reason;
220 if (cell_direction == CELL_DIRECTION_IN) {
221 ++stats_n_relay_cells_delivered;
222 log_debug(LD_OR,"Sending to origin.");
223 if ((reason = connection_edge_process_relay_cell(cell, circ, conn,
224 layer_hint)) < 0) {
225 log_warn(LD_OR,
226 "connection_edge_process_relay_cell (at origin) failed.");
227 return reason;
230 return 0;
233 /* not recognized. pass it on. */
234 if (cell_direction == CELL_DIRECTION_OUT) {
235 cell->circ_id = circ->n_circ_id; /* switch it */
236 or_conn = circ->n_conn;
237 } else if (! CIRCUIT_IS_ORIGIN(circ)) {
238 cell->circ_id = TO_OR_CIRCUIT(circ)->p_circ_id; /* switch it */
239 or_conn = TO_OR_CIRCUIT(circ)->p_conn;
240 } else {
241 log_fn(LOG_PROTOCOL_WARN, LD_OR,
242 "Dropping unrecognized inbound cell on origin circuit.");
243 return 0;
246 if (!or_conn) {
247 // XXXX Can this splice stuff be done more cleanly?
248 if (! CIRCUIT_IS_ORIGIN(circ) &&
249 TO_OR_CIRCUIT(circ)->rend_splice &&
250 cell_direction == CELL_DIRECTION_OUT) {
251 or_circuit_t *splice = TO_OR_CIRCUIT(circ)->rend_splice;
252 tor_assert(circ->purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED);
253 tor_assert(splice->_base.purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED);
254 cell->circ_id = splice->p_circ_id;
255 cell->command = CELL_RELAY; /* can't be relay_early anyway */
256 if ((reason = circuit_receive_relay_cell(cell, TO_CIRCUIT(splice),
257 CELL_DIRECTION_IN)) < 0) {
258 log_warn(LD_REND, "Error relaying cell across rendezvous; closing "
259 "circuits");
260 /* XXXX Do this here, or just return -1? */
261 circuit_mark_for_close(circ, -reason);
262 return reason;
264 return 0;
266 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
267 "Didn't recognize cell, but circ stops here! Closing circ.");
268 return -END_CIRC_REASON_TORPROTOCOL;
271 log_debug(LD_OR,"Passing on unrecognized cell.");
273 ++stats_n_relay_cells_relayed; /* XXXX no longer quite accurate {cells}
274 * we might kill the circ before we relay
275 * the cells. */
277 append_cell_to_circuit_queue(circ, or_conn, cell, cell_direction, 0);
278 return 0;
281 /** Do the appropriate en/decryptions for <b>cell</b> arriving on
282 * <b>circ</b> in direction <b>cell_direction</b>.
284 * If cell_direction == CELL_DIRECTION_IN:
285 * - If we're at the origin (we're the OP), for hops 1..N,
286 * decrypt cell. If recognized, stop.
287 * - Else (we're not the OP), encrypt one hop. Cell is not recognized.
289 * If cell_direction == CELL_DIRECTION_OUT:
290 * - decrypt one hop. Check if recognized.
292 * If cell is recognized, set *recognized to 1, and set
293 * *layer_hint to the hop that recognized it.
295 * Return -1 to indicate that we should mark the circuit for close,
296 * else return 0.
298 static int
299 relay_crypt(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction,
300 crypt_path_t **layer_hint, char *recognized)
302 relay_header_t rh;
304 tor_assert(circ);
305 tor_assert(cell);
306 tor_assert(recognized);
307 tor_assert(cell_direction == CELL_DIRECTION_IN ||
308 cell_direction == CELL_DIRECTION_OUT);
310 if (cell_direction == CELL_DIRECTION_IN) {
311 if (CIRCUIT_IS_ORIGIN(circ)) { /* We're at the beginning of the circuit.
312 * We'll want to do layered decrypts. */
313 crypt_path_t *thishop, *cpath = TO_ORIGIN_CIRCUIT(circ)->cpath;
314 thishop = cpath;
315 if (thishop->state != CPATH_STATE_OPEN) {
316 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
317 "Relay cell before first created cell? Closing.");
318 return -1;
320 do { /* Remember: cpath is in forward order, that is, first hop first. */
321 tor_assert(thishop);
323 if (relay_crypt_one_payload(thishop->b_crypto, cell->payload, 0) < 0)
324 return -1;
326 relay_header_unpack(&rh, cell->payload);
327 if (rh.recognized == 0) {
328 /* it's possibly recognized. have to check digest to be sure. */
329 if (relay_digest_matches(thishop->b_digest, cell)) {
330 *recognized = 1;
331 *layer_hint = thishop;
332 return 0;
336 thishop = thishop->next;
337 } while (thishop != cpath && thishop->state == CPATH_STATE_OPEN);
338 log_fn(LOG_PROTOCOL_WARN, LD_OR,
339 "Incoming cell at client not recognized. Closing.");
340 return -1;
341 } else { /* we're in the middle. Just one crypt. */
342 if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->p_crypto,
343 cell->payload, 1) < 0)
344 return -1;
345 // log_fn(LOG_DEBUG,"Skipping recognized check, because we're not "
346 // "the client.");
348 } else /* cell_direction == CELL_DIRECTION_OUT */ {
349 /* we're in the middle. Just one crypt. */
351 if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->n_crypto,
352 cell->payload, 0) < 0)
353 return -1;
355 relay_header_unpack(&rh, cell->payload);
356 if (rh.recognized == 0) {
357 /* it's possibly recognized. have to check digest to be sure. */
358 if (relay_digest_matches(TO_OR_CIRCUIT(circ)->n_digest, cell)) {
359 *recognized = 1;
360 return 0;
364 return 0;
367 /** Package a relay cell from an edge:
368 * - Encrypt it to the right layer
369 * - Append it to the appropriate cell_queue on <b>circ</b>.
371 static int
372 circuit_package_relay_cell(cell_t *cell, circuit_t *circ,
373 cell_direction_t cell_direction,
374 crypt_path_t *layer_hint, streamid_t on_stream)
376 or_connection_t *conn; /* where to send the cell */
378 if (cell_direction == CELL_DIRECTION_OUT) {
379 crypt_path_t *thishop; /* counter for repeated crypts */
380 conn = circ->n_conn;
381 if (!CIRCUIT_IS_ORIGIN(circ) || !conn) {
382 log_warn(LD_BUG,"outgoing relay cell has n_conn==NULL. Dropping.");
383 return 0; /* just drop it */
386 relay_set_digest(layer_hint->f_digest, cell);
388 thishop = layer_hint;
389 /* moving from farthest to nearest hop */
390 do {
391 tor_assert(thishop);
392 /* XXXX RD This is a bug, right? */
393 log_debug(LD_OR,"crypting a layer of the relay cell.");
394 if (relay_crypt_one_payload(thishop->f_crypto, cell->payload, 1) < 0) {
395 return -1;
398 thishop = thishop->prev;
399 } while (thishop != TO_ORIGIN_CIRCUIT(circ)->cpath->prev);
401 } else { /* incoming cell */
402 or_circuit_t *or_circ;
403 if (CIRCUIT_IS_ORIGIN(circ)) {
404 /* We should never package an _incoming_ cell from the circuit
405 * origin; that means we messed up somewhere. */
406 log_warn(LD_BUG,"incoming relay cell at origin circuit. Dropping.");
407 assert_circuit_ok(circ);
408 return 0; /* just drop it */
410 or_circ = TO_OR_CIRCUIT(circ);
411 conn = or_circ->p_conn;
412 relay_set_digest(or_circ->p_digest, cell);
413 if (relay_crypt_one_payload(or_circ->p_crypto, cell->payload, 1) < 0)
414 return -1;
416 ++stats_n_relay_cells_relayed;
418 append_cell_to_circuit_queue(circ, conn, cell, cell_direction, on_stream);
419 return 0;
422 /** If cell's stream_id matches the stream_id of any conn that's
423 * attached to circ, return that conn, else return NULL.
425 static edge_connection_t *
426 relay_lookup_conn(circuit_t *circ, cell_t *cell,
427 cell_direction_t cell_direction, crypt_path_t *layer_hint)
429 edge_connection_t *tmpconn;
430 relay_header_t rh;
432 relay_header_unpack(&rh, cell->payload);
434 if (!rh.stream_id)
435 return NULL;
437 /* IN or OUT cells could have come from either direction, now
438 * that we allow rendezvous *to* an OP.
441 if (CIRCUIT_IS_ORIGIN(circ)) {
442 for (tmpconn = TO_ORIGIN_CIRCUIT(circ)->p_streams; tmpconn;
443 tmpconn=tmpconn->next_stream) {
444 if (rh.stream_id == tmpconn->stream_id &&
445 !tmpconn->_base.marked_for_close &&
446 tmpconn->cpath_layer == layer_hint) {
447 log_debug(LD_APP,"found conn for stream %d.", rh.stream_id);
448 return tmpconn;
451 } else {
452 for (tmpconn = TO_OR_CIRCUIT(circ)->n_streams; tmpconn;
453 tmpconn=tmpconn->next_stream) {
454 if (rh.stream_id == tmpconn->stream_id &&
455 !tmpconn->_base.marked_for_close) {
456 log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id);
457 if (cell_direction == CELL_DIRECTION_OUT ||
458 connection_edge_is_rendezvous_stream(tmpconn))
459 return tmpconn;
462 for (tmpconn = TO_OR_CIRCUIT(circ)->resolving_streams; tmpconn;
463 tmpconn=tmpconn->next_stream) {
464 if (rh.stream_id == tmpconn->stream_id &&
465 !tmpconn->_base.marked_for_close) {
466 log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id);
467 return tmpconn;
471 return NULL; /* probably a begin relay cell */
474 /** Pack the relay_header_t host-order structure <b>src</b> into
475 * network-order in the buffer <b>dest</b>. See tor-spec.txt for details
476 * about the wire format.
478 void
479 relay_header_pack(char *dest, const relay_header_t *src)
481 *(uint8_t*)(dest) = src->command;
483 set_uint16(dest+1, htons(src->recognized));
484 set_uint16(dest+3, htons(src->stream_id));
485 memcpy(dest+5, src->integrity, 4);
486 set_uint16(dest+9, htons(src->length));
489 /** Unpack the network-order buffer <b>src</b> into a host-order
490 * relay_header_t structure <b>dest</b>.
492 void
493 relay_header_unpack(relay_header_t *dest, const char *src)
495 dest->command = *(uint8_t*)(src);
497 dest->recognized = ntohs(get_uint16(src+1));
498 dest->stream_id = ntohs(get_uint16(src+3));
499 memcpy(dest->integrity, src+5, 4);
500 dest->length = ntohs(get_uint16(src+9));
503 /** Convert the relay <b>command</b> into a human-readable string. */
504 static const char *
505 relay_command_to_string(uint8_t command)
507 switch (command) {
508 case RELAY_COMMAND_BEGIN: return "BEGIN";
509 case RELAY_COMMAND_DATA: return "DATA";
510 case RELAY_COMMAND_END: return "END";
511 case RELAY_COMMAND_CONNECTED: return "CONNECTED";
512 case RELAY_COMMAND_SENDME: return "SENDME";
513 case RELAY_COMMAND_EXTEND: return "EXTEND";
514 case RELAY_COMMAND_EXTENDED: return "EXTENDED";
515 case RELAY_COMMAND_TRUNCATE: return "TRUNCATE";
516 case RELAY_COMMAND_TRUNCATED: return "TRUNCATED";
517 case RELAY_COMMAND_DROP: return "DROP";
518 case RELAY_COMMAND_RESOLVE: return "RESOLVE";
519 case RELAY_COMMAND_RESOLVED: return "RESOLVED";
520 case RELAY_COMMAND_BEGIN_DIR: return "BEGIN_DIR";
521 case RELAY_COMMAND_ESTABLISH_INTRO: return "ESTABLISH_INTRO";
522 case RELAY_COMMAND_ESTABLISH_RENDEZVOUS: return "ESTABLISH_RENDEZVOUS";
523 case RELAY_COMMAND_INTRODUCE1: return "INTRODUCE1";
524 case RELAY_COMMAND_INTRODUCE2: return "INTRODUCE2";
525 case RELAY_COMMAND_RENDEZVOUS1: return "RENDEZVOUS1";
526 case RELAY_COMMAND_RENDEZVOUS2: return "RENDEZVOUS2";
527 case RELAY_COMMAND_INTRO_ESTABLISHED: return "INTRO_ESTABLISHED";
528 case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
529 return "RENDEZVOUS_ESTABLISHED";
530 case RELAY_COMMAND_INTRODUCE_ACK: return "INTRODUCE_ACK";
531 default: return "(unrecognized)";
535 /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and send
536 * it onto the open circuit <b>circ</b>. <b>stream_id</b> is the ID on
537 * <b>circ</b> for the stream that's sending the relay cell, or 0 if it's a
538 * control cell. <b>cpath_layer</b> is NULL for OR->OP cells, or the
539 * destination hop for OP->OR cells.
541 * If you can't send the cell, mark the circuit for close and return -1. Else
542 * return 0.
545 relay_send_command_from_edge(streamid_t stream_id, circuit_t *circ,
546 uint8_t relay_command, const char *payload,
547 size_t payload_len, crypt_path_t *cpath_layer)
549 cell_t cell;
550 relay_header_t rh;
551 cell_direction_t cell_direction;
552 /* XXXX NM Split this function into a separate versions per circuit type? */
554 tor_assert(circ);
555 tor_assert(payload_len <= RELAY_PAYLOAD_SIZE);
557 memset(&cell, 0, sizeof(cell_t));
558 cell.command = CELL_RELAY;
559 if (cpath_layer) {
560 cell.circ_id = circ->n_circ_id;
561 cell_direction = CELL_DIRECTION_OUT;
562 } else if (! CIRCUIT_IS_ORIGIN(circ)) {
563 cell.circ_id = TO_OR_CIRCUIT(circ)->p_circ_id;
564 cell_direction = CELL_DIRECTION_IN;
565 } else {
566 return -1;
569 memset(&rh, 0, sizeof(rh));
570 rh.command = relay_command;
571 rh.stream_id = stream_id;
572 rh.length = payload_len;
573 relay_header_pack(cell.payload, &rh);
574 if (payload_len)
575 memcpy(cell.payload+RELAY_HEADER_SIZE, payload, payload_len);
577 log_debug(LD_OR,"delivering %d cell %s.", relay_command,
578 cell_direction == CELL_DIRECTION_OUT ? "forward" : "backward");
580 /* If we are sending an END cell and this circuit is used for a tunneled
581 * directory request, advance its state. */
582 if (relay_command == RELAY_COMMAND_END && circ->dirreq_id)
583 geoip_change_dirreq_state(circ->dirreq_id, DIRREQ_TUNNELED,
584 DIRREQ_END_CELL_SENT);
586 if (cell_direction == CELL_DIRECTION_OUT && circ->n_conn) {
587 /* if we're using relaybandwidthrate, this conn wants priority */
588 circ->n_conn->client_used = approx_time();
591 if (cell_direction == CELL_DIRECTION_OUT) {
592 origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ);
593 if (origin_circ->remaining_relay_early_cells > 0 &&
594 (relay_command == RELAY_COMMAND_EXTEND ||
595 cpath_layer != origin_circ->cpath)) {
596 /* If we've got any relay_early cells left and (we're sending
597 * an extend cell or we're not talking to the first hop), use
598 * one of them. Don't worry about the conn protocol version:
599 * append_cell_to_circuit_queue will fix it up. */
600 cell.command = CELL_RELAY_EARLY;
601 --origin_circ->remaining_relay_early_cells;
602 log_debug(LD_OR, "Sending a RELAY_EARLY cell; %d remaining.",
603 (int)origin_circ->remaining_relay_early_cells);
604 /* Memorize the command that is sent as RELAY_EARLY cell; helps debug
605 * task 878. */
606 origin_circ->relay_early_commands[
607 origin_circ->relay_early_cells_sent++] = relay_command;
608 } else if (relay_command == RELAY_COMMAND_EXTEND) {
609 /* If no RELAY_EARLY cells can be sent over this circuit, log which
610 * commands have been sent as RELAY_EARLY cells before; helps debug
611 * task 878. */
612 smartlist_t *commands_list = smartlist_create();
613 int i = 0;
614 char *commands = NULL;
615 for (; i < origin_circ->relay_early_cells_sent; i++)
616 smartlist_add(commands_list, (char *)
617 relay_command_to_string(origin_circ->relay_early_commands[i]));
618 commands = smartlist_join_strings(commands_list, ",", 0, NULL);
619 log_warn(LD_BUG, "Uh-oh. We're sending a RELAY_COMMAND_EXTEND cell, "
620 "but we have run out of RELAY_EARLY cells on that circuit. "
621 "Commands sent before: %s", commands);
622 tor_free(commands);
623 smartlist_free(commands_list);
627 if (circuit_package_relay_cell(&cell, circ, cell_direction, cpath_layer,
628 stream_id) < 0) {
629 log_warn(LD_BUG,"circuit_package_relay_cell failed. Closing.");
630 circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
631 return -1;
633 return 0;
636 /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and
637 * send it onto the open circuit <b>circ</b>. <b>fromconn</b> is the stream
638 * that's sending the relay cell, or NULL if it's a control cell.
639 * <b>cpath_layer</b> is NULL for OR->OP cells, or the destination hop
640 * for OP->OR cells.
642 * If you can't send the cell, mark the circuit for close and
643 * return -1. Else return 0.
646 connection_edge_send_command(edge_connection_t *fromconn,
647 uint8_t relay_command, const char *payload,
648 size_t payload_len)
650 /* XXXX NM Split this function into a separate versions per circuit type? */
651 circuit_t *circ;
652 tor_assert(fromconn);
653 circ = fromconn->on_circuit;
655 if (fromconn->_base.marked_for_close) {
656 log_warn(LD_BUG,
657 "called on conn that's already marked for close at %s:%d.",
658 fromconn->_base.marked_for_close_file,
659 fromconn->_base.marked_for_close);
660 return 0;
663 if (!circ) {
664 if (fromconn->_base.type == CONN_TYPE_AP) {
665 log_info(LD_APP,"no circ. Closing conn.");
666 connection_mark_unattached_ap(fromconn, END_STREAM_REASON_INTERNAL);
667 } else {
668 log_info(LD_EXIT,"no circ. Closing conn.");
669 fromconn->edge_has_sent_end = 1; /* no circ to send to */
670 fromconn->end_reason = END_STREAM_REASON_INTERNAL;
671 connection_mark_for_close(TO_CONN(fromconn));
673 return -1;
676 return relay_send_command_from_edge(fromconn->stream_id, circ,
677 relay_command, payload,
678 payload_len, fromconn->cpath_layer);
/** How many times will I retry a stream that fails due to DNS
 * resolve failure or misc error? */
#define MAX_RESOLVE_FAILURES 3
686 /** Return 1 if reason is something that you should retry if you
687 * get the end cell before you've connected; else return 0. */
688 static int
689 edge_reason_is_retriable(int reason)
691 return reason == END_STREAM_REASON_HIBERNATING ||
692 reason == END_STREAM_REASON_RESOURCELIMIT ||
693 reason == END_STREAM_REASON_EXITPOLICY ||
694 reason == END_STREAM_REASON_RESOLVEFAILED ||
695 reason == END_STREAM_REASON_MISC ||
696 reason == END_STREAM_REASON_NOROUTE;
699 /** Called when we receive an END cell on a stream that isn't open yet,
700 * from the client side.
701 * Arguments are as for connection_edge_process_relay_cell().
703 static int
704 connection_ap_process_end_not_open(
705 relay_header_t *rh, cell_t *cell, origin_circuit_t *circ,
706 edge_connection_t *conn, crypt_path_t *layer_hint)
708 struct in_addr in;
709 routerinfo_t *exitrouter;
710 int reason = *(cell->payload+RELAY_HEADER_SIZE);
711 int control_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
712 (void) layer_hint; /* unused */
714 if (rh->length > 0 && edge_reason_is_retriable(reason) &&
715 !connection_edge_is_rendezvous_stream(conn) /* avoid retry if rend */
717 log_info(LD_APP,"Address '%s' refused due to '%s'. Considering retrying.",
718 safe_str(conn->socks_request->address),
719 stream_end_reason_to_string(reason));
720 exitrouter =
721 router_get_by_digest(circ->build_state->chosen_exit->identity_digest);
722 switch (reason) {
723 case END_STREAM_REASON_EXITPOLICY:
724 if (rh->length >= 5) {
725 uint32_t addr = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+1));
726 int ttl;
727 if (!addr) {
728 log_info(LD_APP,"Address '%s' resolved to 0.0.0.0. Closing,",
729 safe_str(conn->socks_request->address));
730 connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
731 return 0;
733 if (rh->length >= 9)
734 ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+5));
735 else
736 ttl = -1;
738 if (get_options()->ClientDNSRejectInternalAddresses &&
739 is_internal_IP(addr, 0)) {
740 log_info(LD_APP,"Address '%s' resolved to internal. Closing,",
741 safe_str(conn->socks_request->address));
742 connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
743 return 0;
745 client_dns_set_addressmap(conn->socks_request->address, addr,
746 conn->chosen_exit_name, ttl);
748 /* check if he *ought* to have allowed it */
749 if (exitrouter &&
750 (rh->length < 5 ||
751 (tor_inet_aton(conn->socks_request->address, &in) &&
752 !conn->chosen_exit_name))) {
753 log_info(LD_APP,
754 "Exitrouter '%s' seems to be more restrictive than its exit "
755 "policy. Not using this router as exit for now.",
756 exitrouter->nickname);
757 policies_set_router_exitpolicy_to_reject_all(exitrouter);
759 /* rewrite it to an IP if we learned one. */
760 if (addressmap_rewrite(conn->socks_request->address,
761 sizeof(conn->socks_request->address),
762 NULL)) {
763 control_event_stream_status(conn, STREAM_EVENT_REMAP, 0);
765 if (conn->chosen_exit_optional ||
766 conn->chosen_exit_retries) {
767 /* stop wanting a specific exit */
768 conn->chosen_exit_optional = 0;
769 /* A non-zero chosen_exit_retries can happen if we set a
770 * TrackHostExits for this address under a port that the exit
771 * relay allows, but then try the same address with a different
772 * port that it doesn't allow to exit. We shouldn't unregister
773 * the mapping, since it is probably still wanted on the
774 * original port. But now we give away to the exit relay that
775 * we probably have a TrackHostExits on it. So be it. */
776 conn->chosen_exit_retries = 0;
777 tor_free(conn->chosen_exit_name); /* clears it */
779 if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
780 return 0;
781 /* else, conn will get closed below */
782 break;
783 case END_STREAM_REASON_CONNECTREFUSED:
784 if (!conn->chosen_exit_optional)
785 break; /* break means it'll close, below */
786 /* Else fall through: expire this circuit, clear the
787 * chosen_exit_name field, and try again. */
788 case END_STREAM_REASON_RESOLVEFAILED:
789 case END_STREAM_REASON_TIMEOUT:
790 case END_STREAM_REASON_MISC:
791 case END_STREAM_REASON_NOROUTE:
792 if (client_dns_incr_failures(conn->socks_request->address)
793 < MAX_RESOLVE_FAILURES) {
794 /* We haven't retried too many times; reattach the connection. */
795 circuit_log_path(LOG_INFO,LD_APP,circ);
796 tor_assert(circ->_base.timestamp_dirty);
797 circ->_base.timestamp_dirty -= get_options()->MaxCircuitDirtiness;
799 if (conn->chosen_exit_optional) {
800 /* stop wanting a specific exit */
801 conn->chosen_exit_optional = 0;
802 tor_free(conn->chosen_exit_name); /* clears it */
804 if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
805 return 0;
806 /* else, conn will get closed below */
807 } else {
808 log_notice(LD_APP,
809 "Have tried resolving or connecting to address '%s' "
810 "at %d different places. Giving up.",
811 safe_str(conn->socks_request->address),
812 MAX_RESOLVE_FAILURES);
813 /* clear the failures, so it will have a full try next time */
814 client_dns_clear_failures(conn->socks_request->address);
816 break;
817 case END_STREAM_REASON_HIBERNATING:
818 case END_STREAM_REASON_RESOURCELIMIT:
819 if (exitrouter) {
820 policies_set_router_exitpolicy_to_reject_all(exitrouter);
822 if (conn->chosen_exit_optional) {
823 /* stop wanting a specific exit */
824 conn->chosen_exit_optional = 0;
825 tor_free(conn->chosen_exit_name); /* clears it */
827 if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
828 return 0;
829 /* else, will close below */
830 break;
831 } /* end switch */
832 log_info(LD_APP,"Giving up on retrying; conn can't be handled.");
835 log_info(LD_APP,
836 "Edge got end (%s) before we're connected. Marking for close.",
837 stream_end_reason_to_string(rh->length > 0 ? reason : -1));
838 circuit_log_path(LOG_INFO,LD_APP,circ);
839 /* need to test because of detach_retriable */
840 if (!conn->_base.marked_for_close)
841 connection_mark_unattached_ap(conn, control_reason);
842 return 0;
845 /** Helper: change the socks_request-&gt;address field on conn to the
846 * dotted-quad representation of <b>new_addr</b> (given in host order),
847 * and send an appropriate REMAP event. */
848 static void
849 remap_event_helper(edge_connection_t *conn, uint32_t new_addr)
851 struct in_addr in;
853 in.s_addr = htonl(new_addr);
854 tor_inet_ntoa(&in, conn->socks_request->address,
855 sizeof(conn->socks_request->address));
856 control_event_stream_status(conn, STREAM_EVENT_REMAP,
857 REMAP_STREAM_SOURCE_EXIT);
860 /** An incoming relay cell has arrived from circuit <b>circ</b> to
861 * stream <b>conn</b>.
863 * The arguments here are the same as in
864 * connection_edge_process_relay_cell() below; this function is called
865 * from there when <b>conn</b> is defined and not in an open state.
static int
connection_edge_process_relay_cell_not_open(
    relay_header_t *rh, cell_t *cell, circuit_t *circ,
    edge_connection_t *conn, crypt_path_t *layer_hint)
{
  if (rh->command == RELAY_COMMAND_END) {
    /* The stream got an 'end' before it finished opening. */
    if (CIRCUIT_IS_ORIGIN(circ) && conn->_base.type == CONN_TYPE_AP) {
      /* At the client, let the AP-specific handler decide what to do with
       * the not-yet-open stream (it may retry elsewhere). */
      return connection_ap_process_end_not_open(rh, cell,
                                                TO_ORIGIN_CIRCUIT(circ), conn,
                                                layer_hint);
    } else {
      /* we just got an 'end', don't need to send one */
      conn->edge_has_sent_end = 1;
      /* First payload byte carries the END reason; flag it as coming from
       * the remote side. */
      conn->end_reason = *(cell->payload+RELAY_HEADER_SIZE) |
                         END_STREAM_REASON_FLAG_REMOTE;
      connection_mark_for_close(TO_CONN(conn));
      return 0;
    }
  }

  if (conn->_base.type == CONN_TYPE_AP &&
      rh->command == RELAY_COMMAND_CONNECTED) {
    /* The exit finished connecting on our behalf. */
    tor_assert(CIRCUIT_IS_ORIGIN(circ));
    if (conn->_base.state != AP_CONN_STATE_CONNECT_WAIT) {
      log_fn(LOG_PROTOCOL_WARN, LD_APP,
             "Got 'connected' while not in state connect_wait. Dropping.");
      return 0;
    }
    conn->_base.state = AP_CONN_STATE_OPEN;
    log_info(LD_APP,"'connected' received after %d seconds.",
             (int)(time(NULL) - conn->_base.timestamp_lastread));
    if (rh->length >= 4) {
      /* Payload bytes 0..3: the IPv4 address the exit connected to, in
       * network order. */
      uint32_t addr = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE));
      int ttl;
      if (!addr || (get_options()->ClientDNSRejectInternalAddresses &&
                    is_internal_IP(addr, 0))) {
        /* A zero or internal address is never a legitimate answer from an
         * exit; treat it as a protocol violation and close the stream. */
        char buf[INET_NTOA_BUF_LEN];
        struct in_addr a;
        a.s_addr = htonl(addr);
        tor_inet_ntoa(&a, buf, sizeof(buf));
        log_info(LD_APP,
                 "...but it claims the IP address was %s. Closing.", buf);
        connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
        connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
        return 0;
      }
      /* Payload bytes 4..7, if present, hold a TTL for the address
       * mapping; -1 means "none provided". */
      if (rh->length >= 8)
        ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+4));
      else
        ttl = -1;
      client_dns_set_addressmap(conn->socks_request->address, addr,
                                conn->chosen_exit_name, ttl);

      remap_event_helper(conn, addr);
    }
    circuit_log_path(LOG_INFO,LD_APP,TO_ORIGIN_CIRCUIT(circ));
    /* don't send a socks reply to transparent conns */
    if (!conn->socks_request->has_finished)
      connection_ap_handshake_socks_reply(conn, NULL, 0, 0);

    /* Was it a linked dir conn? If so, a dir request just started to
     * fetch something; this could be a bootstrap status milestone. */
    log_debug(LD_APP, "considering");
    if (TO_CONN(conn)->linked_conn &&
        TO_CONN(conn)->linked_conn->type == CONN_TYPE_DIR) {
      connection_t *dirconn = TO_CONN(conn)->linked_conn;
      log_debug(LD_APP, "it is! %d", dirconn->purpose);
      switch (dirconn->purpose) {
        case DIR_PURPOSE_FETCH_CERTIFICATE:
          if (consensus_is_waiting_for_certs())
            control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_KEYS, 0);
          break;
        case DIR_PURPOSE_FETCH_CONSENSUS:
          control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_STATUS, 0);
          break;
        case DIR_PURPOSE_FETCH_SERVERDESC:
          control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS,
                                  count_loading_descriptors_progress());
          break;
      }
    }

    /* handle anything that might have queued */
    if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
      /* (We already sent an end cell if possible) */
      connection_mark_for_close(TO_CONN(conn));
      return 0;
    }
    return 0;
  }
  if (conn->_base.type == CONN_TYPE_AP &&
      rh->command == RELAY_COMMAND_RESOLVED) {
    /* The exit answered one of our RESOLVE requests. */
    int ttl;
    int answer_len;
    uint8_t answer_type;
    if (conn->_base.state != AP_CONN_STATE_RESOLVE_WAIT) {
      log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a 'resolved' cell while "
             "not in state resolve_wait. Dropping.");
      return 0;
    }
    tor_assert(SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command));
    /* Payload layout: [type:1][len:1][answer:len] optionally followed by a
     * 4-byte TTL. */
    answer_len = cell->payload[RELAY_HEADER_SIZE+1];
    if (rh->length < 2 || answer_len+2>rh->length) {
      log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
             "Dropping malformed 'resolved' cell");
      connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
      return 0;
    }
    answer_type = cell->payload[RELAY_HEADER_SIZE];
    if (rh->length >= answer_len+6)
      ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+
                                  2+answer_len));
    else
      ttl = -1;
    if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) {
      uint32_t addr = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+2));
      if (get_options()->ClientDNSRejectInternalAddresses &&
          is_internal_IP(addr, 0)) {
        /* Don't accept a resolve answer pointing at an internal address;
         * report a transient error to the client instead. */
        char buf[INET_NTOA_BUF_LEN];
        struct in_addr a;
        a.s_addr = htonl(addr);
        tor_inet_ntoa(&a, buf, sizeof(buf));
        log_info(LD_APP,"Got a resolve with answer %s. Rejecting.", buf);
        connection_ap_handshake_socks_resolved(conn,
                                               RESOLVED_TYPE_ERROR_TRANSIENT,
                                               0, NULL, 0, TIME_MAX);
        connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
        return 0;
      }
    }
    connection_ap_handshake_socks_resolved(conn,
                   answer_type,
                   cell->payload[RELAY_HEADER_SIZE+1], /*answer_len*/
                   cell->payload+RELAY_HEADER_SIZE+2, /*answer*/
                   ttl,
                   -1);
    if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) {
      uint32_t addr = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+2));
      remap_event_helper(conn, addr);
    }
    /* The stream existed only to carry this resolve; we're done with it. */
    connection_mark_unattached_ap(conn,
                              END_STREAM_REASON_DONE |
                              END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
    return 0;
  }

  log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
         "Got an unexpected relay command %d, in state %d (%s). Dropping.",
         rh->command, conn->_base.state,
         conn_state_to_string(conn->_base.type, conn->_base.state));
  return 0; /* for forward compatibility, don't kill the circuit */
  //  connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
  //  connection_mark_for_close(conn);
  //  return -1;
}
1023 /** An incoming relay cell has arrived on circuit <b>circ</b>. If
1024 * <b>conn</b> is NULL this is a control cell, else <b>cell</b> is
1025 * destined for <b>conn</b>.
1027 * If <b>layer_hint</b> is defined, then we're the origin of the
1028 * circuit, and it specifies the hop that packaged <b>cell</b>.
1030 * Return -reason if you want to warn and tear down the circuit, else 0.
1032 static int
1033 connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
1034 edge_connection_t *conn,
1035 crypt_path_t *layer_hint)
1037 static int num_seen=0;
1038 relay_header_t rh;
1039 unsigned domain = layer_hint?LD_APP:LD_EXIT;
1040 int reason;
1042 tor_assert(cell);
1043 tor_assert(circ);
1045 relay_header_unpack(&rh, cell->payload);
1046 // log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id);
1047 num_seen++;
1048 log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).",
1049 num_seen, rh.command, rh.stream_id);
1051 if (rh.length > RELAY_PAYLOAD_SIZE) {
1052 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
1053 "Relay cell length field too long. Closing circuit.");
1054 return - END_CIRC_REASON_TORPROTOCOL;
1057 /* either conn is NULL, in which case we've got a control cell, or else
1058 * conn points to the recognized stream. */
1060 if (conn && !connection_state_is_open(TO_CONN(conn)))
1061 return connection_edge_process_relay_cell_not_open(
1062 &rh, cell, circ, conn, layer_hint);
1064 switch (rh.command) {
1065 case RELAY_COMMAND_DROP:
1066 // log_info(domain,"Got a relay-level padding cell. Dropping.");
1067 return 0;
1068 case RELAY_COMMAND_BEGIN:
1069 case RELAY_COMMAND_BEGIN_DIR:
1070 if (layer_hint &&
1071 circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
1072 log_fn(LOG_PROTOCOL_WARN, LD_APP,
1073 "Relay begin request unsupported at AP. Dropping.");
1074 return 0;
1076 if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED &&
1077 layer_hint != TO_ORIGIN_CIRCUIT(circ)->cpath->prev) {
1078 log_fn(LOG_PROTOCOL_WARN, LD_APP,
1079 "Relay begin request to Hidden Service "
1080 "from intermediary node. Dropping.");
1081 return 0;
1083 if (conn) {
1084 log_fn(LOG_PROTOCOL_WARN, domain,
1085 "Begin cell for known stream. Dropping.");
1086 return 0;
1088 if (rh.command == RELAY_COMMAND_BEGIN_DIR) {
1089 /* Assign this circuit and its app-ward OR connection a unique ID,
1090 * so that we can measure download times. The local edge and dir
1091 * connection will be assigned the same ID when they are created
1092 * and linked. */
1093 static uint64_t next_id = 0;
1094 circ->dirreq_id = ++next_id;
1095 TO_CONN(TO_OR_CIRCUIT(circ)->p_conn)->dirreq_id = circ->dirreq_id;
1098 return connection_exit_begin_conn(cell, circ);
1099 case RELAY_COMMAND_DATA:
1100 ++stats_n_data_cells_received;
1101 if (( layer_hint && --layer_hint->deliver_window < 0) ||
1102 (!layer_hint && --circ->deliver_window < 0)) {
1103 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
1104 "(relay data) circ deliver_window below 0. Killing.");
1105 connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
1106 connection_mark_for_close(TO_CONN(conn));
1107 return -END_CIRC_REASON_TORPROTOCOL;
1109 log_debug(domain,"circ deliver_window now %d.", layer_hint ?
1110 layer_hint->deliver_window : circ->deliver_window);
1112 circuit_consider_sending_sendme(circ, layer_hint);
1114 if (!conn) {
1115 log_info(domain,"data cell dropped, unknown stream (streamid %d).",
1116 rh.stream_id);
1117 return 0;
1120 if (--conn->deliver_window < 0) { /* is it below 0 after decrement? */
1121 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
1122 "(relay data) conn deliver_window below 0. Killing.");
1123 return -END_CIRC_REASON_TORPROTOCOL;
1126 stats_n_data_bytes_received += rh.length;
1127 connection_write_to_buf(cell->payload + RELAY_HEADER_SIZE,
1128 rh.length, TO_CONN(conn));
1129 connection_edge_consider_sending_sendme(conn);
1130 return 0;
1131 case RELAY_COMMAND_END:
1132 reason = rh.length > 0 ?
1133 *(uint8_t *)(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC;
1134 if (!conn) {
1135 log_info(domain,"end cell (%s) dropped, unknown stream.",
1136 stream_end_reason_to_string(reason));
1137 return 0;
1139 /* XXX add to this log_fn the exit node's nickname? */
1140 log_info(domain,"%d: end cell (%s) for stream %d. Removing stream.",
1141 conn->_base.s,
1142 stream_end_reason_to_string(reason),
1143 conn->stream_id);
1144 if (conn->socks_request && !conn->socks_request->has_finished)
1145 log_warn(LD_BUG,
1146 "open stream hasn't sent socks answer yet? Closing.");
1147 /* We just *got* an end; no reason to send one. */
1148 conn->edge_has_sent_end = 1;
1149 if (!conn->end_reason)
1150 conn->end_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
1151 if (!conn->_base.marked_for_close) {
1152 /* only mark it if not already marked. it's possible to
1153 * get the 'end' right around when the client hangs up on us. */
1154 connection_mark_for_close(TO_CONN(conn));
1155 conn->_base.hold_open_until_flushed = 1;
1157 return 0;
1158 case RELAY_COMMAND_EXTEND:
1159 if (conn) {
1160 log_fn(LOG_PROTOCOL_WARN, domain,
1161 "'extend' cell received for non-zero stream. Dropping.");
1162 return 0;
1164 return circuit_extend(cell, circ);
1165 case RELAY_COMMAND_EXTENDED:
1166 if (!layer_hint) {
1167 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
1168 "'extended' unsupported at non-origin. Dropping.");
1169 return 0;
1171 log_debug(domain,"Got an extended cell! Yay.");
1172 if ((reason = circuit_finish_handshake(TO_ORIGIN_CIRCUIT(circ),
1173 CELL_CREATED,
1174 cell->payload+RELAY_HEADER_SIZE)) < 0) {
1175 log_warn(domain,"circuit_finish_handshake failed.");
1176 return reason;
1178 if ((reason=circuit_send_next_onion_skin(TO_ORIGIN_CIRCUIT(circ)))<0) {
1179 log_info(domain,"circuit_send_next_onion_skin() failed.");
1180 return reason;
1182 return 0;
1183 case RELAY_COMMAND_TRUNCATE:
1184 if (layer_hint) {
1185 log_fn(LOG_PROTOCOL_WARN, LD_APP,
1186 "'truncate' unsupported at origin. Dropping.");
1187 return 0;
1189 if (circ->n_conn) {
1190 uint8_t trunc_reason = *(uint8_t*)(cell->payload + RELAY_HEADER_SIZE);
1191 circuit_clear_cell_queue(circ, circ->n_conn);
1192 connection_or_send_destroy(circ->n_circ_id, circ->n_conn,
1193 trunc_reason);
1194 circuit_set_n_circid_orconn(circ, 0, NULL);
1196 log_debug(LD_EXIT, "Processed 'truncate', replying.");
1198 char payload[1];
1199 payload[0] = (char)END_CIRC_REASON_REQUESTED;
1200 relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED,
1201 payload, sizeof(payload), NULL);
1203 return 0;
1204 case RELAY_COMMAND_TRUNCATED:
1205 if (!layer_hint) {
1206 log_fn(LOG_PROTOCOL_WARN, LD_EXIT,
1207 "'truncated' unsupported at non-origin. Dropping.");
1208 return 0;
1210 circuit_truncated(TO_ORIGIN_CIRCUIT(circ), layer_hint);
1211 return 0;
1212 case RELAY_COMMAND_CONNECTED:
1213 if (conn) {
1214 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
1215 "'connected' unsupported while open. Closing circ.");
1216 return -END_CIRC_REASON_TORPROTOCOL;
1218 log_info(domain,
1219 "'connected' received, no conn attached anymore. Ignoring.");
1220 return 0;
1221 case RELAY_COMMAND_SENDME:
1222 if (!conn) {
1223 if (layer_hint) {
1224 layer_hint->package_window += CIRCWINDOW_INCREMENT;
1225 log_debug(LD_APP,"circ-level sendme at origin, packagewindow %d.",
1226 layer_hint->package_window);
1227 circuit_resume_edge_reading(circ, layer_hint);
1228 } else {
1229 circ->package_window += CIRCWINDOW_INCREMENT;
1230 log_debug(LD_APP,
1231 "circ-level sendme at non-origin, packagewindow %d.",
1232 circ->package_window);
1233 circuit_resume_edge_reading(circ, layer_hint);
1235 return 0;
1237 conn->package_window += STREAMWINDOW_INCREMENT;
1238 log_debug(domain,"stream-level sendme, packagewindow now %d.",
1239 conn->package_window);
1240 if (circuit_queue_streams_are_blocked(circ)) {
1241 /* Still waiting for queue to flush; don't touch conn */
1242 return 0;
1244 connection_start_reading(TO_CONN(conn));
1245 /* handle whatever might still be on the inbuf */
1246 if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
1247 /* (We already sent an end cell if possible) */
1248 connection_mark_for_close(TO_CONN(conn));
1249 return 0;
1251 return 0;
1252 case RELAY_COMMAND_RESOLVE:
1253 if (layer_hint) {
1254 log_fn(LOG_PROTOCOL_WARN, LD_APP,
1255 "resolve request unsupported at AP; dropping.");
1256 return 0;
1257 } else if (conn) {
1258 log_fn(LOG_PROTOCOL_WARN, domain,
1259 "resolve request for known stream; dropping.");
1260 return 0;
1261 } else if (circ->purpose != CIRCUIT_PURPOSE_OR) {
1262 log_fn(LOG_PROTOCOL_WARN, domain,
1263 "resolve request on circ with purpose %d; dropping",
1264 circ->purpose);
1265 return 0;
1267 connection_exit_begin_resolve(cell, TO_OR_CIRCUIT(circ));
1268 return 0;
1269 case RELAY_COMMAND_RESOLVED:
1270 if (conn) {
1271 log_fn(LOG_PROTOCOL_WARN, domain,
1272 "'resolved' unsupported while open. Closing circ.");
1273 return -END_CIRC_REASON_TORPROTOCOL;
1275 log_info(domain,
1276 "'resolved' received, no conn attached anymore. Ignoring.");
1277 return 0;
1278 case RELAY_COMMAND_ESTABLISH_INTRO:
1279 case RELAY_COMMAND_ESTABLISH_RENDEZVOUS:
1280 case RELAY_COMMAND_INTRODUCE1:
1281 case RELAY_COMMAND_INTRODUCE2:
1282 case RELAY_COMMAND_INTRODUCE_ACK:
1283 case RELAY_COMMAND_RENDEZVOUS1:
1284 case RELAY_COMMAND_RENDEZVOUS2:
1285 case RELAY_COMMAND_INTRO_ESTABLISHED:
1286 case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
1287 rend_process_relay_cell(circ, layer_hint,
1288 rh.command, rh.length,
1289 cell->payload+RELAY_HEADER_SIZE);
1290 return 0;
1292 log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
1293 "Received unknown relay command %d. Perhaps the other side is using "
1294 "a newer version of Tor? Dropping.",
1295 rh.command);
1296 return 0; /* for forward compatibility, don't kill the circuit */
/** How many relay_data cells have we built, ever? */
uint64_t stats_n_data_cells_packaged = 0;
/** How many bytes of data have we put in relay_data cells, ever? This
 * would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_packaged if every relay
 * cell we ever sent were completely full of data. */
uint64_t stats_n_data_bytes_packaged = 0;
/** How many relay_data cells have we received, ever? */
uint64_t stats_n_data_cells_received = 0;
/** How many bytes of data have we received in relay_data cells, ever? This
 * would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_received if every relay
 * cell we ever received were completely full of data. */
uint64_t stats_n_data_bytes_received = 0;
/** If <b>conn</b> has an entire relay payload of bytes on its inbuf (or
 * <b>package_partial</b> is true), and the appropriate package windows aren't
 * empty, grab a cell and send it down the circuit.
 *
 * If *<b>max_cells</b> is given, package no more than max_cells.  Decrement
 * *<b>max_cells</b> by the number of cells packaged.
 *
 * Return -1 (and send a RELAY_COMMAND_END cell if necessary) if conn should
 * be marked for close, else return 0.
 */
int
connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
                                  int *max_cells)
{
  size_t amount_to_process, length;
  char payload[CELL_PAYLOAD_SIZE];
  circuit_t *circ;
  unsigned domain = conn->cpath_layer ? LD_APP : LD_EXIT;

  tor_assert(conn);

  if (conn->_base.marked_for_close) {
    log_warn(LD_BUG,
             "called on conn that's already marked for close at %s:%d.",
             conn->_base.marked_for_close_file, conn->_base.marked_for_close);
    return 0;
  }

  if (max_cells && *max_cells <= 0)
    return 0;

  /* Loop target: each pass packages at most one cell, then jumps back here
   * to see whether another can be sent. */
 repeat_connection_edge_package_raw_inbuf:

  circ = circuit_get_by_edge_conn(conn);
  if (!circ) {
    log_info(domain,"conn has no circuit! Closing.");
    conn->end_reason = END_STREAM_REASON_CANT_ATTACH;
    return -1;
  }

  /* Stop if the circuit-level (or hop-level) package window is exhausted. */
  if (circuit_consider_stop_edge_reading(circ, conn->cpath_layer))
    return 0;

  if (conn->package_window <= 0) {
    log_info(domain,"called with package_window %d. Skipping.",
             conn->package_window);
    connection_stop_reading(TO_CONN(conn));
    return 0;
  }

  amount_to_process = buf_datalen(conn->_base.inbuf);

  if (!amount_to_process)
    return 0;

  /* Without package_partial, wait until a full cell's worth is buffered. */
  if (!package_partial && amount_to_process < RELAY_PAYLOAD_SIZE)
    return 0;

  if (amount_to_process > RELAY_PAYLOAD_SIZE) {
    length = RELAY_PAYLOAD_SIZE;
  } else {
    length = amount_to_process;
  }
  stats_n_data_bytes_packaged += length;
  stats_n_data_cells_packaged += 1;

  connection_fetch_from_buf(payload, length, TO_CONN(conn));

  log_debug(domain,"(%d) Packaging %d bytes (%d waiting).", conn->_base.s,
            (int)length, (int)buf_datalen(conn->_base.inbuf));

  if (connection_edge_send_command(conn, RELAY_COMMAND_DATA,
                                   payload, length) < 0 )
    /* circuit got marked for close, don't continue, don't need to mark conn */
    return 0;

  /* Charge the cell against the circuit-wide window at an exit, or against
   * the destination hop's window at the origin / on a rend circuit. */
  if (!conn->cpath_layer) { /* non-rendezvous exit */
    tor_assert(circ->package_window > 0);
    circ->package_window--;
  } else { /* we're an AP, or an exit on a rendezvous circ */
    tor_assert(conn->cpath_layer->package_window > 0);
    conn->cpath_layer->package_window--;
  }

  if (--conn->package_window <= 0) { /* is it 0 after decrement? */
    connection_stop_reading(TO_CONN(conn));
    log_debug(domain,"conn->package_window reached 0.");
    circuit_consider_stop_edge_reading(circ, conn->cpath_layer);
    return 0; /* don't process the inbuf any more */
  }
  log_debug(domain,"conn->package_window is now %d",conn->package_window);

  if (max_cells) {
    *max_cells -= 1;
    if (*max_cells <= 0)
      return 0;
  }

  /* handle more if there's more, or return 0 if there isn't */
  goto repeat_connection_edge_package_raw_inbuf;
}
1414 /** Called when we've just received a relay data cell, or when
1415 * we've just finished flushing all bytes to stream <b>conn</b>.
1417 * If conn->outbuf is not too full, and our deliver window is
1418 * low, send back a suitable number of stream-level sendme cells.
1420 void
1421 connection_edge_consider_sending_sendme(edge_connection_t *conn)
1423 circuit_t *circ;
1425 if (connection_outbuf_too_full(TO_CONN(conn)))
1426 return;
1428 circ = circuit_get_by_edge_conn(conn);
1429 if (!circ) {
1430 /* this can legitimately happen if the destroy has already
1431 * arrived and torn down the circuit */
1432 log_info(LD_APP,"No circuit associated with conn. Skipping.");
1433 return;
1436 while (conn->deliver_window <= STREAMWINDOW_START - STREAMWINDOW_INCREMENT) {
1437 log_debug(conn->cpath_layer?LD_APP:LD_EXIT,
1438 "Outbuf %d, Queuing stream sendme.",
1439 (int)conn->_base.outbuf_flushlen);
1440 conn->deliver_window += STREAMWINDOW_INCREMENT;
1441 if (connection_edge_send_command(conn, RELAY_COMMAND_SENDME,
1442 NULL, 0) < 0) {
1443 log_warn(LD_APP,"connection_edge_send_command failed. Skipping.");
1444 return; /* the circuit's closed, don't continue */
1449 /** The circuit <b>circ</b> has received a circuit-level sendme
1450 * (on hop <b>layer_hint</b>, if we're the OP). Go through all the
1451 * attached streams and let them resume reading and packaging, if
1452 * their stream windows allow it.
1454 static void
1455 circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
1457 if (circuit_queue_streams_are_blocked(circ)) {
1458 log_debug(layer_hint?LD_APP:LD_EXIT,"Too big queue, no resuming");
1459 return;
1461 log_debug(layer_hint?LD_APP:LD_EXIT,"resuming");
1463 if (CIRCUIT_IS_ORIGIN(circ))
1464 circuit_resume_edge_reading_helper(TO_ORIGIN_CIRCUIT(circ)->p_streams,
1465 circ, layer_hint);
1466 else
1467 circuit_resume_edge_reading_helper(TO_OR_CIRCUIT(circ)->n_streams,
1468 circ, layer_hint);
/** A helper function for circuit_resume_edge_reading() above.
 * The arguments are the same, except that <b>conn</b> is the head
 * of a linked list of edge streams that should each be considered.
 */
static int
circuit_resume_edge_reading_helper(edge_connection_t *first_conn,
                                   circuit_t *circ,
                                   crypt_path_t *layer_hint)
{
  edge_connection_t *conn;
  int n_streams, n_streams_left;
  int packaged_this_round;
  int cells_on_queue;
  int cells_per_conn;

  /* How many cells do we have space for? It will be the minimum of
   * the number needed to exhaust the package window, and the minimum
   * needed to fill the cell queue. */
  int max_to_package = circ->package_window;
  if (CIRCUIT_IS_ORIGIN(circ)) {
    cells_on_queue = circ->n_conn_cells.n;
  } else {
    or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
    cells_on_queue = or_circ->p_conn_cells.n;
  }
  if (CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue < max_to_package)
    max_to_package = CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue;

  /* Count how many non-marked streams there are that have anything on
   * their inbuf, and enable reading on all of the connections. */
  n_streams = 0;
  for (conn=first_conn; conn; conn=conn->next_stream) {
    /* Skip closing streams and streams with exhausted stream windows. */
    if (conn->_base.marked_for_close || conn->package_window <= 0)
      continue;
    if (!layer_hint || conn->cpath_layer == layer_hint) {
      connection_start_reading(TO_CONN(conn));

      if (buf_datalen(conn->_base.inbuf) > 0)
        ++n_streams;
    }
  }

  if (n_streams == 0) /* avoid divide-by-zero */
    return 0;

 again:

  /* Split the available budget evenly across the streams that have data,
   * rounding up so the budget is never zero. */
  cells_per_conn = CEIL_DIV(max_to_package, n_streams);

  packaged_this_round = 0;
  n_streams_left = 0;

  /* Iterate over all connections. Package up to cells_per_conn cells on
   * each. Update packaged_this_round with the total number of cells
   * packaged, and n_streams_left with the number that still have data to
   * package.
   */
  for (conn=first_conn; conn; conn=conn->next_stream) {
    if (conn->_base.marked_for_close || conn->package_window <= 0)
      continue;
    if (!layer_hint || conn->cpath_layer == layer_hint) {
      int n = cells_per_conn, r;
      /* handle whatever might still be on the inbuf */
      r = connection_edge_package_raw_inbuf(conn, 1, &n);

      /* Note how many we packaged */
      packaged_this_round += (cells_per_conn-n);

      if (r<0) {
        /* Problem while packaging. (We already sent an end cell if
         * possible) */
        connection_mark_for_close(TO_CONN(conn));
        continue;
      }

      /* If there's still data to read, we'll be coming back to this stream. */
      if (buf_datalen(conn->_base.inbuf))
        ++n_streams_left;

      /* If the circuit won't accept any more data, return without looking
       * at any more of the streams. Any connections that should be stopped
       * have already been stopped by connection_edge_package_raw_inbuf. */
      if (circuit_consider_stop_edge_reading(circ, layer_hint))
        return -1;
      /* XXXX should we also stop immediately if we fill up the cell queue?
       * Probably. */
    }
  }

  /* If we made progress, and we are willing to package more, and there are
   * any streams left that want to package stuff... try again!
   */
  if (packaged_this_round && packaged_this_round < max_to_package &&
      n_streams_left) {
    max_to_package -= packaged_this_round;
    n_streams = n_streams_left;
    goto again;
  }

  return 0;
}
1573 /** Check if the package window for <b>circ</b> is empty (at
1574 * hop <b>layer_hint</b> if it's defined).
1576 * If yes, tell edge streams to stop reading and return 1.
1577 * Else return 0.
1579 static int
1580 circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
1582 edge_connection_t *conn = NULL;
1583 unsigned domain = layer_hint ? LD_APP : LD_EXIT;
1585 if (!layer_hint) {
1586 or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
1587 log_debug(domain,"considering circ->package_window %d",
1588 circ->package_window);
1589 if (circ->package_window <= 0) {
1590 log_debug(domain,"yes, not-at-origin. stopped.");
1591 for (conn = or_circ->n_streams; conn; conn=conn->next_stream)
1592 connection_stop_reading(TO_CONN(conn));
1593 return 1;
1595 return 0;
1597 /* else, layer hint is defined, use it */
1598 log_debug(domain,"considering layer_hint->package_window %d",
1599 layer_hint->package_window);
1600 if (layer_hint->package_window <= 0) {
1601 log_debug(domain,"yes, at-origin. stopped.");
1602 for (conn = TO_ORIGIN_CIRCUIT(circ)->p_streams; conn;
1603 conn=conn->next_stream)
1604 if (conn->cpath_layer == layer_hint)
1605 connection_stop_reading(TO_CONN(conn));
1606 return 1;
1608 return 0;
1611 /** Check if the deliver_window for circuit <b>circ</b> (at hop
1612 * <b>layer_hint</b> if it's defined) is low enough that we should
1613 * send a circuit-level sendme back down the circuit. If so, send
1614 * enough sendmes that the window would be overfull if we sent any
1615 * more.
1617 static void
1618 circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint)
1620 // log_fn(LOG_INFO,"Considering: layer_hint is %s",
1621 // layer_hint ? "defined" : "null");
1622 while ((layer_hint ? layer_hint->deliver_window : circ->deliver_window) <=
1623 CIRCWINDOW_START - CIRCWINDOW_INCREMENT) {
1624 log_debug(LD_CIRC,"Queuing circuit sendme.");
1625 if (layer_hint)
1626 layer_hint->deliver_window += CIRCWINDOW_INCREMENT;
1627 else
1628 circ->deliver_window += CIRCWINDOW_INCREMENT;
1629 if (relay_send_command_from_edge(0, circ, RELAY_COMMAND_SENDME,
1630 NULL, 0, layer_hint) < 0) {
1631 log_warn(LD_CIRC,
1632 "relay_send_command_from_edge failed. Circuit's closed.");
1633 return; /* the circuit's closed, don't continue */
/* When ACTIVE_CIRCUITS_PARANOIA is defined, run the active-circuit-list
 * consistency check on <b>conn</b> at extra call sites; otherwise the
 * macro expands to nothing. */
#ifdef ACTIVE_CIRCUITS_PARANOIA
#define assert_active_circuits_ok_paranoid(conn) \
     assert_active_circuits_ok(conn)
#else
#define assert_active_circuits_ok_paranoid(conn)
#endif
/** The total number of cells we have allocated from the memory pool. */
static int total_cells_allocated = 0;

/** A memory pool to allocate packed_cell_t objects. */
static mp_pool_t *cell_pool = NULL;

/** Memory pool to allocate insertion_time_elem_t objects used for cell
 * statistics. */
static mp_pool_t *it_pool = NULL;
1655 /** Allocate structures to hold cells. */
1656 void
1657 init_cell_pool(void)
1659 tor_assert(!cell_pool);
1660 cell_pool = mp_pool_new(sizeof(packed_cell_t), 128*1024);
1663 /** Free all storage used to hold cells (and insertion times if we measure
1664 * cell statistics). */
1665 void
1666 free_cell_pool(void)
1668 /* Maybe we haven't called init_cell_pool yet; need to check for it. */
1669 if (cell_pool) {
1670 mp_pool_destroy(cell_pool);
1671 cell_pool = NULL;
1673 if (it_pool) {
1674 mp_pool_destroy(it_pool);
1675 it_pool = NULL;
1679 /** Free excess storage in cell pool. */
1680 void
1681 clean_cell_pool(void)
1683 tor_assert(cell_pool);
1684 mp_pool_clean(cell_pool, 0, 1);
1687 /** Release storage held by <b>cell</b>. */
1688 static INLINE void
1689 packed_cell_free_unchecked(packed_cell_t *cell)
1691 --total_cells_allocated;
1692 mp_pool_release(cell);
1695 /** Allocate and return a new packed_cell_t. */
1696 static INLINE packed_cell_t *
1697 packed_cell_alloc(void)
1699 ++total_cells_allocated;
1700 return mp_pool_get(cell_pool);
1703 /** Log current statistics for cell pool allocation at log level
1704 * <b>severity</b>. */
1705 void
1706 dump_cell_pool_usage(int severity)
1708 circuit_t *c;
1709 int n_circs = 0;
1710 int n_cells = 0;
1711 for (c = _circuit_get_global_list(); c; c = c->next) {
1712 n_cells += c->n_conn_cells.n;
1713 if (!CIRCUIT_IS_ORIGIN(c))
1714 n_cells += TO_OR_CIRCUIT(c)->p_conn_cells.n;
1715 ++n_circs;
1717 log(severity, LD_MM, "%d cells allocated on %d circuits. %d cells leaked.",
1718 n_cells, n_circs, total_cells_allocated - n_cells);
1719 mp_pool_log_status(cell_pool, severity);
1722 /** Allocate a new copy of packed <b>cell</b>. */
1723 static INLINE packed_cell_t *
1724 packed_cell_copy(const cell_t *cell)
1726 packed_cell_t *c = packed_cell_alloc();
1727 cell_pack(c, cell);
1728 c->next = NULL;
1729 return c;
1732 /** Append <b>cell</b> to the end of <b>queue</b>. */
1733 void
1734 cell_queue_append(cell_queue_t *queue, packed_cell_t *cell)
1736 if (queue->tail) {
1737 tor_assert(!queue->tail->next);
1738 queue->tail->next = cell;
1739 } else {
1740 queue->head = cell;
1742 queue->tail = cell;
1743 cell->next = NULL;
1744 ++queue->n;
/** Append a newly allocated copy of <b>cell</b> to the end of <b>queue</b> */
void
cell_queue_append_packed_copy(cell_queue_t *queue, const cell_t *cell)
{
  packed_cell_t *copy = packed_cell_copy(cell);
  /* Remember the time when this cell was put in the queue. */
  if (get_options()->CellStatistics) {
    struct timeval now;
    uint32_t added;
    insertion_time_queue_t *it_queue = queue->insertion_times;
    /* Lazily create the element pool the first time statistics are on. */
    if (!it_pool)
      it_pool = mp_pool_new(sizeof(insertion_time_elem_t), 1024);
    tor_gettimeofday_cached(&now);
#define SECONDS_IN_A_DAY 86400L
    /* Encode the time of day in hundredths of a second (wraps daily). */
    added = (uint32_t)(((now.tv_sec % SECONDS_IN_A_DAY) * 100L)
            + ((uint32_t)now.tv_usec / (uint32_t)10000L));
    if (!it_queue) {
      it_queue = tor_malloc_zero(sizeof(insertion_time_queue_t));
      queue->insertion_times = it_queue;
    }
    if (it_queue->last && it_queue->last->insertion_time == added) {
      /* Same timestamp as the previous cell: just bump its counter. */
      it_queue->last->counter++;
    } else {
      /* New timestamp: append a fresh element to the insertion-time list. */
      insertion_time_elem_t *elem = mp_pool_get(it_pool);
      elem->next = NULL;
      elem->insertion_time = added;
      elem->counter = 1;
      if (it_queue->last) {
        it_queue->last->next = elem;
        it_queue->last = elem;
      } else {
        it_queue->first = it_queue->last = elem;
      }
    }
  }
  cell_queue_append(queue, copy);
}
1785 /** Remove and free every cell in <b>queue</b>. */
1786 void
1787 cell_queue_clear(cell_queue_t *queue)
1789 packed_cell_t *cell, *next;
1790 cell = queue->head;
1791 while (cell) {
1792 next = cell->next;
1793 packed_cell_free_unchecked(cell);
1794 cell = next;
1796 queue->head = queue->tail = NULL;
1797 queue->n = 0;
1798 if (queue->insertion_times) {
1799 while (queue->insertion_times->first) {
1800 insertion_time_elem_t *elem = queue->insertion_times->first;
1801 queue->insertion_times->first = elem->next;
1802 mp_pool_release(elem);
1804 tor_free(queue->insertion_times);
1808 /** Extract and return the cell at the head of <b>queue</b>; return NULL if
1809 * <b>queue</b> is empty. */
1810 static INLINE packed_cell_t *
1811 cell_queue_pop(cell_queue_t *queue)
1813 packed_cell_t *cell = queue->head;
1814 if (!cell)
1815 return NULL;
1816 queue->head = cell->next;
1817 if (cell == queue->tail) {
1818 tor_assert(!queue->head);
1819 queue->tail = NULL;
1821 --queue->n;
1822 return cell;
1825 /** Return a pointer to the "next_active_on_{n,p}_conn" pointer of <b>circ</b>,
1826 * depending on whether <b>conn</b> matches n_conn or p_conn. */
1827 static INLINE circuit_t **
1828 next_circ_on_conn_p(circuit_t *circ, or_connection_t *conn)
1830 tor_assert(circ);
1831 tor_assert(conn);
1832 if (conn == circ->n_conn) {
1833 return &circ->next_active_on_n_conn;
1834 } else {
1835 or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
1836 tor_assert(conn == orcirc->p_conn);
1837 return &orcirc->next_active_on_p_conn;
1841 /** Return a pointer to the "prev_active_on_{n,p}_conn" pointer of <b>circ</b>,
1842 * depending on whether <b>conn</b> matches n_conn or p_conn. */
1843 static INLINE circuit_t **
1844 prev_circ_on_conn_p(circuit_t *circ, or_connection_t *conn)
1846 tor_assert(circ);
1847 tor_assert(conn);
1848 if (conn == circ->n_conn) {
1849 return &circ->prev_active_on_n_conn;
1850 } else {
1851 or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
1852 tor_assert(conn == orcirc->p_conn);
1853 return &orcirc->prev_active_on_p_conn;
1857 /** Helper for sorting cell_ewma_t values in their priority queue. */
1858 static int
1859 compare_cell_ewma_counts(const void *p1, const void *p2)
1861 const cell_ewma_t *e1=p1, *e2=p2;
1862 if (e1->cell_count < e2->cell_count)
1863 return -1;
1864 else if (e1->cell_count > e2->cell_count)
1865 return 1;
1866 else
1867 return 0;
1870 /** Given a cell_ewma_t, return a pointer to the circuit containing it. */
1871 static circuit_t *
1872 cell_ewma_to_circuit(cell_ewma_t *ewma)
1874 if (ewma->is_for_p_conn) {
1875 /* This is an or_circuit_t's p_cell_ewma. */
1876 or_circuit_t *orcirc = SUBTYPE_P(ewma, or_circuit_t, p_cell_ewma);
1877 return TO_CIRCUIT(orcirc);
1878 } else {
1879 /* This is some circuit's n_cell_ewma. */
1880 return SUBTYPE_P(ewma, circuit_t, n_cell_ewma);
1884 /* ==== Functions for scaling cell_ewma_t ====
1886 When choosing which cells to relay first, we favor circuits that have been
1887 quiet recently. This gives better latency on connections that aren't
1888 pushing lots of data, and makes the network feel more interactive.
1890 Conceptually, we take an exponentially weighted mean average of the number
1891 of cells a circuit has sent, and allow active circuits (those with cells to
1892 relay) to send cells in reverse order of their exponentially-weighted mean
1893 average (EWMA) cell count. [That is, a cell sent N seconds ago 'counts'
1894 F^N times as much as a cell sent now, for 0<F<1.0, and we favor the
1895 circuit that has sent the fewest cells]
1897 If 'double' had infinite precision, we could do this simply by counting a
1898 cell sent at startup as having weight 1.0, and a cell sent N seconds later
1899 as having weight F^-N. This way, we would never need to re-scale
1900 any already-sent cells.
1902 To prevent double from overflowing, we could count a cell sent now as
1903 having weight 1.0 and a cell sent N seconds ago as having weight F^N.
1904 This, however, would mean we'd need to re-scale *ALL* old circuits every
1905 time we wanted to send a cell.
1907 So as a compromise, we divide time into 'ticks' (currently, 10-second
1908 increments) and say that a cell sent at the start of a current tick is
1909 worth 1.0, a cell sent N seconds before the start of the current tick is
1910 worth F^N, and a cell sent N seconds after the start of the current tick is
1911 worth F^-N. This way we don't overflow, and we don't need to constantly
1912 rescale.
/** How long does a tick last (seconds)? */
#define EWMA_TICK_LEN 10

/** The default per-tick scale factor, if it hasn't been overridden by a
 * consensus or a configuration setting.  zero means "disabled". */
#define EWMA_DEFAULT_HALFLIFE 0.0

/** Given a timeval <b>now</b>, compute the cell_ewma tick in which it occurs
 * and the fraction of the tick that has elapsed between the start of the tick
 * and <b>now</b>.  Return the former and store the latter in
 * *<b>remainder_out</b>.
 *
 * These tick values are not meant to be shared between Tor instances, or used
 * for other purposes. */
static unsigned
cell_ewma_tick_from_timeval(const struct timeval *now,
                            double *remainder_out)
{
  unsigned tick = (unsigned) (now->tv_sec / EWMA_TICK_LEN);
  /* Seconds (with sub-second precision) elapsed since this tick began... */
  double elapsed = (double)(now->tv_sec % EWMA_TICK_LEN) +
                   ((double) now->tv_usec) / 1.0e6;
  /* ...expressed as a fraction of the tick length. */
  *remainder_out = elapsed / EWMA_TICK_LEN;
  return tick;
}
1941 /** Compute and return the current cell_ewma tick. */
1942 unsigned
1943 cell_ewma_get_tick(void)
1945 return ((unsigned)approx_time() / EWMA_TICK_LEN);
/** The per-tick scale factor to be used when computing cell-count EWMA
 * values.  (A cell sent N ticks before the start of the current tick
 * has value ewma_scale_factor ** N.)
 */
static double ewma_scale_factor = 0.1;
/** Nonzero iff the cell_ewma circuit-priority algorithm is enabled; set by
 * cell_ewma_set_scale_factor(). */
static int ewma_enabled = 0;

/** Tolerance used when comparing configured halflife values against zero. */
#define EPSILON 0.00001
/** ln(0.5): used to turn a halflife (in ticks) into a per-tick decay
 * factor via exp(LOG_ONEHALF / halflife). */
#define LOG_ONEHALF -0.69314718055994529
/** Adjust the global cell scale factor based on <b>options</b>.
 *
 * Priority order for choosing the halflife: the configured
 * CircuitPriorityHalflife option (any nonnegative value; presumably the
 * option defaults to a negative sentinel when unset -- TODO confirm),
 * then the CircuitPriorityHalflifeMsec consensus parameter, then
 * EWMA_DEFAULT_HALFLIFE.  A halflife of (approximately) zero disables
 * the cell_ewma algorithm entirely. */
void
cell_ewma_set_scale_factor(or_options_t *options, networkstatus_t *consensus)
{
  int32_t halflife_ms;
  double halflife;
  const char *source;
  if (options && options->CircuitPriorityHalflife >= -EPSILON) {
    halflife = options->CircuitPriorityHalflife;
    source = "CircuitPriorityHalflife in configuration";
  } else if (consensus &&
             (halflife_ms = networkstatus_get_param(
                 consensus, "CircuitPriorityHalflifeMsec", -1)) >= 0) {
    halflife = ((double)halflife_ms)/1000.0;
    source = "CircuitPriorityHalflifeMsec in consensus";
  } else {
    halflife = EWMA_DEFAULT_HALFLIFE;
    source = "Default value";
  }

  if (halflife <= EPSILON) {
    /* The cell EWMA algorithm is disabled. */
    ewma_scale_factor = 0.1;
    ewma_enabled = 0;
    log_info(LD_OR,
             "Disabled cell_ewma algorithm because of value in %s",
             source);
  } else {
    /* convert halflife into halflife-per-tick. */
    halflife /= EWMA_TICK_LEN;
    /* compute per-tick scale factor: the factor F with F^halflife == 0.5. */
    ewma_scale_factor = exp( LOG_ONEHALF / halflife );
    ewma_enabled = 1;
    log_info(LD_OR,
             "Enabled cell_ewma algorithm because of value in %s; "
             "scale factor is %lf per %d seconds",
             source, ewma_scale_factor, EWMA_TICK_LEN);
  }
}
1998 /** Return the multiplier necessary to convert the value of a cell sent in
1999 * 'from_tick' to one sent in 'to_tick'. */
2000 static INLINE double
2001 get_scale_factor(unsigned from_tick, unsigned to_tick)
2003 /* This math can wrap around, but that's okay: unsigned overflow is
2004 well-defined */
2005 int diff = (int)(to_tick - from_tick);
2006 return pow(ewma_scale_factor, diff);
2009 /** Adjust the cell count of <b>ewma</b> so that it is scaled with respect to
2010 * <b>cur_tick</b> */
2011 static void
2012 scale_single_cell_ewma(cell_ewma_t *ewma, unsigned cur_tick)
2014 double factor = get_scale_factor(ewma->last_adjusted_tick, cur_tick);
2015 ewma->cell_count *= factor;
2016 ewma->last_adjusted_tick = cur_tick;
/** Adjust the cell count of every active circuit on <b>conn</b> so
 * that they are scaled with respect to <b>cur_tick</b> */
static void
scale_active_circuits(or_connection_t *conn, unsigned cur_tick)
{
  /* All entries share the connection's last recalibration tick, so one
   * factor rescales every entry. */
  double factor = get_scale_factor(
              conn->active_circuit_pqueue_last_recalibrated,
              cur_tick);
  /* Ordinarily it isn't okay to change the value of an element in a heap,
   * but it's okay here, since we are preserving the order. */
  SMARTLIST_FOREACH(conn->active_circuit_pqueue, cell_ewma_t *, e, {
      tor_assert(e->last_adjusted_tick ==
                 conn->active_circuit_pqueue_last_recalibrated);
      e->cell_count *= factor;
      e->last_adjusted_tick = cur_tick;
  });
  conn->active_circuit_pqueue_last_recalibrated = cur_tick;
}
2039 /** Rescale <b>ewma</b> to the same scale as <b>conn</b>, and add it to
2040 * <b>conn</b>'s priority queue of active circuits */
2041 static void
2042 add_cell_ewma_to_conn(or_connection_t *conn, cell_ewma_t *ewma)
2044 tor_assert(ewma->heap_index == -1);
2045 scale_single_cell_ewma(ewma,
2046 conn->active_circuit_pqueue_last_recalibrated);
2048 smartlist_pqueue_add(conn->active_circuit_pqueue,
2049 compare_cell_ewma_counts,
2050 STRUCT_OFFSET(cell_ewma_t, heap_index),
2051 ewma);
2054 /** Remove <b>ewma</b> from <b>conn</b>'s priority queue of active circuits */
2055 static void
2056 remove_cell_ewma_from_conn(or_connection_t *conn, cell_ewma_t *ewma)
2058 tor_assert(ewma->heap_index != -1);
2059 smartlist_pqueue_remove(conn->active_circuit_pqueue,
2060 compare_cell_ewma_counts,
2061 STRUCT_OFFSET(cell_ewma_t, heap_index),
2062 ewma);
2065 /** Remove and return the first cell_ewma_t from conn's priority queue of
2066 * active circuits. Requires that the priority queue is nonempty. */
2067 static cell_ewma_t *
2068 pop_first_cell_ewma_from_conn(or_connection_t *conn)
2070 return smartlist_pqueue_pop(conn->active_circuit_pqueue,
2071 compare_cell_ewma_counts,
2072 STRUCT_OFFSET(cell_ewma_t, heap_index));
/** Add <b>circ</b> to the list of circuits with pending cells on
 * <b>conn</b>.  No effect if <b>circ</b> is already linked.
 *
 * Active circuits form a doubly-linked ring threaded through the circuits'
 * {next,prev}_active_on_{n,p}_conn pointers; the circuit's cell_ewma is also
 * inserted into <b>conn</b>'s priority queue. */
void
make_circuit_active_on_conn(circuit_t *circ, or_connection_t *conn)
{
  circuit_t **nextp = next_circ_on_conn_p(circ, conn);
  circuit_t **prevp = prev_circ_on_conn_p(circ, conn);

  if (*nextp && *prevp) {
    /* Already active. */
    return;
  }

  assert_active_circuits_ok_paranoid(conn);

  if (! conn->active_circuits) {
    /* Ring was empty: circ becomes its only element, linked to itself. */
    conn->active_circuits = circ;
    *prevp = *nextp = circ;
  } else {
    /* Splice circ in just before the head, i.e. at the ring's tail. */
    circuit_t *head = conn->active_circuits;
    circuit_t *old_tail = *prev_circ_on_conn_p(head, conn);
    *next_circ_on_conn_p(old_tail, conn) = circ;
    *nextp = head;
    *prev_circ_on_conn_p(head, conn) = circ;
    *prevp = old_tail;
  }

  /* Insert the matching cell_ewma into conn's priority queue. */
  if (circ->n_conn == conn) {
    add_cell_ewma_to_conn(conn, &circ->n_cell_ewma);
  } else {
    or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
    tor_assert(conn == orcirc->p_conn);
    add_cell_ewma_to_conn(conn, &orcirc->p_cell_ewma);
  }

  assert_active_circuits_ok_paranoid(conn);
}
/** Remove <b>circ</b> from the list of circuits with pending cells on
 * <b>conn</b>.  No effect if <b>circ</b> is already unlinked. */
void
make_circuit_inactive_on_conn(circuit_t *circ, or_connection_t *conn)
{
  circuit_t **nextp = next_circ_on_conn_p(circ, conn);
  circuit_t **prevp = prev_circ_on_conn_p(circ, conn);
  circuit_t *next = *nextp, *prev = *prevp;

  if (!next && !prev) {
    /* Already inactive. */
    return;
  }

  assert_active_circuits_ok_paranoid(conn);

  /* An active circuit sits on a ring, so both links must be set and its
   * neighbors must point back at it. */
  tor_assert(next && prev);
  tor_assert(*prev_circ_on_conn_p(next, conn) == circ);
  tor_assert(*next_circ_on_conn_p(prev, conn) == circ);

  if (next == circ) {
    /* circ was the only element; the ring becomes empty. */
    conn->active_circuits = NULL;
  } else {
    /* Unsplice circ, advancing the head pointer if it pointed at circ. */
    *prev_circ_on_conn_p(next, conn) = prev;
    *next_circ_on_conn_p(prev, conn) = next;
    if (conn->active_circuits == circ)
      conn->active_circuits = next;
  }
  *prevp = *nextp = NULL;

  /* Remove the matching cell_ewma from conn's priority queue as well. */
  if (circ->n_conn == conn) {
    remove_cell_ewma_from_conn(conn, &circ->n_cell_ewma);
  } else {
    or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
    tor_assert(conn == orcirc->p_conn);
    remove_cell_ewma_from_conn(conn, &orcirc->p_cell_ewma);
  }

  assert_active_circuits_ok_paranoid(conn);
}
2154 /** Remove all circuits from the list of circuits with pending cells on
2155 * <b>conn</b>. */
2156 void
2157 connection_or_unlink_all_active_circs(or_connection_t *orconn)
2159 circuit_t *head = orconn->active_circuits;
2160 circuit_t *cur = head;
2161 if (! head)
2162 return;
2163 do {
2164 circuit_t *next = *next_circ_on_conn_p(cur, orconn);
2165 *prev_circ_on_conn_p(cur, orconn) = NULL;
2166 *next_circ_on_conn_p(cur, orconn) = NULL;
2167 cur = next;
2168 } while (cur != head);
2169 orconn->active_circuits = NULL;
2171 SMARTLIST_FOREACH(orconn->active_circuit_pqueue, cell_ewma_t *, e,
2172 e->heap_index = -1);
2173 smartlist_clear(orconn->active_circuit_pqueue);
/** Block (if <b>block</b> is true) or unblock (if <b>block</b> is false)
 * every edge connection that is using <b>circ</b> to write to <b>orconn</b>,
 * and start or stop reading as appropriate.
 *
 * If <b>stream_id</b> is nonzero, block only the edge connection whose
 * stream_id matches it.
 *
 * Returns the number of streams whose status we changed.
 */
static int
set_streams_blocked_on_circ(circuit_t *circ, or_connection_t *orconn,
                            int block, streamid_t stream_id)
{
  edge_connection_t *edge = NULL;
  int n = 0;
  if (circ->n_conn == orconn) {
    /* Writing toward the next hop: the affected streams are the ones that
     * feed cells in the OUT direction (p_streams on an origin circuit). */
    circ->streams_blocked_on_n_conn = block;
    if (CIRCUIT_IS_ORIGIN(circ))
      edge = TO_ORIGIN_CIRCUIT(circ)->p_streams;
  } else {
    /* Writing toward the previous hop: only or_circuits have one. */
    circ->streams_blocked_on_p_conn = block;
    tor_assert(!CIRCUIT_IS_ORIGIN(circ));
    edge = TO_OR_CIRCUIT(circ)->n_streams;
  }

  for (; edge; edge = edge->next_stream) {
    connection_t *conn = TO_CONN(edge);
    if (stream_id && edge->stream_id != stream_id)
      continue;

    if (edge->edge_blocked_on_circ != block) {
      ++n;
      edge->edge_blocked_on_circ = block;
    }

    if (!conn->read_event) {
      /* This connection is a placeholder for something; probably a DNS
       * request.  It can't actually stop or start reading.*/
      continue;
    }

    if (block) {
      if (connection_is_reading(conn))
        connection_stop_reading(conn);
    } else {
      /* Is this right?  NOTE(review): this restarts reading even if the
       * connection was stopped for some reason other than this circuit's
       * queue -- confirm that no other stop-reading state is clobbered. */
      if (!connection_is_reading(conn))
        connection_start_reading(conn);
    }
  }

  return n;
}
/** Pull as many cells as possible (but no more than <b>max</b>) from the
 * queue of the first active circuit on <b>conn</b>, and write them to
 * <b>conn</b>-&gt;outbuf.  Return the number of cells written.  Advance
 * the active circuit pointer to the next active circuit in the ring.
 *
 * When the cell_ewma algorithm is enabled, "first" means the active circuit
 * with the lowest EWMA cell count, taken from the priority queue, rather
 * than the head of the round-robin ring. */
int
connection_or_flush_from_first_active_circuit(or_connection_t *conn, int max,
                                              time_t now)
{
  int n_flushed;
  cell_queue_t *queue;
  circuit_t *circ;
  int streams_blocked;

  /* The current (hi-res) time */
  struct timeval now_hires;

  /* The EWMA cell counter for the circuit we're flushing. */
  cell_ewma_t *cell_ewma = NULL;
  double ewma_increment = -1;

  circ = conn->active_circuits;
  if (!circ) return 0;
  assert_active_circuits_ok_paranoid(conn);

  /* See if we're doing the ewma circuit selection algorithm. */
  if (ewma_enabled) {
    unsigned tick;
    double fractional_tick;
    tor_gettimeofday_cached(&now_hires);
    tick = cell_ewma_tick_from_timeval(&now_hires, &fractional_tick);

    if (tick != conn->active_circuit_pqueue_last_recalibrated) {
      /* A new tick began: rescale every entry in the pqueue to it. */
      scale_active_circuits(conn, tick);
    }

    /* Weight of one cell sent partway into the current tick. */
    ewma_increment = pow(ewma_scale_factor, -fractional_tick);

    /* Pick the circuit with the lowest EWMA count (pqueue minimum). */
    cell_ewma = smartlist_get(conn->active_circuit_pqueue, 0);
    circ = cell_ewma_to_circuit(cell_ewma);
  }

  if (circ->n_conn == conn) {
    queue = &circ->n_conn_cells;
    streams_blocked = circ->streams_blocked_on_n_conn;
  } else {
    queue = &TO_OR_CIRCUIT(circ)->p_conn_cells;
    streams_blocked = circ->streams_blocked_on_p_conn;
  }
  tor_assert(*next_circ_on_conn_p(circ,conn));

  for (n_flushed = 0; n_flushed < max && queue->head; ) {
    packed_cell_t *cell = cell_queue_pop(queue);
    tor_assert(*next_circ_on_conn_p(circ,conn));

    /* Calculate the exact time that this cell has spent in the queue. */
    if (get_options()->CellStatistics && !CIRCUIT_IS_ORIGIN(circ)) {
      /* NOTE(review): this 'now' shadows the time_t 'now' parameter. */
      struct timeval now;
      uint32_t flushed;
      uint32_t cell_waiting_time;
      insertion_time_queue_t *it_queue = queue->insertion_times;
      tor_gettimeofday_cached(&now);
      /* Current time of day, in hundredths of a second. */
      flushed = (uint32_t)((now.tv_sec % SECONDS_IN_A_DAY) * 100L +
                (uint32_t)now.tv_usec / (uint32_t)10000L);
      if (!it_queue || !it_queue->first) {
        log_info(LD_GENERAL, "Cannot determine insertion time of cell. "
                             "Looks like the CellStatistics option was "
                             "recently enabled.");
      } else {
        or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
        insertion_time_elem_t *elem = it_queue->first;
        /* Waiting time in milliseconds; the modulo handles the wrap at
         * midnight. */
        cell_waiting_time =
            (uint32_t)((flushed * 10L + SECONDS_IN_A_DAY * 1000L -
                        elem->insertion_time * 10L) %
                       (SECONDS_IN_A_DAY * 1000L));
#undef SECONDS_IN_A_DAY
        /* One insertion_time_elem_t covers 'counter' consecutive cells
         * inserted during the same interval; release it when used up. */
        elem->counter--;
        if (elem->counter < 1) {
          it_queue->first = elem->next;
          if (elem == it_queue->last)
            it_queue->last = NULL;
          mp_pool_release(elem);
        }
        orcirc->total_cell_waiting_time += cell_waiting_time;
        orcirc->processed_cells++;
      }
    }

    /* If we just flushed our queue and this circuit is used for a
     * tunneled directory request, possibly advance its state. */
    if (queue->n == 0 && TO_CONN(conn)->dirreq_id)
      geoip_change_dirreq_state(TO_CONN(conn)->dirreq_id,
                                DIRREQ_TUNNELED,
                                DIRREQ_CIRC_QUEUE_FLUSHED);

    connection_write_to_buf(cell->body, CELL_NETWORK_SIZE, TO_CONN(conn));

    packed_cell_free_unchecked(cell);
    ++n_flushed;
    if (cell_ewma) {
      cell_ewma_t *tmp;
      cell_ewma->cell_count += ewma_increment;
      /* We pop and re-add the cell_ewma_t here, not above, since we need to
       * re-add it immediately to keep the priority queue consistent with
       * the linked-list implementation */
      tmp = pop_first_cell_ewma_from_conn(conn);
      tor_assert(tmp == cell_ewma);
      add_cell_ewma_to_conn(conn, cell_ewma);
    }
    if (circ != conn->active_circuits) {
      /* If this happens, the current circuit just got made inactive by
       * a call in connection_write_to_buf().  That's nothing to worry about:
       * circuit_make_inactive_on_conn() already advanced conn->active_circuits
       * for us.
       */
      assert_active_circuits_ok_paranoid(conn);
      goto done;
    }
  }
  tor_assert(*next_circ_on_conn_p(circ,conn));
  assert_active_circuits_ok_paranoid(conn);
  /* Round-robin: advance to the next circuit in the ring. */
  conn->active_circuits = *next_circ_on_conn_p(circ, conn);

  /* Is the cell queue low enough to unblock all the streams that are waiting
   * to write to this circuit? */
  if (streams_blocked && queue->n <= CELL_QUEUE_LOWWATER_SIZE)
    set_streams_blocked_on_circ(circ, conn, 0, 0); /* unblock streams */

  /* Did we just run out of cells on this circuit's queue? */
  if (queue->n == 0) {
    log_debug(LD_GENERAL, "Made a circuit inactive.");
    make_circuit_inactive_on_conn(circ, conn);
  }
 done:
  if (n_flushed)
    conn->timestamp_last_added_nonpadding = now;
  return n_flushed;
}
/** Add <b>cell</b> to the queue of <b>circ</b> writing to <b>orconn</b>
 * transmitting in <b>direction</b>.
 *
 * If <b>fromstream</b> is nonzero, it is the stream_id of the edge
 * connection that generated this cell. */
void
append_cell_to_circuit_queue(circuit_t *circ, or_connection_t *orconn,
                             cell_t *cell, cell_direction_t direction,
                             streamid_t fromstream)
{
  cell_queue_t *queue;
  int streams_blocked;
  if (circ->marked_for_close)
    return;

  if (direction == CELL_DIRECTION_OUT) {
    queue = &circ->n_conn_cells;
    streams_blocked = circ->streams_blocked_on_n_conn;
  } else {
    or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
    queue = &orcirc->p_conn_cells;
    streams_blocked = circ->streams_blocked_on_p_conn;
  }
  if (cell->command == CELL_RELAY_EARLY && orconn->link_proto < 2) {
    /* V1 connections don't understand RELAY_EARLY. */
    cell->command = CELL_RELAY;
  }

  cell_queue_append_packed_copy(queue, cell);

  /* If we have too many cells on the circuit, we should stop reading from
   * the edge streams for a while. */
  if (!streams_blocked && queue->n >= CELL_QUEUE_HIGHWATER_SIZE)
    set_streams_blocked_on_circ(circ, orconn, 1, 0); /* block streams */

  if (streams_blocked && fromstream) {
    /* This circuit's streams are marked blocked, yet this stream just
     * delivered a cell -- so it apparently isn't blocked yet; block it
     * specifically. */
    set_streams_blocked_on_circ(circ, orconn, 1, fromstream);
  }

  if (queue->n == 1) {
    /* This was the first cell added to the queue.  We need to make this
     * circuit active. */
    log_debug(LD_GENERAL, "Made a circuit active.");
    make_circuit_active_on_conn(circ, orconn);
  }

  if (! buf_datalen(orconn->_base.outbuf)) {
    /* There is no data at all waiting to be sent on the outbuf.  Add a
     * cell, so that we can notice when it gets flushed, flushed_some can
     * get called, and we can start putting more data onto the buffer then.
     */
    log_debug(LD_GENERAL, "Primed a buffer.");
    connection_or_flush_from_first_active_circuit(orconn, 1, approx_time());
  }
}
2422 /** Append an encoded value of <b>addr</b> to <b>payload_out</b>, which must
2423 * have at least 18 bytes of free space. The encoding is, as specified in
2424 * tor-spec.txt:
2425 * RESOLVED_TYPE_IPV4 or RESOLVED_TYPE_IPV6 [1 byte]
2426 * LENGTH [1 byte]
2427 * ADDRESS [length bytes]
2428 * Return the number of bytes added, or -1 on error */
2430 append_address_to_payload(char *payload_out, const tor_addr_t *addr)
2432 uint32_t a;
2433 switch (tor_addr_family(addr)) {
2434 case AF_INET:
2435 payload_out[0] = RESOLVED_TYPE_IPV4;
2436 payload_out[1] = 4;
2437 a = tor_addr_to_ipv4n(addr);
2438 memcpy(payload_out+2, &a, 4);
2439 return 6;
2440 case AF_INET6:
2441 payload_out[0] = RESOLVED_TYPE_IPV6;
2442 payload_out[1] = 16;
2443 memcpy(payload_out+2, tor_addr_to_in6_addr8(addr), 16);
2444 return 18;
2445 case AF_UNSPEC:
2446 default:
2447 return -1;
2451 /** Given <b>payload_len</b> bytes at <b>payload</b>, starting with an address
2452 * encoded as by append_address_to_payload(), try to decode the address into
2453 * *<b>addr_out</b>. Return the next byte in the payload after the address on
2454 * success, or NULL on failure. */
2455 const char *
2456 decode_address_from_payload(tor_addr_t *addr_out, const char *payload,
2457 int payload_len)
2459 if (payload_len < 2)
2460 return NULL;
2461 if (payload_len < 2+(uint8_t)payload[1])
2462 return NULL;
2464 switch (payload[0]) {
2465 case RESOLVED_TYPE_IPV4:
2466 if (payload[1] != 4)
2467 return NULL;
2468 tor_addr_from_ipv4n(addr_out, get_uint32(payload+2));
2469 break;
2470 case RESOLVED_TYPE_IPV6:
2471 if (payload[1] != 16)
2472 return NULL;
2473 tor_addr_from_ipv6_bytes(addr_out, payload+2);
2474 break;
2475 default:
2476 tor_addr_make_unspec(addr_out);
2477 break;
2479 return payload + 2 + (uint8_t)payload[1];
2482 /** Remove all the cells queued on <b>circ</b> for <b>orconn</b>. */
2483 void
2484 circuit_clear_cell_queue(circuit_t *circ, or_connection_t *orconn)
2486 cell_queue_t *queue;
2487 if (circ->n_conn == orconn) {
2488 queue = &circ->n_conn_cells;
2489 } else {
2490 or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
2491 tor_assert(orcirc->p_conn == orconn);
2492 queue = &orcirc->p_conn_cells;
2495 if (queue->n)
2496 make_circuit_inactive_on_conn(circ,orconn);
2498 cell_queue_clear(queue);
/** Fail with an assert if the active circuits ring on <b>orconn</b> is
 * corrupt.
 *
 * Checks that the ring is consistently doubly-linked, that each active
 * circuit's cell_ewma is in the priority queue at its recorded heap_index,
 * and that the ring and the pqueue contain the same number of entries. */
void
assert_active_circuits_ok(or_connection_t *orconn)
{
  circuit_t *head = orconn->active_circuits;
  circuit_t *cur = head;
  int n = 0;
  if (! head)
    return;
  do {
    circuit_t *next = *next_circ_on_conn_p(cur, orconn);
    circuit_t *prev = *prev_circ_on_conn_p(cur, orconn);
    cell_ewma_t *ewma;
    tor_assert(next);
    tor_assert(prev);
    /* Neighbors must point back at us. */
    tor_assert(*next_circ_on_conn_p(prev, orconn) == cur);
    tor_assert(*prev_circ_on_conn_p(next, orconn) == cur);
    /* The right cell_ewma (n- or p-side) must be flagged correctly... */
    if (orconn == cur->n_conn) {
      ewma = &cur->n_cell_ewma;
      tor_assert(!ewma->is_for_p_conn);
    } else {
      ewma = &TO_OR_CIRCUIT(cur)->p_cell_ewma;
      tor_assert(ewma->is_for_p_conn);
    }
    /* ...and must be present in the pqueue at its recorded position. */
    tor_assert(ewma->heap_index != -1);
    tor_assert(ewma == smartlist_get(orconn->active_circuit_pqueue,
                                     ewma->heap_index));
    n++;
    cur = next;
  } while (cur != head);

  tor_assert(n == smartlist_len(orconn->active_circuit_pqueue));
}
2536 /** Return 1 if we shouldn't restart reading on this circuit, even if
2537 * we get a SENDME. Else return 0.
2539 static int
2540 circuit_queue_streams_are_blocked(circuit_t *circ)
2542 if (CIRCUIT_IS_ORIGIN(circ)) {
2543 return circ->streams_blocked_on_n_conn;
2544 } else {
2545 return circ->streams_blocked_on_p_conn;