/* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \brief Basic history and performance-tracking functionality.
 *
 * Basic history and performance-tracking functionality to remember
 * which servers have worked in the past, how much bandwidth we've
 * been using, which ports we tend to want, and so on; further,
 * exit port statistics, cell statistics, and connection statistics.
 *
 * The history and information tracked in this module could sensibly be
 * divided into several categories:
 *
 * <ul><li>Statistics used by authorities to remember the uptime and
 * stability information about various relays, including "uptime",
 * "weighted fractional uptime" and "mean time between failures".
 *
 * <li>Bandwidth usage history, used by relays to self-report how much
 * bandwidth they've used for different purposes over the last day or so,
 * in order to generate the {dirreq-,}{read,write}-history lines in
 *
 * <li>Predicted ports, used by clients to remember how long it's been
 * since they opened an exit connection to each given target
 * port. Clients use this information in order to try to keep circuits
 * open to exit nodes that can connect to the ports that they care
 * about. (The predicted ports mechanism also handles predicted circuit
 * usage that _isn't_ port-specific, such as resolves, internal circuits,
 *
 * <li>Public key operation counters, for tracking how many times we've
 * done each public key operation. (This is unmaintained and we should
 *
 * <li>Exit statistics by port, used by exits to keep track of the
 * number of streams and bytes they've served at each exit port, so they
 * can generate their exit-kibibytes-{read,written} and
 * exit-streams-opened statistics.
 *
 * <li>Circuit stats, used by relay instances to track circuit
 * queue fullness and delay over time, and generate cell-processed-cells,
 * cell-queued-cells, cell-time-in-queue, and cell-circuits-per-decile
 *
 * <li>Descriptor serving statistics, used by directory caches to track
 * how many descriptors they've served.
 *
 * <li>Connection statistics, used by relays to track one-way and
 * bidirectional connections.
 *
 * <li>Onion handshake statistics, used by relays to count how many
 * TAP and ntor handshakes they've handled.
 *
 * <li>Hidden service statistics, used by relays to count rendezvous
 * traffic and HSDir-stored descriptors.
 *
 * <li>Link protocol statistics, used by relays to count how many times
 * each link protocol has been used.
 *
 * The entry points for this module are scattered throughout the
 * codebase.  Sending data, receiving data, connecting to a relay,
 * losing a connection to a relay, and so on can all trigger a change in
 * our current stats.  Relays also invoke this module in order to
 * extract their statistics when building routerinfo and extrainfo
 * objects in router.c.
 *
 * TODO: This module should be broken up.
 *
 * (The "rephist" name originally stood for "reputation and history".)
 */
#include "circuitlist.h"
#include "circuituse.h"
#include "networkstatus.h"
#include "routerlist.h"
#include "channelpadding.h"
#include "connection_or.h"

static void bw_arrays_init(void);
static void predicted_ports_alloc(void);
/** Total number of bytes currently allocated in fields used by rephist.c. */
uint64_t rephist_total_alloc=0;
/** Number of or_history_t objects currently allocated. */
uint32_t rephist_total_num=0;
/** If the total weighted run count of all runs for a router ever falls
 * below this amount, the router can be treated as having 0 MTBF. */
#define STABILITY_EPSILON   0.0001
/** Value by which to discount all old intervals for MTBF purposes.  This
 * is compounded every STABILITY_INTERVAL. */
#define STABILITY_ALPHA     0.95
/** Interval at which to discount all old intervals for MTBF purposes. */
#define STABILITY_INTERVAL  (12*60*60)
/* (This combination of ALPHA, INTERVAL, and EPSILON makes it so that an
 * interval that just ended counts twice as much as one that ended a week ago,
 * 20X as much as one that ended a month ago, and routers that have had no
 * uptime data for about half a year will get forgotten.) */
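/* A rough sanity check of those claims (added illustration, not from the
 * original source): with STABILITY_INTERVAL = 12 hours, a week is 14
 * intervals and a month roughly 60, so an interval that ended a week ago is
 * weighted by 0.95^14 ~= 0.49 (about half) and one that ended a month ago by
 * 0.95^60 ~= 0.046 (about 1/20th), matching the "twice" and "20X" figures
 * above. */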
/** History of an OR. */
typedef struct or_history_t {
  /** When did we start tracking this OR? */
  time_t since;
  /** When did we most recently note a change to this OR? */
  time_t changed;

  /** The address at which we most recently connected to this OR
   * successfully. */
  tor_addr_t last_reached_addr;

  /** The port at which we most recently connected to this OR successfully */
  uint16_t last_reached_port;

  /* === For MTBF tracking: */
  /** Weighted sum total of all times that this router has been online. */
  unsigned long weighted_run_length;
  /** If the router is now online (according to stability-checking rules),
   * when did it come online? */
  time_t start_of_run;
  /** Sum of weights for runs in weighted_run_length. */
  double total_run_weights;

  /* === For fractional uptime tracking: */
  time_t start_of_downtime;
  unsigned long weighted_uptime;
  unsigned long total_weighted_time;
} or_history_t;
/**
 * This structure holds accounting needed to calculate the padding overhead.
 */
typedef struct padding_counts_t {
  /** Total number of cells we have received, including padding */
  uint64_t read_cell_count;
  /** Total number of cells we have sent, including padding */
  uint64_t write_cell_count;
  /** Total number of CELL_PADDING cells we have received */
  uint64_t read_pad_cell_count;
  /** Total number of CELL_PADDING cells we have sent */
  uint64_t write_pad_cell_count;
  /** Total number of read cells on padding-enabled conns */
  uint64_t enabled_read_cell_count;
  /** Total number of sent cells on padding-enabled conns */
  uint64_t enabled_write_cell_count;
  /** Total number of read CELL_PADDING cells on padding-enabled conns */
  uint64_t enabled_read_pad_cell_count;
  /** Total number of sent CELL_PADDING cells on padding-enabled conns */
  uint64_t enabled_write_pad_cell_count;
  /** Total number of RELAY_DROP cells we have received */
  uint64_t read_drop_cell_count;
  /** Total number of RELAY_DROP cells we have sent */
  uint64_t write_drop_cell_count;
  /** The maximum number of padding timers we've seen in 24 hours */
  uint64_t maximum_chanpad_timers;
  /** When did we first copy padding_current into padding_published? */
  char first_published_at[ISO_TIME_LEN+1];
} padding_counts_t;

/** Holds the current values of our padding statistics.
 * It is not published until it is transferred to padding_published. */
static padding_counts_t padding_current;

/** Remains fixed for a 24 hour period, and then is replaced
 * by a redacted copy of padding_current. */
static padding_counts_t padding_published;
/** When did we last multiply all routers' weighted_run_length and
 * total_run_weights by STABILITY_ALPHA? */
static time_t stability_last_downrated = 0;

static time_t started_tracking_stability = 0;

/** Map from hex OR identity digest to or_history_t. */
static digestmap_t *history_map = NULL;
/** Return the or_history_t for the OR with identity digest <b>id</b>,
 * creating it if necessary. */
static or_history_t *
get_or_history(const char* id)
{
  or_history_t *hist;

  if (tor_digest_is_zero(id))
    return NULL;

  hist = digestmap_get(history_map, id);
  if (!hist) {
    hist = tor_malloc_zero(sizeof(or_history_t));
    rephist_total_alloc += sizeof(or_history_t);
    ++rephist_total_num;
    hist->since = hist->changed = time(NULL);
    tor_addr_make_unspec(&hist->last_reached_addr);
    digestmap_set(history_map, id, hist);
  }
  return hist;
}
/** Helper: free storage held by a single OR history entry. */
static void
free_or_history(void *_hist)
{
  or_history_t *hist = _hist;
  rephist_total_alloc -= sizeof(or_history_t);
  --rephist_total_num;
  tor_free(hist);
}

/** Initialize the static data structures for tracking history. */
void
rep_hist_init(void)
{
  history_map = digestmap_new();
  bw_arrays_init();
  predicted_ports_alloc();
}
/** We have just decided that this router with identity digest <b>id</b> is
 * reachable, meaning we will give it a "Running" flag for the next while. */
void
rep_hist_note_router_reachable(const char *id, const tor_addr_t *at_addr,
                               const uint16_t at_port, time_t when)
{
  or_history_t *hist = get_or_history(id);
  int was_in_run = 1;
  char tbuf[ISO_TIME_LEN+1];
  int addr_changed, port_changed;

  tor_assert((!at_addr && !at_port) || (at_addr && at_port));

  addr_changed = at_addr && !tor_addr_is_null(&hist->last_reached_addr) &&
    tor_addr_compare(at_addr, &hist->last_reached_addr, CMP_EXACT) != 0;
  port_changed = at_port && hist->last_reached_port &&
    at_port != hist->last_reached_port;

  if (!started_tracking_stability)
    started_tracking_stability = time(NULL);
  if (!hist->start_of_run) {
    hist->start_of_run = when;
    was_in_run = 0;
  }
  if (hist->start_of_downtime) {
    long down_length;
    format_local_iso_time(tbuf, hist->start_of_downtime);
    log_info(LD_HIST, "Router %s is now Running; it had been down since %s.",
             hex_str(id, DIGEST_LEN), tbuf);
    if (was_in_run)
      log_info(LD_HIST, "  (Paradoxically, it was already Running too.)");
    down_length = when - hist->start_of_downtime;
    hist->total_weighted_time += down_length;
    hist->start_of_downtime = 0;
  } else if (addr_changed || port_changed) {
    /* If we're reachable, but the address changed, treat this as some
     * downtime. */
    int penalty = get_options()->TestingTorNetwork ? 240 : 3600;
    networkstatus_t *ns;

    if ((ns = networkstatus_get_latest_consensus())) {
      int fresh_interval = (int)(ns->fresh_until - ns->valid_after);
      int live_interval = (int)(ns->valid_until - ns->valid_after);
      /* on average, a descriptor addr change takes .5 intervals to make it
       * into a consensus, and half a liveness period to make it to
       * clients. */
      penalty = (int)(fresh_interval + live_interval) / 2;
    }
    format_local_iso_time(tbuf, hist->start_of_run);
    log_info(LD_HIST,"Router %s still seems Running, but its address appears "
             "to have changed since the last time it was reachable.  I'm "
             "going to treat it as having been down for %d seconds",
             hex_str(id, DIGEST_LEN), penalty);
    rep_hist_note_router_unreachable(id, when-penalty);
    rep_hist_note_router_reachable(id, NULL, 0, when);
  } else {
    format_local_iso_time(tbuf, hist->start_of_run);
    if (was_in_run)
      log_debug(LD_HIST, "Router %s is still Running; it has been Running "
                "since %s", hex_str(id, DIGEST_LEN), tbuf);
    else
      log_info(LD_HIST,"Router %s is now Running; it was previously untracked",
               hex_str(id, DIGEST_LEN));
  }
  if (at_addr)
    tor_addr_copy(&hist->last_reached_addr, at_addr);
  if (at_port)
    hist->last_reached_port = at_port;
}
/** We have just decided that this router is unreachable, meaning
 * we are taking away its "Running" flag. */
void
rep_hist_note_router_unreachable(const char *id, time_t when)
{
  or_history_t *hist = get_or_history(id);
  char tbuf[ISO_TIME_LEN+1];
  int was_running = 0;

  if (!started_tracking_stability)
    started_tracking_stability = time(NULL);

  if (hist->start_of_run) {
    /*XXXX We could treat failed connections differently from failed
     * connect attempts. */
    long run_length = when - hist->start_of_run;
    format_local_iso_time(tbuf, hist->start_of_run);

    was_running = 1;
    hist->total_run_weights += 1.0;
    hist->start_of_run = 0;
    if (run_length < 0) {
      unsigned long penalty = -run_length;
#define SUBTRACT_CLAMPED(var, penalty) \
  do { (var) = (var) < (penalty) ? 0 : (var) - (penalty); } while (0)

      SUBTRACT_CLAMPED(hist->weighted_run_length, penalty);
      SUBTRACT_CLAMPED(hist->weighted_uptime, penalty);
    } else {
      hist->weighted_run_length += run_length;
      hist->weighted_uptime += run_length;
      hist->total_weighted_time += run_length;
    }

    log_info(LD_HIST, "Router %s is now non-Running: it had previously been "
             "Running since %s.  Its total weighted uptime is %lu/%lu.",
             hex_str(id, DIGEST_LEN), tbuf, hist->weighted_uptime,
             hist->total_weighted_time);
  }
  if (!hist->start_of_downtime) {
    hist->start_of_downtime = when;

    if (!was_running)
      log_info(LD_HIST, "Router %s is now non-Running; it was previously "
               "untracked.", hex_str(id, DIGEST_LEN));
  } else {
    if (!was_running) {
      format_local_iso_time(tbuf, hist->start_of_downtime);

      log_info(LD_HIST, "Router %s is still non-Running; it has been "
               "non-Running since %s.", hex_str(id, DIGEST_LEN), tbuf);
    }
  }
}
/** Mark a router with ID <b>id</b> as non-Running, and retroactively declare
 * that it has never been running: give it no stability and no WFU. */
void
rep_hist_make_router_pessimal(const char *id, time_t when)
{
  or_history_t *hist = get_or_history(id);

  rep_hist_note_router_unreachable(id, when);

  hist->weighted_run_length = 0;
  hist->weighted_uptime = 0;
}
/** Helper: Discount all old MTBF data, if it is time to do so.  Return
 * the time at which we should next discount MTBF data. */
time_t
rep_hist_downrate_old_runs(time_t now)
{
  digestmap_iter_t *orhist_it;
  const char *digest1;
  or_history_t *hist;
  double alpha = 1.0;

  if (!history_map)
    history_map = digestmap_new();
  if (!stability_last_downrated)
    stability_last_downrated = now;
  if (stability_last_downrated + STABILITY_INTERVAL > now)
    return stability_last_downrated + STABILITY_INTERVAL;

  /* Okay, we should downrate the data.  By how much? */
  while (stability_last_downrated + STABILITY_INTERVAL < now) {
    stability_last_downrated += STABILITY_INTERVAL;
    alpha *= STABILITY_ALPHA;
  }

  log_info(LD_HIST, "Discounting all old stability info by a factor of %f",
           alpha);

  /* Multiply every w_r_l, t_r_w pair by alpha. */
  for (orhist_it = digestmap_iter_init(history_map);
       !digestmap_iter_done(orhist_it);
       orhist_it = digestmap_iter_next(history_map,orhist_it)) {
    void *hist_p;
    digestmap_iter_get(orhist_it, &digest1, &hist_p);
    hist = hist_p;

    hist->weighted_run_length =
      (unsigned long)(hist->weighted_run_length * alpha);
    hist->total_run_weights *= alpha;

    hist->weighted_uptime = (unsigned long)(hist->weighted_uptime * alpha);
    hist->total_weighted_time = (unsigned long)
      (hist->total_weighted_time * alpha);
  }

  return stability_last_downrated + STABILITY_INTERVAL;
}
/** Helper: Return the weighted MTBF of the router with history <b>hist</b>. */
static double
get_stability(or_history_t *hist, time_t when)
{
  long total = hist->weighted_run_length;
  double total_weights = hist->total_run_weights;

  if (hist->start_of_run) {
    /* We're currently in a run.  Let total and total_weights hold the values
     * they would hold if the current run were to end now. */
    total += (when-hist->start_of_run);
    total_weights += 1.0;
  }
  if (total_weights < STABILITY_EPSILON) {
    /* Round down to zero, and avoid divide-by-zero. */
    return 0.0;
  }

  return total / total_weights;
}
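/* Illustrative example (not in the original source): a router with two
 * finished, undiscounted runs of 10 and 20 hours has weighted_run_length of
 * 30 hours' worth of seconds and total_run_weights of 2.0, so its weighted
 * MTBF is 15 hours.  Because downrating scales the numerator and denominator
 * alike, a fresh run added at weight 1.0 counts for more than older,
 * discounted runs without changing the units. */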
/** Return the total amount of time we've been observing, with each run of
 * time downrated by the appropriate factor. */
static long
get_total_weighted_time(or_history_t *hist, time_t when)
{
  long total = hist->total_weighted_time;
  if (hist->start_of_run) {
    total += (when - hist->start_of_run);
  } else if (hist->start_of_downtime) {
    total += (when - hist->start_of_downtime);
  }
  return total;
}
/** Helper: Return the weighted percent-of-time-online of the router with
 * history <b>hist</b>. */
static double
get_weighted_fractional_uptime(or_history_t *hist, time_t when)
{
  long total = hist->total_weighted_time;
  long up = hist->weighted_uptime;

  if (hist->start_of_run) {
    long run_length = (when - hist->start_of_run);
    up += run_length;
    total += run_length;
  } else if (hist->start_of_downtime) {
    total += (when - hist->start_of_downtime);
  }

  /* Avoid calling anybody's uptime infinity (which should be impossible if
   * the code is working), or NaN (which can happen for any router we haven't
   * observed up or down yet). */
  if (!total)
    return 0.0;

  return ((double) up) / total;
}
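/* Illustrative example (not in the original source): a router observed for
 * a weighted total of 1000 hours, 900 of them up, has a WFU of 0.9.  Since
 * weighted_uptime and total_weighted_time are both downrated together over
 * time, recent downtime moves this figure faster than long-ago downtime. */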
/** Return how long the router whose identity digest is <b>id</b> has
 * been reachable. Return 0 if the router is unknown or currently deemed
 * down. */
long
rep_hist_get_uptime(const char *id, time_t when)
{
  or_history_t *hist = get_or_history(id);
  if (!hist)
    return 0;
  if (!hist->start_of_run || when < hist->start_of_run)
    return 0;
  return when - hist->start_of_run;
}

/** Return an estimated MTBF for the router whose identity digest is
 * <b>id</b>. Return 0 if the router is unknown. */
double
rep_hist_get_stability(const char *id, time_t when)
{
  or_history_t *hist = get_or_history(id);
  if (!hist)
    return 0.0;

  return get_stability(hist, when);
}

/** Return an estimated percent-of-time-online for the router whose identity
 * digest is <b>id</b>. Return 0 if the router is unknown. */
double
rep_hist_get_weighted_fractional_uptime(const char *id, time_t when)
{
  or_history_t *hist = get_or_history(id);
  if (!hist)
    return 0.0;

  return get_weighted_fractional_uptime(hist, when);
}

/** Return a number representing how long we've known about the router whose
 * digest is <b>id</b>. Return 0 if the router is unknown.
 *
 * Be careful: this measure increases monotonically as we know the router for
 * longer and longer, but it doesn't increase linearly.
 */
long
rep_hist_get_weighted_time_known(const char *id, time_t when)
{
  or_history_t *hist = get_or_history(id);
  if (!hist)
    return 0;

  return get_total_weighted_time(hist, when);
}

/** Return true if we've been measuring MTBFs for long enough to
 * pronounce on Stability. */
int
rep_hist_have_measured_enough_stability(void)
{
  /* XXXX++ This doesn't do so well when we change our opinion
   * as to whether we're tracking router stability. */
  return started_tracking_stability < time(NULL) - 4*60*60;
}
/** Log all the reliability data we have remembered, with the chosen
 * severity. */
void
rep_hist_dump_stats(time_t now, int severity)
{
  digestmap_iter_t *orhist_it;
  const char *name1, *digest1;
  char hexdigest1[HEX_DIGEST_LEN+1];
  or_history_t *or_history;
  void *or_history_p;
  const node_t *node;

  rep_history_clean(now - get_options()->RephistTrackTime);

  tor_log(severity, LD_HIST, "--------------- Dumping history information:");

  for (orhist_it = digestmap_iter_init(history_map);
       !digestmap_iter_done(orhist_it);
       orhist_it = digestmap_iter_next(history_map,orhist_it)) {
    double s;
    unsigned long stability;
    digestmap_iter_get(orhist_it, &digest1, &or_history_p);
    or_history = (or_history_t*) or_history_p;

    if ((node = node_get_by_id(digest1)) && node_get_nickname(node))
      name1 = node_get_nickname(node);
    else
      name1 = "(unknown)";
    base16_encode(hexdigest1, sizeof(hexdigest1), digest1, DIGEST_LEN);
    s = get_stability(or_history, now);
    stability = (unsigned long)s;

    tor_log(severity, LD_HIST,
            "OR %s [%s]: wmtbf %lu:%02lu:%02lu",
            name1, hexdigest1,
            stability/3600, (stability/60)%60, stability%60);
  }
}
/** Remove history info for routers/links that haven't changed since
 * <b>before</b>. */
void
rep_history_clean(time_t before)
{
  int authority = authdir_mode(get_options());
  or_history_t *or_history;
  void *or_history_p;
  digestmap_iter_t *orhist_it;
  const char *d1;

  orhist_it = digestmap_iter_init(history_map);
  while (!digestmap_iter_done(orhist_it)) {
    int should_remove;
    digestmap_iter_get(orhist_it, &d1, &or_history_p);
    or_history = or_history_p;

    should_remove = authority ?
      (or_history->total_run_weights < STABILITY_EPSILON &&
       !or_history->start_of_run)
      : (or_history->changed < before);
    if (should_remove) {
      orhist_it = digestmap_iter_next_rmv(history_map, orhist_it);
      free_or_history(or_history);
      continue;
    }
    orhist_it = digestmap_iter_next(history_map, orhist_it);
  }
}
/** Write MTBF data to disk. Return 0 on success, negative on failure.
 *
 * If <b>missing_means_down</b>, then if we're about to write an entry
 * that is still considered up but isn't in our routerlist, consider it
 * to have been down. */
int
rep_hist_record_mtbf_data(time_t now, int missing_means_down)
{
  char time_buf[ISO_TIME_LEN+1];

  digestmap_iter_t *orhist_it;
  const char *digest;
  void *or_history_p;
  or_history_t *hist;
  open_file_t *open_file = NULL;
  FILE *f;

  {
    char *filename = get_datadir_fname("router-stability");
    f = start_writing_to_stdio_file(filename, OPEN_FLAGS_REPLACE|O_TEXT, 0600,
                                    &open_file);
    tor_free(filename);
    if (!f)
      return -1;
  }

  /* File format is:
   *   FormatLine *KeywordLine Data
   *
   *   FormatLine = "format 1" NL
   *   KeywordLine = Keyword SP Arguments NL
   *   Data = "data" NL *RouterMTBFLine "." NL
   *   RouterMTBFLine = Fingerprint SP WeightedRunLen SP
   *           TotalRunWeights [SP S=StartRunTime] NL
   */
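  /* For illustration only (example values, not taken from the original
   * source): a single router record as written by the R/+MTBF/+WFU lines
   * below might look like:
   *
   *   R 0123456789ABCDEF0123456789ABCDEF01234567
   *   +MTBF 1297394 3.39500 S=2017-05-01 12:00:00
   *   +WFU 1297394 1430379 S=2017-04-20 08:00:00
   */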
#define PUT(s) STMT_BEGIN if (fputs((s),f)<0) goto err; STMT_END
#define PRINTF(args) STMT_BEGIN if (fprintf args <0) goto err; STMT_END

  format_iso_time(time_buf, time(NULL));
  PRINTF((f, "stored-at %s\n", time_buf));

  if (started_tracking_stability) {
    format_iso_time(time_buf, started_tracking_stability);
    PRINTF((f, "tracked-since %s\n", time_buf));
  }
  if (stability_last_downrated) {
    format_iso_time(time_buf, stability_last_downrated);
    PRINTF((f, "last-downrated %s\n", time_buf));
  }

  /* XXX Nick: now bridge auths record this for all routers too.
   * Should we make them record it only for bridge routers? -RD
   * Not for 0.2.0. -NM */
  for (orhist_it = digestmap_iter_init(history_map);
       !digestmap_iter_done(orhist_it);
       orhist_it = digestmap_iter_next(history_map,orhist_it)) {
    char dbuf[HEX_DIGEST_LEN+1];
    const char *t = NULL;
    digestmap_iter_get(orhist_it, &digest, &or_history_p);
    hist = (or_history_t*) or_history_p;

    base16_encode(dbuf, sizeof(dbuf), digest, DIGEST_LEN);

    if (missing_means_down && hist->start_of_run &&
        !connection_or_digest_is_known_relay(digest)) {
      /* We think this relay is running, but it's not listed in our
       * consensus. Somehow it fell out without telling us it went
       * down. Complain and also correct it. */
      log_info(LD_HIST,
               "Relay '%s' is listed as up in rephist, but it's not in "
               "our routerlist. Correcting.", dbuf);
      rep_hist_note_router_unreachable(digest, now);
    }

    PRINTF((f, "R %s\n", dbuf));
    if (hist->start_of_run > 0) {
      format_iso_time(time_buf, hist->start_of_run);
      t = time_buf;
    }
    PRINTF((f, "+MTBF %lu %.5f%s%s\n",
            hist->weighted_run_length, hist->total_run_weights,
            t ? " S=" : "", t ? t : ""));
    t = NULL;
    if (hist->start_of_downtime > 0) {
      format_iso_time(time_buf, hist->start_of_downtime);
      t = time_buf;
    }
    PRINTF((f, "+WFU %lu %lu%s%s\n",
            hist->weighted_uptime, hist->total_weighted_time,
            t ? " S=" : "", t ? t : ""));
  }

  PUT(".\n");

  return finish_writing_to_file(open_file);
 err:
  abort_writing_to_file(open_file);
  return -1;
}
/** Helper: return the first j >= i such that !strcmpstart(sl[j], prefix) and
 * such that no line sl[k] with i <= k < j starts with "R ".  Return -1 if no
 * such line exists. */
static int
find_next_with(smartlist_t *sl, int i, const char *prefix)
{
  for ( ; i < smartlist_len(sl); ++i) {
    const char *line = smartlist_get(sl, i);
    if (!strcmpstart(line, prefix))
      return i;
    if (!strcmpstart(line, "R "))
      return -1;
  }
  return -1;
}
/** How many bad times has parse_possibly_bad_iso_time() parsed? */
static int n_bogus_times = 0;
/** Parse the ISO-formatted time in <b>s</b> into *<b>time_out</b>, but
 * round any pre-1970 date to Jan 1, 1970. */
static int
parse_possibly_bad_iso_time(const char *s, time_t *time_out)
{
  int year;
  char b[5];
  strlcpy(b, s, sizeof(b));
  b[4] = '\0';
  year = (int)tor_parse_long(b, 10, 0, INT_MAX, NULL, NULL);
  if (year < 1970) {
    *time_out = 0;
    ++n_bogus_times;
    return 0;
  } else
    return parse_iso_time(s, time_out);
}
/** We've read a time <b>t</b> from a file stored at <b>stored_at</b>, which
 * says we started measuring at <b>started_measuring</b>.  Return a new number
 * that's about as much before <b>now</b> as <b>t</b> was before
 * <b>stored_at</b>. */
static time_t
correct_time(time_t t, time_t now, time_t stored_at, time_t started_measuring)
{
  if (t < started_measuring - 24*60*60*365) {
    return 0;
  } else if (t < started_measuring) {
    return started_measuring;
  } else if (t > stored_at) {
    return 0;
  } else {
    long run_length = stored_at - t;
    t = (time_t)(now - run_length);
    if (t < started_measuring)
      t = started_measuring;
    return t;
  }
}
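/* Worked example (illustrative numbers only): if the file was stored at
 * 12:00 and records a run that began at 09:00, the run was 3 hours old at
 * save time.  Reloading the file at 20:00 corrects the start to 17:00, so
 * it is still 3 hours before "now", clamped to started_measuring if that
 * would push it before the point at which we began measuring. */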
/** Load MTBF data from disk.  Returns 0 on success or recoverable error, -1
 * on failure. */
int
rep_hist_load_mtbf_data(time_t now)
{
  /* XXXX won't handle being called while history is already populated. */
  smartlist_t *lines;
  const char *line = NULL;
  int r = 0, i;
  time_t last_downrated = 0, stored_at = 0, tracked_since = 0;
  time_t latest_possible_start = now;
  long format = -1;

  {
    char *filename = get_datadir_fname("router-stability");
    char *d = read_file_to_str(filename, RFTS_IGNORE_MISSING, NULL);
    tor_free(filename);
    if (!d)
      return -1;
    lines = smartlist_new();
    smartlist_split_string(lines, d, "\n", SPLIT_SKIP_SPACE, 0);
    tor_free(d);
  }

  {
    const char *firstline;
    if (smartlist_len(lines)>4) {
      firstline = smartlist_get(lines, 0);
      if (!strcmpstart(firstline, "format "))
        format = tor_parse_long(firstline+strlen("format "),
                                10, -1, LONG_MAX, NULL, NULL);
    }
  }
  if (format != 1 && format != 2) {
    log_warn(LD_HIST,
             "Unrecognized format in mtbf history file. Skipping.");
    goto err;
  }
  for (i = 1; i < smartlist_len(lines); ++i) {
    line = smartlist_get(lines, i);
    if (!strcmp(line, "data"))
      break;
    if (!strcmpstart(line, "last-downrated ")) {
      if (parse_iso_time(line+strlen("last-downrated "), &last_downrated)<0)
        log_warn(LD_HIST,"Couldn't parse downrate time in mtbf "
                 "history file.");
    }
    if (!strcmpstart(line, "stored-at ")) {
      if (parse_iso_time(line+strlen("stored-at "), &stored_at)<0)
        log_warn(LD_HIST,"Couldn't parse stored time in mtbf "
                 "history file.");
    }
    if (!strcmpstart(line, "tracked-since ")) {
      if (parse_iso_time(line+strlen("tracked-since "), &tracked_since)<0)
        log_warn(LD_HIST,"Couldn't parse started-tracking time in mtbf "
                 "history file.");
    }
  }
  if (last_downrated > now)
    last_downrated = now;
  if (tracked_since > now)
    tracked_since = now;

  if (!stored_at) {
    log_warn(LD_HIST, "No stored time recorded.");
    goto err;
  }

  if (line && !strcmp(line, "data"))
    ++i;

  for (; i < smartlist_len(lines); ++i) {
    char digest[DIGEST_LEN];
    char hexbuf[HEX_DIGEST_LEN+1];
    char mtbf_timebuf[ISO_TIME_LEN+1];
    char wfu_timebuf[ISO_TIME_LEN+1];
    time_t start_of_run = 0;
    time_t start_of_downtime = 0;
    int have_mtbf = 0, have_wfu = 0;
    long wrl = 0;
    double trw = 0;
    long wt_uptime = 0, total_wt_time = 0;
    int n;
    or_history_t *hist;

    line = smartlist_get(lines, i);
    if (!strcmp(line, "."))
      break;

    mtbf_timebuf[0] = '\0';
    wfu_timebuf[0] = '\0';

    if (format == 1) {
      n = tor_sscanf(line, "%40s %ld %lf S=%10s %8s",
                     hexbuf, &wrl, &trw, mtbf_timebuf, mtbf_timebuf+11);
      if (n != 3 && n != 5) {
        log_warn(LD_HIST, "Couldn't scan line %s", escaped(line));
        continue;
      }
      have_mtbf = 1;
    } else {
      /* format == 2. */
      int mtbf_idx, wfu_idx;
      if (strcmpstart(line, "R ") || strlen(line) < 2+HEX_DIGEST_LEN)
        continue;
      strlcpy(hexbuf, line+2, sizeof(hexbuf));
      mtbf_idx = find_next_with(lines, i+1, "+MTBF ");
      wfu_idx = find_next_with(lines, i+1, "+WFU ");
      if (mtbf_idx >= 0) {
        const char *mtbfline = smartlist_get(lines, mtbf_idx);
        n = tor_sscanf(mtbfline, "+MTBF %lu %lf S=%10s %8s",
                       &wrl, &trw, mtbf_timebuf, mtbf_timebuf+11);
        if (n == 2 || n == 4) {
          have_mtbf = 1;
        } else {
          log_warn(LD_HIST, "Couldn't scan +MTBF line %s",
                   escaped(mtbfline));
        }
      }
      if (wfu_idx >= 0) {
        const char *wfuline = smartlist_get(lines, wfu_idx);
        n = tor_sscanf(wfuline, "+WFU %lu %lu S=%10s %8s",
                       &wt_uptime, &total_wt_time,
                       wfu_timebuf, wfu_timebuf+11);
        if (n == 2 || n == 4) {
          have_wfu = 1;
        } else {
          log_warn(LD_HIST, "Couldn't scan +WFU line %s", escaped(wfuline));
        }
      }
    }
    if (base16_decode(digest, DIGEST_LEN,
                      hexbuf, HEX_DIGEST_LEN) != DIGEST_LEN) {
      log_warn(LD_HIST, "Couldn't hex string %s", escaped(hexbuf));
      continue;
    }
    hist = get_or_history(digest);
    if (!hist)
      continue;

    if (have_mtbf) {
      if (mtbf_timebuf[0]) {
        mtbf_timebuf[10] = ' ';
        if (parse_possibly_bad_iso_time(mtbf_timebuf, &start_of_run)<0)
          log_warn(LD_HIST, "Couldn't parse time %s",
                   escaped(mtbf_timebuf));
      }
      hist->start_of_run = correct_time(start_of_run, now, stored_at,
                                        tracked_since);
      if (hist->start_of_run < latest_possible_start + wrl)
        latest_possible_start = (time_t)(hist->start_of_run - wrl);

      hist->weighted_run_length = wrl;
      hist->total_run_weights = trw;
    }
    if (have_wfu) {
      if (wfu_timebuf[0]) {
        wfu_timebuf[10] = ' ';
        if (parse_possibly_bad_iso_time(wfu_timebuf, &start_of_downtime)<0)
          log_warn(LD_HIST, "Couldn't parse time %s", escaped(wfu_timebuf));
      }
      hist->start_of_downtime = correct_time(start_of_downtime, now, stored_at,
                                             tracked_since);
      hist->weighted_uptime = wt_uptime;
      hist->total_weighted_time = total_wt_time;
    }
  }
  if (strcmp(line, "."))
    log_warn(LD_HIST, "Truncated MTBF file.");

  if (tracked_since < 86400*365) /* Recover from insanely early value. */
    tracked_since = latest_possible_start;

  stability_last_downrated = last_downrated;
  started_tracking_stability = tracked_since;

  goto done;
 err:
  r = -1;
 done:
  SMARTLIST_FOREACH(lines, char *, cp, tor_free(cp));
  smartlist_free(lines);
  return r;
}
/** For how many seconds do we keep track of individual per-second bandwidth
 * totals? */
#define NUM_SECS_ROLLING_MEASURE 10
/** How large are the intervals for which we track and report bandwidth use? */
#define NUM_SECS_BW_SUM_INTERVAL (24*60*60)
/** How far in the past do we remember and publish bandwidth use? */
#define NUM_SECS_BW_SUM_IS_VALID (5*24*60*60)
/** How many bandwidth usage intervals do we remember? (derived) */
#define NUM_TOTALS (NUM_SECS_BW_SUM_IS_VALID/NUM_SECS_BW_SUM_INTERVAL)
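/* With the values above (added note, not in the original source): each
 * interval is one day and bandwidth is considered valid for five days, so
 * NUM_TOTALS works out to 5 remembered intervals, and the recorded maxima
 * are the peak 10-second moving sums seen within each day. */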
/** Structure to track bandwidth use, and remember the maxima for a given
 * period. */
typedef struct bw_array_t {
  /** Observation array: Total number of bytes transferred in each of the last
   * NUM_SECS_ROLLING_MEASURE seconds. This is used as a circular array. */
  uint64_t obs[NUM_SECS_ROLLING_MEASURE];
  int cur_obs_idx;          /**< Current position in obs. */
  time_t cur_obs_time;      /**< Time represented in obs[cur_obs_idx] */
  uint64_t total_obs;       /**< Total for all members of obs except
                             * obs[cur_obs_idx] */
  uint64_t max_total;       /**< Largest value that total_obs has taken on in
                             * the current period. */
  uint64_t total_in_period; /**< Total bytes transferred in the current
                             * period. */

  /** When does the next period begin? */
  time_t next_period;
  /** Where in 'maxima' should the maximum bandwidth usage for the current
   * period be stored? */
  int next_max_idx;
  /** How many values in maxima/totals have been set ever? */
  int num_maxes_set;
  /** Circular array of the maximum
   * bandwidth-per-NUM_SECS_ROLLING_MEASURE usage for the last
   * NUM_TOTALS periods */
  uint64_t maxima[NUM_TOTALS];
  /** Circular array of the total bandwidth usage for the last NUM_TOTALS
   * periods */
  uint64_t totals[NUM_TOTALS];
} bw_array_t;
/** Shift the current period of <b>b</b> forward by one. */
static void
commit_max(bw_array_t *b)
{
  /* Store total from current period. */
  b->totals[b->next_max_idx] = b->total_in_period;
  /* Store maximum from current period. */
  b->maxima[b->next_max_idx++] = b->max_total;
  /* Advance next_period and next_max_idx */
  b->next_period += NUM_SECS_BW_SUM_INTERVAL;
  if (b->next_max_idx == NUM_TOTALS)
    b->next_max_idx = 0;
  if (b->num_maxes_set < NUM_TOTALS)
    ++b->num_maxes_set;
  /* Reset max_total. */
  b->max_total = 0;
  /* Reset total_in_period. */
  b->total_in_period = 0;
}
/** Shift the current observation time of <b>b</b> forward by one second. */
static void
advance_obs(bw_array_t *b)
{
  int nextidx;
  uint64_t total;

  /* Calculate the total bandwidth for the last NUM_SECS_ROLLING_MEASURE
   * seconds; adjust max_total as needed.*/
  total = b->total_obs + b->obs[b->cur_obs_idx];
  if (total > b->max_total)
    b->max_total = total;

  nextidx = b->cur_obs_idx+1;
  if (nextidx == NUM_SECS_ROLLING_MEASURE)
    nextidx = 0;

  b->total_obs = total - b->obs[nextidx];
  b->obs[nextidx] = 0;
  b->cur_obs_idx = nextidx;

  if (++b->cur_obs_time >= b->next_period)
    commit_max(b);
}
/** Add <b>n</b> bytes to the number of bytes in <b>b</b> for second
 * <b>when</b>. */
static void
add_obs(bw_array_t *b, time_t when, uint64_t n)
{
  if (when < b->cur_obs_time)
    return; /* Don't record data in the past. */

  /* If we're currently adding observations for an earlier second than
   * 'when', advance b->cur_obs_time and b->cur_obs_idx by an
   * appropriate number of seconds, and do all the other housekeeping. */
  while (when > b->cur_obs_time) {
    /* Doing this one second at a time is potentially inefficient, if we start
       with a state file that is very old.  Fortunately, it doesn't seem to
       show up in profiles, so we can just ignore it for now. */
    advance_obs(b);
  }

  b->obs[b->cur_obs_idx] += n;
  b->total_in_period += n;
}
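/* Usage sketch (illustrative, not from the original source): the relay's
 * main loop effectively does
 *
 *   add_obs(write_array, now, bytes_written_this_second);
 *
 * via rep_hist_note_bytes_written() below.  Each call lands in the current
 * one-second slot; advance_obs() folds finished seconds into the rolling
 * 10-second window, and commit_max() folds finished days into totals[] and
 * maxima[]. */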
/** Allocate, initialize, and return a new bw_array. */
static bw_array_t *
bw_array_new(void)
{
  bw_array_t *b;
  time_t start;
  b = tor_malloc_zero(sizeof(bw_array_t));
  rephist_total_alloc += sizeof(bw_array_t);
  start = time(NULL);
  b->cur_obs_time = start;
  b->next_period = start + NUM_SECS_BW_SUM_INTERVAL;
  return b;
}

#define bw_array_free(val) \
  FREE_AND_NULL(bw_array_t, bw_array_free_, (val))

/** Free storage held by bandwidth array <b>b</b>. */
static void
bw_array_free_(bw_array_t *b)
{
  if (!b)
    return;

  rephist_total_alloc -= sizeof(bw_array_t);
  tor_free(b);
}
/** Recent history of bandwidth observations for read operations. */
static bw_array_t *read_array = NULL;
/** Recent history of bandwidth observations for write operations. */
static bw_array_t *write_array = NULL;
/** Recent history of bandwidth observations for read operations for the
    directory protocol. */
static bw_array_t *dir_read_array = NULL;
/** Recent history of bandwidth observations for write operations for the
    directory protocol. */
static bw_array_t *dir_write_array = NULL;
/** Set up [dir-]read_array and [dir-]write_array, freeing them if they
 * already exist. */
static void
bw_arrays_init(void)
{
  bw_array_free(read_array);
  bw_array_free(write_array);
  bw_array_free(dir_read_array);
  bw_array_free(dir_write_array);

  read_array = bw_array_new();
  write_array = bw_array_new();
  dir_read_array = bw_array_new();
  dir_write_array = bw_array_new();
}
/** Remember that we wrote <b>num_bytes</b> bytes in second <b>when</b>.
 *
 * Add num_bytes to the current running total for <b>when</b>.
 *
 * <b>when</b> can go back in time, but it's safe to ignore calls
 * earlier than the latest <b>when</b> you've heard of.
 */
void
rep_hist_note_bytes_written(size_t num_bytes, time_t when)
{
  /* Maybe a circular array for recent seconds, and step to a new point
   * every time a new second shows up. Or simpler is to just have
   * a normal array and push down each item every second; it's short.
   */
  /* When a new second has rolled over, compute the sum of the bytes we've
   * seen over when-1 to when-1-NUM_SECS_ROLLING_MEASURE, and stick it
   * somewhere. See rep_hist_bandwidth_assess() below.
   */
  add_obs(write_array, when, num_bytes);
}

/** Remember that we read <b>num_bytes</b> bytes in second <b>when</b>.
 * (like rep_hist_note_bytes_written() above)
 */
void
rep_hist_note_bytes_read(size_t num_bytes, time_t when)
{
  /* if we're smart, we can make this func and the one above share code */
  add_obs(read_array, when, num_bytes);
}

/** Remember that we wrote <b>num_bytes</b> directory bytes in second
 * <b>when</b>. (like rep_hist_note_bytes_written() above)
 */
void
rep_hist_note_dir_bytes_written(size_t num_bytes, time_t when)
{
  add_obs(dir_write_array, when, num_bytes);
}

/** Remember that we read <b>num_bytes</b> directory bytes in second
 * <b>when</b>. (like rep_hist_note_bytes_written() above)
 */
void
rep_hist_note_dir_bytes_read(size_t num_bytes, time_t when)
{
  add_obs(dir_read_array, when, num_bytes);
}
/** Helper: Return the largest value in b->maxima.  (This is equal to the
 * most bandwidth used in any NUM_SECS_ROLLING_MEASURE period for the last
 * NUM_SECS_BW_SUM_IS_VALID seconds.)
 */
static uint64_t
find_largest_max(bw_array_t *b)
{
  int i;
  uint64_t max;
  max = 0;
  for (i=0; i<NUM_TOTALS; ++i) {
    if (b->maxima[i]>max)
      max = b->maxima[i];
  }
  return max;
}

/** Find the largest sums in the past NUM_SECS_BW_SUM_IS_VALID (roughly)
 * seconds. Find one sum for reading and one for writing. They don't have
 * to be at the same time.
 *
 * Return the smaller of these sums, divided by NUM_SECS_ROLLING_MEASURE.
 */
int
rep_hist_bandwidth_assess(void)
{
  uint64_t w, r;
  r = find_largest_max(read_array);
  w = find_largest_max(write_array);
  if (r > w)
    return (int)(U64_TO_DBL(w)/NUM_SECS_ROLLING_MEASURE);
  else
    return (int)(U64_TO_DBL(r)/NUM_SECS_ROLLING_MEASURE);
}
/** Print the bandwidth history of b (either [dir-]read_array or
 * [dir-]write_array) into the buffer pointed to by buf.  The format is
 * simply comma separated numbers, from oldest to newest.
 *
 * It returns the number of bytes written.
 */
static size_t
rep_hist_fill_bandwidth_history(char *buf, size_t len, const bw_array_t *b)
{
  char *cp = buf;
  int i, n;
  uint64_t total, cutoff;
  const or_options_t *options = get_options();

  if (b->num_maxes_set <= b->next_max_idx) {
    /* We haven't been through the circular array yet; time starts at i=0.*/
    i = 0;
  } else {
    /* We've been around the array at least once.  The next i to be
       overwritten is the oldest. */
    i = b->next_max_idx;
  }

  if (options->RelayBandwidthRate) {
    /* We don't want to report that we used more bandwidth than the max we're
     * willing to relay; otherwise everybody will know how much traffic
     * we used ourself. */
    cutoff = options->RelayBandwidthRate * NUM_SECS_BW_SUM_INTERVAL;
  } else {
    cutoff = UINT64_MAX;
  }

  for (n=0; n<b->num_maxes_set; ++n,++i) {
    if (i >= NUM_TOTALS)
      i -= NUM_TOTALS;
    tor_assert(i < NUM_TOTALS);
    /* Round the bandwidth used down to the nearest 1k. */
    total = b->totals[i] & ~0x3ff;
    if (total > cutoff)
      total = cutoff;

    if (n==(b->num_maxes_set-1))
      tor_snprintf(cp, len-(cp-buf), U64_FORMAT, U64_PRINTF_ARG(total));
    else
      tor_snprintf(cp, len-(cp-buf), U64_FORMAT",", U64_PRINTF_ARG(total));
    cp += strlen(cp);
  }
  return cp-buf;
}
/** Allocate and return lines for representing this server's bandwidth
 * history in its descriptor. We publish these lines in our extra-info
 * descriptor.
 */
char *
rep_hist_get_bandwidth_lines(void)
{
  char *buf, *cp;
  char t[ISO_TIME_LEN+1];
  int r;
  bw_array_t *b = NULL;
  const char *desc = NULL;
  size_t len;

  /* [dirreq-](read|write)-history yyyy-mm-dd HH:MM:SS (n s) n,n,n... */
  /* The n,n,n part above. Largest representation of a uint64_t is 20 chars
   * long, plus the comma. */
#define MAX_HIST_VALUE_LEN (21*NUM_TOTALS)
  len = (67+MAX_HIST_VALUE_LEN)*4;
  buf = tor_malloc_zero(len);
  cp = buf;
  for (r=0; r<4; ++r) {
    char tmp[MAX_HIST_VALUE_LEN];
    size_t slen;
    switch (r) {
      case 0:
        b = write_array;
        desc = "write-history";
        break;
      case 1:
        b = read_array;
        desc = "read-history";
        break;
      case 2:
        b = dir_write_array;
        desc = "dirreq-write-history";
        break;
      case 3:
        b = dir_read_array;
        desc = "dirreq-read-history";
        break;
    }
    slen = rep_hist_fill_bandwidth_history(tmp, MAX_HIST_VALUE_LEN, b);
    /* If we don't have anything to write, skip to the next entry. */
    if (slen == 0)
      continue;
    format_iso_time(t, b->next_period-NUM_SECS_BW_SUM_INTERVAL);
    tor_snprintf(cp, len-(cp-buf), "%s %s (%d s) ",
                 desc, t, NUM_SECS_BW_SUM_INTERVAL);
    cp += strlen(cp);
    strlcat(cp, tmp, len-(cp-buf));
    cp += slen;
    strlcat(cp, "\n", len-(cp-buf));
    ++cp;
  }
  return buf;
}
/** Write a single bw_array_t into the Values, Ends, Interval, and Maximum
 * entries of an or_state_t. Done before writing out a new state file. */
static void
rep_hist_update_bwhist_state_section(or_state_t *state,
                                     const bw_array_t *b,
                                     smartlist_t **s_values,
                                     smartlist_t **s_maxima,
                                     time_t *s_begins,
                                     int *s_interval)
{
  int i, j;
  uint64_t maxval;

  if (*s_values) {
    SMARTLIST_FOREACH(*s_values, char *, val, tor_free(val));
    smartlist_free(*s_values);
  }
  if (*s_maxima) {
    SMARTLIST_FOREACH(*s_maxima, char *, val, tor_free(val));
    smartlist_free(*s_maxima);
  }
  if (! server_mode(get_options())) {
    /* Clients don't need to store bandwidth history persistently;
     * force these values to the defaults. */
    /* FFFF we should pull the default out of config.c's state table,
     * so we don't have two defaults. */
    if (*s_begins != 0 || *s_interval != 900) {
      time_t now = time(NULL);
      time_t save_at = get_options()->AvoidDiskWrites ? now+3600 : now+600;
      or_state_mark_dirty(state, save_at);
    }
    *s_begins = 0;
    *s_interval = 900;
    *s_values = smartlist_new();
    *s_maxima = smartlist_new();
    return;
  }
  *s_begins = b->next_period;
  *s_interval = NUM_SECS_BW_SUM_INTERVAL;

  *s_values = smartlist_new();
  *s_maxima = smartlist_new();
  /* Set i to first position in circular array */
  i = (b->num_maxes_set <= b->next_max_idx) ? 0 : b->next_max_idx;
  for (j=0; j < b->num_maxes_set; ++j,++i) {
    if (i >= NUM_TOTALS)
      i = 0;
    smartlist_add_asprintf(*s_values, U64_FORMAT,
                           U64_PRINTF_ARG(b->totals[i] & ~0x3ff));
    maxval = b->maxima[i] / NUM_SECS_ROLLING_MEASURE;
    smartlist_add_asprintf(*s_maxima, U64_FORMAT,
                           U64_PRINTF_ARG(maxval & ~0x3ff));
  }
  smartlist_add_asprintf(*s_values, U64_FORMAT,
                         U64_PRINTF_ARG(b->total_in_period & ~0x3ff));
  maxval = b->max_total / NUM_SECS_ROLLING_MEASURE;
  smartlist_add_asprintf(*s_maxima, U64_FORMAT,
                         U64_PRINTF_ARG(maxval & ~0x3ff));
}
/** Update <b>state</b> with the newest bandwidth history. Done before
 * writing out a new state file. */
void
rep_hist_update_state(or_state_t *state)
{
#define UPDATE(arrname,st) \
  rep_hist_update_bwhist_state_section(state,\
                                       (arrname),\
                                       &state->BWHistory ## st ## Values, \
                                       &state->BWHistory ## st ## Maxima, \
                                       &state->BWHistory ## st ## Ends, \
                                       &state->BWHistory ## st ## Interval)

  UPDATE(write_array, Write);
  UPDATE(read_array, Read);
  UPDATE(dir_write_array, DirWrite);
  UPDATE(dir_read_array, DirRead);

#undef UPDATE
  if (server_mode(get_options())) {
    or_state_mark_dirty(state, time(NULL)+(2*3600));
  }
}
/** Load a single bw_array_t from its Values, Ends, Maxima, and Interval
 * entries in an or_state_t. Done while reading the state file. */
static int
rep_hist_load_bwhist_state_section(bw_array_t *b,
                                   const smartlist_t *s_values,
                                   const smartlist_t *s_maxima,
                                   const time_t s_begins,
                                   const int s_interval)
{
  time_t now = time(NULL);
  int retval = 0;
  time_t start;

  uint64_t v, mv;
  int i, ok, ok_m = 0;
  int have_maxima = s_maxima && s_values &&
    (smartlist_len(s_values) == smartlist_len(s_maxima));

  if (s_values && s_begins >= now - NUM_SECS_BW_SUM_INTERVAL*NUM_TOTALS) {
    start = s_begins - s_interval*(smartlist_len(s_values));
    b->cur_obs_time = start;
    b->next_period = start + NUM_SECS_BW_SUM_INTERVAL;
    SMARTLIST_FOREACH_BEGIN(s_values, const char *, cp) {
      const char *maxstr = NULL;
      v = tor_parse_uint64(cp, 10, 0, UINT64_MAX, &ok, NULL);
      if (have_maxima) {
        maxstr = smartlist_get(s_maxima, cp_sl_idx);
        mv = tor_parse_uint64(maxstr, 10, 0, UINT64_MAX, &ok_m, NULL);
        mv *= NUM_SECS_ROLLING_MEASURE;
      } else {
        /* No maxima known; guess average rate to be conservative. */
        mv = (v / s_interval) * NUM_SECS_ROLLING_MEASURE;
      }
      if (!ok) {
        retval = -1;
        log_notice(LD_HIST, "Could not parse value '%s' into a number.'",cp);
      }
      if (maxstr && !ok_m) {
        retval = -1;
        log_notice(LD_HIST, "Could not parse maximum '%s' into a number.'",
                   maxstr);
      }

      if (start < now) {
        time_t cur_start = start;
        time_t actual_interval_len = s_interval;
        uint64_t cur_val = 0;
        /* Calculate the average per second. This is the best we can do
         * because our state file doesn't have per-second resolution. */
        if (start + s_interval > now)
          actual_interval_len = now - start;
        cur_val = v / actual_interval_len;
        /* This is potentially inefficient, but since we don't do it very
         * often it should be ok. */
        while (cur_start < start + actual_interval_len) {
          add_obs(b, cur_start, cur_val);
          ++cur_start;
        }
        b->max_total = mv;
        /* This will result in some fairly choppy history if s_interval
         * is not the same as NUM_SECS_BW_SUM_INTERVAL. XXXX */
        start += actual_interval_len;
      }
    } SMARTLIST_FOREACH_END(cp);
  }

  /* Clean up maxima and observed */
  for (i=0; i<NUM_SECS_ROLLING_MEASURE; ++i) {
    b->obs[i] = 0;
  }
  b->total_obs = 0;

  return retval;
}
/** Set bandwidth history from the state file we just loaded. */
int
rep_hist_load_state(or_state_t *state, char **err)
{
  int all_ok = 1;

  /* Assert they already have been malloced */
  tor_assert(read_array && write_array);
  tor_assert(dir_read_array && dir_write_array);

#define LOAD(arrname,st) \
  if (rep_hist_load_bwhist_state_section( \
                                (arrname), \
                                state->BWHistory ## st ## Values, \
                                state->BWHistory ## st ## Maxima, \
                                state->BWHistory ## st ## Ends, \
                                state->BWHistory ## st ## Interval)<0) \
    all_ok = 0

  LOAD(write_array, Write);
  LOAD(read_array, Read);
  LOAD(dir_write_array, DirWrite);
  LOAD(dir_read_array, DirRead);

#undef LOAD
  if (!all_ok) {
    *err = tor_strdup("Parsing of bandwidth history values failed");
    /* and create fresh arrays */
    bw_arrays_init();
    return -1;
  }
  return 0;
}
/*********************************************************************/

/** A single predicted port: used to remember which ports we've made
 * connections to, so that we can try to keep making circuits that can handle
 * those ports. */
typedef struct predicted_port_t {
  /** The port we connected to */
  uint16_t port;
  /** The time at which we last used it */
  time_t time;
} predicted_port_t;

/** A list of port numbers that have been used recently. */
static smartlist_t *predicted_ports_list = NULL;
/** How long do we keep predicting circuits? */
static int prediction_timeout = 0;
/** When was the last time we added a prediction entry (HS or port) */
static time_t last_prediction_add_time = 0;
/**
 * How much time left until we stop predicting circuits?
 */
int
predicted_ports_prediction_time_remaining(time_t now)
{
  time_t idle_delta;

  /* Protect against overflow of return value. This can happen if the clock
   * jumps backwards in time. Update the last prediction time (aka last
   * active time) to prevent it. This update is preferable to using monotonic
   * time because it prevents clock jumps into the past from simply causing
   * very long idle timeouts while the monotonic time stands still. */
  if (last_prediction_add_time > now) {
    last_prediction_add_time = now;
  }

  idle_delta = now - last_prediction_add_time;

  /* Protect against underflow of the return value. This can happen for very
   * large periods of inactivity/system sleep. */
  if (idle_delta > prediction_timeout)
    return 0;

  if (BUG((prediction_timeout - idle_delta) > INT_MAX)) {
    return INT_MAX;
  }

  return (int)(prediction_timeout - idle_delta);
}
/** We just got an application request for a connection with
 * port <b>port</b>. Remember it for the future, so we can keep
 * some circuits open that will exit to this port.
 */
static void
add_predicted_port(time_t now, uint16_t port)
{
  predicted_port_t *pp = tor_malloc(sizeof(predicted_port_t));

  // If the list is empty, re-randomize predicted ports lifetime
  if (!any_predicted_circuits(now)) {
    prediction_timeout = channelpadding_get_circuits_available_timeout();
  }

  last_prediction_add_time = now;

  log_info(LD_CIRC,
           "New port prediction added. Will continue predictive circ building "
           "for %d more seconds.",
           predicted_ports_prediction_time_remaining(now));

  pp->port = port;
  pp->time = now;
  rephist_total_alloc += sizeof(*pp);
  smartlist_add(predicted_ports_list, pp);
}

/**
 * Allocate whatever memory and structs are needed for predicting
 * which ports will be used. Also seed it with port 443, so we'll build
 * circuits on start-up.
 */
static void
predicted_ports_alloc(void)
{
  predicted_ports_list = smartlist_new();
}

void
predicted_ports_init(void)
{
  add_predicted_port(time(NULL), 443); // Add a port to get us started
}
/** Free whatever memory is needed for predicting which ports will
 * be used. */
void
predicted_ports_free_all(void)
{
  rephist_total_alloc -=
    smartlist_len(predicted_ports_list)*sizeof(predicted_port_t);
  SMARTLIST_FOREACH(predicted_ports_list, predicted_port_t *,
                    pp, tor_free(pp));
  smartlist_free(predicted_ports_list);
}
/** Remember that <b>port</b> has been asked for as of time <b>now</b>.
 * This is used for predicting what sorts of streams we'll make in the
 * future and making exit circuits to anticipate that.
 */
void
rep_hist_note_used_port(time_t now, uint16_t port)
{
  tor_assert(predicted_ports_list);

  if (!port) /* record nothing */
    return;

  SMARTLIST_FOREACH_BEGIN(predicted_ports_list, predicted_port_t *, pp) {
    if (pp->port == port) {
      pp->time = now;

      last_prediction_add_time = now;
      log_info(LD_CIRC,
               "New port prediction added. Will continue predictive circ "
               "building for %d more seconds.",
               predicted_ports_prediction_time_remaining(now));
      return;
    }
  } SMARTLIST_FOREACH_END(pp);
  /* it's not there yet; we need to add it */
  add_predicted_port(now, port);
}
/** Return a newly allocated pointer to a list of uint16_t * for ports that
 * are likely to be asked for in the near future.
 */
smartlist_t *
rep_hist_get_predicted_ports(time_t now)
{
  int predicted_circs_relevance_time;
  smartlist_t *out = smartlist_new();
  tor_assert(predicted_ports_list);

  predicted_circs_relevance_time = prediction_timeout;

  /* clean out obsolete entries */
  SMARTLIST_FOREACH_BEGIN(predicted_ports_list, predicted_port_t *, pp) {
    if (pp->time + predicted_circs_relevance_time < now) {
      log_debug(LD_CIRC, "Expiring predicted port %d", pp->port);

      rephist_total_alloc -= sizeof(predicted_port_t);
      tor_free(pp);
      SMARTLIST_DEL_CURRENT(predicted_ports_list, pp);
    } else {
      smartlist_add(out, tor_memdup(&pp->port, sizeof(uint16_t)));
    }
  } SMARTLIST_FOREACH_END(pp);
  return out;
}
/**
 * Take a list of uint16_t *, and remove every port in the list from the
 * current list of predicted ports.
 */
void
rep_hist_remove_predicted_ports(const smartlist_t *rmv_ports)
{
  /* Let's do this in O(N), not O(N^2). */
  bitarray_t *remove_ports = bitarray_init_zero(UINT16_MAX);
  SMARTLIST_FOREACH(rmv_ports, const uint16_t *, p,
                    bitarray_set(remove_ports, *p));
  SMARTLIST_FOREACH_BEGIN(predicted_ports_list, predicted_port_t *, pp) {
    if (bitarray_is_set(remove_ports, pp->port)) {
      rephist_total_alloc -= sizeof(*pp);
      tor_free(pp);
      SMARTLIST_DEL_CURRENT(predicted_ports_list, pp);
    }
  } SMARTLIST_FOREACH_END(pp);
  bitarray_free(remove_ports);
}
/** The user asked us to do a resolve. Rather than keeping track of
 * timings and such of resolves, we fake it for now by treating
 * it the same way as a connection to port 80. This way we will continue
 * to have circuits lying around if the user only uses Tor for resolves.
 */
void
rep_hist_note_used_resolve(time_t now)
{
  rep_hist_note_used_port(now, 80);
}
/** The last time at which we needed an internal circ. */
static time_t predicted_internal_time = 0;
/** The last time we needed an internal circ with good uptime. */
static time_t predicted_internal_uptime_time = 0;
/** The last time we needed an internal circ with good capacity. */
static time_t predicted_internal_capacity_time = 0;

/** Remember that we used an internal circ at time <b>now</b>. */
void
rep_hist_note_used_internal(time_t now, int need_uptime, int need_capacity)
{
  // If the list is empty, re-randomize predicted ports lifetime
  if (!any_predicted_circuits(now)) {
    prediction_timeout = channelpadding_get_circuits_available_timeout();
  }

  last_prediction_add_time = now;

  log_info(LD_CIRC,
           "New port prediction added. Will continue predictive circ building "
           "for %d more seconds.",
           predicted_ports_prediction_time_remaining(now));

  predicted_internal_time = now;
  if (need_uptime)
    predicted_internal_uptime_time = now;
  if (need_capacity)
    predicted_internal_capacity_time = now;
}
/** Return 1 if we've used an internal circ recently; else return 0. */
int
rep_hist_get_predicted_internal(time_t now, int *need_uptime,
                                int *need_capacity)
{
  int predicted_circs_relevance_time;

  predicted_circs_relevance_time = prediction_timeout;

  if (!predicted_internal_time) { /* initialize it */
    predicted_internal_time = now;
    predicted_internal_uptime_time = now;
    predicted_internal_capacity_time = now;
  }
  if (predicted_internal_time + predicted_circs_relevance_time < now)
    return 0; /* too long ago */
  if (predicted_internal_uptime_time + predicted_circs_relevance_time >= now)
    *need_uptime = 1;
  // Always predict that we need capacity.
  *need_capacity = 1;
  return 1;
}
/** Any ports used lately? These are pre-seeded if we just started
 * up or if we're running a hidden service. */
int
any_predicted_circuits(time_t now)
{
  int predicted_circs_relevance_time;
  predicted_circs_relevance_time = prediction_timeout;

  return smartlist_len(predicted_ports_list) ||
         predicted_internal_time + predicted_circs_relevance_time >= now;
}

/** Return 1 if we have no need for circuits currently, else return 0. */
int
rep_hist_circbuilding_dormant(time_t now)
{
  const or_options_t *options = get_options();

  if (any_predicted_circuits(now))
    return 0;

  /* see if we'll still need to build testing circuits */
  if (server_mode(options) &&
      (!check_whether_orport_reachable(options) ||
       !circuit_enough_testing_circs()))
    return 0;
  if (!check_whether_dirport_reachable(options))
    return 0;

  return 1;
}
/*** Exit port statistics ***/

/* Some constants */
/** To what multiple should byte numbers be rounded up? */
#define EXIT_STATS_ROUND_UP_BYTES 1024
/** To what multiple should stream counts be rounded up? */
#define EXIT_STATS_ROUND_UP_STREAMS 4
/** Number of TCP ports */
#define EXIT_STATS_NUM_PORTS 65536
/** Top n ports that will be included in exit stats. */
#define EXIT_STATS_TOP_N_PORTS 10
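/* Rounding example (added for illustration): a port that saw 1 byte and 1
 * stream in a period is reported as 1024 bytes (i.e. 1 KiB once divided by
 * 1024 in the formatting code below) and 4 streams, so small per-port counts
 * are coarsened before publication. */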
/* The following data structures are plain arrays rather than smartlists or
 * maps, so that all write operations can be done in constant time. This
 * comes at the price of some memory (1.25 MB) and linear complexity when
 * writing stats for measuring relays. */
/** Number of bytes read in current period by exit port */
static uint64_t *exit_bytes_read = NULL;
/** Number of bytes written in current period by exit port */
static uint64_t *exit_bytes_written = NULL;
/** Number of streams opened in current period by exit port */
static uint32_t *exit_streams = NULL;

/** Start time of exit stats or 0 if we're not collecting exit stats. */
static time_t start_of_exit_stats_interval;

/** Initialize exit port stats. */
void
rep_hist_exit_stats_init(time_t now)
{
  start_of_exit_stats_interval = now;
  exit_bytes_read = tor_calloc(EXIT_STATS_NUM_PORTS, sizeof(uint64_t));
  exit_bytes_written = tor_calloc(EXIT_STATS_NUM_PORTS, sizeof(uint64_t));
  exit_streams = tor_calloc(EXIT_STATS_NUM_PORTS, sizeof(uint32_t));
}

/** Reset counters for exit port statistics. */
void
rep_hist_reset_exit_stats(time_t now)
{
  start_of_exit_stats_interval = now;
  memset(exit_bytes_read, 0, EXIT_STATS_NUM_PORTS * sizeof(uint64_t));
  memset(exit_bytes_written, 0, EXIT_STATS_NUM_PORTS * sizeof(uint64_t));
  memset(exit_streams, 0, EXIT_STATS_NUM_PORTS * sizeof(uint32_t));
}

/** Stop collecting exit port stats in a way that we can re-start doing
 * so in rep_hist_exit_stats_init(). */
void
rep_hist_exit_stats_term(void)
{
  start_of_exit_stats_interval = 0;
  tor_free(exit_bytes_read);
  tor_free(exit_bytes_written);
  tor_free(exit_streams);
}
/** Helper for qsort: compare two ints. Does not handle overflow properly,
 * but works fine for sorting an array of port numbers, which is what we use
 * it for. */
static int
compare_int_(const void *x, const void *y)
{
  return (*(int*)x - *(int*)y);
}
1868 /** Return a newly allocated string containing the exit port statistics
1869 * until <b>now</b>, or NULL if we're not collecting exit stats. Caller
1870 * must ensure start_of_exit_stats_interval is in the past. */
1872 rep_hist_format_exit_stats(time_t now
)
1874 int i
, j
, top_elements
= 0, cur_min_idx
= 0, cur_port
;
1875 uint64_t top_bytes
[EXIT_STATS_TOP_N_PORTS
];
1876 int top_ports
[EXIT_STATS_TOP_N_PORTS
];
1877 uint64_t cur_bytes
= 0, other_read
= 0, other_written
= 0,
1878 total_read
= 0, total_written
= 0;
1879 uint32_t total_streams
= 0, other_streams
= 0;
1880 smartlist_t
*written_strings
, *read_strings
, *streams_strings
;
1881 char *written_string
, *read_string
, *streams_string
;
1882 char t
[ISO_TIME_LEN
+1];
1885 if (!start_of_exit_stats_interval
)
1886 return NULL
; /* Not initialized. */
1888 tor_assert(now
>= start_of_exit_stats_interval
);
1890 /* Go through all ports to find the n ports that saw most written and
1893 * Invariant: at the end of the loop for iteration i,
1894 * total_read is the sum of all exit_bytes_read[0..i]
1895 * total_written is the sum of all exit_bytes_written[0..i]
1896 * total_stream is the sum of all exit_streams[0..i]
1898 * top_elements = MAX(EXIT_STATS_TOP_N_PORTS,
1899 * #{j | 0 <= j <= i && volume(i) > 0})
1901 * For all 0 <= j < top_elements,
1903 * 0 <= top_ports[j] <= 65535
1904 * top_bytes[j] = volume(top_ports[j])
1906 * There is no j in 0..i and k in 0..top_elements such that:
1907 * volume(j) > top_bytes[k] AND j is not in top_ports[0..top_elements]
1909 * There is no j!=cur_min_idx in 0..top_elements such that:
1910 * top_bytes[j] < top_bytes[cur_min_idx]
1912 * where volume(x) == exit_bytes_read[x]+exit_bytes_written[x]
1914 * Worst case: O(EXIT_STATS_NUM_PORTS * EXIT_STATS_TOP_N_PORTS)
  for (i = 1; i < EXIT_STATS_NUM_PORTS; i++) {
    total_read += exit_bytes_read[i];
    total_written += exit_bytes_written[i];
    total_streams += exit_streams[i];
    cur_bytes = exit_bytes_read[i] + exit_bytes_written[i];
    if (cur_bytes == 0) {
      continue;
    }
    if (top_elements < EXIT_STATS_TOP_N_PORTS) {
      top_bytes[top_elements] = cur_bytes;
      top_ports[top_elements++] = i;
    } else if (cur_bytes > top_bytes[cur_min_idx]) {
      top_bytes[cur_min_idx] = cur_bytes;
      top_ports[cur_min_idx] = i;
    } else {
      continue;
    }
    cur_min_idx = 0;
    for (j = 1; j < top_elements; j++) {
      if (top_bytes[j] < top_bytes[cur_min_idx]) {
        cur_min_idx = j;
      }
    }
  }
  /* Add observations of top ports to smartlists. */
  written_strings = smartlist_new();
  read_strings = smartlist_new();
  streams_strings = smartlist_new();
  other_read = total_read;
  other_written = total_written;
  other_streams = total_streams;
  /* Sort the ports; this puts them out of sync with top_bytes, but we
   * won't be using top_bytes again anyway */
  qsort(top_ports, top_elements, sizeof(int), compare_int_);
  for (j = 0; j < top_elements; j++) {
    cur_port = top_ports[j];
    if (exit_bytes_written[cur_port] > 0) {
      uint64_t num = round_uint64_to_next_multiple_of(
                     exit_bytes_written[cur_port],
                     EXIT_STATS_ROUND_UP_BYTES);
      num /= 1024;
      smartlist_add_asprintf(written_strings, "%d="U64_FORMAT,
                             cur_port, U64_PRINTF_ARG(num));
      other_written -= exit_bytes_written[cur_port];
    }
    if (exit_bytes_read[cur_port] > 0) {
      uint64_t num = round_uint64_to_next_multiple_of(
                     exit_bytes_read[cur_port],
                     EXIT_STATS_ROUND_UP_BYTES);
      num /= 1024;
      smartlist_add_asprintf(read_strings, "%d="U64_FORMAT,
                             cur_port, U64_PRINTF_ARG(num));
      other_read -= exit_bytes_read[cur_port];
    }
    if (exit_streams[cur_port] > 0) {
      uint32_t num = round_uint32_to_next_multiple_of(
                     exit_streams[cur_port],
                     EXIT_STATS_ROUND_UP_STREAMS);
      smartlist_add_asprintf(streams_strings, "%d=%u", cur_port, num);
      other_streams -= exit_streams[cur_port];
    }
  }
  /* Add observations of other ports in a single element. */
  other_written = round_uint64_to_next_multiple_of(other_written,
                  EXIT_STATS_ROUND_UP_BYTES);
  other_written /= 1024;
  smartlist_add_asprintf(written_strings, "other="U64_FORMAT,
                         U64_PRINTF_ARG(other_written));
  other_read = round_uint64_to_next_multiple_of(other_read,
               EXIT_STATS_ROUND_UP_BYTES);
  other_read /= 1024;
  smartlist_add_asprintf(read_strings, "other="U64_FORMAT,
                         U64_PRINTF_ARG(other_read));
  other_streams = round_uint32_to_next_multiple_of(other_streams,
                  EXIT_STATS_ROUND_UP_STREAMS);
  smartlist_add_asprintf(streams_strings, "other=%u", other_streams);

  /* Join all observations in single strings. */
  written_string = smartlist_join_strings(written_strings, ",", 0, NULL);
  read_string = smartlist_join_strings(read_strings, ",", 0, NULL);
  streams_string = smartlist_join_strings(streams_strings, ",", 0, NULL);
  SMARTLIST_FOREACH(written_strings, char *, cp, tor_free(cp));
  SMARTLIST_FOREACH(read_strings, char *, cp, tor_free(cp));
  SMARTLIST_FOREACH(streams_strings, char *, cp, tor_free(cp));
  smartlist_free(written_strings);
  smartlist_free(read_strings);
  smartlist_free(streams_strings);
  /* Put everything together. */
  format_iso_time(t, now);
  tor_asprintf(&result, "exit-stats-end %s (%d s)\n"
               "exit-kibibytes-written %s\n"
               "exit-kibibytes-read %s\n"
               "exit-streams-opened %s\n",
               t, (unsigned) (now - start_of_exit_stats_interval),
               written_string,
               read_string,
               streams_string);
  tor_free(written_string);
  tor_free(read_string);
  tor_free(streams_string);
  return result;
}
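
/* Example output (illustrative only; the port numbers and counts below are
 * made up, not taken from the original source): a relay that mostly served
 * ports 80 and 443 would emit extra-info lines shaped roughly like
 *
 *   exit-stats-end 2017-01-01 00:00:00 (86400 s)
 *   exit-kibibytes-written 80=120,443=3443,other=10
 *   exit-kibibytes-read 80=10,443=90,other=0
 *   exit-streams-opened 80=4,443=8,other=0
 *
 * with byte counts reported in kibibytes after rounding up. */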
/** If 24 hours have passed since the beginning of the current exit port
 * stats period, write exit stats to $DATADIR/stats/exit-stats (possibly
 * overwriting an existing file) and reset counters. Return when we would
 * next want to write exit stats or 0 if we never want to write. */
time_t
rep_hist_exit_stats_write(time_t now)
{
  char *str = NULL;

  if (!start_of_exit_stats_interval)
    return 0; /* Not initialized. */
  if (start_of_exit_stats_interval + WRITE_STATS_INTERVAL > now)
    goto done; /* Not ready to write. */

  log_info(LD_HIST, "Writing exit port statistics to disk.");

  /* Generate history string. */
  str = rep_hist_format_exit_stats(now);

  /* Reset counters. */
  rep_hist_reset_exit_stats(now);

  /* Try to write to disk. */
  if (!check_or_create_data_subdir("stats")) {
    write_to_data_subdir("stats", "exit-stats", str, "exit port statistics");
  }

 done:
  tor_free(str);
  return start_of_exit_stats_interval + WRITE_STATS_INTERVAL;
}
/** Note that we wrote <b>num_written</b> bytes and read <b>num_read</b>
 * bytes to/from an exit connection to <b>port</b>. */
void
rep_hist_note_exit_bytes(uint16_t port, size_t num_written,
                         size_t num_read)
{
  if (!start_of_exit_stats_interval)
    return; /* Not initialized. */
  exit_bytes_written[port] += num_written;
  exit_bytes_read[port] += num_read;
  log_debug(LD_HIST, "Written %lu bytes and read %lu bytes to/from an "
            "exit connection to port %d.",
            (unsigned long)num_written, (unsigned long)num_read, port);
}
/** Note that we opened an exit stream to <b>port</b>. */
void
rep_hist_note_exit_stream_opened(uint16_t port)
{
  if (!start_of_exit_stats_interval)
    return; /* Not initialized. */
  exit_streams[port]++;
  log_debug(LD_HIST, "Opened exit stream to port %d", port);
}
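
/* Usage sketch (illustrative, not part of the original source; the variable
 * names are hypothetical): a relay collecting exit-port statistics drives
 * the functions above roughly like this:
 *
 *   rep_hist_exit_stats_init(time(NULL));               // start an interval
 *   ...
 *   rep_hist_note_exit_bytes(443, n_written, n_read);   // per exit conn
 *   rep_hist_note_exit_stream_opened(443);              // per exit stream
 *   ...
 *   next = rep_hist_exit_stats_write(time(NULL));       // flush every 24h
 */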
/*** cell statistics ***/

/** Start of the current buffer stats interval or 0 if we're not
 * collecting buffer statistics. */
static time_t start_of_buffer_stats_interval;

/** Initialize buffer stats. */
void
rep_hist_buffer_stats_init(time_t now)
{
  start_of_buffer_stats_interval = now;
}
/** Statistics from a single circuit. Collected when the circuit closes, or
 * when we flush statistics to disk. */
typedef struct circ_buffer_stats_t {
  /** Average number of cells in the circuit's queue */
  double mean_num_cells_in_queue;
  /** Average time a cell waits in the queue. */
  double mean_time_cells_in_queue;
  /** Total number of cells sent over this circuit */
  uint32_t processed_cells;
} circ_buffer_stats_t;

/** List of circ_buffer_stats_t. */
static smartlist_t *circuits_for_buffer_stats = NULL;
/** Remember cell statistics <b>mean_num_cells_in_queue</b>,
 * <b>mean_time_cells_in_queue</b>, and <b>processed_cells</b> of a
 * circuit. */
void
rep_hist_add_buffer_stats(double mean_num_cells_in_queue,
                          double mean_time_cells_in_queue,
                          uint32_t processed_cells)
{
  circ_buffer_stats_t *stats;
  if (!start_of_buffer_stats_interval)
    return; /* Not initialized. */
  stats = tor_malloc_zero(sizeof(circ_buffer_stats_t));
  stats->mean_num_cells_in_queue = mean_num_cells_in_queue;
  stats->mean_time_cells_in_queue = mean_time_cells_in_queue;
  stats->processed_cells = processed_cells;
  if (!circuits_for_buffer_stats)
    circuits_for_buffer_stats = smartlist_new();
  smartlist_add(circuits_for_buffer_stats, stats);
}
/** Remember cell statistics for circuit <b>circ</b> at time
 * <b>end_of_interval</b> and reset cell counters in case the circuit
 * remains open in the next measurement interval. */
void
rep_hist_buffer_stats_add_circ(circuit_t *circ, time_t end_of_interval)
{
  time_t start_of_interval;
  int interval_length;
  or_circuit_t *orcirc;
  double mean_num_cells_in_queue, mean_time_cells_in_queue;
  uint32_t processed_cells;
  if (CIRCUIT_IS_ORIGIN(circ))
    return;
  orcirc = TO_OR_CIRCUIT(circ);
  if (!orcirc->processed_cells)
    return;
  start_of_interval = (circ->timestamp_created.tv_sec >
                       start_of_buffer_stats_interval) ?
                        (time_t)circ->timestamp_created.tv_sec :
                        start_of_buffer_stats_interval;
  interval_length = (int) (end_of_interval - start_of_interval);
  if (interval_length <= 0)
    return;
  processed_cells = orcirc->processed_cells;
  /* 1000.0 for s -> ms; 2.0 because of app-ward and exit-ward queues */
  mean_num_cells_in_queue = (double) orcirc->total_cell_waiting_time /
                            (double) interval_length / 1000.0 / 2.0;
  mean_time_cells_in_queue =
    (double) orcirc->total_cell_waiting_time /
    (double) orcirc->processed_cells;
  orcirc->total_cell_waiting_time = 0;
  orcirc->processed_cells = 0;
  rep_hist_add_buffer_stats(mean_num_cells_in_queue,
                            mean_time_cells_in_queue,
                            processed_cells);
}
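
/* Worked example (illustrative, not from the original source): if a circuit
 * accumulated total_cell_waiting_time == 120000 ms over an interval_length
 * of 600 s while processing 4000 cells, then
 *   mean_num_cells_in_queue  = 120000 / 600 / 1000.0 / 2.0 = 0.1 cells
 *   mean_time_cells_in_queue = 120000 / 4000               = 30 ms per cell
 * where the factor 2.0 splits the waiting time between the app-ward and
 * exit-ward queues, as noted above. */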
/** Sorting helper: return -1, 1, or 0 based on comparison of two
 * circ_buffer_stats_t */
static int
buffer_stats_compare_entries_(const void **_a, const void **_b)
{
  const circ_buffer_stats_t *a = *_a, *b = *_b;
  if (a->processed_cells < b->processed_cells)
    return 1;
  else if (a->processed_cells > b->processed_cells)
    return -1;
  else
    return 0;
}
/** Stop collecting cell stats in a way that we can re-start doing so in
 * rep_hist_buffer_stats_init(). */
void
rep_hist_buffer_stats_term(void)
{
  rep_hist_reset_buffer_stats(0);
}

/** Clear history of circuit statistics and set the measurement interval
 * start to <b>now</b>. */
void
rep_hist_reset_buffer_stats(time_t now)
{
  if (!circuits_for_buffer_stats)
    circuits_for_buffer_stats = smartlist_new();
  SMARTLIST_FOREACH(circuits_for_buffer_stats, circ_buffer_stats_t *,
                    stats, tor_free(stats));
  smartlist_clear(circuits_for_buffer_stats);
  start_of_buffer_stats_interval = now;
}
/** Return a newly allocated string containing the buffer statistics until
 * <b>now</b>, or NULL if we're not collecting buffer stats. Caller must
 * ensure start_of_buffer_stats_interval is in the past. */
char *
rep_hist_format_buffer_stats(time_t now)
{
#define SHARES 10
  uint64_t processed_cells[SHARES];
  uint32_t circs_in_share[SHARES];
  int number_of_circuits, i;
  double queued_cells[SHARES], time_in_queue[SHARES];
  smartlist_t *processed_cells_strings, *queued_cells_strings,
              *time_in_queue_strings;
  char *processed_cells_string, *queued_cells_string,
       *time_in_queue_string;
  char t[ISO_TIME_LEN+1];
  char *result;

  if (!start_of_buffer_stats_interval)
    return NULL; /* Not initialized. */

  tor_assert(now >= start_of_buffer_stats_interval);
  /* Calculate deciles if we saw at least one circuit. */
  memset(processed_cells, 0, SHARES * sizeof(uint64_t));
  memset(circs_in_share, 0, SHARES * sizeof(uint32_t));
  memset(queued_cells, 0, SHARES * sizeof(double));
  memset(time_in_queue, 0, SHARES * sizeof(double));
  if (!circuits_for_buffer_stats)
    circuits_for_buffer_stats = smartlist_new();
  number_of_circuits = smartlist_len(circuits_for_buffer_stats);
  if (number_of_circuits > 0) {
    smartlist_sort(circuits_for_buffer_stats,
                   buffer_stats_compare_entries_);
    i = 0;
    SMARTLIST_FOREACH_BEGIN(circuits_for_buffer_stats,
                            circ_buffer_stats_t *, stats)
    {
      int share = i++ * SHARES / number_of_circuits;
      processed_cells[share] += stats->processed_cells;
      queued_cells[share] += stats->mean_num_cells_in_queue;
      time_in_queue[share] += stats->mean_time_cells_in_queue;
      circs_in_share[share]++;
    }
    SMARTLIST_FOREACH_END(stats);
  }
  /* Write deciles to strings. */
  processed_cells_strings = smartlist_new();
  queued_cells_strings = smartlist_new();
  time_in_queue_strings = smartlist_new();
  for (i = 0; i < SHARES; i++) {
    smartlist_add_asprintf(processed_cells_strings,
                           U64_FORMAT, !circs_in_share[i] ? 0 :
                           U64_PRINTF_ARG(processed_cells[i] /
                                          circs_in_share[i]));
  }
  for (i = 0; i < SHARES; i++) {
    smartlist_add_asprintf(queued_cells_strings, "%.2f",
                           circs_in_share[i] == 0 ? 0.0 :
                           queued_cells[i] / (double) circs_in_share[i]);
  }
  for (i = 0; i < SHARES; i++) {
    smartlist_add_asprintf(time_in_queue_strings, "%.0f",
                           circs_in_share[i] == 0 ? 0.0 :
                           time_in_queue[i] / (double) circs_in_share[i]);
  }
  /* Join all observations in single strings. */
  processed_cells_string = smartlist_join_strings(processed_cells_strings,
                                                  ",", 0, NULL);
  queued_cells_string = smartlist_join_strings(queued_cells_strings,
                                               ",", 0, NULL);
  time_in_queue_string = smartlist_join_strings(time_in_queue_strings,
                                                ",", 0, NULL);
  SMARTLIST_FOREACH(processed_cells_strings, char *, cp, tor_free(cp));
  SMARTLIST_FOREACH(queued_cells_strings, char *, cp, tor_free(cp));
  SMARTLIST_FOREACH(time_in_queue_strings, char *, cp, tor_free(cp));
  smartlist_free(processed_cells_strings);
  smartlist_free(queued_cells_strings);
  smartlist_free(time_in_queue_strings);

  /* Put everything together. */
  format_iso_time(t, now);
  tor_asprintf(&result, "cell-stats-end %s (%d s)\n"
               "cell-processed-cells %s\n"
               "cell-queued-cells %s\n"
               "cell-time-in-queue %s\n"
               "cell-circuits-per-decile %d\n",
               t, (unsigned) (now - start_of_buffer_stats_interval),
               processed_cells_string,
               queued_cells_string,
               time_in_queue_string,
               CEIL_DIV(number_of_circuits, SHARES));
  tor_free(processed_cells_string);
  tor_free(queued_cells_string);
  tor_free(time_in_queue_string);
  return result;
}
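
/* Worked example (illustrative, not from the original source): with
 * SHARES == 10 and 25 sorted circuits, circuit index i is assigned to
 * share = i * 10 / 25 using integer division, so indices 0..2 land in
 * decile 0, indices 3..4 in decile 1, and so on; the
 * cell-circuits-per-decile line then reports CEIL_DIV(25, 10) == 3. */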
/** If 24 hours have passed since the beginning of the current buffer
 * stats period, write buffer stats to $DATADIR/stats/buffer-stats
 * (possibly overwriting an existing file) and reset counters. Return
 * when we would next want to write buffer stats or 0 if we never want to
 * write. */
time_t
rep_hist_buffer_stats_write(time_t now)
{
  char *str = NULL;

  if (!start_of_buffer_stats_interval)
    return 0; /* Not initialized. */
  if (start_of_buffer_stats_interval + WRITE_STATS_INTERVAL > now)
    goto done; /* Not ready to write */

  /* Add open circuits to the history. */
  SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
    rep_hist_buffer_stats_add_circ(circ, now);
  }
  SMARTLIST_FOREACH_END(circ);

  /* Generate history string. */
  str = rep_hist_format_buffer_stats(now);

  /* Reset both buffer history and counters of open circuits. */
  rep_hist_reset_buffer_stats(now);

  /* Try to write to disk. */
  if (!check_or_create_data_subdir("stats")) {
    write_to_data_subdir("stats", "buffer-stats", str, "buffer statistics");
  }

 done:
  tor_free(str);
  return start_of_buffer_stats_interval + WRITE_STATS_INTERVAL;
}
/*** Descriptor serving statistics ***/

/** Digestmap to track which descriptors were downloaded this stats
 *  collection interval. It maps descriptor digest to pointers to 1,
 *  effectively turning this into a list. */
static digestmap_t *served_descs = NULL;

/** Number of how many descriptors were downloaded in total during this
 * interval. */
static unsigned long total_descriptor_downloads;

/** Start time of served descs stats or 0 if we're not collecting those. */
static time_t start_of_served_descs_stats_interval;
/** Initialize descriptor stats. */
void
rep_hist_desc_stats_init(time_t now)
{
  if (served_descs) {
    log_warn(LD_BUG, "Called rep_hist_desc_stats_init() when desc stats were "
             "already initialized. This is probably harmless.");
    return; // Already initialized
  }
  served_descs = digestmap_new();
  total_descriptor_downloads = 0;
  start_of_served_descs_stats_interval = now;
}

/** Reset served descs stats to empty, starting a new interval <b>now</b>. */
static void
rep_hist_reset_desc_stats(time_t now)
{
  rep_hist_desc_stats_term();
  rep_hist_desc_stats_init(now);
}

/** Stop collecting served descs stats, so that rep_hist_desc_stats_init() is
 * safe to be called again. */
void
rep_hist_desc_stats_term(void)
{
  digestmap_free(served_descs, NULL);
  served_descs = NULL;
  start_of_served_descs_stats_interval = 0;
  total_descriptor_downloads = 0;
}
/** Helper for rep_hist_desc_stats_write(). Return a newly allocated string
 * containing the served desc statistics until now, or NULL if we're not
 * collecting served desc stats. Caller must ensure that now is not before
 * start_of_served_descs_stats_interval. */
static char *
rep_hist_format_desc_stats(time_t now)
{
  char t[ISO_TIME_LEN+1];
  char *result;

  digestmap_iter_t *iter;
  const char *key;
  void *val;
  unsigned size;
  int *vals, max = 0, q3 = 0, md = 0, q1 = 0, min = 0;
  int n = 0;

  if (!start_of_served_descs_stats_interval)
    return NULL;

  size = digestmap_size(served_descs);
  if (size > 0) {
    vals = tor_calloc(size, sizeof(int));
    for (iter = digestmap_iter_init(served_descs);
         !digestmap_iter_done(iter);
         iter = digestmap_iter_next(served_descs, iter)) {
      uintptr_t count;
      digestmap_iter_get(iter, &key, &val);
      count = (uintptr_t)val;
      vals[n++] = (int)count;
      (void)key;
    }
    max = find_nth_int(vals, size, size-1);
    q3 = find_nth_int(vals, size, (3*size-1)/4);
    md = find_nth_int(vals, size, (size-1)/2);
    q1 = find_nth_int(vals, size, (size-1)/4);
    min = find_nth_int(vals, size, 0);
    tor_free(vals);
  }

  format_iso_time(t, now);

  tor_asprintf(&result,
               "served-descs-stats-end %s (%d s) total=%lu unique=%u "
               "max=%d q3=%d md=%d q1=%d min=%d\n",
               t,
               (unsigned) (now - start_of_served_descs_stats_interval),
               total_descriptor_downloads,
               size, max, q3, md, q1, min);
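
  /* Example output (illustrative; the counts below are made up, not taken
   * from the original source): a directory cache that served 3000 descriptor
   * downloads covering 500 distinct descriptors might report something like
   *
   *   served-descs-stats-end 2017-01-01 00:00:00 (86400 s) total=3000
   *   unique=500 max=50 q3=8 md=4 q1=2 min=1
   *
   * (wrapped here for readability; the statistic is a single line). */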
/** If WRITE_STATS_INTERVAL seconds have passed since the beginning of
 * the current served desc stats interval, write the stats to
 * $DATADIR/stats/served-desc-stats (possibly appending to an existing file)
 * and reset the state for the next interval. Return when we would next want
 * to write served desc stats or 0 if we won't want to write. */
time_t
rep_hist_desc_stats_write(time_t now)
{
  char *filename = NULL, *str = NULL;

  if (!start_of_served_descs_stats_interval)
    return 0; /* We're not collecting stats. */
  if (start_of_served_descs_stats_interval + WRITE_STATS_INTERVAL > now)
    return start_of_served_descs_stats_interval + WRITE_STATS_INTERVAL;

  str = rep_hist_format_desc_stats(now);
  tor_assert(str != NULL);

  if (check_or_create_data_subdir("stats") < 0) {
    goto done;
  }
  filename = get_datadir_fname2("stats", "served-desc-stats");
  if (append_bytes_to_file(filename, str, strlen(str), 0) < 0)
    log_warn(LD_HIST, "Unable to write served descs statistics to disk!");

  rep_hist_reset_desc_stats(now);

 done:
  tor_free(filename);
  tor_free(str);
  return start_of_served_descs_stats_interval + WRITE_STATS_INTERVAL;
}
/** Called to note that we've served a given descriptor (by
 * digest). Increments the count of descriptors served, and the number
 * of times we've served this descriptor. */
void
rep_hist_note_desc_served(const char * desc)
{
  void *val;
  uintptr_t count;
  if (!served_descs)
    return; // We're not collecting stats
  val = digestmap_get(served_descs, desc);
  count = (uintptr_t)val;
  if (count != INT_MAX)
    ++count;
  digestmap_set(served_descs, desc, (void*)count);
  total_descriptor_downloads++;
}
/*** Connection statistics ***/

/** Start of the current connection stats interval or 0 if we're not
 * collecting connection statistics. */
static time_t start_of_conn_stats_interval;

/** Initialize connection stats. */
void
rep_hist_conn_stats_init(time_t now)
{
  start_of_conn_stats_interval = now;
}
/* Count connections that we read and wrote less than these many bytes
 * from/to as below threshold. */
#define BIDI_THRESHOLD 20480

/* Count connections that we read or wrote at least this factor as many
 * bytes from/to than we wrote or read to/from as mostly reading or
 * writing. */
#define BIDI_FACTOR 10

/* Interval length in seconds for considering read and written bytes for
 * connection stats. */
#define BIDI_INTERVAL 10
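
/* Classification example (illustrative, not part of the original source):
 * with the defaults above, a connection that moved 4 KiB in each direction
 * within a 10-second interval stays "below threshold" (8192 < 20480 bytes
 * total); one that read 600 KiB but wrote only 2 KiB counts as "mostly read"
 * (600 KiB >= 10 * 2 KiB); one that read 300 KiB and wrote 200 KiB counts
 * as "both read and written". */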
/** Start of next BIDI_INTERVAL second interval. */
static time_t bidi_next_interval = 0;

/** Number of connections that we read and wrote less than BIDI_THRESHOLD
 * bytes from/to in BIDI_INTERVAL seconds. */
static uint32_t below_threshold = 0;

/** Number of connections that we read at least BIDI_FACTOR times more
 * bytes from than we wrote to in BIDI_INTERVAL seconds. */
static uint32_t mostly_read = 0;

/** Number of connections that we wrote at least BIDI_FACTOR times more
 * bytes to than we read from in BIDI_INTERVAL seconds. */
static uint32_t mostly_written = 0;

/** Number of connections that we read and wrote at least BIDI_THRESHOLD
 * bytes from/to, but not BIDI_FACTOR times more in either direction in
 * BIDI_INTERVAL seconds. */
static uint32_t both_read_and_written = 0;
/** Entry in a map from connection ID to the number of read and written
 * bytes on this connection in a BIDI_INTERVAL second interval. */
typedef struct bidi_map_entry_t {
  HT_ENTRY(bidi_map_entry_t) node;
  uint64_t conn_id; /**< Connection ID */
  size_t read; /**< Number of read bytes */
  size_t written; /**< Number of written bytes */
} bidi_map_entry_t;

/** Map of OR connections together with the number of read and written
 * bytes in the current BIDI_INTERVAL second interval. */
static HT_HEAD(bidimap, bidi_map_entry_t) bidi_map =
     HT_INITIALIZER();

/** Hashtable helper: return true iff <b>a</b> and <b>b</b> have the same
 * connection ID. */
static int
bidi_map_ent_eq(const bidi_map_entry_t *a, const bidi_map_entry_t *b)
{
  return a->conn_id == b->conn_id;
}

/** Hashtable helper: hash a bidi_map_entry_t by its connection ID. */
static unsigned
bidi_map_ent_hash(const bidi_map_entry_t *entry)
{
  return (unsigned) entry->conn_id;
}

HT_PROTOTYPE(bidimap, bidi_map_entry_t, node, bidi_map_ent_hash,
             bidi_map_ent_eq)
HT_GENERATE2(bidimap, bidi_map_entry_t, node, bidi_map_ent_hash,
             bidi_map_ent_eq, 0.6, tor_reallocarray_, tor_free_)

/** Release all storage held in the connection byte-count map. */
static void
bidi_map_free_all(void)
{
  bidi_map_entry_t **ptr, **next, *ent;
  for (ptr = HT_START(bidimap, &bidi_map); ptr; ptr = next) {
    ent = *ptr;
    next = HT_NEXT_RMV(bidimap, &bidi_map, ptr);
    tor_free(ent);
  }
  HT_CLEAR(bidimap, &bidi_map);
}
/** Reset counters for conn statistics. */
void
rep_hist_reset_conn_stats(time_t now)
{
  start_of_conn_stats_interval = now;
  below_threshold = 0;
  mostly_read = 0;
  mostly_written = 0;
  both_read_and_written = 0;
  bidi_map_free_all();
}

/** Stop collecting connection stats in a way that we can re-start doing
 * so in rep_hist_conn_stats_init(). */
void
rep_hist_conn_stats_term(void)
{
  rep_hist_reset_conn_stats(0);
}
/** We read <b>num_read</b> bytes and wrote <b>num_written</b> from/to OR
 * connection <b>conn_id</b> in second <b>when</b>. If this is the first
 * observation in a new interval, sum up the last observations. Add bytes
 * for this connection. */
void
rep_hist_note_or_conn_bytes(uint64_t conn_id, size_t num_read,
                            size_t num_written, time_t when)
{
  if (!start_of_conn_stats_interval)
    return;
  /* Initialize */
  if (bidi_next_interval == 0)
    bidi_next_interval = when + BIDI_INTERVAL;
  /* Sum up last period's statistics */
  if (when >= bidi_next_interval) {
    bidi_map_entry_t **ptr, **next, *ent;
    for (ptr = HT_START(bidimap, &bidi_map); ptr; ptr = next) {
      ent = *ptr;
      if (ent->read + ent->written < BIDI_THRESHOLD)
        below_threshold++;
      else if (ent->read >= ent->written * BIDI_FACTOR)
        mostly_read++;
      else if (ent->written >= ent->read * BIDI_FACTOR)
        mostly_written++;
      else
        both_read_and_written++;
      next = HT_NEXT_RMV(bidimap, &bidi_map, ptr);
      tor_free(ent);
    }
    while (when >= bidi_next_interval)
      bidi_next_interval += BIDI_INTERVAL;
    log_info(LD_GENERAL, "%d below threshold, %d mostly read, "
             "%d mostly written, %d both read and written.",
             below_threshold, mostly_read, mostly_written,
             both_read_and_written);
  }
  /* Add this connection's bytes. */
  if (num_read > 0 || num_written > 0) {
    bidi_map_entry_t *entry, lookup;
    lookup.conn_id = conn_id;
    entry = HT_FIND(bidimap, &bidi_map, &lookup);
    if (entry) {
      entry->written += num_written;
      entry->read += num_read;
    } else {
      entry = tor_malloc_zero(sizeof(bidi_map_entry_t));
      entry->conn_id = conn_id;
      entry->written = num_written;
      entry->read = num_read;
      HT_INSERT(bidimap, &bidi_map, entry);
    }
  }
}
/** Return a newly allocated string containing the connection statistics
 * until <b>now</b>, or NULL if we're not collecting conn stats. Caller must
 * ensure start_of_conn_stats_interval is in the past. */
char *
rep_hist_format_conn_stats(time_t now)
{
  char *result, written[ISO_TIME_LEN+1];

  if (!start_of_conn_stats_interval)
    return NULL; /* Not initialized. */

  tor_assert(now >= start_of_conn_stats_interval);

  format_iso_time(written, now);
  tor_asprintf(&result, "conn-bi-direct %s (%d s) %d,%d,%d,%d\n",
               written,
               (unsigned) (now - start_of_conn_stats_interval),
               below_threshold,
               mostly_read,
               mostly_written,
               both_read_and_written);
  return result;
}
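
/* Example output (illustrative; the counts below are made up, not taken from
 * the original source): a relay that classified 1200 connections as below
 * threshold, 400 as mostly read, 425 as mostly written, and 9 as both over
 * the interval would report
 *
 *   conn-bi-direct 2017-01-01 00:00:00 (86400 s) 1200,400,425,9
 */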
/** If 24 hours have passed since the beginning of the current conn stats
 * period, write conn stats to $DATADIR/stats/conn-stats (possibly
 * overwriting an existing file) and reset counters. Return when we would
 * next want to write conn stats or 0 if we never want to write. */
time_t
rep_hist_conn_stats_write(time_t now)
{
  char *str = NULL;

  if (!start_of_conn_stats_interval)
    return 0; /* Not initialized. */
  if (start_of_conn_stats_interval + WRITE_STATS_INTERVAL > now)
    goto done; /* Not ready to write */

  /* Generate history string. */
  str = rep_hist_format_conn_stats(now);

  /* Reset counters. */
  rep_hist_reset_conn_stats(now);

  /* Try to write to disk. */
  if (!check_or_create_data_subdir("stats")) {
    write_to_data_subdir("stats", "conn-stats", str, "connection statistics");
  }

 done:
  tor_free(str);
  return start_of_conn_stats_interval + WRITE_STATS_INTERVAL;
}
/** Internal statistics to track how many requests of each type of
 * handshake we've received, and how many we've assigned to cpuworkers.
 * Useful for seeing trends in cpu load.
 * @{ */
STATIC int onion_handshakes_requested[MAX_ONION_HANDSHAKE_TYPE+1] = {0};
STATIC int onion_handshakes_assigned[MAX_ONION_HANDSHAKE_TYPE+1] = {0};
/**@}*/
/** A new onionskin (using the <b>type</b> handshake) has arrived. */
void
rep_hist_note_circuit_handshake_requested(uint16_t type)
{
  if (type <= MAX_ONION_HANDSHAKE_TYPE)
    onion_handshakes_requested[type]++;
}

/** We've sent an onionskin (using the <b>type</b> handshake) to a
 * cpuworker. */
void
rep_hist_note_circuit_handshake_assigned(uint16_t type)
{
  if (type <= MAX_ONION_HANDSHAKE_TYPE)
    onion_handshakes_assigned[type]++;
}
/** Log our onionskin statistics since the last time we were called. */
void
rep_hist_log_circuit_handshake_stats(time_t now)
{
  (void)now;
  log_notice(LD_HEARTBEAT, "Circuit handshake stats since last time: "
             "%d/%d TAP, %d/%d NTor.",
             onion_handshakes_assigned[ONION_HANDSHAKE_TYPE_TAP],
             onion_handshakes_requested[ONION_HANDSHAKE_TYPE_TAP],
             onion_handshakes_assigned[ONION_HANDSHAKE_TYPE_NTOR],
             onion_handshakes_requested[ONION_HANDSHAKE_TYPE_NTOR]);
  memset(onion_handshakes_assigned, 0, sizeof(onion_handshakes_assigned));
  memset(onion_handshakes_requested, 0, sizeof(onion_handshakes_requested));
}
/* Hidden service statistics section */

/** Start of the current hidden service stats interval or 0 if we're
 * not collecting hidden service statistics. */
static time_t start_of_hs_stats_interval;

/** Carries the various hidden service statistics, and any other
 * information needed. */
typedef struct hs_stats_t {
  /** How many relay cells have we seen as rendezvous points? */
  uint64_t rp_relay_cells_seen;

  /** Set of unique public key digests we've seen this stat period
   * (could also be implemented as sorted smartlist). */
  digestmap_t *onions_seen_this_period;
} hs_stats_t;

/** Our statistics structure singleton. */
static hs_stats_t *hs_stats = NULL;
/** Allocate, initialize and return an hs_stats_t structure. */
static hs_stats_t *
hs_stats_new(void)
{
  hs_stats_t *new_hs_stats = tor_malloc_zero(sizeof(hs_stats_t));
  new_hs_stats->onions_seen_this_period = digestmap_new();

  return new_hs_stats;
}

#define hs_stats_free(val) \
  FREE_AND_NULL(hs_stats_t, hs_stats_free_, (val))

/** Free an hs_stats_t structure. */
static void
hs_stats_free_(hs_stats_t *victim_hs_stats)
{
  if (!victim_hs_stats) {
    return;
  }

  digestmap_free(victim_hs_stats->onions_seen_this_period, NULL);
  tor_free(victim_hs_stats);
}
/** Initialize hidden service statistics. */
void
rep_hist_hs_stats_init(time_t now)
{
  if (!hs_stats) {
    hs_stats = hs_stats_new();
  }

  start_of_hs_stats_interval = now;
}

/** Clear history of hidden service statistics and set the measurement
 * interval start to <b>now</b>. */
static void
rep_hist_reset_hs_stats(time_t now)
{
  if (!hs_stats) {
    hs_stats = hs_stats_new();
  }

  hs_stats->rp_relay_cells_seen = 0;

  digestmap_free(hs_stats->onions_seen_this_period, NULL);
  hs_stats->onions_seen_this_period = digestmap_new();

  start_of_hs_stats_interval = now;
}

/** Stop collecting hidden service stats in a way that we can re-start
 * doing so in rep_hist_hs_stats_init(). */
void
rep_hist_hs_stats_term(void)
{
  rep_hist_reset_hs_stats(0);
}
/** We saw a new HS relay cell; count it! */
void
rep_hist_seen_new_rp_cell(void)
{
  if (!hs_stats) {
    return; // We're not collecting stats
  }

  hs_stats->rp_relay_cells_seen++;
}

/** As HSDirs, we saw another hidden service with public key
 * <b>pubkey</b>. Check whether we have counted it before, if not
 * count it now. */
void
rep_hist_stored_maybe_new_hs(const crypto_pk_t *pubkey)
{
  char pubkey_hash[DIGEST_LEN];

  if (!hs_stats) {
    return; // We're not collecting stats
  }

  /* Get the digest of the pubkey which will be used to detect whether
     we've seen this hidden service before or not. */
  if (crypto_pk_get_digest(pubkey, pubkey_hash) < 0) {
    /* This fail should not happen; key has been validated by
       descriptor parsing code first. */
    return;
  }

  /* Check if this is the first time we've seen this hidden
     service. If it is, count it as new. */
  if (!digestmap_get(hs_stats->onions_seen_this_period,
                     pubkey_hash)) {
    digestmap_set(hs_stats->onions_seen_this_period,
                  pubkey_hash, (void*)(uintptr_t)1);
  }
}
/* The number of cells that are supposed to be hidden from the adversary
 * by adding noise from the Laplace distribution. This value, divided by
 * EPSILON, is Laplace parameter b. It must be greater than 0. */
#define REND_CELLS_DELTA_F 2048
/* Security parameter for obfuscating number of cells with a value between
 * ]0.0, 1.0]. Smaller values obfuscate observations more, but at the same
 * time make statistics less usable. */
#define REND_CELLS_EPSILON 0.3
/* The number of cells that are supposed to be hidden from the adversary
 * by rounding up to the next multiple of this number. */
#define REND_CELLS_BIN_SIZE 1024
/* The number of service identities that are supposed to be hidden from the
 * adversary by adding noise from the Laplace distribution. This value,
 * divided by EPSILON, is Laplace parameter b. It must be greater than 0. */
#define ONIONS_SEEN_DELTA_F 8
/* Security parameter for obfuscating number of service identities with a
 * value between ]0.0, 1.0]. Smaller values obfuscate observations more, but
 * at the same time make statistics less usable. */
#define ONIONS_SEEN_EPSILON 0.3
/* The number of service identities that are supposed to be hidden from
 * the adversary by rounding up to the next multiple of this number. */
#define ONIONS_SEEN_BIN_SIZE 8
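
/* Worked example (illustrative, not from the original source): with
 * REND_CELLS_DELTA_F == 2048 and REND_CELLS_EPSILON == 0.3, the Laplace
 * parameter is b = 2048 / 0.3, roughly 6827. The reported cell count is
 * first rounded up to a multiple of REND_CELLS_BIN_SIZE (1024) and then
 * perturbed with Laplace noise whose standard deviation is b * sqrt(2),
 * roughly 9655 cells, so small counts are effectively hidden. */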
/** Allocate and return a string containing hidden service stats that
 * are meant to be placed in the extra-info descriptor. */
static char *
rep_hist_format_hs_stats(time_t now)
{
  char t[ISO_TIME_LEN+1];
  char *hs_stats_string;
  int64_t obfuscated_cells_seen;
  int64_t obfuscated_onions_seen;

  uint64_t rounded_cells_seen
    = round_uint64_to_next_multiple_of(hs_stats->rp_relay_cells_seen,
                                       REND_CELLS_BIN_SIZE);
  rounded_cells_seen = MIN(rounded_cells_seen, INT64_MAX);
  obfuscated_cells_seen = add_laplace_noise((int64_t)rounded_cells_seen,
                          crypto_rand_double(),
                          REND_CELLS_DELTA_F, REND_CELLS_EPSILON);

  uint64_t rounded_onions_seen =
    round_uint64_to_next_multiple_of((size_t)digestmap_size(
                                        hs_stats->onions_seen_this_period),
                                     ONIONS_SEEN_BIN_SIZE);
  rounded_onions_seen = MIN(rounded_onions_seen, INT64_MAX);
  obfuscated_onions_seen = add_laplace_noise((int64_t)rounded_onions_seen,
                           crypto_rand_double(), ONIONS_SEEN_DELTA_F,
                           ONIONS_SEEN_EPSILON);

  format_iso_time(t, now);
  tor_asprintf(&hs_stats_string, "hidserv-stats-end %s (%d s)\n"
               "hidserv-rend-relayed-cells "I64_FORMAT" delta_f=%d "
               "epsilon=%.2f bin_size=%d\n"
               "hidserv-dir-onions-seen "I64_FORMAT" delta_f=%d "
               "epsilon=%.2f bin_size=%d\n",
               t, (unsigned) (now - start_of_hs_stats_interval),
               I64_PRINTF_ARG(obfuscated_cells_seen), REND_CELLS_DELTA_F,
               REND_CELLS_EPSILON, REND_CELLS_BIN_SIZE,
               I64_PRINTF_ARG(obfuscated_onions_seen),
               ONIONS_SEEN_DELTA_F,
               ONIONS_SEEN_EPSILON, ONIONS_SEEN_BIN_SIZE);

  return hs_stats_string;
}
/** If 24 hours have passed since the beginning of the current HS
 * stats period, write hidden service stats to $DATADIR/stats/hidserv-stats
 * (possibly overwriting an existing file) and reset counters. Return
 * when we would next want to write HS stats or 0 if we never want to
 * write. */
time_t
rep_hist_hs_stats_write(time_t now)
{
  char *str = NULL;

  if (!start_of_hs_stats_interval) {
    return 0; /* Not initialized. */
  }

  if (start_of_hs_stats_interval + WRITE_STATS_INTERVAL > now) {
    goto done; /* Not ready to write */
  }

  /* Generate history string. */
  str = rep_hist_format_hs_stats(now);

  /* Reset HS history. */
  rep_hist_reset_hs_stats(now);

  /* Try to write to disk. */
  if (!check_or_create_data_subdir("stats")) {
    write_to_data_subdir("stats", "hidserv-stats", str,
                         "hidden service stats");
  }

 done:
  tor_free(str);
  return start_of_hs_stats_interval + WRITE_STATS_INTERVAL;
}
/** Number of connections we have negotiated using each link protocol
 * version, indexed by protocol version and by whether we initiated the
 * connection (1) or received it (0). */
static uint64_t link_proto_count[MAX_LINK_PROTO+1][2];

/** Note that we negotiated link protocol version <b>link_proto</b>, on
 * a connection that started here iff <b>started_here</b> is true. */
void
rep_hist_note_negotiated_link_proto(unsigned link_proto, int started_here)
{
  started_here = !!started_here; /* force to 0 or 1 */
  if (link_proto > MAX_LINK_PROTO) {
    log_warn(LD_BUG, "Can't log link protocol %u", link_proto);
    return;
  }

  link_proto_count[link_proto][started_here]++;
}
/**
 * Update the maximum count of total pending channel padding timers
 * in this period.
 */
void
rep_hist_padding_count_timers(uint64_t num_timers)
{
  if (num_timers > padding_current.maximum_chanpad_timers) {
    padding_current.maximum_chanpad_timers = num_timers;
  }
}
/**
 * Count a cell that we sent for padding overhead statistics.
 *
 * RELAY_COMMAND_DROP and CELL_PADDING are accounted separately. Both should
 * be counted for PADDING_TYPE_TOTAL.
 */
void
rep_hist_padding_count_write(padding_type_t type)
{
  switch (type) {
    case PADDING_TYPE_DROP:
      padding_current.write_drop_cell_count++;
      break;
    case PADDING_TYPE_CELL:
      padding_current.write_pad_cell_count++;
      break;
    case PADDING_TYPE_TOTAL:
      padding_current.write_cell_count++;
      break;
    case PADDING_TYPE_ENABLED_TOTAL:
      padding_current.enabled_write_cell_count++;
      break;
    case PADDING_TYPE_ENABLED_CELL:
      padding_current.enabled_write_pad_cell_count++;
      break;
  }
}
/**
 * Count a cell that we've received for padding overhead statistics.
 *
 * RELAY_COMMAND_DROP and CELL_PADDING are accounted separately. Both should
 * be counted for PADDING_TYPE_TOTAL.
 */
void
rep_hist_padding_count_read(padding_type_t type)
{
  switch (type) {
    case PADDING_TYPE_DROP:
      padding_current.read_drop_cell_count++;
      break;
    case PADDING_TYPE_CELL:
      padding_current.read_pad_cell_count++;
      break;
    case PADDING_TYPE_TOTAL:
      padding_current.read_cell_count++;
      break;
    case PADDING_TYPE_ENABLED_TOTAL:
      padding_current.enabled_read_cell_count++;
      break;
    case PADDING_TYPE_ENABLED_CELL:
      padding_current.enabled_read_pad_cell_count++;
      break;
  }
}
/**
 * Reset our current padding statistics. Called once every 24 hours.
 */
void
rep_hist_reset_padding_counts(void)
{
  memset(&padding_current, 0, sizeof(padding_current));
}
/**
 * Copy our current cell counts into a structure for listing in our
 * extra-info descriptor. Also perform appropriate rounding and redaction.
 *
 * This function is called once every 24 hours.
 */
#define MIN_CELL_COUNTS_TO_PUBLISH 1
#define ROUND_CELL_COUNTS_TO 10000
void
rep_hist_prep_published_padding_counts(time_t now)
{
  memcpy(&padding_published, &padding_current, sizeof(padding_published));

  if (padding_published.read_cell_count < MIN_CELL_COUNTS_TO_PUBLISH ||
      padding_published.write_cell_count < MIN_CELL_COUNTS_TO_PUBLISH) {
    memset(&padding_published, 0, sizeof(padding_published));
    return;
  }

  format_iso_time(padding_published.first_published_at, now);
#define ROUND_AND_SET_COUNT(x) (x) = round_uint64_to_next_multiple_of((x), \
                                      ROUND_CELL_COUNTS_TO)
  ROUND_AND_SET_COUNT(padding_published.read_pad_cell_count);
  ROUND_AND_SET_COUNT(padding_published.write_pad_cell_count);
  ROUND_AND_SET_COUNT(padding_published.read_drop_cell_count);
  ROUND_AND_SET_COUNT(padding_published.write_drop_cell_count);
  ROUND_AND_SET_COUNT(padding_published.write_cell_count);
  ROUND_AND_SET_COUNT(padding_published.read_cell_count);
  ROUND_AND_SET_COUNT(padding_published.enabled_read_cell_count);
  ROUND_AND_SET_COUNT(padding_published.enabled_read_pad_cell_count);
  ROUND_AND_SET_COUNT(padding_published.enabled_write_cell_count);
  ROUND_AND_SET_COUNT(padding_published.enabled_write_pad_cell_count);
#undef ROUND_AND_SET_COUNT
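
  /* Worked example (illustrative, not from the original source): with
   * ROUND_CELL_COUNTS_TO == 10000, a read_pad_cell_count of 23456 is
   * published as 30000, and a relay whose total read or write count stayed
   * below MIN_CELL_COUNTS_TO_PUBLISH publishes nothing at all, since the
   * whole structure is zeroed above. */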
/**
 * Returns an allocated string for extra-info documents for publishing
 * padding statistics from the last 24 hour interval.
 */
char *
rep_hist_get_padding_count_lines(void)
{
  char *result = NULL;

  if (!padding_published.read_cell_count ||
      !padding_published.write_cell_count) {
    return NULL;
  }

  tor_asprintf(&result, "padding-counts %s (%d s)"
               " bin-size="U64_FORMAT
               " write-drop="U64_FORMAT
               " write-pad="U64_FORMAT
               " write-total="U64_FORMAT
               " read-drop="U64_FORMAT
               " read-pad="U64_FORMAT
               " read-total="U64_FORMAT
               " enabled-read-pad="U64_FORMAT
               " enabled-read-total="U64_FORMAT
               " enabled-write-pad="U64_FORMAT
               " enabled-write-total="U64_FORMAT
               " max-chanpad-timers="U64_FORMAT
               "\n",
               padding_published.first_published_at,
               REPHIST_CELL_PADDING_COUNTS_INTERVAL,
               U64_PRINTF_ARG(ROUND_CELL_COUNTS_TO),
               U64_PRINTF_ARG(padding_published.write_drop_cell_count),
               U64_PRINTF_ARG(padding_published.write_pad_cell_count),
               U64_PRINTF_ARG(padding_published.write_cell_count),
               U64_PRINTF_ARG(padding_published.read_drop_cell_count),
               U64_PRINTF_ARG(padding_published.read_pad_cell_count),
               U64_PRINTF_ARG(padding_published.read_cell_count),
               U64_PRINTF_ARG(padding_published.enabled_read_pad_cell_count),
               U64_PRINTF_ARG(padding_published.enabled_read_cell_count),
               U64_PRINTF_ARG(padding_published.enabled_write_pad_cell_count),
               U64_PRINTF_ARG(padding_published.enabled_write_cell_count),
               U64_PRINTF_ARG(padding_published.maximum_chanpad_timers)
               );

  return result;
}
/** Log a heartbeat message explaining how many connections of each link
 * protocol version we have used.
 */
void
rep_hist_log_link_protocol_counts(void)
{
  log_notice(LD_HEARTBEAT,
             "Since startup, we have initiated "
             U64_FORMAT" v1 connections, "
             U64_FORMAT" v2 connections, "
             U64_FORMAT" v3 connections, and "
             U64_FORMAT" v4 connections; and received "
             U64_FORMAT" v1 connections, "
             U64_FORMAT" v2 connections, "
             U64_FORMAT" v3 connections, and "
             U64_FORMAT" v4 connections.",
             U64_PRINTF_ARG(link_proto_count[1][1]),
             U64_PRINTF_ARG(link_proto_count[2][1]),
             U64_PRINTF_ARG(link_proto_count[3][1]),
             U64_PRINTF_ARG(link_proto_count[4][1]),
             U64_PRINTF_ARG(link_proto_count[1][0]),
             U64_PRINTF_ARG(link_proto_count[2][0]),
             U64_PRINTF_ARG(link_proto_count[3][0]),
             U64_PRINTF_ARG(link_proto_count[4][0]));
}
/** Free all storage held by the OR/link history caches, by the
 * bandwidth history arrays, by the port history, or by statistics. */
void
rep_hist_free_all(void)
{
  hs_stats_free(hs_stats);
  digestmap_free(history_map, free_or_history);

  bw_array_free(read_array);
  read_array = NULL;

  bw_array_free(write_array);
  write_array = NULL;

  bw_array_free(dir_read_array);
  dir_read_array = NULL;

  bw_array_free(dir_write_array);
  dir_write_array = NULL;

  tor_free(exit_bytes_read);
  tor_free(exit_bytes_written);
  tor_free(exit_streams);
  predicted_ports_free_all();
  bidi_map_free_all();

  if (circuits_for_buffer_stats) {
    SMARTLIST_FOREACH(circuits_for_buffer_stats, circ_buffer_stats_t *, s,
                      tor_free(s));
    smartlist_free(circuits_for_buffer_stats);
    circuits_for_buffer_stats = NULL;
  }
  rep_hist_desc_stats_term();
  total_descriptor_downloads = 0;

  tor_assert_nonfatal(rephist_total_alloc == 0);
  tor_assert_nonfatal_once(rephist_total_num == 0);
}