1 /* Copyright (c) 2001-2004, Roger Dingledine.
2 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
3 * Copyright (c) 2007-2018, The Tor Project, Inc. */
4 /* See LICENSE for licensing information */
6 #define DIRSERV_PRIVATE
7 #include "core/or/or.h"
9 #include "app/config/config.h"
10 #include "core/mainloop/connection.h"
11 #include "feature/dircache/conscache.h"
12 #include "feature/dircache/consdiffmgr.h"
13 #include "feature/dircommon/directory.h"
14 #include "feature/dircache/dirserv.h"
15 #include "feature/nodelist/microdesc.h"
16 #include "feature/nodelist/routerlist.h"
17 #include "feature/relay/router.h"
18 #include "feature/relay/routermode.h"
19 #include "feature/stats/predict_ports.h"
21 #include "feature/dircache/cached_dir_st.h"
22 #include "feature/dircommon/dir_connection_st.h"
23 #include "feature/nodelist/extrainfo_st.h"
24 #include "feature/nodelist/microdesc_st.h"
25 #include "feature/nodelist/routerinfo_st.h"
26 #include "feature/nodelist/routerlist_st.h"
28 #include "lib/compress/compress.h"
/**
 * \file dirserv.c
 * \brief Directory server core implementation. Manages directory
 * contents and generates directory documents.
 *
 * This module implements most of directory cache functionality, and some of
 * the directory authority functionality.  The directory.c module delegates
 * here in order to handle incoming requests from clients, via
 * connection_dirserv_flushed_some() and its kin.  In order to save RAM, this
 * module is responsible for spooling directory objects (in whole or in part)
 * onto buf_t instances, and then closing the dir_connection_t once the
 * objects are totally flushed.
 *
 * The directory.c module also delegates here for handling descriptor uploads
 * via dirserv_add_multiple_descriptors().
 *
 * Additionally, this module handles some aspects of voting, including:
 * deciding how to vote on individual flags (based on decisions reached in
 * rephist.c), of formatting routerstatus lines, and deciding what relays to
 * include in an authority's vote.  (TODO: Those functions could profitably be
 * split off.  They only live in this file because historically they were
 * shared among the v1, v2, and v3 directory code.)
 */
54 static void clear_cached_dir(cached_dir_t
*d
);
55 static const signed_descriptor_t
*get_signed_descriptor_by_fp(
59 static int spooled_resource_lookup_body(const spooled_resource_t
*spooled
,
60 int conn_is_encrypted
,
61 const uint8_t **body_out
,
63 time_t *published_out
);
64 static cached_dir_t
*spooled_resource_lookup_cached_dir(
65 const spooled_resource_t
*spooled
,
66 time_t *published_out
);
67 static cached_dir_t
*lookup_cached_dir_by_fp(const uint8_t *fp
);
69 /********************************************************************/
71 /* A set of functions to answer questions about how we'd like to behave
72 * as a directory mirror/client. */
74 /** Return 1 if we fetch our directory material directly from the
75 * authorities, rather than from a mirror. */
77 directory_fetches_from_authorities(const or_options_t
*options
)
79 const routerinfo_t
*me
;
82 if (options
->FetchDirInfoEarly
)
84 if (options
->BridgeRelay
== 1)
86 if (server_mode(options
) &&
87 router_pick_published_address(options
, &addr
, 1) < 0)
88 return 1; /* we don't know our IP address; ask an authority. */
89 refuseunknown
= ! router_my_exit_policy_is_reject_star() &&
90 should_refuse_unknown_exits(options
);
91 if (!dir_server_mode(options
) && !refuseunknown
)
93 if (!server_mode(options
) || !advertised_server_mode())
95 me
= router_get_my_routerinfo();
96 if (!me
|| (!me
->supports_tunnelled_dir_requests
&& !refuseunknown
))
97 return 0; /* if we don't service directory requests, return 0 too */
101 /** Return 1 if we should fetch new networkstatuses, descriptors, etc
102 * on the "mirror" schedule rather than the "client" schedule.
105 directory_fetches_dir_info_early(const or_options_t
*options
)
107 return directory_fetches_from_authorities(options
);
110 /** Return 1 if we should fetch new networkstatuses, descriptors, etc
111 * on a very passive schedule -- waiting long enough for ordinary clients
112 * to probably have the info we want. These would include bridge users,
113 * and maybe others in the future e.g. if a Tor client uses another Tor
114 * client as a directory guard.
117 directory_fetches_dir_info_later(const or_options_t
*options
)
119 return options
->UseBridges
!= 0;
122 /** Return true iff we want to serve certificates for authorities
123 * that we don't acknowledge as authorities ourself.
124 * Use we_want_to_fetch_unknown_auth_certs to check if we want to fetch
125 * and keep these certificates.
128 directory_caches_unknown_auth_certs(const or_options_t
*options
)
130 return dir_server_mode(options
) || options
->BridgeRelay
;
133 /** Return 1 if we want to fetch and serve descriptors, networkstatuses, etc
135 * Check options->DirPort_set and directory_permits_begindir_requests()
136 * to see if we are willing to serve these directory documents to others via
137 * the DirPort and begindir-over-ORPort, respectively.
139 * To check if we should fetch documents, use we_want_to_fetch_flavor and
140 * we_want_to_fetch_unknown_auth_certs instead of this function.
143 directory_caches_dir_info(const or_options_t
*options
)
145 if (options
->BridgeRelay
|| dir_server_mode(options
))
147 if (!server_mode(options
) || !advertised_server_mode())
149 /* We need an up-to-date view of network info if we're going to try to
150 * block exit attempts from unknown relays. */
151 return ! router_my_exit_policy_is_reject_star() &&
152 should_refuse_unknown_exits(options
);
155 /** Return 1 if we want to allow remote clients to ask us directory
156 * requests via the "begin_dir" interface, which doesn't require
157 * having any separate port open. */
159 directory_permits_begindir_requests(const or_options_t
*options
)
161 return options
->BridgeRelay
!= 0 || dir_server_mode(options
);
164 /** Return 1 if we have no need to fetch new descriptors. This generally
165 * happens when we're not a dir cache and we haven't built any circuits
169 directory_too_idle_to_fetch_descriptors(const or_options_t
*options
,
172 return !directory_caches_dir_info(options
) &&
173 !options
->FetchUselessDescriptors
&&
174 rep_hist_circbuilding_dormant(now
);
177 /********************************************************************/
179 /** Map from flavor name to the cached_dir_t for the v3 consensuses that we're
180 * currently serving. */
181 static strmap_t
*cached_consensuses
= NULL
;
183 /** Decrement the reference count on <b>d</b>, and free it if it no longer has
186 cached_dir_decref(cached_dir_t
*d
)
188 if (!d
|| --d
->refcnt
> 0)
194 /** Allocate and return a new cached_dir_t containing the string <b>s</b>,
195 * published at <b>published</b>. */
197 new_cached_dir(char *s
, time_t published
)
199 cached_dir_t
*d
= tor_malloc_zero(sizeof(cached_dir_t
));
202 d
->dir_len
= strlen(s
);
203 d
->published
= published
;
204 if (tor_compress(&(d
->dir_compressed
), &(d
->dir_compressed_len
),
205 d
->dir
, d
->dir_len
, ZLIB_METHOD
)) {
206 log_warn(LD_BUG
, "Error compressing directory");
211 /** Remove all storage held in <b>d</b>, but do not free <b>d</b> itself. */
213 clear_cached_dir(cached_dir_t
*d
)
216 tor_free(d
->dir_compressed
);
217 memset(d
, 0, sizeof(cached_dir_t
));
220 /** Free all storage held by the cached_dir_t in <b>d</b>. */
222 free_cached_dir_(void *_d
)
228 d
= (cached_dir_t
*)_d
;
229 cached_dir_decref(d
);
232 /** Replace the v3 consensus networkstatus of type <b>flavor_name</b> that
233 * we're serving with <b>networkstatus</b>, published at <b>published</b>. No
234 * validation is performed. */
236 dirserv_set_cached_consensus_networkstatus(const char *networkstatus
,
237 const char *flavor_name
,
238 const common_digests_t
*digests
,
239 const uint8_t *sha3_as_signed
,
242 cached_dir_t
*new_networkstatus
;
243 cached_dir_t
*old_networkstatus
;
244 if (!cached_consensuses
)
245 cached_consensuses
= strmap_new();
247 new_networkstatus
= new_cached_dir(tor_strdup(networkstatus
), published
);
248 memcpy(&new_networkstatus
->digests
, digests
, sizeof(common_digests_t
));
249 memcpy(&new_networkstatus
->digest_sha3_as_signed
, sha3_as_signed
,
251 old_networkstatus
= strmap_set(cached_consensuses
, flavor_name
,
253 if (old_networkstatus
)
254 cached_dir_decref(old_networkstatus
);
257 /** Return the latest downloaded consensus networkstatus in encoded, signed,
258 * optionally compressed format, suitable for sending to clients. */
260 dirserv_get_consensus(const char *flavor_name
)
262 if (!cached_consensuses
)
264 return strmap_get(cached_consensuses
, flavor_name
);
267 /** As dirserv_get_routerdescs(), but instead of getting signed_descriptor_t
268 * pointers, adds copies of digests to fps_out, and doesn't use the
269 * /tor/server/ prefix. For a /d/ request, adds descriptor digests; for other
270 * requests, adds identity digests.
273 dirserv_get_routerdesc_spool(smartlist_t
*spool_out
,
275 dir_spool_source_t source
,
276 int conn_is_encrypted
,
277 const char **msg_out
)
281 if (!strcmp(key
, "all")) {
282 const routerlist_t
*rl
= router_get_routerlist();
283 SMARTLIST_FOREACH_BEGIN(rl
->routers
, const routerinfo_t
*, r
) {
284 spooled_resource_t
*spooled
;
285 spooled
= spooled_resource_new(source
,
286 (const uint8_t *)r
->cache_info
.identity_digest
,
288 /* Treat "all" requests as if they were unencrypted */
289 conn_is_encrypted
= 0;
290 smartlist_add(spool_out
, spooled
);
291 } SMARTLIST_FOREACH_END(r
);
292 } else if (!strcmp(key
, "authority")) {
293 const routerinfo_t
*ri
= router_get_my_routerinfo();
295 smartlist_add(spool_out
,
296 spooled_resource_new(source
,
297 (const uint8_t *)ri
->cache_info
.identity_digest
,
299 } else if (!strcmpstart(key
, "d/")) {
301 dir_split_resource_into_spoolable(key
, source
, spool_out
, NULL
,
302 DSR_HEX
|DSR_SORT_UNIQ
);
303 } else if (!strcmpstart(key
, "fp/")) {
304 key
+= strlen("fp/");
305 dir_split_resource_into_spoolable(key
, source
, spool_out
, NULL
,
306 DSR_HEX
|DSR_SORT_UNIQ
);
308 *msg_out
= "Not found";
312 if (! conn_is_encrypted
) {
313 /* Remove anything that insists it not be sent unencrypted. */
314 SMARTLIST_FOREACH_BEGIN(spool_out
, spooled_resource_t
*, spooled
) {
315 const uint8_t *body
= NULL
;
317 int r
= spooled_resource_lookup_body(spooled
, conn_is_encrypted
,
318 &body
, &bodylen
, NULL
);
319 if (r
< 0 || body
== NULL
|| bodylen
== 0) {
320 SMARTLIST_DEL_CURRENT(spool_out
, spooled
);
321 spooled_resource_free(spooled
);
323 } SMARTLIST_FOREACH_END(spooled
);
326 if (!smartlist_len(spool_out
)) {
327 *msg_out
= "Servers unavailable";
333 /** Add a signed_descriptor_t to <b>descs_out</b> for each router matching
334 * <b>key</b>. The key should be either
335 * - "/tor/server/authority" for our own routerinfo;
336 * - "/tor/server/all" for all the routerinfos we have, concatenated;
337 * - "/tor/server/fp/FP" where FP is a plus-separated sequence of
338 * hex identity digests; or
339 * - "/tor/server/d/D" where D is a plus-separated sequence
340 * of server descriptor digests, in hex.
342 * Return 0 if we found some matching descriptors, or -1 if we do not
343 * have any descriptors, no matching descriptors, or if we did not
344 * recognize the key (URL).
345 * If -1 is returned *<b>msg</b> will be set to an appropriate error
348 * XXXX rename this function. It's only called from the controller.
349 * XXXX in fact, refactor this function, merging as much as possible.
352 dirserv_get_routerdescs(smartlist_t
*descs_out
, const char *key
,
357 if (!strcmp(key
, "/tor/server/all")) {
358 routerlist_t
*rl
= router_get_routerlist();
359 SMARTLIST_FOREACH(rl
->routers
, routerinfo_t
*, r
,
360 smartlist_add(descs_out
, &(r
->cache_info
)));
361 } else if (!strcmp(key
, "/tor/server/authority")) {
362 const routerinfo_t
*ri
= router_get_my_routerinfo();
364 smartlist_add(descs_out
, (void*) &(ri
->cache_info
));
365 } else if (!strcmpstart(key
, "/tor/server/d/")) {
366 smartlist_t
*digests
= smartlist_new();
367 key
+= strlen("/tor/server/d/");
368 dir_split_resource_into_fingerprints(key
, digests
, NULL
,
369 DSR_HEX
|DSR_SORT_UNIQ
);
370 SMARTLIST_FOREACH(digests
, const char *, d
,
372 signed_descriptor_t
*sd
= router_get_by_descriptor_digest(d
);
374 smartlist_add(descs_out
,sd
);
376 SMARTLIST_FOREACH(digests
, char *, d
, tor_free(d
));
377 smartlist_free(digests
);
378 } else if (!strcmpstart(key
, "/tor/server/fp/")) {
379 smartlist_t
*digests
= smartlist_new();
380 time_t cutoff
= time(NULL
) - ROUTER_MAX_AGE_TO_PUBLISH
;
381 key
+= strlen("/tor/server/fp/");
382 dir_split_resource_into_fingerprints(key
, digests
, NULL
,
383 DSR_HEX
|DSR_SORT_UNIQ
);
384 SMARTLIST_FOREACH_BEGIN(digests
, const char *, d
) {
385 if (router_digest_is_me(d
)) {
386 /* calling router_get_my_routerinfo() to make sure it exists */
387 const routerinfo_t
*ri
= router_get_my_routerinfo();
389 smartlist_add(descs_out
, (void*) &(ri
->cache_info
));
391 const routerinfo_t
*ri
= router_get_by_id_digest(d
);
392 /* Don't actually serve a descriptor that everyone will think is
393 * expired. This is an (ugly) workaround to keep buggy 0.1.1.10
394 * Tors from downloading descriptors that they will throw away.
396 if (ri
&& ri
->cache_info
.published_on
> cutoff
)
397 smartlist_add(descs_out
, (void*) &(ri
->cache_info
));
399 } SMARTLIST_FOREACH_END(d
);
400 SMARTLIST_FOREACH(digests
, char *, d
, tor_free(d
));
401 smartlist_free(digests
);
403 *msg
= "Key not recognized";
407 if (!smartlist_len(descs_out
)) {
408 *msg
= "Servers unavailable";
419 spooled_resource_new(dir_spool_source_t source
,
420 const uint8_t *digest
, size_t digestlen
)
422 spooled_resource_t
*spooled
= tor_malloc_zero(sizeof(spooled_resource_t
));
423 spooled
->spool_source
= source
;
425 case DIR_SPOOL_NETWORKSTATUS
:
426 spooled
->spool_eagerly
= 0;
428 case DIR_SPOOL_SERVER_BY_DIGEST
:
429 case DIR_SPOOL_SERVER_BY_FP
:
430 case DIR_SPOOL_EXTRA_BY_DIGEST
:
431 case DIR_SPOOL_EXTRA_BY_FP
:
432 case DIR_SPOOL_MICRODESC
:
434 spooled
->spool_eagerly
= 1;
436 case DIR_SPOOL_CONSENSUS_CACHE_ENTRY
:
437 tor_assert_unreached();
440 tor_assert(digestlen
<= sizeof(spooled
->digest
));
442 memcpy(spooled
->digest
, digest
, digestlen
);
447 * Create a new spooled_resource_t to spool the contents of <b>entry</b> to
448 * the user. Return the spooled object on success, or NULL on failure (which
449 * is probably caused by a failure to map the body of the item from disk).
451 * Adds a reference to entry's reference counter.
454 spooled_resource_new_from_cache_entry(consensus_cache_entry_t
*entry
)
456 spooled_resource_t
*spooled
= tor_malloc_zero(sizeof(spooled_resource_t
));
457 spooled
->spool_source
= DIR_SPOOL_CONSENSUS_CACHE_ENTRY
;
458 spooled
->spool_eagerly
= 0;
459 consensus_cache_entry_incref(entry
);
460 spooled
->consensus_cache_entry
= entry
;
462 int r
= consensus_cache_entry_get_body(entry
,
468 spooled_resource_free(spooled
);
473 /** Release all storage held by <b>spooled</b>. */
475 spooled_resource_free_(spooled_resource_t
*spooled
)
480 if (spooled
->cached_dir_ref
) {
481 cached_dir_decref(spooled
->cached_dir_ref
);
484 if (spooled
->consensus_cache_entry
) {
485 consensus_cache_entry_decref(spooled
->consensus_cache_entry
);
491 /** When spooling data from a cached_dir_t object, we always add
492 * at least this much. */
493 #define DIRSERV_CACHED_DIR_CHUNK_SIZE 8192
495 /** Return an compression ratio for compressing objects from <b>source</b>.
498 estimate_compression_ratio(dir_spool_source_t source
)
500 /* We should put in better estimates here, depending on the number of
501 objects and their type */
506 /** Return an estimated number of bytes needed for transmitting the
507 * resource in <b>spooled</b> on <b>conn</b>
509 * As a convenient side-effect, set *<b>published_out</b> to the resource's
513 spooled_resource_estimate_size(const spooled_resource_t
*spooled
,
514 dir_connection_t
*conn
,
516 time_t *published_out
)
518 if (spooled
->spool_eagerly
) {
519 const uint8_t *body
= NULL
;
521 int r
= spooled_resource_lookup_body(spooled
,
522 connection_dir_is_encrypted(conn
),
525 if (r
== -1 || body
== NULL
|| bodylen
== 0)
528 double ratio
= estimate_compression_ratio(spooled
->spool_source
);
529 bodylen
= (size_t)(bodylen
* ratio
);
533 cached_dir_t
*cached
;
534 if (spooled
->consensus_cache_entry
) {
536 consensus_cache_entry_get_valid_after(
537 spooled
->consensus_cache_entry
, published_out
);
540 return spooled
->cce_len
;
542 if (spooled
->cached_dir_ref
) {
543 cached
= spooled
->cached_dir_ref
;
545 cached
= spooled_resource_lookup_cached_dir(spooled
,
548 if (cached
== NULL
) {
551 size_t result
= compressed
? cached
->dir_compressed_len
: cached
->dir_len
;
/** Return code for spooled_resource_flush_some */
typedef enum {
  SRFS_ERR = -1,
  SRFS_MORE = 0,
  SRFS_DONE
} spooled_resource_flush_status_t;
563 /** Flush some or all of the bytes from <b>spooled</b> onto <b>conn</b>.
564 * Return SRFS_ERR on error, SRFS_MORE if there are more bytes to flush from
565 * this spooled resource, or SRFS_DONE if we are done flushing this spooled
568 static spooled_resource_flush_status_t
569 spooled_resource_flush_some(spooled_resource_t
*spooled
,
570 dir_connection_t
*conn
)
572 if (spooled
->spool_eagerly
) {
573 /* Spool_eagerly resources are sent all-at-once. */
574 const uint8_t *body
= NULL
;
576 int r
= spooled_resource_lookup_body(spooled
,
577 connection_dir_is_encrypted(conn
),
578 &body
, &bodylen
, NULL
);
579 if (r
== -1 || body
== NULL
|| bodylen
== 0) {
580 /* Absent objects count as "done". */
583 if (conn
->compress_state
) {
584 connection_buf_add_compress((const char*)body
, bodylen
, conn
, 0);
586 connection_buf_add((const char*)body
, bodylen
, TO_CONN(conn
));
590 cached_dir_t
*cached
= spooled
->cached_dir_ref
;
591 consensus_cache_entry_t
*cce
= spooled
->consensus_cache_entry
;
592 if (cached
== NULL
&& cce
== NULL
) {
593 /* The cached_dir_t hasn't been materialized yet. So let's look it up. */
594 cached
= spooled
->cached_dir_ref
=
595 spooled_resource_lookup_cached_dir(spooled
, NULL
);
597 /* Absent objects count as done. */
601 tor_assert_nonfatal(spooled
->cached_dir_offset
== 0);
604 if (BUG(!cached
&& !cce
))
610 total_len
= cached
->dir_compressed_len
;
611 ptr
= cached
->dir_compressed
;
613 total_len
= spooled
->cce_len
;
614 ptr
= (const char *)spooled
->cce_body
;
616 /* How many bytes left to flush? */
618 remaining
= total_len
- spooled
->cached_dir_offset
;
619 if (BUG(remaining
< 0))
621 ssize_t bytes
= (ssize_t
) MIN(DIRSERV_CACHED_DIR_CHUNK_SIZE
, remaining
);
622 if (conn
->compress_state
) {
623 connection_buf_add_compress(
624 ptr
+ spooled
->cached_dir_offset
,
627 connection_buf_add(ptr
+ spooled
->cached_dir_offset
,
628 bytes
, TO_CONN(conn
));
630 spooled
->cached_dir_offset
+= bytes
;
631 if (spooled
->cached_dir_offset
>= (off_t
)total_len
) {
639 /** Helper: find the cached_dir_t for a spooled_resource_t, for
640 * sending it to <b>conn</b>. Set *<b>published_out</b>, if provided,
641 * to the published time of the cached_dir_t.
643 * DOES NOT increase the reference count on the result. Callers must do that
644 * themselves if they mean to hang on to it.
646 static cached_dir_t
*
647 spooled_resource_lookup_cached_dir(const spooled_resource_t
*spooled
,
648 time_t *published_out
)
650 tor_assert(spooled
->spool_eagerly
== 0);
651 cached_dir_t
*d
= lookup_cached_dir_by_fp(spooled
->digest
);
654 *published_out
= d
->published
;
659 /** Helper: Look up the body for an eagerly-served spooled_resource. If
660 * <b>conn_is_encrypted</b> is false, don't look up any resource that
661 * shouldn't be sent over an unencrypted connection. On success, set
662 * <b>body_out</b>, <b>size_out</b>, and <b>published_out</b> to refer
663 * to the resource's body, size, and publication date, and return 0.
664 * On failure return -1. */
666 spooled_resource_lookup_body(const spooled_resource_t
*spooled
,
667 int conn_is_encrypted
,
668 const uint8_t **body_out
,
670 time_t *published_out
)
672 tor_assert(spooled
->spool_eagerly
== 1);
674 const signed_descriptor_t
*sd
= NULL
;
676 switch (spooled
->spool_source
) {
677 case DIR_SPOOL_EXTRA_BY_FP
: {
678 sd
= get_signed_descriptor_by_fp(spooled
->digest
, 1);
681 case DIR_SPOOL_SERVER_BY_FP
: {
682 sd
= get_signed_descriptor_by_fp(spooled
->digest
, 0);
685 case DIR_SPOOL_SERVER_BY_DIGEST
: {
686 sd
= router_get_by_descriptor_digest((const char *)spooled
->digest
);
689 case DIR_SPOOL_EXTRA_BY_DIGEST
: {
690 sd
= extrainfo_get_by_descriptor_digest((const char *)spooled
->digest
);
693 case DIR_SPOOL_MICRODESC
: {
694 microdesc_t
*md
= microdesc_cache_lookup_by_digest256(
695 get_microdesc_cache(),
696 (const char *)spooled
->digest
);
697 if (! md
|| ! md
->body
) {
700 *body_out
= (const uint8_t *)md
->body
;
701 *size_out
= md
->bodylen
;
703 *published_out
= TIME_MAX
;
706 case DIR_SPOOL_NETWORKSTATUS
:
707 case DIR_SPOOL_CONSENSUS_CACHE_ENTRY
:
709 /* LCOV_EXCL_START */
710 tor_assert_nonfatal_unreached();
715 /* If we get here, then we tried to set "sd" to a signed_descriptor_t. */
720 if (sd
->send_unencrypted
== 0 && ! conn_is_encrypted
) {
721 /* we did this check once before (so we could have an accurate size
722 * estimate and maybe send a 404 if somebody asked for only bridges on
723 * a connection), but we need to do it again in case a previously
724 * unknown bridge descriptor has shown up between then and now. */
727 *body_out
= (const uint8_t *) signed_descriptor_get_body(sd
);
728 *size_out
= sd
->signed_descriptor_len
;
730 *published_out
= sd
->published_on
;
734 /** Given a fingerprint <b>fp</b> which is either set if we're looking for a
735 * v2 status, or zeroes if we're looking for a v3 status, or a NUL-padded
736 * flavor name if we want a flavored v3 status, return a pointer to the
737 * appropriate cached dir object, or NULL if there isn't one available. */
738 static cached_dir_t
*
739 lookup_cached_dir_by_fp(const uint8_t *fp
)
741 cached_dir_t
*d
= NULL
;
742 if (tor_digest_is_zero((const char *)fp
) && cached_consensuses
) {
743 d
= strmap_get(cached_consensuses
, "ns");
744 } else if (memchr(fp
, '\0', DIGEST_LEN
) && cached_consensuses
) {
745 /* this here interface is a nasty hack: we're shoving a flavor into
747 d
= strmap_get(cached_consensuses
, (const char *)fp
);
752 /** Try to guess the number of bytes that will be needed to send the
753 * spooled objects for <b>conn</b>'s outgoing spool. In the process,
754 * remove every element of the spool that refers to an absent object, or
755 * which was published earlier than <b>cutoff</b>. Set *<b>size_out</b>
756 * to the number of bytes, and *<b>n_expired_out</b> to the number of
757 * objects removed for being too old. */
759 dirserv_spool_remove_missing_and_guess_size(dir_connection_t
*conn
,
768 smartlist_t
*spool
= conn
->spool
;
778 SMARTLIST_FOREACH_BEGIN(spool
, spooled_resource_t
*, spooled
) {
779 time_t published
= TIME_MAX
;
780 size_t sz
= spooled_resource_estimate_size(spooled
, conn
,
781 compression
, &published
);
782 if (published
< cutoff
) {
784 SMARTLIST_DEL_CURRENT(spool
, spooled
);
785 spooled_resource_free(spooled
);
786 } else if (sz
== 0) {
787 SMARTLIST_DEL_CURRENT(spool
, spooled
);
788 spooled_resource_free(spooled
);
792 } SMARTLIST_FOREACH_END(spooled
);
795 *size_out
= (total
> SIZE_MAX
) ? SIZE_MAX
: (size_t)total
;
798 *n_expired_out
= n_expired
;
801 /** Helper: used to sort a connection's spool. */
803 dirserv_spool_sort_comparison_(const void **a_
, const void **b_
)
805 const spooled_resource_t
*a
= *a_
;
806 const spooled_resource_t
*b
= *b_
;
807 return fast_memcmp(a
->digest
, b
->digest
, sizeof(a
->digest
));
810 /** Sort all the entries in <b>conn</b> by digest. */
812 dirserv_spool_sort(dir_connection_t
*conn
)
814 if (conn
->spool
== NULL
)
816 smartlist_sort(conn
->spool
, dirserv_spool_sort_comparison_
);
819 /** Return the cache-info for identity fingerprint <b>fp</b>, or
820 * its extra-info document if <b>extrainfo</b> is true. Return
821 * NULL if not found or if the descriptor is older than
822 * <b>publish_cutoff</b>. */
823 static const signed_descriptor_t
*
824 get_signed_descriptor_by_fp(const uint8_t *fp
, int extrainfo
)
826 if (router_digest_is_me((const char *)fp
)) {
828 return &(router_get_my_extrainfo()->cache_info
);
830 return &(router_get_my_routerinfo()->cache_info
);
832 const routerinfo_t
*ri
= router_get_by_id_digest((const char *)fp
);
835 return extrainfo_get_by_descriptor_digest(
836 ri
->cache_info
.extra_info_digest
);
838 return &ri
->cache_info
;
844 /** When we're spooling data onto our outbuf, add more whenever we dip
845 * below this threshold. */
846 #define DIRSERV_BUFFER_MIN 16384
849 * Called whenever we have flushed some directory data in state
850 * SERVER_WRITING, or whenever we want to fill the buffer with initial
851 * directory data (so that subsequent writes will occur, and trigger this
854 * Return 0 on success, and -1 on failure.
857 connection_dirserv_flushed_some(dir_connection_t
*conn
)
859 tor_assert(conn
->base_
.state
== DIR_CONN_STATE_SERVER_WRITING
);
860 if (conn
->spool
== NULL
)
863 while (connection_get_outbuf_len(TO_CONN(conn
)) < DIRSERV_BUFFER_MIN
&&
864 smartlist_len(conn
->spool
)) {
865 spooled_resource_t
*spooled
=
866 smartlist_get(conn
->spool
, smartlist_len(conn
->spool
)-1);
867 spooled_resource_flush_status_t status
;
868 status
= spooled_resource_flush_some(spooled
, conn
);
869 if (status
== SRFS_ERR
) {
871 } else if (status
== SRFS_MORE
) {
874 tor_assert(status
== SRFS_DONE
);
876 /* If we're here, we're done flushing this resource. */
877 tor_assert(smartlist_pop_last(conn
->spool
) == spooled
);
878 spooled_resource_free(spooled
);
881 if (smartlist_len(conn
->spool
) > 0) {
882 /* We're still spooling something. */
886 /* If we get here, we're done. */
887 smartlist_free(conn
->spool
);
889 if (conn
->compress_state
) {
890 /* Flush the compression state: there could be more bytes pending in there,
891 * and we don't want to omit bytes. */
892 connection_buf_add_compress("", 0, conn
, 1);
893 tor_compress_free(conn
->compress_state
);
894 conn
->compress_state
= NULL
;
899 /** Remove every element from <b>conn</b>'s outgoing spool, and delete
902 dir_conn_clear_spool(dir_connection_t
*conn
)
904 if (!conn
|| ! conn
->spool
)
906 SMARTLIST_FOREACH(conn
->spool
, spooled_resource_t
*, s
,
907 spooled_resource_free(s
));
908 smartlist_free(conn
->spool
);
912 /** Release all storage used by the directory server. */
914 dirserv_free_all(void)
916 strmap_free(cached_consensuses
, free_cached_dir_
);
917 cached_consensuses
= NULL
;