1 /* Copyright (c) 2017-2021, The Tor Project, Inc. */
2 /* See LICENSE for licensing information */
/**
 * \file test_hs_common.c
 * \brief Test hidden service common functionalities.
 **/
9 #define CONNECTION_EDGE_PRIVATE
10 #define HS_COMMON_PRIVATE
11 #define HS_CLIENT_PRIVATE
12 #define HS_SERVICE_PRIVATE
13 #define NODELIST_PRIVATE
15 #include "test/test.h"
16 #include "test/test_helpers.h"
17 #include "test/log_test_helpers.h"
18 #include "test/hs_test_helpers.h"
20 #include "core/or/connection_edge.h"
21 #include "lib/crypt_ops/crypto_format.h"
22 #include "lib/crypt_ops/crypto_rand.h"
23 #include "feature/hs/hs_common.h"
24 #include "feature/hs/hs_client.h"
25 #include "feature/hs/hs_service.h"
26 #include "app/config/config.h"
27 #include "feature/nodelist/networkstatus.h"
28 #include "feature/dirclient/dirclient.h"
29 #include "feature/dirauth/dirvote.h"
30 #include "feature/nodelist/nodelist.h"
31 #include "feature/nodelist/routerlist.h"
32 #include "app/config/statefile.h"
33 #include "core/or/circuitlist.h"
34 #include "feature/dirauth/shared_random.h"
35 #include "feature/dirauth/voting_schedule.h"
37 #include "feature/nodelist/microdesc_st.h"
38 #include "feature/nodelist/networkstatus_st.h"
39 #include "feature/nodelist/node_st.h"
40 #include "app/config/or_state_st.h"
41 #include "feature/nodelist/routerinfo_st.h"
42 #include "feature/nodelist/routerstatus_st.h"
44 /** Test the validation of HS v3 addresses */
/* NOTE(review): this excerpt appears truncated -- the function's return
 * type, braces, the declaration of "ret" and the final "done:" label are
 * not visible here; verify against the full file before editing. */
46 test_validate_address(void *arg
)
52 /* Address too short and too long. */
53 setup_full_capture_of_logs(LOG_WARN
);
54 ret
= hs_address_is_valid("blah");
55 tt_int_op(ret
, OP_EQ
, 0);
56 expect_log_msg_containing("Invalid length");
57 teardown_capture_of_logs();
/* Over-long input: must also be rejected with the same length warning. */
59 setup_full_capture_of_logs(LOG_WARN
);
60 ret
= hs_address_is_valid(
61 "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
62 tt_int_op(ret
, OP_EQ
, 0);
63 expect_log_msg_containing("Invalid length");
64 teardown_capture_of_logs();
66 /* Invalid checksum (taken from prop224) */
67 setup_full_capture_of_logs(LOG_WARN
);
68 ret
= hs_address_is_valid(
69 "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
70 tt_int_op(ret
, OP_EQ
, 0);
71 expect_log_msg_containing("invalid checksum");
72 teardown_capture_of_logs();
/* Second bad-checksum vector. */
74 setup_full_capture_of_logs(LOG_WARN
);
75 ret
= hs_address_is_valid(
76 "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
77 tt_int_op(ret
, OP_EQ
, 0);
78 expect_log_msg_containing("invalid checksum");
79 teardown_capture_of_logs();
81 /* Non base32 decodable string. */
82 setup_full_capture_of_logs(LOG_WARN
);
83 ret
= hs_address_is_valid(
84 "????????????????????????????????????????????????????????");
85 tt_int_op(ret
, OP_EQ
, 0);
86 expect_log_msg_containing("Unable to base32 decode");
87 teardown_capture_of_logs();
/* Finally, a well-formed v3 address must be accepted. */
90 ret
= hs_address_is_valid(
91 "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
92 tt_int_op(ret
, OP_EQ
, 1);
/* Mock replacement for write_str_to_file(): instead of touching the
 * filesystem, assert that the caller writes the expected onion-address
 * content to the expected path.
 * NOTE(review): the return type, braces and trailing return statement are
 * missing from this excerpt; verify against the full file. */
99 mock_write_str_to_file(const char *path
, const char *str
, int bin
)
102 tt_str_op(path
, OP_EQ
, "/double/five"PATH_SEPARATOR
"squared");
103 tt_str_op(str
, OP_EQ
,
104 "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");
110 /** Test building HS v3 onion addresses. Uses test vectors from the
111 * ./hs_build_address.py script. */
/* NOTE(review): truncated excerpt -- return type, braces, "ret"
 * declaration, the "pubkey_hex" declaration line and the cleanup/UNMOCK
 * tail are not visible here. */
113 test_build_address(void *arg
)
116 char onion_addr
[HS_SERVICE_ADDR_LEN_BASE32
+ 1];
117 ed25519_public_key_t pubkey
;
118 /* hex-encoded ed25519 pubkey used in hs_build_address.py */
120 "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
121 hs_service_t
*service
= NULL
;
/* Intercept the address-file write so this test stays filesystem-free. */
125 MOCK(write_str_to_file
, mock_write_str_to_file
);
127 /* The following has been created with hs_build_address.py script that
128 * follows proposal 224 specification to build an onion address. */
129 static const char *test_addr
=
130 "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";
132 /* Let's try to build the same onion address as the script */
133 base16_decode((char*)pubkey
.pubkey
, sizeof(pubkey
.pubkey
),
134 pubkey_hex
, strlen(pubkey_hex
));
135 hs_build_address(&pubkey
, HS_VERSION_THREE
, onion_addr
);
136 tt_str_op(test_addr
, OP_EQ
, onion_addr
);
137 /* Validate that address. */
138 ret
= hs_address_is_valid(onion_addr
);
139 tt_int_op(ret
, OP_EQ
, 1);
/* Exercise write_address_to_file(); checked by mock_write_str_to_file(). */
141 service
= tor_malloc_zero(sizeof(hs_service_t
));
142 memcpy(service
->onion_address
, onion_addr
, sizeof(service
->onion_address
));
143 tor_asprintf(&service
->config
.directory_path
, "/double/five");
144 ret
= write_address_to_file(service
, "squared");
145 tt_int_op(ret
, OP_EQ
, 0);
148 hs_service_free(service
);
151 /** Test that our HS time period calculation functions work properly */
/* NOTE(review): truncated excerpt -- return type, braces, declarations of
 * "retval"/"tn" and the second argument of each parse_rfc1123_time() call
 * (&fake_time, presumably) are not visible here; confirm in the full file. */
153 test_time_period(void *arg
)
158 time_t fake_time
, correct_time
, start_time
;
160 /* Let's do the example in prop224 section [TIME-PERIODS] */
161 retval
= parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
163 tt_int_op(retval
, OP_EQ
, 0);
165 /* Check that the time period number is right */
166 tn
= hs_get_time_period_num(fake_time
);
167 tt_u64_op(tn
, OP_EQ
, 16903);
169 /* Increase current time to 11:59:59 UTC and check that the time period
170 number is still the same */
172 tn
= hs_get_time_period_num(fake_time
);
173 tt_u64_op(tn
, OP_EQ
, 16903);
175 { /* Check start time of next time period */
176 retval
= parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
178 tt_int_op(retval
, OP_EQ
, 0);
180 start_time
= hs_get_start_time_of_next_time_period(fake_time
);
181 tt_int_op(start_time
, OP_EQ
, correct_time
);
184 /* Now take time to 12:00:00 UTC and check that the time period rotated */
186 tn
= hs_get_time_period_num(fake_time
);
187 tt_u64_op(tn
, OP_EQ
, 16904);
189 /* Now also check our hs_get_next_time_period_num() function */
190 tn
= hs_get_next_time_period_num(fake_time
);
191 tt_u64_op(tn
, OP_EQ
, 16905);
193 { /* Check start time of next time period again */
194 retval
= parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
196 tt_int_op(retval
, OP_EQ
, 0);
198 start_time
= hs_get_start_time_of_next_time_period(fake_time
);
199 tt_int_op(start_time
, OP_EQ
, correct_time
);
202 /* Now do another sanity check: The time period number at the start of the
203 * next time period, must be the same time period number as the one returned
204 * from hs_get_next_time_period_num() */
206 time_t next_tp_start
= hs_get_start_time_of_next_time_period(fake_time
);
207 tt_u64_op(hs_get_time_period_num(next_tp_start
), OP_EQ
,
208 hs_get_next_time_period_num(fake_time
));
215 /** Test that we can correctly find the start time of the next time period */
/* NOTE(review): truncated excerpt -- return type, braces, "retval" and
 * "fake_time" declarations, and the second argument of each
 * parse_rfc1123_time() call are not visible here. */
217 test_start_time_of_next_time_period(void *arg
)
222 char tbuf
[ISO_TIME_LEN
+ 1];
223 time_t next_tp_start_time
;
225 /* Do some basic tests */
226 retval
= parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
228 tt_int_op(retval
, OP_EQ
, 0);
229 next_tp_start_time
= hs_get_start_time_of_next_time_period(fake_time
);
230 /* Compare it with the correct result */
231 format_iso_time(tbuf
, next_tp_start_time
);
232 tt_str_op("2016-04-13 12:00:00", OP_EQ
, tbuf
);
234 /* Another test with an edge-case time (start of TP) */
235 retval
= parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
237 tt_int_op(retval
, OP_EQ
, 0);
238 next_tp_start_time
= hs_get_start_time_of_next_time_period(fake_time
);
239 format_iso_time(tbuf
, next_tp_start_time
);
240 tt_str_op("2016-04-14 12:00:00", OP_EQ
, tbuf
);
243 /* Now pretend we are on a testing network and alter the voting schedule to
244 be every 10 seconds. This means that a time period has length 10*24
245 seconds (4 minutes). It also means that we apply a rotational offset of
246 120 seconds to the time period, so that it starts at 00:02:00 instead of
248 or_options_t
*options
= get_options_mutable();
249 options
->TestingTorNetwork
= 1;
250 options
->V3AuthVotingInterval
= 10;
251 options
->TestingV3AuthInitialVotingInterval
= 10;
/* With the 10s interval, the next TP start lands at 00:02:00. */
253 retval
= parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
255 tt_int_op(retval
, OP_EQ
, 0);
256 next_tp_start_time
= hs_get_start_time_of_next_time_period(fake_time
);
257 /* Compare it with the correct result */
258 format_iso_time(tbuf
, next_tp_start_time
);
259 tt_str_op("2016-04-13 00:02:00", OP_EQ
, tbuf
);
/* Exactly at a TP start, the "next" start is one full 4-minute TP later. */
261 retval
= parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
263 tt_int_op(retval
, OP_EQ
, 0);
264 next_tp_start_time
= hs_get_start_time_of_next_time_period(fake_time
);
265 /* Compare it with the correct result */
266 format_iso_time(tbuf
, next_tp_start_time
);
267 tt_str_op("2016-04-13 00:06:00", OP_EQ
, tbuf
);
274 /* Cleanup the global nodelist. It also frees the "md" in the node_t because
275 * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
/* NOTE(review): truncated excerpt -- the "static void" line, braces and the
 * loop body that frees node->md are not visible here. */
277 cleanup_nodelist(void)
279 const smartlist_t
*nodelist
= nodelist_get_list();
280 SMARTLIST_FOREACH_BEGIN(nodelist
, node_t
*, node
) {
283 } SMARTLIST_FOREACH_END(node
);
/* Add a fake HSDir (routerstatus + routerinfo + node) to the given
 * consensus. "identity_idx" seeds both the RSA identity digest and the
 * ed25519 signing key so each relay is distinct and deterministic;
 * "is_hsdir" toggles the HSDir consensus flag.
 * NOTE(review): truncated excerpt -- the return type, several parameter
 * lines (identity_idx/is_hsdir), braces, the node NULL-check and the
 * error-path label around routerstatus_free() are not visible here. */
288 helper_add_hsdir_to_networkstatus(networkstatus_t
*ns
,
290 const char *nickname
,
293 routerstatus_t
*rs
= tor_malloc_zero(sizeof(routerstatus_t
));
294 routerinfo_t
*ri
= tor_malloc_zero(sizeof(routerinfo_t
));
295 uint8_t identity
[DIGEST_LEN
];
/* Fill the whole digest with the index byte: cheap unique identity. */
298 memset(identity
, identity_idx
, sizeof(identity
));
300 memcpy(rs
->identity_digest
, identity
, DIGEST_LEN
);
301 rs
->is_hs_dir
= is_hsdir
;
302 rs
->pv
.supports_v3_hsdir
= 1;
303 strlcpy(rs
->nickname
, nickname
, sizeof(rs
->nickname
));
304 tor_addr_parse(&ri
->ipv4_addr
, "1.2.3.4");
305 tor_addr_parse(&rs
->ipv4_addr
, "1.2.3.4");
306 ri
->nickname
= tor_strdup(nickname
);
307 ri
->protocol_list
= tor_strdup("HSDir=1-2 LinkAuth=3");
308 memcpy(ri
->cache_info
.identity_digest
, identity
, DIGEST_LEN
);
309 ri
->cache_info
.signing_key_cert
= tor_malloc_zero(sizeof(tor_cert_t
));
310 /* Needed for the HSDir index computation. */
311 memset(&ri
->cache_info
.signing_key_cert
->signing_key
,
312 identity_idx
, ED25519_PUBKEY_LEN
);
313 tt_assert(nodelist_set_routerinfo(ri
, NULL
));
315 node
= node_get_mutable_by_id(ri
->cache_info
.identity_digest
);
318 /* We need this to exist for node_has_preferred_descriptor() to return
/* Freed later by cleanup_nodelist(). */
320 node
->md
= tor_malloc_zero(sizeof(microdesc_t
));
321 /* Do this now the nodelist_set_routerinfo() function needs a "rs" to set
322 * the indexes which it doesn't have when it is called. */
323 node_set_hsdir_index(node
, ns
);
325 smartlist_add(ns
->routerstatus_list
, rs
);
329 routerstatus_free(rs
);
/* Lazily-built fake consensus shared by the tests below; freed by the
 * individual tests via networkstatus_vote_free(). */
334 static networkstatus_t
*mock_ns
= NULL
;
/* Mock for networkstatus_get_latest_consensus(): return a minimal live
 * consensus with an empty routerstatus list, creating it on first call.
 * NOTE(review): truncated excerpt -- braces and the early/final "return
 * mock_ns;" statements are not visible here. */
336 static networkstatus_t
*
337 mock_networkstatus_get_latest_consensus(void)
339 time_t now
= approx_time();
341 /* If initialized, return it */
346 /* Initialize fake consensus */
347 mock_ns
= tor_malloc_zero(sizeof(networkstatus_t
));
349 /* This consensus is live */
350 mock_ns
->valid_after
= now
-1;
351 mock_ns
->fresh_until
= now
+1;
352 mock_ns
->valid_until
= now
+2;
353 /* Create routerstatus list */
354 mock_ns
->routerstatus_list
= smartlist_new();
355 mock_ns
->type
= NS_TYPE_CONSENSUS
;
/* Mock for networkstatus_get_reasonably_live_consensus().
 * NOTE(review): only the signature survives in this excerpt; the body
 * (presumably delegating to the latest-consensus mock and ignoring its
 * arguments) is not visible here -- confirm against the full file. */
360 static networkstatus_t
*
361 mock_networkstatus_get_reasonably_live_consensus(time_t now
, int flavor
)
372 /** Test the responsible HSDirs calculation function */
/* NOTE(review): truncated excerpt -- return type, braces, the "done:"
 * cleanup label and the UNMOCK of networkstatus_get_latest_consensus are
 * not visible here. */
374 test_responsible_hsdirs(void *arg
)
376 smartlist_t
*responsible_dirs
= smartlist_new();
377 networkstatus_t
*ns
= NULL
;
382 MOCK(networkstatus_get_latest_consensus
,
383 mock_networkstatus_get_latest_consensus
);
384 MOCK(networkstatus_get_reasonably_live_consensus
,
385 mock_networkstatus_get_reasonably_live_consensus
);
387 ns
= networkstatus_get_latest_consensus();
389 { /* First router: HSdir */
390 helper_add_hsdir_to_networkstatus(ns
, 1, "igor", 1);
/* Second router: also an HSDir. */
394 helper_add_hsdir_to_networkstatus(ns
, 2, "victor", 1);
397 { /* Third relay but not HSDir */
398 helper_add_hsdir_to_networkstatus(ns
, 3, "spyro", 0);
401 /* Use a fixed time period and pub key so we always take the same path */
402 ed25519_public_key_t pubkey
;
403 uint64_t time_period_num
= 17653; // 2 May, 2018, 14:00.
404 memset(&pubkey
, 42, sizeof(pubkey
));
406 hs_get_responsible_hsdirs(&pubkey
, time_period_num
,
407 0, 0, responsible_dirs
);
409 /* Make sure that we only found 2 responsible HSDirs.
410 * The third relay was not an hsdir! */
411 tt_int_op(smartlist_len(responsible_dirs
), OP_EQ
, 2);
413 /** TODO: Build a bigger network and do more tests here */
/* Cleanup: free the routerstatuses we own, then the consensus itself. */
416 SMARTLIST_FOREACH(ns
->routerstatus_list
,
417 routerstatus_t
*, rs
, routerstatus_free(rs
));
418 smartlist_free(responsible_dirs
);
419 smartlist_clear(ns
->routerstatus_list
);
420 networkstatus_vote_free(mock_ns
);
423 UNMOCK(networkstatus_get_reasonably_live_consensus
);
/* No-op mock for directory_initiate_request(): swallows descriptor upload
 * requests so no real directory connection is attempted.
 * NOTE(review): return type, braces and body are missing from this
 * excerpt. */
427 mock_directory_initiate_request(directory_request_t
*req
)
/* Mock for hs_desc_encode_descriptor(): skip real encoding and hand back a
 * constant placeholder string ("lulu") so upload logic can proceed.
 * NOTE(review): truncated excerpt -- return type, remaining parameters
 * (encoded_out), braces, the other (void) casts and the return statement
 * are not visible here. */
434 mock_hs_desc_encode_descriptor(const hs_descriptor_t
*desc
,
435 const ed25519_keypair_t
*signing_kp
,
436 const uint8_t *descriptor_cookie
,
441 (void)descriptor_cookie
;
443 tor_asprintf(encoded_out
, "lulu");
/* Static placeholder state returned by get_or_state_replacement(). */
447 static or_state_t dummy_state
;
449 /* Mock function to get fake or state (used for rev counters) */
/* NOTE(review): truncated excerpt -- return types, braces and the bodies of
 * both mocks below (presumably "return &dummy_state;" and "return 1;") are
 * not visible here. */
451 get_or_state_replacement(void)
/* Mock router_have_minimum_dir_info() so tests act as if enough directory
 * info is available. */
457 mock_router_have_minimum_dir_info(void)
462 /** Test that we correctly detect when the HSDir hash ring changes so that we
463 * reupload our descriptor. */
/* NOTE(review): truncated excerpt -- return type, braces, several MOCK
 * lines (get_or_state), the pubkey_hex declaration line, various cleanup
 * statements and the "done:" label are not visible here. */
465 test_desc_reupload_logic(void *arg
)
467 networkstatus_t
*ns
= NULL
;
473 MOCK(networkstatus_get_reasonably_live_consensus
,
474 mock_networkstatus_get_reasonably_live_consensus
);
475 MOCK(router_have_minimum_dir_info
,
476 mock_router_have_minimum_dir_info
);
478 get_or_state_replacement
);
479 MOCK(networkstatus_get_latest_consensus
,
480 mock_networkstatus_get_latest_consensus
);
481 MOCK(directory_initiate_request
,
482 mock_directory_initiate_request
);
483 MOCK(hs_desc_encode_descriptor
,
484 mock_hs_desc_encode_descriptor
);
486 ns
= networkstatus_get_latest_consensus();
/* Test plan:
489 * 1) Upload descriptor to HSDirs
490 * CHECK that previous_hsdirs list was populated.
491 * 2) Then call router_dir_info_changed() without an HSDir set change.
492 * CHECK that no reupload occurs.
493 * 3) Now change the HSDir set, and call dir_info_changed() again.
494 * CHECK that reupload occurs.
495 * 4) Finally call service_desc_schedule_upload().
496 * CHECK that previous_hsdirs list was cleared.
*/
499 /* Let's start by building our descriptor and service */
500 hs_service_descriptor_t
*desc
= service_descriptor_new();
501 hs_service_t
*service
= NULL
;
502 /* hex-encoded ed25519 pubkey used in hs_build_address.py */
504 "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
505 char onion_addr
[HS_SERVICE_ADDR_LEN_BASE32
+ 1];
506 ed25519_public_key_t pubkey
;
507 base16_decode((char*)pubkey
.pubkey
, sizeof(pubkey
.pubkey
),
508 pubkey_hex
, strlen(pubkey_hex
));
509 hs_build_address(&pubkey
, HS_VERSION_THREE
, onion_addr
);
510 service
= tor_malloc_zero(sizeof(hs_service_t
));
512 memcpy(service
->onion_address
, onion_addr
, sizeof(service
->onion_address
));
513 ed25519_secret_key_generate(&service
->keys
.identity_sk
, 0);
514 ed25519_public_key_generate(&service
->keys
.identity_pk
,
515 &service
->keys
.identity_sk
);
516 service
->desc_current
= desc
;
517 /* Also add service to service map */
518 hs_service_ht
*service_map
= get_hs_service_map();
519 tt_assert(service_map
);
520 tt_int_op(hs_service_get_num_services(), OP_EQ
, 0);
521 register_service(service_map
, service
);
522 tt_int_op(hs_service_get_num_services(), OP_EQ
, 1);
524 /* Now let's create our hash ring: */
526 helper_add_hsdir_to_networkstatus(ns
, 1, "dingus", 1);
527 helper_add_hsdir_to_networkstatus(ns
, 2, "clive", 1);
528 helper_add_hsdir_to_networkstatus(ns
, 3, "aaron", 1);
529 helper_add_hsdir_to_networkstatus(ns
, 4, "lizzie", 1);
530 helper_add_hsdir_to_networkstatus(ns
, 5, "daewon", 1);
531 helper_add_hsdir_to_networkstatus(ns
, 6, "clarke", 1);
534 /* Now let's upload our desc to all hsdirs */
535 upload_descriptor_to_all(service
, desc
);
536 /* Check that previous hsdirs were populated */
537 tt_int_op(smartlist_len(desc
->previous_hsdirs
), OP_EQ
, 6);
539 /* Poison next upload time so that we can see if it was changed by
540 * router_dir_info_changed(). No changes in hash ring so far, so the upload
541 * time should stay as is. */
542 desc
->next_upload_time
= 42;
543 router_dir_info_changed();
544 tt_int_op(desc
->next_upload_time
, OP_EQ
, 42);
546 /* Now change the HSDir hash ring by swapping nora for aaron.
547 * Start by clearing the hash ring */
549 SMARTLIST_FOREACH(ns
->routerstatus_list
,
550 routerstatus_t
*, rs
, routerstatus_free(rs
));
551 smartlist_clear(ns
->routerstatus_list
);
553 routerlist_free_all();
556 { /* Now add back all the nodes */
557 helper_add_hsdir_to_networkstatus(ns
, 1, "dingus", 1);
558 helper_add_hsdir_to_networkstatus(ns
, 2, "clive", 1);
559 helper_add_hsdir_to_networkstatus(ns
, 4, "lizzie", 1);
560 helper_add_hsdir_to_networkstatus(ns
, 5, "daewon", 1);
561 helper_add_hsdir_to_networkstatus(ns
, 6, "clarke", 1);
562 helper_add_hsdir_to_networkstatus(ns
, 7, "nora", 1);
565 /* Now call service_desc_hsdirs_changed() and see that it detected the hash
567 time_t now
= approx_time();
569 tt_int_op(service_desc_hsdirs_changed(service
, desc
), OP_EQ
, 1);
570 tt_int_op(smartlist_len(desc
->previous_hsdirs
), OP_EQ
, 6);
572 /* Now order another upload and see that we keep having 6 prev hsdirs */
573 upload_descriptor_to_all(service
, desc
);
574 /* Check that previous hsdirs were populated */
575 tt_int_op(smartlist_len(desc
->previous_hsdirs
), OP_EQ
, 6);
577 /* Now restore the HSDir hash ring to its original state by swapping back
579 /* First clear up the hash ring */
581 SMARTLIST_FOREACH(ns
->routerstatus_list
,
582 routerstatus_t
*, rs
, routerstatus_free(rs
));
583 smartlist_clear(ns
->routerstatus_list
);
585 routerlist_free_all();
588 { /* Now populate the hash ring again */
589 helper_add_hsdir_to_networkstatus(ns
, 1, "dingus", 1);
590 helper_add_hsdir_to_networkstatus(ns
, 2, "clive", 1);
591 helper_add_hsdir_to_networkstatus(ns
, 3, "aaron", 1);
592 helper_add_hsdir_to_networkstatus(ns
, 4, "lizzie", 1);
593 helper_add_hsdir_to_networkstatus(ns
, 5, "daewon", 1);
594 helper_add_hsdir_to_networkstatus(ns
, 6, "clarke", 1);
597 /* Check that our algorithm catches this change of hsdirs */
598 tt_int_op(service_desc_hsdirs_changed(service
, desc
), OP_EQ
, 1);
600 /* Now pretend that the descriptor changed, and order a reupload to all
601 HSDirs. Make sure that the set of previous HSDirs was cleared. */
602 service_desc_schedule_upload(desc
, now
, 1);
603 tt_int_op(smartlist_len(desc
->previous_hsdirs
), OP_EQ
, 0);
605 /* Now reupload again: see that the prev hsdir set got populated again. */
606 upload_descriptor_to_all(service
, desc
);
607 tt_int_op(smartlist_len(desc
->previous_hsdirs
), OP_EQ
, 6);
/* Cleanup: free routerstatuses, unregister and free the service. */
610 SMARTLIST_FOREACH(ns
->routerstatus_list
,
611 routerstatus_t
*, rs
, routerstatus_free(rs
));
612 smartlist_clear(ns
->routerstatus_list
);
614 remove_service(get_hs_service_map(), service
);
615 hs_service_free(service
);
617 networkstatus_vote_free(ns
);
622 /** Test disaster SRV computation and caching */
/* The disaster-SRV cache holds two entries; this test verifies that
 * requesting SRVs for successive time periods evicts entries in the
 * expected order.
 * NOTE(review): truncated excerpt -- return type, braces and the "done:"
 * tail are not visible here. */
624 test_disaster_srv(void *arg
)
626 uint8_t *cached_disaster_srv_one
= NULL
;
627 uint8_t *cached_disaster_srv_two
= NULL
;
628 uint8_t srv_one
[DIGEST256_LEN
] = {0};
629 uint8_t srv_two
[DIGEST256_LEN
] = {0};
630 uint8_t srv_three
[DIGEST256_LEN
] = {0};
631 uint8_t srv_four
[DIGEST256_LEN
] = {0};
632 uint8_t srv_five
[DIGEST256_LEN
] = {0};
636 /* Get the cached SRVs: we gonna use them later for verification */
637 cached_disaster_srv_one
= get_first_cached_disaster_srv();
638 cached_disaster_srv_two
= get_second_cached_disaster_srv();
640 /* Compute some srvs */
641 get_disaster_srv(1, srv_one
);
642 get_disaster_srv(2, srv_two
);
644 /* Check that the cached ones were updated */
645 tt_mem_op(cached_disaster_srv_one
, OP_EQ
, srv_one
, DIGEST256_LEN
);
646 tt_mem_op(cached_disaster_srv_two
, OP_EQ
, srv_two
, DIGEST256_LEN
);
648 /* Ask for an SRV that has already been computed */
649 get_disaster_srv(2, srv_two
);
650 /* and check that the cache entries have not changed */
651 tt_mem_op(cached_disaster_srv_one
, OP_EQ
, srv_one
, DIGEST256_LEN
);
652 tt_mem_op(cached_disaster_srv_two
, OP_EQ
, srv_two
, DIGEST256_LEN
);
654 /* Ask for a new SRV */
655 get_disaster_srv(3, srv_three
);
656 tt_mem_op(cached_disaster_srv_one
, OP_EQ
, srv_three
, DIGEST256_LEN
);
657 tt_mem_op(cached_disaster_srv_two
, OP_EQ
, srv_two
, DIGEST256_LEN
);
659 /* Ask for another SRV: none of the original SRVs should now be cached */
660 get_disaster_srv(4, srv_four
);
661 tt_mem_op(cached_disaster_srv_one
, OP_EQ
, srv_three
, DIGEST256_LEN
);
662 tt_mem_op(cached_disaster_srv_two
, OP_EQ
, srv_four
, DIGEST256_LEN
);
664 /* Ask for yet another SRV */
665 get_disaster_srv(5, srv_five
);
666 tt_mem_op(cached_disaster_srv_one
, OP_EQ
, srv_five
, DIGEST256_LEN
);
667 tt_mem_op(cached_disaster_srv_two
, OP_EQ
, srv_four
, DIGEST256_LEN
);
673 /** Test our HS descriptor request tracker by making various requests and
674 * checking whether they get tracked properly. */
/* NOTE(review): truncated excerpt -- return type, braces, the "retval"
 * declaration, the trailing time/set arguments of several
 * hs_lookup_last_hid_serv_request() calls, the "dummy" buffer declaration
 * and the "done:" cleanup (freeing hsdir/hsdir2/hsdir3) are not visible
 * here. */
676 test_hid_serv_request_tracker(void *arg
)
680 routerstatus_t
*hsdir
= NULL
, *hsdir2
= NULL
, *hsdir3
= NULL
;
681 time_t now
= approx_time();
683 const char *req_key_str_first
=
684 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
685 const char *req_key_str_second
=
686 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
687 const char *req_key_str_small
= "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";
689 /*************************** basic test *******************************/
691 /* Get request tracker and make sure it's empty */
692 strmap_t
*request_tracker
= get_last_hid_serv_requests();
693 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 0);
695 /* Let's register a hid serv request */
696 hsdir
= tor_malloc_zero(sizeof(routerstatus_t
));
697 memset(hsdir
->identity_digest
, 'Z', DIGEST_LEN
);
698 retval
= hs_lookup_last_hid_serv_request(hsdir
, req_key_str_first
,
700 tt_int_op(retval
, OP_EQ
, now
);
701 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 1);
703 /* Let's lookup a non-existent hidserv request */
704 retval
= hs_lookup_last_hid_serv_request(hsdir
, req_key_str_second
,
706 tt_int_op(retval
, OP_EQ
, 0);
707 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 1);
709 /* Let's lookup a real hidserv request */
710 retval
= hs_lookup_last_hid_serv_request(hsdir
, req_key_str_first
,
712 tt_int_op(retval
, OP_EQ
, now
); /* we got it */
713 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 1);
715 /**********************************************************************/
717 /* Let's add another request for the same HS but on a different HSDir. */
718 hsdir2
= tor_malloc_zero(sizeof(routerstatus_t
));
719 memset(hsdir2
->identity_digest
, 2, DIGEST_LEN
);
720 retval
= hs_lookup_last_hid_serv_request(hsdir2
, req_key_str_first
,
722 tt_int_op(retval
, OP_EQ
, now
+3);
723 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 2);
725 /* Check that we can clean the first request based on time */
726 hs_clean_last_hid_serv_requests(now
+3+REND_HID_SERV_DIR_REQUERY_PERIOD
);
727 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 1);
728 /* Check that it doesn't exist anymore */
729 retval
= hs_lookup_last_hid_serv_request(hsdir
, req_key_str_first
,
731 tt_int_op(retval
, OP_EQ
, 0);
733 /* Now let's add a smaller req key str */
734 hsdir3
= tor_malloc_zero(sizeof(routerstatus_t
));
735 memset(hsdir3
->identity_digest
, 3, DIGEST_LEN
);
736 retval
= hs_lookup_last_hid_serv_request(hsdir3
, req_key_str_small
,
738 tt_int_op(retval
, OP_EQ
, now
+4);
739 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 2);
741 /*************************** deleting entries **************************/
743 /* Add another request with very short key */
744 retval
= hs_lookup_last_hid_serv_request(hsdir
, "l", now
, 1);
745 tt_int_op(retval
, OP_EQ
, now
);
746 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 3);
748 /* Try deleting entries with a dummy key. Check that our previous requests
/* A too-short key should trip a BUG() without deleting anything. */
750 tor_capture_bugs_(1);
751 hs_purge_hid_serv_from_last_hid_serv_requests("a");
752 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 3);
753 tor_end_capture_bugs_();
755 /* Try another dummy key. Check that requests are still there */
758 memset(dummy
, 'Z', 2000);
759 dummy
[1999] = '\x00';
760 hs_purge_hid_serv_from_last_hid_serv_requests(dummy
);
761 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 3);
764 /* Another dummy key! */
765 hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second
);
766 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 3);
768 /* Now actually delete a request! */
769 hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first
);
770 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 2);
/* Purging everything leaves an empty (re-created) tracker. */
773 hs_purge_last_hid_serv_requests();
774 request_tracker
= get_last_hid_serv_requests();
775 tt_int_op(strmap_size(request_tracker
),OP_EQ
, 0);
/* Test parse_extended_hostname(): classification of .onion / .exit /
 * normal hostnames, and in-place truncation of valid v3 .onion names to
 * the bare 56-character address.
 * NOTE(review): truncated excerpt -- return type, braces, and the
 * declaration lines for address8/address9/address10 (only their string
 * initializers remain) are not visible here. */
784 test_parse_extended_hostname(void *arg
)
787 hostname_type_t type
;
789 char address1
[] = "fooaddress.onion";
790 char address3
[] = "fooaddress.exit";
791 char address4
[] = "www.torproject.org";
792 char address5
[] = "foo.abcdefghijklmnop.onion";
793 char address6
[] = "foo.bar.abcdefghijklmnop.onion";
794 char address7
[] = ".abcdefghijklmnop.onion";
796 "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
798 "www.15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
800 "15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid7jdl.onion";
/* Garbage before ".onion" is a bad hostname. */
802 tt_assert(!parse_extended_hostname(address1
, &type
));
803 tt_int_op(type
, OP_EQ
, BAD_HOSTNAME
);
805 tt_assert(parse_extended_hostname(address3
, &type
));
806 tt_int_op(type
, OP_EQ
, EXIT_HOSTNAME
);
808 tt_assert(parse_extended_hostname(address4
, &type
));
809 tt_int_op(type
, OP_EQ
, NORMAL_HOSTNAME
);
811 tt_assert(!parse_extended_hostname(address5
, &type
));
812 tt_int_op(type
, OP_EQ
, BAD_HOSTNAME
);
814 tt_assert(!parse_extended_hostname(address6
, &type
));
815 tt_int_op(type
, OP_EQ
, BAD_HOSTNAME
);
817 tt_assert(!parse_extended_hostname(address7
, &type
));
818 tt_int_op(type
, OP_EQ
, BAD_HOSTNAME
);
/* Valid v3 with subdomain: accepted, and truncated to bare address. */
820 tt_assert(parse_extended_hostname(address8
, &type
));
821 tt_int_op(type
, OP_EQ
, ONION_V3_HOSTNAME
);
822 tt_str_op(address8
, OP_EQ
,
823 "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
825 /* Invalid v3 address. */
826 tt_assert(!parse_extended_hostname(address9
, &type
));
827 tt_int_op(type
, OP_EQ
, BAD_HOSTNAME
);
829 /* Invalid v3 address: too long */
830 tt_assert(!parse_extended_hostname(address10
, &type
));
831 tt_int_op(type
, OP_EQ
, BAD_HOSTNAME
);
/* Test hs_in_period_between_tp_and_srv() across the SRV/TP schedule
 * boundaries sketched in the diagram below.
 * NOTE(review): truncated excerpt -- return type, braces, the "ret"/"ns"
 * declarations and the "done:" tail are not visible here. */
837 test_time_between_tp_and_srv(void *arg
)
843 /* This function should be returning true where "^" are:
845 * +------------------------------------------------------------------+
847 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
848 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
850 * | $==========|-----------$===========|-----------$===========| |
851 * | ^^^^^^^^^^^^ ^^^^^^^^^^^^ |
853 * +------------------------------------------------------------------+
*/
/* 00:00-01:00: between SRV#1 and TP#1 -> false. */
856 ret
= parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns
.valid_after
);
857 tt_int_op(ret
, OP_EQ
, 0);
858 ret
= parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns
.fresh_until
);
859 tt_int_op(ret
, OP_EQ
, 0);
860 dirauth_sched_recalculate_timing(get_options(), ns
.valid_after
);
861 ret
= hs_in_period_between_tp_and_srv(&ns
, 0);
862 tt_int_op(ret
, OP_EQ
, 0);
/* 11:00-12:00: still before TP#1 -> false. */
864 ret
= parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns
.valid_after
);
865 tt_int_op(ret
, OP_EQ
, 0);
866 ret
= parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns
.fresh_until
);
867 tt_int_op(ret
, OP_EQ
, 0);
868 dirauth_sched_recalculate_timing(get_options(), ns
.valid_after
);
869 ret
= hs_in_period_between_tp_and_srv(&ns
, 0);
870 tt_int_op(ret
, OP_EQ
, 0);
/* 12:00-13:00: TP#1 has started -> true. */
872 ret
= parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns
.valid_after
);
873 tt_int_op(ret
, OP_EQ
, 0);
874 ret
= parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns
.fresh_until
);
875 tt_int_op(ret
, OP_EQ
, 0);
876 dirauth_sched_recalculate_timing(get_options(), ns
.valid_after
);
877 ret
= hs_in_period_between_tp_and_srv(&ns
, 0);
878 tt_int_op(ret
, OP_EQ
, 1);
/* 23:00-00:00: still inside TP#1 window -> true. */
880 ret
= parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns
.valid_after
);
881 tt_int_op(ret
, OP_EQ
, 0);
882 ret
= parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns
.fresh_until
);
883 tt_int_op(ret
, OP_EQ
, 0);
884 dirauth_sched_recalculate_timing(get_options(), ns
.valid_after
);
885 ret
= hs_in_period_between_tp_and_srv(&ns
, 0);
886 tt_int_op(ret
, OP_EQ
, 1);
/* 00:00-01:00 next day: SRV#2 published -> false again. */
888 ret
= parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns
.valid_after
);
889 tt_int_op(ret
, OP_EQ
, 0);
890 ret
= parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns
.fresh_until
);
891 tt_int_op(ret
, OP_EQ
, 0);
892 dirauth_sched_recalculate_timing(get_options(), ns
.valid_after
);
893 ret
= hs_in_period_between_tp_and_srv(&ns
, 0);
894 tt_int_op(ret
, OP_EQ
, 0);
900 /************ Reachability Test (it is huge) ****************/
902 /* Simulate different consensus for client and service. Used by the
903 * reachability test. The SRV and responsible HSDir list are used by all
904 * reachability tests so make them common to simplify setup and teardown. */
/* Separate fake consensuses let client and service disagree on time. */
905 static networkstatus_t
*mock_service_ns
= NULL
;
906 static networkstatus_t
*mock_client_ns
= NULL
;
/* Shared random values; initialized by setup_reachability_test(). */
907 static sr_srv_t current_srv
, previous_srv
;
/* Responsible-HSDir lists, one per side; allocated in setup, freed in
 * cleanup. */
908 static smartlist_t
*service_responsible_hsdirs
= NULL
;
909 static smartlist_t
*client_responsible_hsdirs
= NULL
;
/* Service-side consensus mocks: lazily create and return mock_service_ns.
 * NOTE(review): truncated excerpt -- braces, (void) casts of the arguments
 * and some statements are not visible here. */
911 static networkstatus_t
*
912 mock_networkstatus_get_reasonably_live_consensus_service(time_t now
,
918 if (mock_service_ns
) {
919 return mock_service_ns
;
/* First call: build an empty consensus shell. */
922 mock_service_ns
= tor_malloc_zero(sizeof(networkstatus_t
));
923 mock_service_ns
->routerstatus_list
= smartlist_new();
924 mock_service_ns
->type
= NS_TYPE_CONSENSUS
;
926 return mock_service_ns
;
/* Latest-consensus variant simply delegates with dummy arguments. */
929 static networkstatus_t
*
930 mock_networkstatus_get_latest_consensus_service(void)
932 return mock_networkstatus_get_reasonably_live_consensus_service(0, 0);
/* Client-side consensus mocks: mirror of the service-side pair above but
 * backed by mock_client_ns.
 * NOTE(review): truncated excerpt -- braces and (void) casts are not
 * visible here. */
935 static networkstatus_t
*
936 mock_networkstatus_get_reasonably_live_consensus_client(time_t now
, int flavor
)
941 if (mock_client_ns
) {
942 return mock_client_ns
;
/* First call: build an empty consensus shell. */
945 mock_client_ns
= tor_malloc_zero(sizeof(networkstatus_t
));
946 mock_client_ns
->routerstatus_list
= smartlist_new();
947 mock_client_ns
->type
= NS_TYPE_CONSENSUS
;
949 return mock_client_ns
;
/* Latest-consensus variant simply delegates with dummy arguments. */
952 static networkstatus_t
*
953 mock_networkstatus_get_latest_consensus_client(void)
955 return mock_networkstatus_get_reasonably_live_consensus_client(0, 0);
958 /* Mock function because we are not trying to test the close circuit that does
959 * an awful lot of checks on the circuit object. */
/* NOTE(review): truncated excerpt -- return type, remaining parameters,
 * braces and the (empty) body are not visible here. */
961 mock_circuit_mark_for_close(circuit_t
*circ
, int reason
, int line
,
971 /* Initialize a big HSDir V3 hash ring. */
/* NOTE(review): truncated excerpt -- return type, braces, the "ret"
 * declaration and the "done:" tail are not visible here. Note the
 * inline comment says "200 hsdirs" while the loop/assert use 250;
 * the code, not the comment, is authoritative. */
973 helper_initialize_big_hash_ring(networkstatus_t
*ns
)
977 /* Generate 250 hsdirs! :) */
978 for (int counter
= 1 ; counter
< 251 ; counter
++) {
979 /* Let's generate random nickname for each hsdir... */
980 char nickname_binary
[8];
981 char nickname_str
[13] = {0};
982 crypto_rand(nickname_binary
, sizeof(nickname_binary
));
/* base64 of 8 random bytes -> 12 chars + NUL fits nickname_str[13]. */
983 ret
= base64_encode(nickname_str
, sizeof(nickname_str
),
984 nickname_binary
, sizeof(nickname_binary
), 0);
985 tt_int_op(ret
, OP_EQ
, 12);
986 helper_add_hsdir_to_networkstatus(ns
, counter
, nickname_str
, 1);
989 /* Make sure we have 200 hsdirs in our list */
990 tt_int_op(smartlist_len(ns
->routerstatus_list
), OP_EQ
, 250);
996 /** Initialize service and publish its descriptor as needed. Return the newly
997 * allocated service object to the caller. */
/* NOTE(review): truncated excerpt -- braces, the "retval" declaration and
 * the "done:"/return tail are not visible here. Ownership: the returned
 * service is registered in the global service map; callers must remove
 * and free it. */
998 static hs_service_t
*
999 helper_init_service(time_t now
)
1002 hs_service_t
*service
= hs_service_new(get_options());
1004 service
->config
.version
= HS_VERSION_THREE
;
1005 ed25519_secret_key_generate(&service
->keys
.identity_sk
, 0);
1006 ed25519_public_key_generate(&service
->keys
.identity_pk
,
1007 &service
->keys
.identity_sk
);
1008 /* Register service to global map. */
1009 retval
= register_service(get_hs_service_map(), service
);
1010 tt_int_op(retval
, OP_EQ
, 0);
1012 /* Initialize service descriptor */
1013 build_all_descriptors(now
);
1014 tt_assert(service
->desc_current
);
1015 tt_assert(service
->desc_next
);
1021 /* Helper function to set the RFC 1123 time string into t. */
1023 set_consensus_times(const char *timestr
, time_t *t
)
1028 int ret
= parse_rfc1123_time(timestr
, t
);
1029 tt_int_op(ret
, OP_EQ
, 0);
1035 /* Helper function to cleanup the mock consensus (client and service) */
1037 cleanup_mock_ns(void)
1039 if (mock_service_ns
) {
1040 SMARTLIST_FOREACH(mock_service_ns
->routerstatus_list
,
1041 routerstatus_t
*, rs
, routerstatus_free(rs
));
1042 smartlist_clear(mock_service_ns
->routerstatus_list
);
1043 mock_service_ns
->sr_info
.current_srv
= NULL
;
1044 mock_service_ns
->sr_info
.previous_srv
= NULL
;
1045 networkstatus_vote_free(mock_service_ns
);
1046 mock_service_ns
= NULL
;
1049 if (mock_client_ns
) {
1050 SMARTLIST_FOREACH(mock_client_ns
->routerstatus_list
,
1051 routerstatus_t
*, rs
, routerstatus_free(rs
));
1052 smartlist_clear(mock_client_ns
->routerstatus_list
);
1053 mock_client_ns
->sr_info
.current_srv
= NULL
;
1054 mock_client_ns
->sr_info
.previous_srv
= NULL
;
1055 networkstatus_vote_free(mock_client_ns
);
1056 mock_client_ns
= NULL
;
1060 /* Helper function to setup a reachability test. Once called, the
1061 * cleanup_reachability_test MUST be called at the end. */
1063 setup_reachability_test(void)
1065 MOCK(circuit_mark_for_close_
, mock_circuit_mark_for_close
);
1066 MOCK(get_or_state
, get_or_state_replacement
);
1070 /* Baseline to start with. */
1071 memset(¤t_srv
, 0, sizeof(current_srv
));
1072 memset(&previous_srv
, 1, sizeof(previous_srv
));
1074 /* Initialize the consensuses. */
1075 mock_networkstatus_get_latest_consensus_service();
1076 mock_networkstatus_get_latest_consensus_client();
1078 service_responsible_hsdirs
= smartlist_new();
1079 client_responsible_hsdirs
= smartlist_new();
1082 /* Helper function to cleanup a reachability test initial setup. */
1084 cleanup_reachability_test(void)
1086 smartlist_free(service_responsible_hsdirs
);
1087 service_responsible_hsdirs
= NULL
;
1088 smartlist_free(client_responsible_hsdirs
);
1089 client_responsible_hsdirs
= NULL
;
1092 UNMOCK(get_or_state
);
1093 UNMOCK(circuit_mark_for_close_
);
1096 /* A reachability test always check if the resulting service and client
1097 * responsible HSDir for the given parameters are equal.
1099 * Return true iff the same exact nodes are in both list. */
1101 are_responsible_hsdirs_equal(void)
1104 tt_int_op(smartlist_len(client_responsible_hsdirs
), OP_EQ
, 6);
1105 tt_int_op(smartlist_len(service_responsible_hsdirs
), OP_EQ
, 8);
1107 SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs
,
1108 const routerstatus_t
*, c_rs
) {
1109 SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs
,
1110 const routerstatus_t
*, s_rs
) {
1111 if (tor_memeq(c_rs
->identity_digest
, s_rs
->identity_digest
,
1116 } SMARTLIST_FOREACH_END(s_rs
);
1117 } SMARTLIST_FOREACH_END(c_rs
);
1120 return (count
== 6);
1123 /* Tor doesn't use such a function to get the previous HSDir, it is only used
1124 * in node_set_hsdir_index(). We need it here so we can test the reachability
1125 * scenario 6 that requires the previous time period to compute the list of
1126 * responsible HSDir because of the client state timing. */
1128 get_previous_time_period(time_t now
)
1130 return hs_get_time_period_num(now
) - 1;
1133 /* Configuration of a reachability test scenario. */
1134 typedef struct reachability_cfg_t
{
1135 /* Consensus timings to be set. They have to be compliant with
1136 * RFC 1123 time format. */
1137 const char *service_valid_after
;
1138 const char *service_valid_until
;
1139 const char *client_valid_after
;
1140 const char *client_valid_until
;
1142 /* SRVs that the service and client should use. */
1143 sr_srv_t
*service_current_srv
;
1144 sr_srv_t
*service_previous_srv
;
1145 sr_srv_t
*client_current_srv
;
1146 sr_srv_t
*client_previous_srv
;
1148 /* A time period function for the service to use for this scenario. For a
1149 * successful reachability test, the client always use the current time
1150 * period thus why no client function. */
1151 uint64_t (*service_time_period_fn
)(time_t);
1153 /* Is the client and service expected to be in a new time period. After
1154 * setting the consensus time, the reachability test checks
1155 * hs_in_period_between_tp_and_srv() and test the returned value against
1157 unsigned int service_in_new_tp
;
1158 unsigned int client_in_new_tp
;
1160 /* Some scenario requires a hint that the client, because of its consensus
1161 * time, will request the "next" service descriptor so this indicates if it
1162 * is the case or not. */
1163 unsigned int client_fetch_next_desc
;
1164 } reachability_cfg_t
;
/* Some defines to help with semantics while reading a configuration below.
 * (IN_NEW_TP was lost in the garbled text but is used by the scenario table,
 * so it is restored here as the complement of NOT_IN_NEW_TP.) */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1
1172 static reachability_cfg_t reachability_scenarios
[] = {
1175 * +------------------------------------------------------------------+
1177 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1178 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1180 * | $==========|-----------$===========|-----------$===========| |
1183 * +------------------------------------------------------------------+
1185 * S: Service, C: Client
1187 * Service consensus valid_after time is set to 13:00 and client to 15:00,
1188 * both are after TP#1 thus have access to SRV#1. Service and client should
1192 { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
1193 "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
1194 "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
1195 "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
1196 ¤t_srv
, NULL
, /* Service current and previous SRV */
1197 ¤t_srv
, NULL
, /* Client current and previous SRV */
1198 hs_get_time_period_num
, /* Service time period function. */
1199 IN_NEW_TP
, /* Is service in new TP? */
1200 IN_NEW_TP
, /* Is client in new TP? */
1205 * +------------------------------------------------------------------+
1207 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1208 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1210 * | $==========|-----------$===========|-----------$===========| |
1213 * +------------------------------------------------------------------+
1215 * S: Service, C: Client
1217 * Service consensus valid_after time is set to 23:00 and client to 01:00,
1218 * which makes the client after the SRV#2 and the service just before. The
1219 * service should only be using TP#1. The client should be using TP#1.
1222 { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
1223 "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
1224 "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
1225 "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
1226 &previous_srv
, NULL
, /* Service current and previous SRV */
1227 ¤t_srv
, &previous_srv
, /* Client current and previous SRV */
1228 hs_get_time_period_num
, /* Service time period function. */
1229 IN_NEW_TP
, /* Is service in new TP? */
1230 NOT_IN_NEW_TP
, /* Is client in new TP? */
1235 * +------------------------------------------------------------------+
1237 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1238 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1240 * | $==========|-----------$===========|----------$===========| |
1243 * +------------------------------------------------------------------+
1245 * S: Service, C: Client
1247 * Service consensus valid_after time is set to 03:00 and client to 05:00,
1248 * which makes both after SRV#2. The service should be using TP#1 as its
1249 * current time period. The client should be using TP#1.
1252 { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
1253 "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
1254 "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
1255 "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
1256 ¤t_srv
, &previous_srv
, /* Service current and previous SRV */
1257 ¤t_srv
, &previous_srv
, /* Client current and previous SRV */
1258 hs_get_time_period_num
, /* Service time period function. */
1259 NOT_IN_NEW_TP
, /* Is service in new TP? */
1260 NOT_IN_NEW_TP
, /* Is client in new TP? */
1261 DONT_NEED_NEXT_DESC
},
1265 * +------------------------------------------------------------------+
1267 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1268 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1270 * | $==========|-----------$===========|-----------$===========| |
1273 * +------------------------------------------------------------------+
1275 * S: Service, C: Client
1277 * Service consensus valid_after time is set to 11:00 and client to 13:00,
1278 * which makes the service before TP#2 and the client just after. The
1279 * service should be using TP#1 as its current time period and TP#2 as the
1280 * next. The client should be using TP#2 time period.
1283 { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
1284 "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
1285 "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
1286 "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
1287 ¤t_srv
, &previous_srv
, /* Service current and previous SRV */
1288 ¤t_srv
, &previous_srv
, /* Client current and previous SRV */
1289 hs_get_next_time_period_num
, /* Service time period function. */
1290 NOT_IN_NEW_TP
, /* Is service in new TP? */
1291 IN_NEW_TP
, /* Is client in new TP? */
1296 * +------------------------------------------------------------------+
1298 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1299 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1301 * | $==========|-----------$===========|-----------$===========| |
1304 * +------------------------------------------------------------------+
1306 * S: Service, C: Client
1308 * Service consensus valid_after time is set to 01:00 and client to 23:00,
1309 * which makes the service after SRV#2 and the client just before. The
1310 * service should be using TP#1 as its current time period and TP#2 as the
1311 * next. The client should be using TP#1 time period.
1314 { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
1315 "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
1316 "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
1317 "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
1318 ¤t_srv
, &previous_srv
, /* Service current and previous SRV */
1319 &previous_srv
, NULL
, /* Client current and previous SRV */
1320 hs_get_time_period_num
, /* Service time period function. */
1321 NOT_IN_NEW_TP
, /* Is service in new TP? */
1322 IN_NEW_TP
, /* Is client in new TP? */
1323 DONT_NEED_NEXT_DESC
},
1327 * +------------------------------------------------------------------+
1329 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1330 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1332 * | $==========|-----------$===========|-----------$===========| |
1335 * +------------------------------------------------------------------+
1337 * S: Service, C: Client
1339 * Service consensus valid_after time is set to 13:00 and client to 11:00,
1340 * which makes the service outside after TP#2 and the client just before.
1341 * The service should be using TP#1 as its current time period and TP#2 as
1342 * its next. The client should be using TP#1 time period.
1345 { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
1346 "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
1347 "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
1348 "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
1349 ¤t_srv
, &previous_srv
, /* Service current and previous SRV */
1350 ¤t_srv
, &previous_srv
, /* Client current and previous SRV */
1351 get_previous_time_period
, /* Service time period function. */
1352 IN_NEW_TP
, /* Is service in new TP? */
1353 NOT_IN_NEW_TP
, /* Is client in new TP? */
1354 DONT_NEED_NEXT_DESC
},
1357 { NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, 0, 0, 0}
1360 /* Run a single reachability scenario. num_scenario is the corresponding
1361 * scenario number from the documentation. It is used to log it in case of
1362 * failure so we know which scenario fails. */
1364 run_reachability_scenario(const reachability_cfg_t
*cfg
, int num_scenario
)
1367 hs_service_t
*service
;
1368 uint64_t service_tp
, client_tp
;
1369 ed25519_public_key_t service_blinded_pk
, client_blinded_pk
;
1371 setup_reachability_test();
1375 /* Set service consensus time. */
1376 set_consensus_times(cfg
->service_valid_after
,
1377 &mock_service_ns
->valid_after
);
1378 set_consensus_times(cfg
->service_valid_until
,
1379 &mock_service_ns
->valid_until
);
1380 set_consensus_times(cfg
->service_valid_until
,
1381 &mock_service_ns
->fresh_until
);
1382 dirauth_sched_recalculate_timing(get_options(),
1383 mock_service_ns
->valid_after
);
1384 /* Check that service is in the right time period point */
1385 tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns
, 0), OP_EQ
,
1386 cfg
->service_in_new_tp
);
1388 /* Set client consensus time. */
1389 set_consensus_times(cfg
->client_valid_after
,
1390 &mock_client_ns
->valid_after
);
1391 set_consensus_times(cfg
->client_valid_until
,
1392 &mock_client_ns
->valid_until
);
1393 set_consensus_times(cfg
->client_valid_until
,
1394 &mock_client_ns
->fresh_until
);
1395 dirauth_sched_recalculate_timing(get_options(),
1396 mock_client_ns
->valid_after
);
1397 /* Check that client is in the right time period point */
1398 tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns
, 0), OP_EQ
,
1399 cfg
->client_in_new_tp
);
1401 /* Set the SRVs for this scenario. */
1402 mock_client_ns
->sr_info
.current_srv
= cfg
->client_current_srv
;
1403 mock_client_ns
->sr_info
.previous_srv
= cfg
->client_previous_srv
;
1404 mock_service_ns
->sr_info
.current_srv
= cfg
->service_current_srv
;
1405 mock_service_ns
->sr_info
.previous_srv
= cfg
->service_previous_srv
;
1407 /* Initialize a service to get keys. */
1408 update_approx_time(mock_service_ns
->valid_after
);
1409 service
= helper_init_service(mock_service_ns
->valid_after
+1);
1412 * === Client setup ===
1415 MOCK(networkstatus_get_reasonably_live_consensus
,
1416 mock_networkstatus_get_reasonably_live_consensus_client
);
1417 MOCK(networkstatus_get_latest_consensus
,
1418 mock_networkstatus_get_latest_consensus_client
);
1420 /* Make networkstatus_is_live() happy. */
1421 update_approx_time(mock_client_ns
->valid_after
);
1422 /* Initialize a big hashring for this consensus with the hsdir index set. */
1423 helper_initialize_big_hash_ring(mock_client_ns
);
1425 /* Client ONLY use the current time period. This is the whole point of these
1426 * reachability test that is to make sure the client can always reach the
1427 * service using only its current time period. */
1428 client_tp
= hs_get_time_period_num(0);
1430 hs_build_blinded_pubkey(&service
->keys
.identity_pk
, NULL
, 0,
1431 client_tp
, &client_blinded_pk
);
1432 hs_get_responsible_hsdirs(&client_blinded_pk
, client_tp
, 0, 1,
1433 client_responsible_hsdirs
);
1434 /* Cleanup the nodelist so we can let the service computes its own set of
1435 * node with its own hashring. */
1437 tt_int_op(smartlist_len(client_responsible_hsdirs
), OP_EQ
, 6);
1439 UNMOCK(networkstatus_get_latest_consensus
);
1440 UNMOCK(networkstatus_get_reasonably_live_consensus
);
1443 * === Service setup ===
1446 MOCK(networkstatus_get_reasonably_live_consensus
,
1447 mock_networkstatus_get_reasonably_live_consensus_service
);
1448 MOCK(networkstatus_get_latest_consensus
,
1449 mock_networkstatus_get_latest_consensus_service
);
1451 /* Make networkstatus_is_live() happy. */
1452 update_approx_time(mock_service_ns
->valid_after
);
1453 /* Initialize a big hashring for this consensus with the hsdir index set. */
1454 helper_initialize_big_hash_ring(mock_service_ns
);
1456 service_tp
= cfg
->service_time_period_fn(0);
1458 hs_build_blinded_pubkey(&service
->keys
.identity_pk
, NULL
, 0,
1459 service_tp
, &service_blinded_pk
);
1461 /* A service builds two lists of responsible HSDir, for the current and the
1462 * next descriptor. Depending on the scenario, the client timing indicate if
1463 * it is fetching the current or the next descriptor so we use the
1464 * "client_fetch_next_desc" to know which one the client is trying to get to
1465 * confirm that the service computes the same hashring for the same blinded
1466 * key and service time period function. */
1467 hs_get_responsible_hsdirs(&service_blinded_pk
, service_tp
,
1468 cfg
->client_fetch_next_desc
, 0,
1469 service_responsible_hsdirs
);
1471 tt_int_op(smartlist_len(service_responsible_hsdirs
), OP_EQ
, 8);
1473 UNMOCK(networkstatus_get_latest_consensus
);
1474 UNMOCK(networkstatus_get_reasonably_live_consensus
);
1476 /* Some testing of the values we just got from the client and service. */
1477 tt_mem_op(&client_blinded_pk
, OP_EQ
, &service_blinded_pk
,
1478 ED25519_PUBKEY_LEN
);
1479 tt_int_op(are_responsible_hsdirs_equal(), OP_EQ
, 1);
1481 /* Everything went well. */
1485 cleanup_reachability_test();
1487 /* Do this so we can know which scenario failed. */
1489 tor_snprintf(msg
, sizeof(msg
), "Scenario %d failed", num_scenario
);
1496 test_reachability(void *arg
)
1500 /* NOTE: An important axiom to understand here is that SRV#N must only be
1501 * used with TP#N value. For example, SRV#2 with TP#1 should NEVER be used
1502 * together. The HSDir index computation is based on this axiom.*/
1504 for (int i
= 0; reachability_scenarios
[i
].service_valid_after
; ++i
) {
1505 int ret
= run_reachability_scenario(&reachability_scenarios
[i
], i
+ 1);
1512 /** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
1513 * its identity digest in <b>hsdir_digest_out</b>. */
1515 helper_client_pick_hsdir(const ed25519_public_key_t
*onion_identity_pk
,
1516 char *hsdir_digest_out
)
1518 tt_assert(onion_identity_pk
);
1520 routerstatus_t
*client_hsdir
= pick_hsdir_v3(onion_identity_pk
);
1521 tt_assert(client_hsdir
);
1522 digest_to_base64(hsdir_digest_out
, client_hsdir
->identity_digest
);
1529 test_hs_indexes(void *arg
)
1532 uint64_t period_num
= 42;
1533 ed25519_public_key_t pubkey
;
1537 /* Build the hs_index */
1539 uint8_t hs_index
[DIGEST256_LEN
];
1540 const char *b32_test_vector
=
1541 "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
1542 char test_vector
[DIGEST256_LEN
];
1543 ret
= base16_decode(test_vector
, sizeof(test_vector
), b32_test_vector
,
1544 strlen(b32_test_vector
));
1545 tt_int_op(ret
, OP_EQ
, sizeof(test_vector
));
1546 /* Our test vector uses a public key set to 32 bytes of \x42. */
1547 memset(&pubkey
, '\x42', sizeof(pubkey
));
1548 hs_build_hs_index(1, &pubkey
, period_num
, hs_index
);
1549 tt_mem_op(hs_index
, OP_EQ
, test_vector
, sizeof(hs_index
));
1552 /* Build the hsdir_index */
1554 uint8_t srv
[DIGEST256_LEN
];
1555 uint8_t hsdir_index
[DIGEST256_LEN
];
1556 const char *b32_test_vector
=
1557 "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
1558 char test_vector
[DIGEST256_LEN
];
1559 ret
= base16_decode(test_vector
, sizeof(test_vector
), b32_test_vector
,
1560 strlen(b32_test_vector
));
1561 tt_int_op(ret
, OP_EQ
, sizeof(test_vector
));
1562 /* Our test vector uses a public key set to 32 bytes of \x42. */
1563 memset(&pubkey
, '\x42', sizeof(pubkey
));
1564 memset(srv
, '\x43', sizeof(srv
));
1565 hs_build_hsdir_index(&pubkey
, srv
, period_num
, hsdir_index
);
1566 tt_mem_op(hsdir_index
, OP_EQ
, test_vector
, sizeof(hsdir_index
));
/* Timeline positions used by helper_set_consensus_and_system_time(): the
 * SRV-to-TP segment runs 00:00-12:00 UTC and the TP-to-SRV segment runs
 * 12:00-00:00 UTC. (Garbled line-number artifacts stripped from the
 * directives.) */
#define EARLY_IN_SRV_TO_TP 0
#define LATE_IN_SRV_TO_TP 1
#define EARLY_IN_TP_TO_SRV 2
#define LATE_IN_TP_TO_SRV 3
1578 /** Set the consensus and system time based on <b>position</b>. See the
1579 * following diagram for details:
1581 * +------------------------------------------------------------------+
1583 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1584 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1586 * | $==========|-----------$===========|----------$===========| |
1589 * +------------------------------------------------------------------+
1592 helper_set_consensus_and_system_time(networkstatus_t
*ns
, int position
)
1594 time_t real_time
= 0;
1596 /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
1597 * valid_after is what matters here, the rest is just to specify the voting
1598 * period correctly. */
1599 if (position
== LATE_IN_SRV_TO_TP
) {
1600 parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns
->valid_after
);
1601 parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns
->fresh_until
);
1602 parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns
->valid_until
);
1603 } else if (position
== EARLY_IN_TP_TO_SRV
) {
1604 parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns
->valid_after
);
1605 parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns
->fresh_until
);
1606 parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns
->valid_until
);
1607 } else if (position
== LATE_IN_TP_TO_SRV
) {
1608 parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns
->valid_after
);
1609 parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns
->fresh_until
);
1610 parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns
->valid_until
);
1611 } else if (position
== EARLY_IN_SRV_TO_TP
) {
1612 parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns
->valid_after
);
1613 parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns
->fresh_until
);
1614 parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns
->valid_until
);
1618 dirauth_sched_recalculate_timing(get_options(), ns
->valid_after
);
1620 /* Set system time: pretend to be just 2 minutes before consensus expiry */
1621 real_time
= ns
->valid_until
- 120;
1622 update_approx_time(real_time
);
1628 /** Helper function that carries out the actual test for
1629 * test_client_service_sync() */
1631 helper_test_hsdir_sync(networkstatus_t
*ns
,
1632 int service_position
, int client_position
,
1633 int client_fetches_next_desc
)
1635 hs_service_descriptor_t
*desc
;
1639 * 1) Initialize service time: consensus and system time.
1640 * 1.1) Initialize service hash ring
1641 * 2) Initialize service and publish descriptors.
1642 * 3) Initialize client time: consensus and system time.
1643 * 3.1) Initialize client hash ring
1644 * 4) Try to fetch descriptor as client, and CHECK that the HSDir picked by
1645 * the client was also picked by service.
1648 /* 1) Initialize service time: consensus and real time */
1649 time_t now
= helper_set_consensus_and_system_time(ns
, service_position
);
1650 helper_initialize_big_hash_ring(ns
);
1652 /* 2) Initialize service */
1653 hs_service_t
*service
= helper_init_service(now
);
1654 desc
= client_fetches_next_desc
? service
->desc_next
: service
->desc_current
;
1656 /* Now let's upload our desc to all hsdirs */
1657 upload_descriptor_to_all(service
, desc
);
1658 /* Cleanup right now so we don't memleak on error. */
1660 /* Check that previous hsdirs were populated */
1661 tt_int_op(smartlist_len(desc
->previous_hsdirs
), OP_EQ
, 8);
1663 /* 3) Initialize client time */
1664 helper_set_consensus_and_system_time(ns
, client_position
);
1667 SMARTLIST_FOREACH(ns
->routerstatus_list
,
1668 routerstatus_t
*, rs
, routerstatus_free(rs
));
1669 smartlist_clear(ns
->routerstatus_list
);
1670 helper_initialize_big_hash_ring(ns
);
1672 /* 4) Pick 6 HSDirs as a client and check that they were also chosen by the
1674 for (int y
= 0 ; y
< 6 ; y
++) {
1675 char client_hsdir_b64_digest
[BASE64_DIGEST_LEN
+1] = {0};
1676 helper_client_pick_hsdir(&service
->keys
.identity_pk
,
1677 client_hsdir_b64_digest
);
1679 /* CHECK: Go through the hsdirs chosen by the service and make sure that it
1680 * contains the one picked by the client! */
1681 retval
= smartlist_contains_string(desc
->previous_hsdirs
,
1682 client_hsdir_b64_digest
);
1683 tt_int_op(retval
, OP_EQ
, 1);
1686 /* Finally, try to pick a 7th hsdir and see that NULL is returned since we
1687 * exhausted all of them: */
1688 tt_assert(!pick_hsdir_v3(&service
->keys
.identity_pk
));
1691 /* At the end: free all services and initialize the subsystem again, we will
1692 * need it for next scenario. */
1694 hs_service_free_all();
1696 SMARTLIST_FOREACH(ns
->routerstatus_list
,
1697 routerstatus_t
*, rs
, routerstatus_free(rs
));
1698 smartlist_clear(ns
->routerstatus_list
);
1701 /** This test ensures that client and service will pick the same HSDirs, under
1702 * various timing scenarios:
1703 * a) Scenario where both client and service are in the time segment between
1705 * b) Scenario where both client and service are in the time segment between
1707 * c) Scenario where service is between SRV#N and TP#N, but client is between
1709 * d) Scenario where service is between TP#N and SRV#N+1, but client is
1710 * between SRV#N and TP#N.
1712 * This test is important because it tests that upload_descriptor_to_all() is
1713 * in synch with pick_hsdir_v3(). That's not the case for the
1714 * test_reachability() test which only compares the responsible hsdir sets.
1717 test_client_service_hsdir_set_sync(void *arg
)
1719 networkstatus_t
*ns
= NULL
;
1723 MOCK(networkstatus_get_latest_consensus
,
1724 mock_networkstatus_get_latest_consensus
);
1725 MOCK(networkstatus_get_reasonably_live_consensus
,
1726 mock_networkstatus_get_reasonably_live_consensus
);
1728 get_or_state_replacement
);
1729 MOCK(hs_desc_encode_descriptor
,
1730 mock_hs_desc_encode_descriptor
);
1731 MOCK(directory_initiate_request
,
1732 mock_directory_initiate_request
);
1736 /* Initialize a big hash ring: we want it to be big so that client and
1737 * service cannot accidentally select the same HSDirs */
1738 ns
= networkstatus_get_latest_consensus();
1741 /** Now test the various synch scenarios. See the helper function for more
1744 /* a) Scenario where both client and service are in the time segment between
1745 * SRV#N and TP#N. At this time the client fetches the first HS desc:
1747 * +------------------------------------------------------------------+
1749 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1750 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1752 * | $==========|-----------$===========|----------$===========| |
1755 * +------------------------------------------------------------------+
1757 helper_test_hsdir_sync(ns
, LATE_IN_SRV_TO_TP
, LATE_IN_SRV_TO_TP
, 0);
1759 /* b) Scenario where both client and service are in the time segment between
1760 * TP#N and SRV#N+1. At this time the client fetches the second HS
1763 * +------------------------------------------------------------------+
1765 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1766 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1768 * | $==========|-----------$===========|-----------$===========| |
1771 * +------------------------------------------------------------------+
1773 helper_test_hsdir_sync(ns
, LATE_IN_TP_TO_SRV
, LATE_IN_TP_TO_SRV
, 1);
1775 /* c) Scenario where service is between SRV#N and TP#N, but client is
1776 * between TP#N and SRV#N+1. Client is forward in time so it fetches the
1779 * +------------------------------------------------------------------+
1781 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1782 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1784 * | $==========|-----------$===========|-----------$===========| |
1787 * +------------------------------------------------------------------+
1789 helper_test_hsdir_sync(ns
, LATE_IN_SRV_TO_TP
, EARLY_IN_TP_TO_SRV
, 1);
1791 /* d) Scenario where service is between TP#N and SRV#N+1, but client is
1792 * between SRV#N and TP#N. Client is backwards in time so it fetches the
1795 * +------------------------------------------------------------------+
1797 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1798 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1800 * | $==========|-----------$===========|-----------$===========| |
1803 * +------------------------------------------------------------------+
1805 helper_test_hsdir_sync(ns
, EARLY_IN_TP_TO_SRV
, LATE_IN_SRV_TO_TP
, 0);
1807 /* e) Scenario where service is between SRV#N and TP#N, but client is
1808 * between TP#N-1 and SRV#3. Client is backwards in time so it fetches
1809 * the first HS desc.
1811 * +------------------------------------------------------------------+
1813 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1814 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1816 * | $==========|-----------$===========|-----------$===========| |
1819 * +------------------------------------------------------------------+
1821 helper_test_hsdir_sync(ns
, EARLY_IN_SRV_TO_TP
, LATE_IN_TP_TO_SRV
, 0);
1823 /* f) Scenario where service is between TP#N and SRV#N+1, but client is
1824 * between SRV#N+1 and TP#N+1. Client is forward in time so it fetches
1825 * the second HS desc.
1827 * +------------------------------------------------------------------+
1829 * | 00:00 12:00 00:00 12:00 00:00 12:00 |
1830 * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
1832 * | $==========|-----------$===========|-----------$===========| |
1835 * +------------------------------------------------------------------+
1837 helper_test_hsdir_sync(ns
, LATE_IN_TP_TO_SRV
, EARLY_IN_SRV_TO_TP
, 1);
1840 networkstatus_vote_free(ns
);
1841 nodelist_free_all();
1845 struct testcase_t hs_common_tests
[] = {
1846 { "build_address", test_build_address
, TT_FORK
,
1848 { "validate_address", test_validate_address
, TT_FORK
,
1850 { "time_period", test_time_period
, TT_FORK
,
1852 { "start_time_of_next_time_period", test_start_time_of_next_time_period
,
1853 TT_FORK
, NULL
, NULL
},
1854 { "responsible_hsdirs", test_responsible_hsdirs
, TT_FORK
,
1856 { "desc_reupload_logic", test_desc_reupload_logic
, TT_FORK
,
1858 { "disaster_srv", test_disaster_srv
, TT_FORK
,
1860 { "hid_serv_request_tracker", test_hid_serv_request_tracker
, TT_FORK
,
1862 { "parse_extended_hostname", test_parse_extended_hostname
, TT_FORK
,
1864 { "time_between_tp_and_srv", test_time_between_tp_and_srv
, TT_FORK
,
1866 { "reachability", test_reachability
, TT_FORK
,
1868 { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync
,
1869 TT_FORK
, NULL
, NULL
},
1870 { "hs_indexes", test_hs_indexes
, TT_FORK
,