/* Copyright (c) 2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file test_hs_common.c
 * \brief Test hidden service common functionalities.
 */

#define HS_COMMON_PRIVATE
#define HS_CLIENT_PRIVATE
#define HS_SERVICE_PRIVATE
#define NODELIST_PRIVATE

#include "test.h"
#include "test_helpers.h"
#include "log_test_helpers.h"
#include "hs_test_helpers.h"

#include "connection_edge.h"
#include "hs_common.h"
#include "hs_client.h"
#include "hs_service.h"
#include "config.h"
#include "networkstatus.h"
#include "directory.h"
#include "dirauth/dirvote.h"
#include "nodelist.h"
#include "routerlist.h"
#include "statefile.h"
#include "circuitlist.h"
#include "dirauth/shared_random.h"
#include "util.h"
/** Test the validation of HS v3 addresses */
static void
test_validate_address(void *arg)
{
  int ret;

  (void) arg;

  /* Address too short and too long. */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid("blah");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("has an invalid length");
  teardown_capture_of_logs();

  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("has an invalid length");
  teardown_capture_of_logs();

  /* Invalid checksum (taken from prop224) */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("invalid checksum");
  teardown_capture_of_logs();

  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("invalid checksum");
  teardown_capture_of_logs();

  /* Non base32 decodable string. */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "????????????????????????????????????????????????????????");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("can't be decoded");
  teardown_capture_of_logs();

  /* Valid address. */
  ret = hs_address_is_valid(
           "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
  tt_int_op(ret, OP_EQ, 1);

 done:
  ;
}
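/* For reference when reading the cases above: per prop224, a v3 onion
 * address (before the ".onion" suffix) is
 *   base32(PUBKEY | CHECKSUM | VERSION)
 * where PUBKEY is the 32-byte ed25519 identity key, CHECKSUM is 2 bytes and
 * VERSION is a single byte (0x03), giving 35 bytes, i.e. exactly 56 base32
 * characters. Anything that is not 56 characters trips the length check,
 * and a correctly sized address with a bad embedded checksum trips the
 * checksum check. */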
static int
mock_write_str_to_file(const char *path, const char *str, int bin)
{
  (void)bin;
  tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  tt_str_op(str, OP_EQ,
           "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");

 done:
  return 0;
}
/** Test building HS v3 onion addresses. Uses test vectors from the
 * ./hs_build_address.py script. */
static void
test_build_address(void *arg)
{
  int ret;
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  char pubkey_hex[] =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  hs_service_t *service = NULL;

  (void) arg;

  MOCK(write_str_to_file, mock_write_str_to_file);

  /* The following has been created with the hs_build_address.py script,
   * which follows the proposal 224 specification to build an onion
   * address. */
  static const char *test_addr =
    "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";

  /* Let's try to build the same onion address as the script */
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  tt_str_op(test_addr, OP_EQ, onion_addr);
  /* Validate that address. */
  ret = hs_address_is_valid(onion_addr);
  tt_int_op(ret, OP_EQ, 1);

  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  tor_asprintf(&service->config.directory_path, "/double/five");
  ret = write_address_to_file(service, "squared");
  tt_int_op(ret, OP_EQ, 0);

 done:
  hs_service_free(service);
}
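/* A sketch of what hs_build_address() computes, following prop224 (the
 * same construction hs_build_address.py implements):
 *   CHECKSUM = H(".onion checksum" | PUBKEY | VERSION)[:2]
 *   onion_address = base32(PUBKEY | CHECKSUM | VERSION)
 * with H being SHA3-256 and VERSION being 0x03. */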
/** Test that our HS time period calculation functions work properly */
static void
test_time_period(void *arg)
{
  (void) arg;
  uint64_t tn;
  int retval;
  time_t fake_time, correct_time, start_time;

  /* Let's do the example in prop224 section [TIME-PERIODS] */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);

  /* Check that the time period number is right */
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);

  /* Increase current time to 11:59:59 UTC and check that the time period
     number is still the same */
  fake_time += 3599;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);

  { /* Check start time of next time period */
    retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now take time to 12:00:00 UTC and check that the time period rotated */
  fake_time += 1;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16904);

  /* Now also check our hs_get_next_time_period_num() function */
  tn = hs_get_next_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16905);

  { /* Check start time of next time period again */
    retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now do another sanity check: The time period number at the start of the
   * next time period, must be the same time period number as the one
   * returned from hs_get_next_time_period_num() */
  {
    time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
    tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
              hs_get_next_time_period_num(fake_time));
  }

 done:
  ;
}
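/* Worked example for the 16903 value above, assuming the prop224 defaults
 * of a 1440-minute (86400 s) time period and a 12-hour (43200 s) rotation
 * offset:
 *   Wed, 13 Apr 2016 11:00:00 UTC == 1460545200 (unix time)
 *   (1460545200 - 43200) / 86400  == 16903 (integer division)
 * One hour later, at 12:00:00 UTC, we cross a period boundary and the
 * same division yields exactly 16904. */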
/** Test that we can correctly find the start time of the next time period */
static void
test_start_time_of_next_time_period(void *arg)
{
  (void) arg;
  int retval;
  time_t fake_time;
  char tbuf[ISO_TIME_LEN + 1];
  time_t next_tp_start_time;

  /* Do some basic tests */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  /* Compare it with the correct result */
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);

  /* Another test with an edge-case time (start of TP) */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);

  {
    /* Now pretend we are on a testing network and alter the voting schedule
       to be every 10 seconds. This means that a time period has length 10*24
       seconds (4 minutes). It also means that we apply a rotational offset
       of 120 seconds to the time period, so that it starts at 00:02:00
       instead of 00:00:00. */
    or_options_t *options = get_options_mutable();
    options->TestingTorNetwork = 1;
    options->V3AuthVotingInterval = 10;
    options->TestingV3AuthInitialVotingInterval = 10;

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
  }

 done:
  ;
}
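/* The testing-network numbers above follow from the same formula, assuming
 * that the time period length and the rotation offset scale with
 * V3AuthVotingInterval: with a 10-second voting interval the period becomes
 * 10*24 = 240 s (4 minutes) and the offset 12*10 = 120 s, so periods start
 * at 00:02:00, 00:06:00, 00:10:00, and so on. */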
/* Cleanup the global nodelist. It also frees the "md" in the node_t because
 * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
static void
cleanup_nodelist(void)
{
  smartlist_t *nodelist = nodelist_get_list();
  SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
    tor_free(node->md);
    node->md = NULL;
  } SMARTLIST_FOREACH_END(node);
  nodelist_free_all();
}
static void
helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
                                  int identity_idx,
                                  const char *nickname,
                                  int is_hsdir)
{
  routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  uint8_t identity[DIGEST_LEN];
  tor_addr_t ipv4_addr;

  memset(identity, identity_idx, sizeof(identity));

  memcpy(rs->identity_digest, identity, DIGEST_LEN);
  rs->is_hs_dir = is_hsdir;
  rs->pv.supports_v3_hsdir = 1;
  strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  tor_addr_parse(&ipv4_addr, "1.2.3.4");
  ri->addr = tor_addr_to_ipv4h(&ipv4_addr);
  rs->addr = tor_addr_to_ipv4h(&ipv4_addr);
  ri->nickname = tor_strdup(nickname);
  ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  /* Needed for the HSDir index computation. */
  memset(&ri->cache_info.signing_key_cert->signing_key,
         identity_idx, ED25519_PUBKEY_LEN);
  tt_assert(nodelist_set_routerinfo(ri, NULL));
  node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  tt_assert(node);
  node->rs = rs;
  /* We need this to exist for node_has_preferred_descriptor() to return
   * true. */
  node->md = tor_malloc_zero(sizeof(microdesc_t));
  /* Do this now: the nodelist_set_routerinfo() function needs an "rs" to
   * set the indexes, which it doesn't have when it is called. */
  node_set_hsdir_index(node, ns);
  node->ri = NULL;
  smartlist_add(ns->routerstatus_list, rs);

 done:
  routerinfo_free(ri);
}
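/* Note on ownership in the helper above: the routerstatus_t ends up in
 * ns->routerstatus_list and is freed by each test's cleanup code, the
 * microdesc_t hung off the node is freed by cleanup_nodelist(), and the
 * routerinfo_t is only needed long enough to create the node_t, so it is
 * freed on the way out (node->ri was set to NULL first). */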
static networkstatus_t *mock_ns = NULL;

static networkstatus_t *
mock_networkstatus_get_latest_consensus(void)
{
  time_t now = approx_time();

  /* If initialized, return it */
  if (mock_ns) {
    return mock_ns;
  }

  /* Initialize fake consensus */
  mock_ns = tor_malloc_zero(sizeof(networkstatus_t));

  /* This consensus is live */
  mock_ns->valid_after = now-1;
  mock_ns->fresh_until = now+1;
  mock_ns->valid_until = now+2;
  /* Create routerstatus list */
  mock_ns->routerstatus_list = smartlist_new();
  mock_ns->type = NS_TYPE_CONSENSUS;

  return mock_ns;
}

static networkstatus_t *
mock_networkstatus_get_live_consensus(time_t now)
{
  (void) now;

  tt_assert(mock_ns);

 done:
  return mock_ns;
}
/** Test the responsible HSDirs calculation function */
static void
test_responsible_hsdirs(void *arg)
{
  time_t now = approx_time();
  smartlist_t *responsible_dirs = smartlist_new();
  networkstatus_t *ns = NULL;
  int retval;

  (void) arg;

  hs_init();

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);

  ns = networkstatus_get_latest_consensus();

  { /* First router: HSdir */
    helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
  }

  { /* Second HSDir */
    helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
  }

  { /* Third relay but not HSDir */
    helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
  }

  ed25519_keypair_t kp;
  retval = ed25519_keypair_generate(&kp, 0);
  tt_int_op(retval, OP_EQ, 0);

  uint64_t time_period_num = hs_get_time_period_num(now);
  hs_get_responsible_hsdirs(&kp.pubkey, time_period_num,
                            0, 0, responsible_dirs);

  /* Make sure that we only found 2 responsible HSDirs.
   * The third relay was not an hsdir! */
  tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);

  /** TODO: Build a bigger network and do more tests here */

 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_free(responsible_dirs);
  smartlist_clear(ns->routerstatus_list);
  networkstatus_vote_free(mock_ns);
  cleanup_nodelist();
}
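/* For reference, the two zero arguments passed to hs_get_responsible_hsdirs()
 * above: judging from the calls later in this file, the third argument
 * selects the "next period" hsdir index (used when working with the next
 * descriptor) and the fourth selects client-side (fetch) versus service-side
 * (store) behavior; this test asks for the current-descriptor, service-side
 * hash ring. */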
static void
mock_directory_initiate_request(directory_request_t *req)
{
  (void)req;
  return;
}

static int
mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
                               const ed25519_keypair_t *signing_kp,
                               char **encoded_out)
{
  (void)desc;
  (void)signing_kp;

  tor_asprintf(encoded_out, "lulu");
  return 0;
}

static or_state_t dummy_state;

/* Mock function to get fake or state (used for rev counters) */
static or_state_t *
get_or_state_replacement(void)
{
  return &dummy_state;
}

static int
mock_router_have_minimum_dir_info(void)
{
  return 1;
}
/** Test that we correctly detect when the HSDir hash ring changes so that we
 * reupload our descriptor. */
static void
test_desc_reupload_logic(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  hs_init();

  MOCK(router_have_minimum_dir_info,
       mock_router_have_minimum_dir_info);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);

  ns = networkstatus_get_latest_consensus();

  /** Test logic:
   *  1) Upload descriptor to HSDirs
   *     CHECK that previous_hsdirs list was populated.
   *  2) Then call router_dir_info_changed() without an HSDir set change.
   *     CHECK that no reupload occurs.
   *  3) Now change the HSDir set, and call dir_info_changed() again.
   *     CHECK that reupload occurs.
   *  4) Finally call service_desc_schedule_upload().
   *     CHECK that previous_hsdirs list was cleared. */

  /* Let's start by building our descriptor and service */
  hs_service_descriptor_t *desc = service_descriptor_new();
  hs_service_t *service = NULL;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  char pubkey_hex[] =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  service->desc_current = desc;
  /* Also add service to service map */
  hs_service_ht *service_map = get_hs_service_map();
  tt_assert(service_map);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  register_service(service_map, service);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 1);

  /* Now let's create our hash ring: */
  {
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Poison next upload time so that we can see if it was changed by
   * router_dir_info_changed(). No changes in hash ring so far, so the upload
   * time should stay as is. */
  desc->next_upload_time = 42;
  router_dir_info_changed();
  tt_int_op(desc->next_upload_time, OP_EQ, 42);

  /* Now change the HSDir hash ring by swapping nora for aaron.
   * Start by clearing the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now add back all the nodes */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
    helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  }

  /* Now call service_desc_hsdirs_changed() and see that it detected the hash
     ring change */
  time_t now = approx_time();
  tt_assert(now);
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now order another upload and see that we keep having 6 prev hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now restore the HSDir hash ring to its original state by swapping back
     aaron for nora */
  /* First clear up the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now populate the hash ring again */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Check that our algorithm catches this change of hsdirs */
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);

  /* Now pretend that the descriptor changed, and order a reupload to all
     HSDirs. Make sure that the set of previous HSDirs was cleared. */
  service_desc_schedule_upload(desc, now, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);

  /* Now reupload again: see that the prev hsdir set got populated again. */
  upload_descriptor_to_all(service, desc);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  networkstatus_vote_free(ns);
  cleanup_nodelist();
  hs_free_all();
}
/** Test disaster SRV computation and caching */
static void
test_disaster_srv(void *arg)
{
  uint8_t *cached_disaster_srv_one = NULL;
  uint8_t *cached_disaster_srv_two = NULL;
  uint8_t srv_one[DIGEST256_LEN] = {0};
  uint8_t srv_two[DIGEST256_LEN] = {0};
  uint8_t srv_three[DIGEST256_LEN] = {0};
  uint8_t srv_four[DIGEST256_LEN] = {0};
  uint8_t srv_five[DIGEST256_LEN] = {0};

  (void) arg;

  /* Get the cached SRVs: we're going to use them later for verification */
  cached_disaster_srv_one = get_first_cached_disaster_srv();
  cached_disaster_srv_two = get_second_cached_disaster_srv();

  /* Compute some srvs */
  get_disaster_srv(1, srv_one);
  get_disaster_srv(2, srv_two);

  /* Check that the cached ones were updated */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for an SRV that has already been computed */
  get_disaster_srv(2, srv_two);
  /* and check that the cache entries have not changed */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for a new SRV */
  get_disaster_srv(3, srv_three);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for another SRV: none of the original SRVs should now be cached */
  get_disaster_srv(4, srv_four);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

  /* Ask for yet another SRV */
  get_disaster_srv(5, srv_five);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

 done:
  ;
}
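/* The disaster SRV is a deterministic fallback used when the consensus
 * carries no shared random value. Per prop224 it is derived from the time
 * period alone, roughly:
 *   disaster_srv(N) = H("shared-random-disaster" | INT_8(period_length) |
 *                       INT_8(N))
 * with H being SHA3-256 (the exact input layout is an assumption here; see
 * hs_common.c for the authoritative construction). Because it depends only
 * on N, a two-slot cache like the one exercised above suffices, with slots
 * recycled as new periods are requested. */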
/** Test our HS descriptor request tracker by making various requests and
 *  checking whether they get tracked properly. */
static void
test_hid_serv_request_tracker(void *arg)
{
  (void) arg;
  time_t retval;
  routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  time_t now = approx_time();

  const char *req_key_str_first =
 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_second =
 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";

  /*************************** basic test *******************************/

  /* Get request tracker and make sure it's empty */
  strmap_t *request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

  /* Let's register a hid serv request */
  hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a non-existent hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
                                           now+1, 0);
  tt_int_op(retval, OP_EQ, 0);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a real hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, now); /* we got it */
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /**********************************************************************/

  /* Let's add another request for the same HS but on a different HSDir. */
  hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
                                           now+3, 1);
  tt_int_op(retval, OP_EQ, now+3);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Check that we can clean the first request based on time */
  hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /* Check that it doesn't exist anymore */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, 0);

  /* Now let's add a smaller req key str */
  hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
                                           now+4, 1);
  tt_int_op(retval, OP_EQ, now+4);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /*************************** deleting entries **************************/

  /* Add another request with very short key */
  retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Try deleting entries with a dummy key. Check that our previous requests
   * are still there */
  tor_capture_bugs_(1);
  hs_purge_hid_serv_from_last_hid_serv_requests("a");
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  tor_end_capture_bugs_();

  /* Try another dummy key. Check that requests are still there */
  {
    char dummy[2000];
    memset(dummy, 'Z', 2000);
    dummy[1999] = '\x00';
    hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
    tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  }

  /* Another dummy key! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Now actually delete a request! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Purge it all! */
  hs_purge_last_hid_serv_requests();
  request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

 done:
  tor_free(hsdir);
  tor_free(hsdir2);
  tor_free(hsdir3);
}
static void
test_parse_extended_hostname(void *arg)
{
  (void) arg;

  char address1[] = "fooaddress.onion";
  char address2[] = "aaaaaaaaaaaaaaaa.onion";
  char address3[] = "fooaddress.exit";
  char address4[] = "www.torproject.org";
  char address5[] = "foo.abcdefghijklmnop.onion";
  char address6[] = "foo.bar.abcdefghijklmnop.onion";
  char address7[] = ".abcdefghijklmnop.onion";
  char address8[] =
    "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";

  tt_assert(BAD_HOSTNAME == parse_extended_hostname(address1));
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address2));
  tt_str_op(address2,OP_EQ, "aaaaaaaaaaaaaaaa");
  tt_assert(EXIT_HOSTNAME == parse_extended_hostname(address3));
  tt_assert(NORMAL_HOSTNAME == parse_extended_hostname(address4));
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address5));
  tt_str_op(address5,OP_EQ, "abcdefghijklmnop");
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address6));
  tt_str_op(address6,OP_EQ, "abcdefghijklmnop");
  tt_assert(BAD_HOSTNAME == parse_extended_hostname(address7));
  tt_assert(ONION_V3_HOSTNAME == parse_extended_hostname(address8));
  tt_str_op(address8, OP_EQ,
            "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");

 done: ;
}
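/* Summary of the classification exercised above: a .onion label of 16
 * base32 characters is ONION_V2_HOSTNAME, one of 56 characters (as in
 * address8) is ONION_V3_HOSTNAME, .exit maps to EXIT_HOSTNAME, ordinary
 * names are NORMAL_HOSTNAME, and malformed .onion names (wrong label
 * length, empty label) are BAD_HOSTNAME. Subdomains are stripped in place,
 * which is why address5 and address6 compare equal to just the
 * 16-character label afterwards. */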
static void
test_time_between_tp_and_srv(void *arg)
{
  int ret;
  networkstatus_t ns;
  (void) arg;

  /* This function should be returning true where "^" are:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |             ^^^^^^^^^^^^            ^^^^^^^^^^^^                  |
   *  |                                                                   |
   *  +------------------------------------------------------------------+
   */

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirvote_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirvote_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirvote_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirvote_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);

  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirvote_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

 done:
  ;
}
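/* In other words: with a consensus valid_after inside [12:00, 00:00) we sit
 * between a time period start and the next SRV creation, so
 * hs_in_period_between_tp_and_srv() returns 1 (the 12:00 and 23:00 cases
 * above); with valid_after inside [00:00, 12:00) it returns 0. */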
/************ Reachability Test (it is huge) ****************/

/* Simulate different consensuses for client and service. Used by the
 * reachability test. The SRVs and responsible HSDir lists are used by all
 * reachability tests, so make them common to simplify setup and teardown. */
static networkstatus_t *mock_service_ns = NULL;
static networkstatus_t *mock_client_ns = NULL;
static sr_srv_t current_srv, previous_srv;
static smartlist_t *service_responsible_hsdirs = NULL;
static smartlist_t *client_responsible_hsdirs = NULL;

static networkstatus_t *
mock_networkstatus_get_live_consensus_service(time_t now)
{
  (void) now;

  if (mock_service_ns) {
    return mock_service_ns;
  }

  mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
  mock_service_ns->routerstatus_list = smartlist_new();
  mock_service_ns->type = NS_TYPE_CONSENSUS;

  return mock_service_ns;
}

static networkstatus_t *
mock_networkstatus_get_latest_consensus_service(void)
{
  return mock_networkstatus_get_live_consensus_service(0);
}

static networkstatus_t *
mock_networkstatus_get_live_consensus_client(time_t now)
{
  (void) now;

  if (mock_client_ns) {
    return mock_client_ns;
  }

  mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
  mock_client_ns->routerstatus_list = smartlist_new();
  mock_client_ns->type = NS_TYPE_CONSENSUS;

  return mock_client_ns;
}

static networkstatus_t *
mock_networkstatus_get_latest_consensus_client(void)
{
  return mock_networkstatus_get_live_consensus_client(0);
}

/* Mock function because we are not trying to test the close circuit code,
 * which does an awful lot of checks on the circuit object. */
static void
mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
                            const char *file)
{
  (void) circ;
  (void) reason;
  (void) line;
  (void) file;
  return;
}
/* Initialize a big HSDir V3 hash ring. */
static void
helper_initialize_big_hash_ring(networkstatus_t *ns)
{
  int ret;

  /* Generate 250 hsdirs! :) */
  for (int counter = 1 ; counter < 251 ; counter++) {
    /* Let's generate a random nickname for each hsdir... */
    char nickname_binary[8];
    char nickname_str[13] = {0};
    crypto_rand(nickname_binary, sizeof(nickname_binary));
    ret = base64_encode(nickname_str, sizeof(nickname_str),
                        nickname_binary, sizeof(nickname_binary), 0);
    tt_int_op(ret, OP_EQ, 12);
    helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
  }

  /* Make sure we have 250 hsdirs in our list */
  tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);

 done:
  ;
}
/** Initialize service and publish its descriptor as needed. Return the newly
 *  allocated service object to the caller. */
static hs_service_t *
helper_init_service(time_t now)
{
  int retval;
  hs_service_t *service = hs_service_new(get_options());
  tt_assert(service);
  service->config.version = HS_VERSION_THREE;
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  /* Register service to global map. */
  retval = register_service(get_hs_service_map(), service);
  tt_int_op(retval, OP_EQ, 0);

  /* Initialize service descriptor */
  build_all_descriptors(now);
  tt_assert(service->desc_current);
  tt_assert(service->desc_next);

 done:
  return service;
}

/* Helper function to set the RFC 1123 time string into t. */
static void
set_consensus_times(const char *timestr, time_t *t)
{
  tt_assert(timestr);
  tt_assert(t);

  int ret = parse_rfc1123_time(timestr, t);
  tt_int_op(ret, OP_EQ, 0);

 done:
  return;
}
/* Helper function to cleanup the mock consensus (client and service) */
static void
cleanup_mock_ns(void)
{
  if (mock_service_ns) {
    SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(mock_service_ns->routerstatus_list);
    mock_service_ns->sr_info.current_srv = NULL;
    mock_service_ns->sr_info.previous_srv = NULL;
    networkstatus_vote_free(mock_service_ns);
    mock_service_ns = NULL;
  }

  if (mock_client_ns) {
    SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(mock_client_ns->routerstatus_list);
    mock_client_ns->sr_info.current_srv = NULL;
    mock_client_ns->sr_info.previous_srv = NULL;
    networkstatus_vote_free(mock_client_ns);
    mock_client_ns = NULL;
  }
}
/* Helper function to setup a reachability test. Once called,
 * cleanup_reachability_test() MUST be called at the end. */
static void
setup_reachability_test(void)
{
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  MOCK(get_or_state, get_or_state_replacement);

  hs_init();

  /* Baseline to start with. */
  memset(&current_srv, 0, sizeof(current_srv));
  memset(&previous_srv, 1, sizeof(previous_srv));

  /* Initialize the consensuses. */
  mock_networkstatus_get_latest_consensus_service();
  mock_networkstatus_get_latest_consensus_client();

  service_responsible_hsdirs = smartlist_new();
  client_responsible_hsdirs = smartlist_new();
}

/* Helper function to cleanup a reachability test initial setup. */
static void
cleanup_reachability_test(void)
{
  smartlist_free(service_responsible_hsdirs);
  service_responsible_hsdirs = NULL;
  smartlist_free(client_responsible_hsdirs);
  client_responsible_hsdirs = NULL;
  hs_free_all();
  cleanup_mock_ns();
  UNMOCK(get_or_state);
  UNMOCK(circuit_mark_for_close_);
}
/* A reachability test always checks that the resulting service and client
 * responsible HSDir sets for the given parameters match up.
 *
 * Return true iff every HSDir in the client list is also present in the
 * service list. */
static int
are_responsible_hsdirs_equal(void)
{
  int count = 0;
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);

  SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
                          const routerstatus_t *, c_rs) {
    SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
                            const routerstatus_t *, s_rs) {
      if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
                    DIGEST_LEN)) {
        count++;
        break;
      }
    } SMARTLIST_FOREACH_END(s_rs);
  } SMARTLIST_FOREACH_END(c_rs);

 done:
  return (count == 6);
}
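/* The 6 and 8 above come from the default consensus parameters, assuming
 * hsdir_n_replicas = 2, hsdir_spread_fetch = 3 and hsdir_spread_store = 4:
 * a client contacts n_replicas * spread_fetch = 6 HSDirs while a service
 * uploads to n_replicas * spread_store = 8, so reachability amounts to the
 * client set being a subset of the service set. */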
/* Tor doesn't use such a function to get the previous HSDir, it is only used
 * in node_set_hsdir_index(). We need it here so we can test reachability
 * scenario 6, which requires the previous time period to compute the list of
 * responsible HSDirs because of the client state timing. */
static uint64_t
get_previous_time_period(time_t now)
{
  return hs_get_time_period_num(now) - 1;
}

/* Configuration of a reachability test scenario. */
typedef struct reachability_cfg_t {
  /* Consensus timings to be set. They have to be compliant with
   * RFC 1123 time format. */
  const char *service_valid_after;
  const char *service_valid_until;
  const char *client_valid_after;
  const char *client_valid_until;

  /* SRVs that the service and client should use. */
  sr_srv_t *service_current_srv;
  sr_srv_t *service_previous_srv;
  sr_srv_t *client_current_srv;
  sr_srv_t *client_previous_srv;

  /* A time period function for the service to use for this scenario. For a
   * successful reachability test, the client always uses the current time
   * period, hence no client function. */
  uint64_t (*service_time_period_fn)(time_t);

  /* Are the client and service expected to be in a new time period? After
   * setting the consensus time, the reachability test checks
   * hs_in_period_between_tp_and_srv() and tests the returned value against
   * this. */
  unsigned int service_in_new_tp;
  unsigned int client_in_new_tp;

  /* Some scenarios require a hint that the client, because of its consensus
   * time, will request the "next" service descriptor; this indicates whether
   * that is the case. */
  unsigned int client_fetch_next_desc;
} reachability_cfg_t;

/* Some defines to help with semantics while reading a configuration below. */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1
static reachability_cfg_t reachability_scenarios[] = {
  /* Scenario 1
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |               ^ ^                                                 |
   *  |               S C                                                 |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to
   *  15:00; both are after TP#1 and thus have access to SRV#1. Service and
   *  client should be using TP#1.
   */

  { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
    "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
    &current_srv, NULL, /* Service current and previous SRV */
    &current_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 2
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                        |
   *  |                        S C                                        |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 23:00 and client to
   *  01:00, which makes the client after SRV#2 and the service just
   *  before. The service should only be using TP#1. The client should be
   *  using TP#1.
   */

  { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
    &previous_srv, NULL, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 3
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|----------$===========|     |
   *  |                           ^ ^                                     |
   *  |                           S C                                     |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 03:00 and client to
   *  05:00, which makes both after SRV#2. The service should be using TP#1
   *  as its current time period. The client should be using TP#1.
   */

  { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 4
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                            |
   *  |                                    S C                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 11:00 and client to
   *  13:00, which makes the service before TP#2 and the client just after.
   *  The service should be using TP#1 as its current time period and TP#2
   *  as the next. The client should be using the TP#2 time period.
   */

  { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_next_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 5
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                        |
   *  |                        C S                                        |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 01:00 and client to
   *  23:00, which makes the service after SRV#2 and the client just
   *  before. The service should be using TP#1 as its current time period
   *  and TP#2 as the next. The client should be using the TP#1 time
   *  period.
   */

  { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &previous_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 6
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                            |
   *  |                                    C S                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to
   *  11:00, which makes the service after TP#2 and the client just before.
   *  The service should be using TP#1 as its current time period and TP#2
   *  as its next. The client should be using the TP#1 time period.
   */

  { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    get_previous_time_period, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* End marker. */
  { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
};
/* Run a single reachability scenario. num_scenario is the corresponding
 * scenario number from the documentation. It is used to log it in case of
 * failure so we know which scenario fails. */
static int
run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
{
  int ret = -1;
  hs_service_t *service;
  uint64_t service_tp, client_tp;
  ed25519_public_key_t service_blinded_pk, client_blinded_pk;

  setup_reachability_test();

  tt_assert(cfg);

  /* Set service consensus time. */
  set_consensus_times(cfg->service_valid_after,
                      &mock_service_ns->valid_after);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->valid_until);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->fresh_until);
  dirvote_recalculate_timing(get_options(), mock_service_ns->valid_after);
  /* Set client consensus time. */
  set_consensus_times(cfg->client_valid_after,
                      &mock_client_ns->valid_after);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->valid_until);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->fresh_until);
  dirvote_recalculate_timing(get_options(), mock_client_ns->valid_after);

  /* New time period checks for this scenario. */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
            cfg->service_in_new_tp);
  tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
            cfg->client_in_new_tp);

  /* Set the SRVs for this scenario. */
  mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
  mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
  mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
  mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;

  /* Initialize a service to get keys. */
  service = helper_init_service(time(NULL));

  /*
   * === Client setup ===
   */

  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus_client);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_client);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_client_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_client_ns);

  /* Clients ONLY use the current time period. This is the whole point of
   * these reachability tests: to make sure the client can always reach the
   * service using only its current time period. */
  client_tp = hs_get_time_period_num(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          client_tp, &client_blinded_pk);
  hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
                            client_responsible_hsdirs);
  /* Clean up the nodelist so the service can compute its own set of nodes
   * with its own hashring. */
  cleanup_nodelist();
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);

  /*
   * === Service setup ===
   */

  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus_service);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_service);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_service_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_service_ns);

  service_tp = cfg->service_time_period_fn(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          service_tp, &service_blinded_pk);

  /* A service builds two lists of responsible HSDirs, for the current and
   * the next descriptor. Depending on the scenario, the client timing
   * indicates whether it is fetching the current or the next descriptor, so
   * we use "client_fetch_next_desc" to know which one the client is trying
   * to get, to confirm that the service computes the same hashring for the
   * same blinded key and service time period function. */
  hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
                            cfg->client_fetch_next_desc, 0,
                            service_responsible_hsdirs);
  cleanup_nodelist();
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);

  /* Some testing of the values we just got from the client and service. */
  tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
            ED25519_PUBKEY_LEN);
  tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);

  /* Everything went well. */
  ret = 0;

 done:
  cleanup_reachability_test();
  if (ret == -1) {
    /* Do this so we can know which scenario failed. */
    char msg[32];
    tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
    tt_fail_msg(msg);
  }
  return ret;
}
static void
test_reachability(void *arg)
{
  (void) arg;

  /* NOTE: An important axiom to understand here is that SRV#N must only be
   * used with the TP#N value. For example, SRV#2 with TP#1 should NEVER be
   * used together. The HSDir index computation is based on this axiom. */

  for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
    int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
    if (ret < 0) {
      return;
    }
  }
}
/** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
 *  its identity digest in <b>hsdir_digest_out</b>. */
static void
helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
                         char *hsdir_digest_out)
{
  tt_assert(onion_identity_pk);

  routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
  tt_assert(client_hsdir);
  digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);

 done:
  ;
}
static void
test_hs_indexes(void *arg)
{
  int ret;
  uint64_t period_num = 42;
  ed25519_public_key_t pubkey;

  (void) arg;

  /* Build the hs_index */
  {
    uint8_t hs_index[DIGEST256_LEN];
    const char *b32_test_vector =
      "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
    char test_vector[DIGEST256_LEN];
    ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
                        strlen(b32_test_vector));
    tt_int_op(ret, OP_EQ, sizeof(test_vector));
    /* Our test vector uses a public key set to 32 bytes of \x42. */
    memset(&pubkey, '\x42', sizeof(pubkey));
    hs_build_hs_index(1, &pubkey, period_num, hs_index);
    tt_mem_op(hs_index, OP_EQ, test_vector, sizeof(hs_index));
  }

  /* Build the hsdir_index */
  {
    uint8_t srv[DIGEST256_LEN];
    uint8_t hsdir_index[DIGEST256_LEN];
    const char *b32_test_vector =
      "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
    char test_vector[DIGEST256_LEN];
    ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
                        strlen(b32_test_vector));
    tt_int_op(ret, OP_EQ, sizeof(test_vector));
    /* Our test vector uses a public key set to 32 bytes of \x42. */
    memset(&pubkey, '\x42', sizeof(pubkey));
    memset(srv, '\x43', sizeof(srv));
    hs_build_hsdir_index(&pubkey, srv, period_num, hsdir_index);
    tt_mem_op(hsdir_index, OP_EQ, test_vector, sizeof(hsdir_index));
  }

 done:
  ;
}
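/* The expected digests above can be reproduced from the prop224
 * definitions, which these builders implement:
 *   hs_index(replica) = H("store-at-idx" | blinded_pk | INT_8(replica) |
 *                         INT_8(period_length) | INT_8(period_num))
 *   hsdir_index(node) = H("node-idx" | node_identity |
 *                         shared_random_value |
 *                         INT_8(period_num) | INT_8(period_length))
 * with H being SHA3-256. A node is a responsible HSDir for a descriptor
 * when its hsdir_index is among the closest following the descriptor's
 * hs_index on the hash ring. */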
#define EARLY_IN_SRV_TO_TP 0
#define LATE_IN_SRV_TO_TP 1
#define EARLY_IN_TP_TO_SRV 2
#define LATE_IN_TP_TO_SRV 3

/** Set the consensus and system time based on <b>position</b>. See the
 *  following diagram for details:
 *
 *  +------------------------------------------------------------------+
 *  |                                                                   |
 *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
 *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
 *  |                                                                   |
 *  |  $==========|-----------$===========|----------$===========|     |
 *  |                                                                   |
 *  |                                                                   |
 *  +------------------------------------------------------------------+
 */
static time_t
helper_set_consensus_and_system_time(networkstatus_t *ns, int position)
{
  time_t real_time = 0;

  /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
   * valid_after is what matters here, the rest is just to specify the voting
   * period correctly. */
  if (position == LATE_IN_SRV_TO_TP) {
    parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
  } else if (position == EARLY_IN_TP_TO_SRV) {
    parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
  } else if (position == LATE_IN_TP_TO_SRV) {
    parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->valid_until);
  } else if (position == EARLY_IN_SRV_TO_TP) {
    parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns->valid_until);
  } else {
    tt_assert(0);
  }
  dirvote_recalculate_timing(get_options(), ns->valid_after);

  /* Set system time: pretend to be just 2 minutes before consensus expiry */
  real_time = ns->valid_until - 120;
  update_approx_time(real_time);

 done:
  return real_time;
}
1576 /** Helper function that carries out the actual test for
1577 * test_client_service_sync() */
1578 static void
1579 helper_test_hsdir_sync(networkstatus_t *ns,
1580 int service_position, int client_position,
1581 int client_fetches_next_desc)
1583 hs_service_descriptor_t *desc;
1584 int retval;
1586 /** Test logic:
1587 * 1) Initialize service time: consensus and system time.
1588 * 1.1) Initialize service hash ring
1589 * 2) Initialize service and publish descriptors.
1590 * 3) Initialize client time: consensus and system time.
1591 * 3.1) Initialize client hash ring
1592 * 4) Try to fetch descriptor as client, and CHECK that the HSDir picked by
1593 * the client was also picked by service.
1596 /* 1) Initialize service time: consensus and real time */
1597 time_t now = helper_set_consensus_and_system_time(ns, service_position);
1598 helper_initialize_big_hash_ring(ns);
1600 /* 2) Initialize service */
1601 hs_service_t *service = helper_init_service(now);
1602 desc = client_fetches_next_desc ? service->desc_next : service->desc_current;
1604 /* Now let's upload our desc to all hsdirs */
1605 upload_descriptor_to_all(service, desc);
1606 /* Cleanup right now so we don't memleak on error. */
1607 cleanup_nodelist();
1608 /* Check that previous hsdirs were populated */
1609 tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 8);
1611 /* 3) Initialize client time */
1612 helper_set_consensus_and_system_time(ns, client_position);
1614 cleanup_nodelist();
1615 SMARTLIST_FOREACH(ns->routerstatus_list,
1616 routerstatus_t *, rs, routerstatus_free(rs));
1617 smartlist_clear(ns->routerstatus_list);
1618 helper_initialize_big_hash_ring(ns);
  /* 4) Pick 6 HSDirs as a client and check that they were also chosen by
   *    the service. */
  for (int y = 0 ; y < 6 ; y++) {
    char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
    helper_client_pick_hsdir(&service->keys.identity_pk,
                             client_hsdir_b64_digest);

    /* CHECK: Go through the hsdirs chosen by the service and make sure that
     * the list contains the one picked by the client! */
    retval = smartlist_contains_string(desc->previous_hsdirs,
                                       client_hsdir_b64_digest);
    tt_int_op(retval, OP_EQ, 1);
  }

  /* Finally, try to pick a 7th hsdir and see that NULL is returned since we
   * have exhausted all of them: */
  tt_assert(!pick_hsdir_v3(&service->keys.identity_pk));
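
  /* (pick_hsdir_v3() returns NULL here because, presumably, the client-side
   * request tracker has recorded an attempt for every responsible HSDir by
   * now; that tracker is exercised on its own by the
   * hid_serv_request_tracker test in this file.) */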

 done:
  /* At the end: free all services and initialize the subsystem again; we
   * will need it for the next scenario. */
  cleanup_nodelist();
  hs_service_free_all();
  hs_service_init();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
}

/** This test ensures that the client and the service will pick the same
 *  HSDirs under various timing scenarios:
 *  a) Scenario where both client and service are in the time segment between
 *     SRV#N and TP#N.
 *  b) Scenario where both client and service are in the time segment between
 *     TP#N and SRV#N+1.
 *  c) Scenario where the service is between SRV#N and TP#N, but the client is
 *     between TP#N and SRV#N+1.
 *  d) Scenario where the service is between TP#N and SRV#N+1, but the client
 *     is between SRV#N and TP#N.
 *  e) Scenario where the service is between SRV#N and TP#N, but the client is
 *     one segment behind, between TP#N-1 and SRV#N.
 *  f) Scenario where the service is between TP#N and SRV#N+1, but the client
 *     is one segment ahead, between SRV#N+1 and TP#N+1.
 *
 *  This test is important because it checks that upload_descriptor_to_all()
 *  is in sync with pick_hsdir_v3(). That's not the case for the
 *  test_reachability() test, which only compares the responsible hsdir
 *  sets. */
static void
test_client_service_hsdir_set_sync(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);
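
  /* The mocks above keep this test hermetic: the consensus, the OR state and
   * the descriptor encoding all come from replacement functions in this
   * file, and mocking directory_initiate_request means that "uploading" a
   * descriptor never opens a real directory connection. */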

  hs_init();

  /* Initialize a big hash ring: we want it to be big so that client and
   * service cannot accidentally select the same HSDirs. */
  ns = networkstatus_get_latest_consensus();
  tt_assert(ns);

  /** Now test the various sync scenarios. See the helper function for more
   *  details: */

  /* a) Scenario where both client and service are in the time segment
   *    between SRV#N and TP#N. At this time the client fetches the first
   *    HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                  ^ ^                             |
   *  |                                  S C                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, LATE_IN_SRV_TO_TP, 0);
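
  /* (In these helper calls, the final 0/1 argument is
   * client_fetches_next_desc: it selects whether the client is expected to
   * land on the service's desc_current or its desc_next.) */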

  /* b) Scenario where both client and service are in the time segment
   *    between TP#N and SRV#N+1. At this time the client fetches the second
   *    HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                              ^ ^                 |
   *  |                                              S C                 |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, LATE_IN_TP_TO_SRV, 1);

  /* c) Scenario where service is between SRV#N and TP#N, but client is
   *    between TP#N and SRV#N+1. Client is forward in time so it fetches
   *    the second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                           |
   *  |                                    S C                           |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, EARLY_IN_TP_TO_SRV, 1);

  /* d) Scenario where service is between TP#N and SRV#N+1, but client is
   *    between SRV#N and TP#N. Client is backwards in time so it fetches
   *    the first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                           |
   *  |                                    C S                           |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_TP_TO_SRV, LATE_IN_SRV_TO_TP, 0);

  /* e) Scenario where service is between SRV#N and TP#N, but client is
   *    between TP#N-1 and SRV#N. Client is backwards in time so it fetches
   *    the first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                                ^ ^               |
   *  |                                                C S               |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_SRV_TO_TP, LATE_IN_TP_TO_SRV, 0);

  /* f) Scenario where service is between TP#N and SRV#N+1, but client is
   *    between SRV#N+1 and TP#N+1. Client is forward in time so it fetches
   *    the second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                                ^ ^               |
   *  |                                                S C               |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, EARLY_IN_SRV_TO_TP, 1);

 done:
  networkstatus_vote_free(ns);
  nodelist_free_all();
  hs_free_all();
}
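
/* Note: every case below runs under TT_FORK, so one test cannot leak state
 * into the next. Presumably a single case can also be run on its own via
 * the test binary, e.g. "./src/test/test hs_common/time_period". */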

struct testcase_t hs_common_tests[] = {
  { "build_address", test_build_address, TT_FORK,
    NULL, NULL },
  { "validate_address", test_validate_address, TT_FORK,
    NULL, NULL },
  { "time_period", test_time_period, TT_FORK,
    NULL, NULL },
  { "start_time_of_next_time_period", test_start_time_of_next_time_period,
    TT_FORK, NULL, NULL },
  { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
    NULL, NULL },
  { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
    NULL, NULL },
  { "disaster_srv", test_disaster_srv, TT_FORK,
    NULL, NULL },
  { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
    NULL, NULL },
  { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
    NULL, NULL },
  { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
    NULL, NULL },
  { "reachability", test_reachability, TT_FORK,
    NULL, NULL },
  { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
    TT_FORK, NULL, NULL },
  { "hs_indexes", test_hs_indexes, TT_FORK,
    NULL, NULL },

  END_OF_TESTCASES
};