/* Copyright (c) 2017-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file test_hs_common.c
 * \brief Test hidden service common functionalities.
 */

#define CONNECTION_EDGE_PRIVATE
#define HS_COMMON_PRIVATE
#define HS_CLIENT_PRIVATE
#define HS_SERVICE_PRIVATE
#define NODELIST_PRIVATE

#include "test/test.h"
#include "test/test_helpers.h"
#include "test/log_test_helpers.h"
#include "test/hs_test_helpers.h"

#include "core/or/connection_edge.h"
#include "lib/crypt_ops/crypto_format.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "feature/hs/hs_common.h"
#include "feature/hs/hs_client.h"
#include "feature/hs/hs_service.h"
#include "app/config/config.h"
#include "feature/nodelist/networkstatus.h"
#include "feature/dirclient/dirclient.h"
#include "feature/dirauth/dirvote.h"
#include "feature/nodelist/nodelist.h"
#include "feature/nodelist/routerlist.h"
#include "app/config/statefile.h"
#include "core/or/circuitlist.h"
#include "feature/dirauth/shared_random.h"
#include "feature/dirauth/voting_schedule.h"

#include "feature/nodelist/microdesc_st.h"
#include "feature/nodelist/networkstatus_st.h"
#include "feature/nodelist/node_st.h"
#include "app/config/or_state_st.h"
#include "feature/nodelist/routerinfo_st.h"
#include "feature/nodelist/routerstatus_st.h"
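
/* For reference when reading the address tests below (our summary of
 * prop224 section [ONIONADDRESS], not normative):
 *
 *   onion_address = base32(PUBKEY | CHECKSUM | VERSION) + ".onion"
 *   CHECKSUM = H(".onion checksum" | PUBKEY | VERSION)[:2]
 *
 * where PUBKEY is the 32-byte ed25519 identity public key, VERSION is the
 * single byte '\x03', and H is SHA3-256. The base32 part is thus always 56
 * characters, which is why the "Invalid length" cases below fail before any
 * checksum is even looked at. */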

/** Test the validation of HS v3 addresses */
static void
test_validate_address(void *arg)
{
  int ret;

  (void) arg;

  /* Address too short and too long. */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid("blah");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("Invalid length");
  teardown_capture_of_logs();

  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("Invalid length");
  teardown_capture_of_logs();

  /* Invalid checksum (taken from prop224) */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("invalid checksum");
  teardown_capture_of_logs();

  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("invalid checksum");
  teardown_capture_of_logs();

  /* Non base32 decodable string. */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "????????????????????????????????????????????????????????");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("Unable to base32 decode");
  teardown_capture_of_logs();

  /* Valid address. */
  ret = hs_address_is_valid(
           "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
  tt_int_op(ret, OP_EQ, 1);

 done:
  ;
}

static int
mock_write_str_to_file(const char *path, const char *str, int bin)
{
  (void)bin;
  tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  tt_str_op(str, OP_EQ,
            "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");

 done:
  return 0;
}

/** Test building HS v3 onion addresses. Uses test vectors from the
 * ./hs_build_address.py script. */
static void
test_build_address(void *arg)
{
  int ret;
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  char pubkey_hex[] =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  hs_service_t *service = NULL;

  (void) arg;

  MOCK(write_str_to_file, mock_write_str_to_file);

  /* The following has been created with the hs_build_address.py script,
   * which follows the proposal 224 specification to build an onion
   * address. */
  static const char *test_addr =
    "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";

  /* Let's try to build the same onion address as the script */
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  tt_str_op(test_addr, OP_EQ, onion_addr);
  /* Validate that address. */
  ret = hs_address_is_valid(onion_addr);
  tt_int_op(ret, OP_EQ, 1);

  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  tor_asprintf(&service->config.directory_path, "/double/five");
  ret = write_address_to_file(service, "squared");
  tt_int_op(ret, OP_EQ, 0);

 done:
  hs_service_free(service);
}

/** Test that our HS time period calculation functions work properly */
static void
test_time_period(void *arg)
{
  (void) arg;
  uint64_t tn;
  int retval;
  time_t fake_time, correct_time, start_time;

  /* Let's do the example in prop224 section [TIME-PERIODS] */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);

  /* Check that the time period number is right */
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);
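
  /* (Where does 16903 come from? Our arithmetic, for the reader: the TP
   * number is (unix_time - 12*3600) / 86400, and 2016-04-13 11:00:00 UTC is
   * unix time 1460545200, so (1460545200 - 43200) / 86400 == 16903 by
   * integer division.) */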

  /* Increase current time to 11:59:59 UTC and check that the time period
     number is still the same */
  fake_time += 3599;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);

  { /* Check start time of next time period */
    retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now take time to 12:00:00 UTC and check that the time period rotated */
  fake_time += 1;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16904);

  /* Now also check our hs_get_next_time_period_num() function */
  tn = hs_get_next_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16905);

  { /* Check start time of next time period again */
    retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now do another sanity check: The time period number at the start of the
   * next time period must be the same time period number as the one returned
   * from hs_get_next_time_period_num() */
  {
    time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
    tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
              hs_get_next_time_period_num(fake_time));
  }

 done:
  ;
}

/** Test that we can correctly find the start time of the next time period */
static void
test_start_time_of_next_time_period(void *arg)
{
  (void) arg;
  int retval;
  time_t fake_time;
  char tbuf[ISO_TIME_LEN + 1];
  time_t next_tp_start_time;

  /* Do some basic tests */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  /* Compare it with the correct result */
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);

  /* Another test with an edge-case time (start of TP) */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);

  {
    /* Now pretend we are on a testing network and alter the voting schedule
       to be every 10 seconds. This means that a time period has length 10*24
       seconds (4 minutes). It also means that we apply a rotational offset
       of 120 seconds to the time period, so that it starts at 00:02:00
       instead of 00:00:00. */
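    /* (Worked out from the comment above: the TP length is 24 voting
     * intervals and the offset is 12 intervals, so with a 10-second interval
     * we get 24*10 = 240s periods shifted by 12*10 = 120s; hence the
     * 00:02:00 and 00:06:00 results expected below.) */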
    or_options_t *options = get_options_mutable();
    options->TestingTorNetwork = 1;
    options->V3AuthVotingInterval = 10;
    options->TestingV3AuthInitialVotingInterval = 10;

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
  }

 done:
  ;
}

/* Cleanup the global nodelist. It also frees the "md" in the node_t because
 * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
static void
cleanup_nodelist(void)
{
  const smartlist_t *nodelist = nodelist_get_list();
  SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
    tor_free(node->md);
    node->md = NULL;
  } SMARTLIST_FOREACH_END(node);
  nodelist_free_all();
}

static void
helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
                                  int identity_idx,
                                  const char *nickname,
                                  int is_hsdir)
{
  routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  uint8_t identity[DIGEST_LEN];
  node_t *node = NULL;

  memset(identity, identity_idx, sizeof(identity));

  memcpy(rs->identity_digest, identity, DIGEST_LEN);
  rs->is_hs_dir = is_hsdir;
  rs->pv.supports_v3_hsdir = 1;
  strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  tor_addr_parse(&ri->ipv4_addr, "1.2.3.4");
  tor_addr_parse(&rs->ipv4_addr, "1.2.3.4");
  ri->nickname = tor_strdup(nickname);
  ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  /* Needed for the HSDir index computation. */
  memset(&ri->cache_info.signing_key_cert->signing_key,
         identity_idx, ED25519_PUBKEY_LEN);
  tt_assert(nodelist_set_routerinfo(ri, NULL));

  node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  tt_assert(node);
  node->rs = rs;
  /* We need this to exist for node_has_preferred_descriptor() to return
   * true. */
  node->md = tor_malloc_zero(sizeof(microdesc_t));
  /* Do this now: node_set_hsdir_index() needs the "rs" to set the indexes,
   * and the node did not have one yet when nodelist_set_routerinfo() was
   * called. */
  node_set_hsdir_index(node, ns);
  node->ri = NULL;
  smartlist_add(ns->routerstatus_list, rs);

 done:
  if (node == NULL)
    routerstatus_free(rs);

  routerinfo_free(ri);
}

static networkstatus_t *mock_ns = NULL;

static networkstatus_t *
mock_networkstatus_get_latest_consensus(void)
{
  time_t now = approx_time();

  /* If initialized, return it */
  if (mock_ns) {
    return mock_ns;
  }

  /* Initialize fake consensus */
  mock_ns = tor_malloc_zero(sizeof(networkstatus_t));

  /* This consensus is live */
  mock_ns->valid_after = now-1;
  mock_ns->fresh_until = now+1;
  mock_ns->valid_until = now+2;
  /* Create routerstatus list */
  mock_ns->routerstatus_list = smartlist_new();
  mock_ns->type = NS_TYPE_CONSENSUS;

  return mock_ns;
}

static networkstatus_t *
mock_networkstatus_get_reasonably_live_consensus(time_t now, int flavor)
{
  (void) now;
  (void) flavor;

  tt_assert(mock_ns);

 done:
  return mock_ns;
}

/** Test the responsible HSDirs calculation function */
static void
test_responsible_hsdirs(void *arg)
{
  smartlist_t *responsible_dirs = smartlist_new();
  networkstatus_t *ns = NULL;
  (void) arg;

  hs_init();

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus);

  ns = networkstatus_get_latest_consensus();

  { /* First router: HSdir */
    helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
  }

  { /* Second HSDir */
    helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
  }

  { /* Third relay but not HSDir */
    helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
  }

  /* Use a fixed time period and pub key so we always take the same path */
  ed25519_public_key_t pubkey;
  uint64_t time_period_num = 17653; // 2 May, 2018, 14:00.
  memset(&pubkey, 42, sizeof(pubkey));

  hs_get_responsible_hsdirs(&pubkey, time_period_num,
                            0, 0, responsible_dirs);

  /* Make sure that we only found 2 responsible HSDirs.
   * The third relay was not an hsdir! */
  tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);
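
  /* (For context, not normative: with the prop224 consensus-parameter
   * defaults hsdir_n_replicas = 2 and hsdir_spread_store = 4, a service-side
   * call like this one would normally select 2*4 = 8 HSDirs; with only two
   * eligible HSDirs in the whole consensus, both of them are returned.) */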

  /** TODO: Build a bigger network and do more tests here */

 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_free(responsible_dirs);
  smartlist_clear(ns->routerstatus_list);
  networkstatus_vote_free(mock_ns);
  cleanup_nodelist();

  UNMOCK(networkstatus_get_reasonably_live_consensus);
}

static void
mock_directory_initiate_request(directory_request_t *req)
{
  (void)req;
  return;
}

static int
mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
                               const ed25519_keypair_t *signing_kp,
                               const uint8_t *descriptor_cookie,
                               char **encoded_out)
{
  (void)desc;
  (void)signing_kp;
  (void)descriptor_cookie;

  tor_asprintf(encoded_out, "lulu");
  return 0;
}

static or_state_t dummy_state;

/* Mock function to get fake or state (used for rev counters) */
static or_state_t *
get_or_state_replacement(void)
{
  return &dummy_state;
}

static int
mock_router_have_minimum_dir_info(void)
{
  return 1;
}

/** Test that we correctly detect when the HSDir hash ring changes so that we
 * reupload our descriptor. */
static void
test_desc_reupload_logic(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  hs_init();

  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus);
  MOCK(router_have_minimum_dir_info,
       mock_router_have_minimum_dir_info);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);

  ns = networkstatus_get_latest_consensus();

  /** Test logic:
   *  1) Upload descriptor to HSDirs
   *     CHECK that previous_hsdirs list was populated.
   *  2) Then call router_dir_info_changed() without an HSDir set change.
   *     CHECK that no reupload occurs.
   *  3) Now change the HSDir set, and call dir_info_changed() again.
   *     CHECK that reupload occurs.
   *  4) Finally call service_desc_schedule_upload().
   *     CHECK that previous_hsdirs list was cleared.
   */

  /* Let's start by building our descriptor and service */
  hs_service_descriptor_t *desc = service_descriptor_new();
  hs_service_t *service = NULL;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  char pubkey_hex[] =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  service = tor_malloc_zero(sizeof(hs_service_t));
  tt_assert(service);
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  service->desc_current = desc;
  /* Also add service to service map */
  hs_service_ht *service_map = get_hs_service_map();
  tt_assert(service_map);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  register_service(service_map, service);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 1);

  /* Now let's create our hash ring: */
  {
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Poison next upload time so that we can see if it was changed by
   * router_dir_info_changed(). No changes in hash ring so far, so the upload
   * time should stay as is. */
  desc->next_upload_time = 42;
  router_dir_info_changed();
  tt_int_op(desc->next_upload_time, OP_EQ, 42);

  /* Now change the HSDir hash ring by swapping nora for aaron.
   * Start by clearing the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now add back all the nodes */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
    helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  }

  /* Now call service_desc_hsdirs_changed() and see that it detected the hash
     ring change */
  time_t now = approx_time();
  tt_assert(now);
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now order another upload and see that we keep having 6 prev hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now restore the HSDir hash ring to its original state by swapping back
     aaron for nora */
  /* First clear up the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now populate the hash ring again */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Check that our algorithm catches this change of hsdirs */
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);

  /* Now pretend that the descriptor changed, and order a reupload to all
     HSDirs. Make sure that the set of previous HSDirs was cleared. */
  service_desc_schedule_upload(desc, now, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);

  /* Now reupload again: see that the prev hsdir set got populated again. */
  upload_descriptor_to_all(service, desc);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  if (service) {
    remove_service(get_hs_service_map(), service);
    hs_service_free(service);
  }
  networkstatus_vote_free(ns);
  cleanup_nodelist();
  hs_free_all();
}

/** Test disaster SRV computation and caching */
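/* (Recall, per our reading of rend-spec-v3 [SHAREDRANDOM]: the disaster SRV
 * is the deterministic fallback used when the consensus carries no shared
 * random value, roughly
 *   disaster_srv(N) = H("shared-random-disaster" | INT_8(period_length) |
 *                       INT_8(N))
 * with H being SHA3-256, and only the two most recently requested values are
 * kept in the cache. That caching policy is what this test checks.) */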
static void
test_disaster_srv(void *arg)
{
  uint8_t *cached_disaster_srv_one = NULL;
  uint8_t *cached_disaster_srv_two = NULL;
  uint8_t srv_one[DIGEST256_LEN] = {0};
  uint8_t srv_two[DIGEST256_LEN] = {0};
  uint8_t srv_three[DIGEST256_LEN] = {0};
  uint8_t srv_four[DIGEST256_LEN] = {0};
  uint8_t srv_five[DIGEST256_LEN] = {0};

  (void) arg;

  /* Get the cached SRVs: we are going to use them later for verification */
  cached_disaster_srv_one = get_first_cached_disaster_srv();
  cached_disaster_srv_two = get_second_cached_disaster_srv();

  /* Compute some srvs */
  get_disaster_srv(1, srv_one);
  get_disaster_srv(2, srv_two);

  /* Check that the cached ones were updated */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for an SRV that has already been computed */
  get_disaster_srv(2, srv_two);
  /* and check that the cache entries have not changed */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for a new SRV */
  get_disaster_srv(3, srv_three);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for another SRV: none of the original SRVs should now be cached */
  get_disaster_srv(4, srv_four);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

  /* Ask for yet another SRV */
  get_disaster_srv(5, srv_five);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

 done:
  ;
}

/** Test our HS descriptor request tracker by making various requests and
 * checking whether they get tracked properly. */
static void
test_hid_serv_request_tracker(void *arg)
{
  (void) arg;
  time_t retval;
  routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  time_t now = approx_time();

  const char *req_key_str_first =
 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_second =
 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";

  /*************************** basic test *******************************/

  /* Get request tracker and make sure it's empty */
  strmap_t *request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker), OP_EQ, 0);
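
  /* (Context from our reading of hs_common.c, not re-verified here: the map
   * key is the HSDir identity digest in base32 with the request key string
   * appended, which is why the purge calls further down match on the request
   * key suffix rather than on an exact map key.) */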

  /* Let's register a hid serv request */
  hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 1);

  /* Let's lookup a non-existent hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
                                           now+1, 0);
  tt_int_op(retval, OP_EQ, 0);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 1);

  /* Let's lookup a real hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, now); /* we got it */
  tt_int_op(strmap_size(request_tracker), OP_EQ, 1);

  /**********************************************************************/

  /* Let's add another request for the same HS but on a different HSDir. */
  hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
                                           now+3, 1);
  tt_int_op(retval, OP_EQ, now+3);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 2);

  /* Check that we can clean the first request based on time */
  hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 1);
  /* Check that it doesn't exist anymore */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, 0);

  /* Now let's add a smaller req key str */
  hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
                                           now+4, 1);
  tt_int_op(retval, OP_EQ, now+4);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 2);

  /*************************** deleting entries **************************/

  /* Add another request with very short key */
  retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 3);

  /* Try deleting entries with a dummy key. Check that our previous requests
   * are still there */
  tor_capture_bugs_(1);
  hs_purge_hid_serv_from_last_hid_serv_requests("a");
  tt_int_op(strmap_size(request_tracker), OP_EQ, 3);
  tor_end_capture_bugs_();

  /* Try another dummy key. Check that requests are still there */
  {
    char dummy[2000];
    memset(dummy, 'Z', 2000);
    dummy[1999] = '\x00';
    hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
    tt_int_op(strmap_size(request_tracker), OP_EQ, 3);
  }

  /* Another dummy key! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 3);

  /* Now actually delete a request! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 2);

  /* Purge it all! */
  hs_purge_last_hid_serv_requests();
  request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker), OP_EQ, 0);

 done:
  tor_free(hsdir);
  tor_free(hsdir2);
  tor_free(hsdir3);
}

static void
test_parse_extended_hostname(void *arg)
{
  (void) arg;
  hostname_type_t type;

  char address1[] = "fooaddress.onion";
  char address3[] = "fooaddress.exit";
  char address4[] = "www.torproject.org";
  char address5[] = "foo.abcdefghijklmnop.onion";
  char address6[] = "foo.bar.abcdefghijklmnop.onion";
  char address7[] = ".abcdefghijklmnop.onion";
  char address8[] =
    "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
  char address9[] =
    "www.15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
  char address10[] =
    "15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid7jdl.onion";

  tt_assert(!parse_extended_hostname(address1, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  tt_assert(parse_extended_hostname(address3, &type));
  tt_int_op(type, OP_EQ, EXIT_HOSTNAME);

  tt_assert(parse_extended_hostname(address4, &type));
  tt_int_op(type, OP_EQ, NORMAL_HOSTNAME);

  tt_assert(!parse_extended_hostname(address5, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  tt_assert(!parse_extended_hostname(address6, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  tt_assert(!parse_extended_hostname(address7, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  tt_assert(parse_extended_hostname(address8, &type));
  tt_int_op(type, OP_EQ, ONION_V3_HOSTNAME);
  tt_str_op(address8, OP_EQ,
            "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");

  /* Invalid v3 address. */
  tt_assert(!parse_extended_hostname(address9, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

  /* Invalid v3 address: too long */
  tt_assert(!parse_extended_hostname(address10, &type));
  tt_int_op(type, OP_EQ, BAD_HOSTNAME);

 done: ;
}

static void
test_time_between_tp_and_srv(void *arg)
{
  int ret;
  networkstatus_t ns;
  (void) arg;

  /* This function should be returning true where "^" are:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00  |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3   |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|     |
   *  |                ^^^^^^^^^^^^            ^^^^^^^^^^^^               |
   *  |                                                                   |
   *  +------------------------------------------------------------------+
   */

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);

  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

 done:
  ;
}

/************ Reachability Test (it is huge) ****************/

/* Simulate different consensuses for client and service. Used by the
 * reachability test. The SRVs and responsible HSDir lists are used by all
 * reachability tests so make them common to simplify setup and teardown. */
static networkstatus_t *mock_service_ns = NULL;
static networkstatus_t *mock_client_ns = NULL;
static sr_srv_t current_srv, previous_srv;
static smartlist_t *service_responsible_hsdirs = NULL;
static smartlist_t *client_responsible_hsdirs = NULL;

static networkstatus_t *
mock_networkstatus_get_reasonably_live_consensus_service(time_t now,
                                                         int flavor)
{
  (void) now;
  (void) flavor;

  if (mock_service_ns) {
    return mock_service_ns;
  }

  mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
  mock_service_ns->routerstatus_list = smartlist_new();
  mock_service_ns->type = NS_TYPE_CONSENSUS;

  return mock_service_ns;
}

static networkstatus_t *
mock_networkstatus_get_latest_consensus_service(void)
{
  return mock_networkstatus_get_reasonably_live_consensus_service(0, 0);
}

static networkstatus_t *
mock_networkstatus_get_reasonably_live_consensus_client(time_t now, int flavor)
{
  (void) now;
  (void) flavor;

  if (mock_client_ns) {
    return mock_client_ns;
  }

  mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
  mock_client_ns->routerstatus_list = smartlist_new();
  mock_client_ns->type = NS_TYPE_CONSENSUS;

  return mock_client_ns;
}

static networkstatus_t *
mock_networkstatus_get_latest_consensus_client(void)
{
  return mock_networkstatus_get_reasonably_live_consensus_client(0, 0);
}

/* Mock function because we are not trying to test circuit closing, which
 * does an awful lot of checks on the circuit object. */
static void
mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
                            const char *file)
{
  (void) circ;
  (void) reason;
  (void) line;
  (void) file;
  return;
}

/* Initialize a big HSDir V3 hash ring. */
static void
helper_initialize_big_hash_ring(networkstatus_t *ns)
{
  int ret;

  /* Generate 250 hsdirs! :) */
  for (int counter = 1 ; counter < 251 ; counter++) {
    /* Let's generate a random nickname for each hsdir... */
    char nickname_binary[8];
    char nickname_str[13] = {0};
    crypto_rand(nickname_binary, sizeof(nickname_binary));
    ret = base64_encode(nickname_str, sizeof(nickname_str),
                        nickname_binary, sizeof(nickname_binary), 0);
    tt_int_op(ret, OP_EQ, 12);
    helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
  }

  /* Make sure we have 250 hsdirs in our list */
  tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);

 done:
  ;
}

/** Initialize service and publish its descriptor as needed. Return the newly
 * allocated service object to the caller. */
static hs_service_t *
helper_init_service(time_t now)
{
  int retval;
  hs_service_t *service = hs_service_new(get_options());
  tt_assert(service);
  service->config.version = HS_VERSION_THREE;
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  /* Register service to global map. */
  retval = register_service(get_hs_service_map(), service);
  tt_int_op(retval, OP_EQ, 0);

  /* Initialize service descriptor */
  build_all_descriptors(now);
  tt_assert(service->desc_current);
  tt_assert(service->desc_next);

 done:
  return service;
}

/* Helper function to set the RFC 1123 time string into t. */
static void
set_consensus_times(const char *timestr, time_t *t)
{
  tt_assert(timestr);
  tt_assert(t);

  int ret = parse_rfc1123_time(timestr, t);
  tt_int_op(ret, OP_EQ, 0);

 done:
  return;
}

/* Helper function to cleanup the mock consensus (client and service) */
static void
cleanup_mock_ns(void)
{
  if (mock_service_ns) {
    SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(mock_service_ns->routerstatus_list);
    mock_service_ns->sr_info.current_srv = NULL;
    mock_service_ns->sr_info.previous_srv = NULL;
    networkstatus_vote_free(mock_service_ns);
    mock_service_ns = NULL;
  }

  if (mock_client_ns) {
    SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(mock_client_ns->routerstatus_list);
    mock_client_ns->sr_info.current_srv = NULL;
    mock_client_ns->sr_info.previous_srv = NULL;
    networkstatus_vote_free(mock_client_ns);
    mock_client_ns = NULL;
  }
}

/* Helper function to setup a reachability test. Once called,
 * cleanup_reachability_test() MUST be called at the end. */
static void
setup_reachability_test(void)
{
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  MOCK(get_or_state, get_or_state_replacement);

  hs_init();

  /* Baseline to start with. */
  memset(&current_srv, 0, sizeof(current_srv));
  memset(&previous_srv, 1, sizeof(previous_srv));

  /* Initialize the consensuses. */
  mock_networkstatus_get_latest_consensus_service();
  mock_networkstatus_get_latest_consensus_client();

  service_responsible_hsdirs = smartlist_new();
  client_responsible_hsdirs = smartlist_new();
}

/* Helper function to cleanup a reachability test initial setup. */
static void
cleanup_reachability_test(void)
{
  smartlist_free(service_responsible_hsdirs);
  service_responsible_hsdirs = NULL;
  smartlist_free(client_responsible_hsdirs);
  client_responsible_hsdirs = NULL;
  hs_free_all();
  cleanup_mock_ns();
  UNMOCK(get_or_state);
  UNMOCK(circuit_mark_for_close_);
}

/* A reachability test always checks if the resulting service and client
 * responsible HSDirs for the given parameters are equal.
 *
 * Return true iff the exact same nodes are in both lists. */
static int
are_responsible_hsdirs_equal(void)
{
  int count = 0;
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);
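
  /* (Why 6 and 8? With the prop224 consensus-parameter defaults,
   * hsdir_n_replicas = 2, hsdir_spread_fetch = 3 and hsdir_spread_store = 4,
   * a client contacts 2*3 = 6 HSDirs while a service uploads to 2*4 = 8.
   * Stated here for context, from our reading of the spec defaults.) */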

  SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
                          const routerstatus_t *, c_rs) {
    SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
                            const routerstatus_t *, s_rs) {
      if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
                    DIGEST_LEN)) {
        count++;
        break;
      }
    } SMARTLIST_FOREACH_END(s_rs);
  } SMARTLIST_FOREACH_END(c_rs);

 done:
  return (count == 6);
}

/* Tor doesn't use such a function to get the previous HSDir; it is only used
 * in node_set_hsdir_index(). We need it here so we can test reachability
 * scenario 6, which requires the previous time period to compute the list of
 * responsible HSDirs because of the client state timing. */
static uint64_t
get_previous_time_period(time_t now)
{
  return hs_get_time_period_num(now) - 1;
}

/* Configuration of a reachability test scenario. */
typedef struct reachability_cfg_t {
  /* Consensus timings to be set. They have to be compliant with
   * RFC 1123 time format. */
  const char *service_valid_after;
  const char *service_valid_until;
  const char *client_valid_after;
  const char *client_valid_until;

  /* SRVs that the service and client should use. */
  sr_srv_t *service_current_srv;
  sr_srv_t *service_previous_srv;
  sr_srv_t *client_current_srv;
  sr_srv_t *client_previous_srv;

  /* A time period function for the service to use for this scenario. For a
   * successful reachability test, the client always uses the current time
   * period, which is why there is no client function. */
  uint64_t (*service_time_period_fn)(time_t);

  /* Are the client and service expected to be in a new time period? After
   * setting the consensus time, the reachability test checks
   * hs_in_period_between_tp_and_srv() and tests the returned value against
   * this. */
  unsigned int service_in_new_tp;
  unsigned int client_in_new_tp;

  /* Some scenarios require a hint that the client, because of its consensus
   * time, will request the "next" service descriptor, so this indicates
   * whether that is the case. */
  unsigned int client_fetch_next_desc;
} reachability_cfg_t;

/* Some defines to help with semantics while reading a configuration below. */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1

static reachability_cfg_t reachability_scenarios[] = {
  /* Scenario 1
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00  |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3   |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|     |
   *  |               ^ ^                                                 |
   *  |               S C                                                 |
   *  +------------------------------------------------------------------+
   *
   * S: Service, C: Client
   *
   * Service consensus valid_after time is set to 13:00 and client to 15:00;
   * both are after TP#1 and thus have access to SRV#1. Service and client
   * should be using TP#1.
   */

  { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
    "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
    &current_srv, NULL, /* Service current and previous SRV */
    &current_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 2
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00  |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3   |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|     |
   *  |                        ^ ^                                        |
   *  |                        S C                                        |
   *  +------------------------------------------------------------------+
   *
   * S: Service, C: Client
   *
   * Service consensus valid_after time is set to 23:00 and client to 01:00,
   * which puts the client just after SRV#2 and the service just before. The
   * service should only be using TP#1. The client should be using TP#1.
   */

  { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
    &previous_srv, NULL, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 3
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00  |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3   |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|     |
   *  |                            ^ ^                                    |
   *  |                            S C                                    |
   *  +------------------------------------------------------------------+
   *
   * S: Service, C: Client
   *
   * Service consensus valid_after time is set to 03:00 and client to 05:00,
   * which puts both after SRV#2. The service should be using TP#1 as its
   * current time period. The client should be using TP#1.
   */

  { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 4
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00  |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3   |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|     |
   *  |                                    ^ ^                            |
   *  |                                    S C                            |
   *  +------------------------------------------------------------------+
   *
   * S: Service, C: Client
   *
   * Service consensus valid_after time is set to 11:00 and client to 13:00,
   * which puts the service just before TP#2 and the client just after. The
   * service should be using TP#1 as its current time period and TP#2 as the
   * next. The client should be using the TP#2 time period.
   */

  { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_next_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 5
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00  |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3   |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|     |
   *  |                        ^ ^                                        |
   *  |                        C S                                        |
   *  +------------------------------------------------------------------+
   *
   * S: Service, C: Client
   *
   * Service consensus valid_after time is set to 01:00 and client to 23:00,
   * which puts the service just after SRV#2 and the client just before. The
   * service should be using TP#1 as its current time period and TP#2 as the
   * next. The client should be using the TP#1 time period.
   */

  { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &previous_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 6
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00  |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3   |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|     |
   *  |                                    ^ ^                            |
   *  |                                    C S                            |
   *  +------------------------------------------------------------------+
   *
   * S: Service, C: Client
   *
   * Service consensus valid_after time is set to 13:00 and client to 11:00,
   * which puts the service just after TP#2 and the client just before. The
   * service should be using TP#1 as its current time period and TP#2 as
   * its next. The client should be using the TP#1 time period.
   */

  { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    get_previous_time_period, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* End marker. */
  { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
};

/* Run a single reachability scenario. num_scenario is the corresponding
 * scenario number from the documentation. It is used to log it in case of
 * failure so we know which scenario fails. */
static int
run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
{
  int ret = -1;
  hs_service_t *service;
  uint64_t service_tp, client_tp;
  ed25519_public_key_t service_blinded_pk, client_blinded_pk;

  setup_reachability_test();

  tt_assert(cfg);

  /* Set service consensus time. */
  set_consensus_times(cfg->service_valid_after,
                      &mock_service_ns->valid_after);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->valid_until);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->fresh_until);
  dirauth_sched_recalculate_timing(get_options(),
                                   mock_service_ns->valid_after);
  /* Check that the service is at the right time period point */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
            cfg->service_in_new_tp);

  /* Set client consensus time. */
  set_consensus_times(cfg->client_valid_after,
                      &mock_client_ns->valid_after);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->valid_until);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->fresh_until);
  dirauth_sched_recalculate_timing(get_options(),
                                   mock_client_ns->valid_after);
  /* Check that the client is at the right time period point */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
            cfg->client_in_new_tp);

  /* Set the SRVs for this scenario. */
  mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
  mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
  mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
  mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;

  /* Initialize a service to get keys. */
  update_approx_time(mock_service_ns->valid_after);
  service = helper_init_service(mock_service_ns->valid_after+1);

  /*
   * === Client setup ===
   */

  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus_client);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_client);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_client_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_client_ns);

  /* The client ONLY uses the current time period. This is the whole point of
   * these reachability tests: to make sure the client can always reach the
   * service using only its current time period. */
  client_tp = hs_get_time_period_num(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          client_tp, &client_blinded_pk);
  hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
                            client_responsible_hsdirs);
  /* Cleanup the nodelist so we can let the service compute its own set of
   * nodes with its own hashring. */
  cleanup_nodelist();
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_reasonably_live_consensus);

  /*
   * === Service setup ===
   */

  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus_service);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_service);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_service_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_service_ns);

  service_tp = cfg->service_time_period_fn(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          service_tp, &service_blinded_pk);

  /* A service builds two lists of responsible HSDirs, for the current and
   * the next descriptor. Depending on the scenario, the client timing
   * indicates if it is fetching the current or the next descriptor, so we
   * use "client_fetch_next_desc" to know which one the client is trying to
   * get, to confirm that the service computes the same hashring for the same
   * blinded key and service time period function. */
  hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
                            cfg->client_fetch_next_desc, 0,
                            service_responsible_hsdirs);
  cleanup_nodelist();
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_reasonably_live_consensus);

  /* Some testing of the values we just got from the client and service. */
  tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
            ED25519_PUBKEY_LEN);
  tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);

  /* Everything went well. */
  ret = 0;

 done:
  cleanup_reachability_test();
  if (ret == -1) {
    /* Do this so we can know which scenario failed. */
    char msg[32];
    tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
    tt_fail_msg(msg);
  }
  return ret;
}

static void
test_reachability(void *arg)
{
  (void) arg;

  /* NOTE: An important axiom to understand here is that SRV#N must only be
   * used with the TP#N value. For example, SRV#2 with TP#1 should NEVER be
   * used together. The HSDir index computation is based on this axiom. */

  for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
    int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
    if (ret < 0) {
      return;
    }
  }
}

/** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
 * its identity digest in <b>hsdir_digest_out</b>. */
static void
helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
                         char *hsdir_digest_out)
{
  tt_assert(onion_identity_pk);

  routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
  tt_assert(client_hsdir);
  digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);

 done:
  ;
}

static void
test_hs_indexes(void *arg)
{
  int ret;
  uint64_t period_num = 42;
  ed25519_public_key_t pubkey;

  (void) arg;

  /* Build the hs_index */
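  /* (The formula under test, as we read rend-spec-v3 [HASHRING]:
   *   hs_index(replica) = H("store-at-idx" | blinded_public_key |
   *                         INT_8(replica) | INT_8(period_length) |
   *                         INT_8(period_num))
   * with H being SHA3-256.) */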
  {
    uint8_t hs_index[DIGEST256_LEN];
    const char *b32_test_vector =
      "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
    char test_vector[DIGEST256_LEN];
    ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
                        strlen(b32_test_vector));
    tt_int_op(ret, OP_EQ, sizeof(test_vector));
    /* Our test vector uses a public key set to 32 bytes of \x42. */
    memset(&pubkey, '\x42', sizeof(pubkey));
    hs_build_hs_index(1, &pubkey, period_num, hs_index);
    tt_mem_op(hs_index, OP_EQ, test_vector, sizeof(hs_index));
  }

  /* Build the hsdir_index */
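  /* (Likewise, as we read the spec:
   *   hsdir_index(node) = H("node-idx" | node_identity |
   *                         shared_random_value | INT_8(period_num) |
   *                         INT_8(period_length))
   * again with SHA3-256.) */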
  {
    uint8_t srv[DIGEST256_LEN];
    uint8_t hsdir_index[DIGEST256_LEN];
    const char *b32_test_vector =
      "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
    char test_vector[DIGEST256_LEN];
    ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
                        strlen(b32_test_vector));
    tt_int_op(ret, OP_EQ, sizeof(test_vector));
    /* Our test vector uses a public key set to 32 bytes of \x42. */
    memset(&pubkey, '\x42', sizeof(pubkey));
    memset(srv, '\x43', sizeof(srv));
    hs_build_hsdir_index(&pubkey, srv, period_num, hsdir_index);
    tt_mem_op(hsdir_index, OP_EQ, test_vector, sizeof(hsdir_index));
  }

 done:
  ;
}

#define EARLY_IN_SRV_TO_TP 0
#define LATE_IN_SRV_TO_TP 1
#define EARLY_IN_TP_TO_SRV 2
#define LATE_IN_TP_TO_SRV 3

/** Set the consensus and system time based on <b>position</b>. See the
 * following diagram for details:
 *
 *  +------------------------------------------------------------------+
 *  |                                                                   |
 *  | 00:00      12:00       00:00       12:00       00:00       12:00  |
 *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3   |
 *  |                                                                   |
 *  |  $==========|-----------$===========|-----------$===========|     |
 *  |                                                                   |
 *  |                                                                   |
 *  +------------------------------------------------------------------+
 */
static time_t
helper_set_consensus_and_system_time(networkstatus_t *ns, int position)
{
  time_t real_time = 0;

  /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
   * valid_after is what matters here, the rest is just to specify the voting
   * period correctly. */
  if (position == LATE_IN_SRV_TO_TP) {
    parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
  } else if (position == EARLY_IN_TP_TO_SRV) {
    parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
  } else if (position == LATE_IN_TP_TO_SRV) {
    parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->valid_until);
  } else if (position == EARLY_IN_SRV_TO_TP) {
    parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns->valid_until);
  } else {
    tt_assert(0);
  }
  dirauth_sched_recalculate_timing(get_options(), ns->valid_after);

  /* Set system time: pretend to be just 2 minutes before consensus expiry */
  real_time = ns->valid_until - 120;
  update_approx_time(real_time);

 done:
  return real_time;
}

/** Helper function that carries out the actual test for
 *  test_client_service_hsdir_set_sync() */
static void
helper_test_hsdir_sync(networkstatus_t *ns,
                       int service_position, int client_position,
                       int client_fetches_next_desc)
{
  hs_service_descriptor_t *desc;
  int retval;

  /** Test logic:
   *  1) Initialize service time: consensus and system time.
   *  1.1) Initialize the service hash ring.
   *  2) Initialize the service and publish descriptors.
   *  3) Initialize client time: consensus and system time.
   *  3.1) Initialize the client hash ring.
   *  4) Try to fetch the descriptor as a client, and CHECK that the HSDir
   *     picked by the client was also picked by the service.
   */

  /* 1) Initialize service time: consensus and real time. */
  time_t now = helper_set_consensus_and_system_time(ns, service_position);
  helper_initialize_big_hash_ring(ns);

  /* 2) Initialize the service. */
  hs_service_t *service = helper_init_service(now);
  desc = client_fetches_next_desc ? service->desc_next : service->desc_current;

  /* Now let's upload our descriptor to all HSDirs. */
  upload_descriptor_to_all(service, desc);
  /* Clean up right now so we don't leak memory on error. */
  cleanup_nodelist();
  /* Check that the previous_hsdirs list was populated. */
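  /* (The expected count of 8 below matches hsdir_n_replicas (2) times
   * hsdir_spread_store (4), assuming the default consensus parameters.) */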
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 8);

  /* 3) Initialize client time. */
  helper_set_consensus_and_system_time(ns, client_position);

  cleanup_nodelist();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  helper_initialize_big_hash_ring(ns);

  /* 4) Pick 6 HSDirs as a client and check that they were also chosen by
   *    the service. */
  for (int y = 0 ; y < 6 ; y++) {
    char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
    helper_client_pick_hsdir(&service->keys.identity_pk,
                             client_hsdir_b64_digest);

    /* CHECK: Go through the HSDirs chosen by the service and make sure the
     * set contains the one picked by the client! */
    retval = smartlist_contains_string(desc->previous_hsdirs,
                                       client_hsdir_b64_digest);
    tt_int_op(retval, OP_EQ, 1);
  }
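
  /* (Six picks exhaust the client's candidate set of hsdir_n_replicas (2)
   * times hsdir_spread_fetch (3) directories, assuming the default
   * consensus parameters; hence the 7th pick below must fail.) */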
  /* Finally, try to pick a 7th HSDir and check that NULL is returned, since
   * we have exhausted all of them: */
  tt_assert(!pick_hsdir_v3(&service->keys.identity_pk));

 done:
  /* At the end: free all services and initialize the subsystem again; we
   * will need it for the next scenario. */
  cleanup_nodelist();
  hs_service_free_all();
  hs_service_init();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
}

/** This test ensures that client and service will pick the same HSDirs
 *  under various timing scenarios:
 *  a) Scenario where both client and service are in the time segment between
 *     SRV#N and TP#N.
 *  b) Scenario where both client and service are in the time segment between
 *     TP#N and SRV#N+1.
 *  c) Scenario where the service is between SRV#N and TP#N, but the client
 *     is between TP#N and SRV#N+1.
 *  d) Scenario where the service is between TP#N and SRV#N+1, but the client
 *     is between SRV#N and TP#N.
 *
 *  This test is important because it checks that upload_descriptor_to_all()
 *  is in sync with pick_hsdir_v3(). That's not the case for the
 *  test_reachability() test, which only compares the responsible HSDir sets.
 */
static void
test_client_service_hsdir_set_sync(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);

  hs_init();

  /* Initialize a big hash ring: we want it to be big so that client and
   * service cannot accidentally select the same HSDirs. */
  ns = networkstatus_get_latest_consensus();
  tt_assert(ns);

  /** Now test the various sync scenarios. See the helper function for more
      details: */

  /* a) Scenario where both client and service are in the time segment
   *    between SRV#N and TP#N. At this time the client fetches the first
   *    HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                  ^ ^                             |
   *  |                                  S C                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, LATE_IN_SRV_TO_TP, 0);

  /* b) Scenario where both client and service are in the time segment
   *    between TP#N and SRV#N+1. At this time the client fetches the
   *    second HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                             ^ ^                  |
   *  |                                             S C                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, LATE_IN_TP_TO_SRV, 1);

  /* c) Scenario where the service is between SRV#N and TP#N, but the
   *    client is between TP#N and SRV#N+1. The client is ahead in time, so
   *    it fetches the second HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                   ^   ^                          |
   *  |                                   S   C                          |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, EARLY_IN_TP_TO_SRV, 1);

  /* d) Scenario where the service is between TP#N and SRV#N+1, but the
   *    client is between SRV#N and TP#N. The client is behind in time, so
   *    it fetches the first HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                   ^   ^                          |
   *  |                                   C   S                          |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_TP_TO_SRV, LATE_IN_SRV_TO_TP, 0);

  /* e) Scenario where the service is between SRV#N and TP#N, but the
   *    client is between TP#N-1 and SRV#N. The client is behind in time,
   *    so it fetches the first HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                               ^   ^              |
   *  |                                               C   S              |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_SRV_TO_TP, LATE_IN_TP_TO_SRV, 0);

  /* f) Scenario where the service is between TP#N and SRV#N+1, but the
   *    client is between SRV#N+1 and TP#N+1. The client is ahead in time,
   *    so it fetches the second HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                               ^   ^              |
   *  |                                               S   C              |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, EARLY_IN_SRV_TO_TP, 1);

 done:
  networkstatus_vote_free(ns);
  nodelist_free_all();
  hs_free_all();
}

struct testcase_t hs_common_tests[] = {
  { "build_address", test_build_address, TT_FORK,
    NULL, NULL },
  { "validate_address", test_validate_address, TT_FORK,
    NULL, NULL },
  { "time_period", test_time_period, TT_FORK,
    NULL, NULL },
  { "start_time_of_next_time_period", test_start_time_of_next_time_period,
    TT_FORK, NULL, NULL },
  { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
    NULL, NULL },
  { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
    NULL, NULL },
  { "disaster_srv", test_disaster_srv, TT_FORK,
    NULL, NULL },
  { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
    NULL, NULL },
  { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
    NULL, NULL },
  { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
    NULL, NULL },
  { "reachability", test_reachability, TT_FORK,
    NULL, NULL },
  { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
    TT_FORK, NULL, NULL },
  { "hs_indexes", test_hs_indexes, TT_FORK,
    NULL, NULL },

  END_OF_TESTCASES
};