2 * OpenVPN -- An application to securely tunnel IP networks
3 * over a single TCP/UDP port, with support for SSL/TLS-based
4 * session authentication and key exchange,
5 * packet encryption, packet authentication, and
8 * Copyright (C) 2002-2009 OpenVPN Technologies, Inc. <sales@openvpn.net>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program (see the file COPYING included with this
21 * distribution); if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 static struct status z
;
52 schedule_entry_debug_info (const char *caller
, const struct schedule_entry
*e
)
54 struct gc_arena gc
= gc_new ();
57 dmsg (D_SCHEDULER
, "SCHEDULE: %s wakeup=[%s] pri=%u",
59 tv_string_abs (&e
->tv
, &gc
),
64 dmsg (D_SCHEDULER
, "SCHEDULE: %s NULL",
72 schedule_set_pri (struct schedule_entry
*e
)
79 /* This is the master key comparison routine. A key is
80 * simply a struct timeval containing the absolute time for
81 * an event. The unique treap priority (pri) is used to ensure
82 * that keys do not collide.
85 schedule_entry_compare (const struct schedule_entry
*e1
,
86 const struct schedule_entry
*e2
)
88 if (e1
->tv
.tv_sec
< e2
->tv
.tv_sec
)
90 else if (e1
->tv
.tv_sec
> e2
->tv
.tv_sec
)
94 if (e1
->tv
.tv_usec
< e2
->tv
.tv_usec
)
96 else if (e1
->tv
.tv_usec
> e2
->tv
.tv_usec
)
100 if (e1
->pri
< e2
->pri
)
102 else if (e1
->pri
> e2
->pri
)
111 * Detach a btree node from its parent
114 schedule_detach_parent (struct schedule
*s
, struct schedule_entry
*e
)
120 if (e
->parent
->lt
== e
)
121 e
->parent
->lt
= NULL
;
122 else if (e
->parent
->gt
== e
)
123 e
->parent
->gt
= NULL
;
126 /* parent <-> child linkage is corrupted */
133 if (s
->root
== e
) /* last element deleted, tree is empty */
141 * Given a binary search tree, move a node toward the root
142 * while still maintaining the correct ordering relationships
143 * within the tree. This function is the workhorse
144 * of the tree balancer.
146 * This code will break on key collisions, which shouldn't
147 * happen because the treap priority is considered part of the key
148 * and is guaranteed to be unique.
151 schedule_rotate_up (struct schedule
*s
, struct schedule_entry
*e
)
155 struct schedule_entry
*lt
= e
->lt
;
156 struct schedule_entry
*gt
= e
->gt
;
157 struct schedule_entry
*p
= e
->parent
;
158 struct schedule_entry
*gp
= p
->parent
;
160 if (gp
) /* if grandparent exists, modify its child link */
164 else if (gp
->lt
== p
)
171 else /* no grandparent, now we are the root */
176 /* grandparent is now our parent */
179 /* parent is now our child */
182 /* reorient former parent's links
183 to reflect new position in the tree */
200 /* parent <-> child linkage is corrupted */
211 * This is the treap deletion algorithm:
213 * Rotate lesser-priority children up in the tree
214 * until we are childless. Then delete.
217 schedule_remove_node (struct schedule
*s
, struct schedule_entry
*e
)
219 while (e
->lt
|| e
->gt
)
225 if (e
->lt
->pri
< e
->gt
->pri
)
226 schedule_rotate_up (s
, e
->lt
);
228 schedule_rotate_up (s
, e
->gt
);
231 schedule_rotate_up (s
, e
->lt
);
234 schedule_rotate_up (s
, e
->gt
);
237 schedule_detach_parent (s
, e
);
242 * Trivially add a node to a binary search tree without
243 * regard for balance.
246 schedule_insert (struct schedule
*s
, struct schedule_entry
*e
)
248 struct schedule_entry
*c
= s
->root
;
251 const int comp
= schedule_entry_compare (e
, c
);
287 /* rare key/priority collision -- no big deal,
288 just choose another priority and retry */
292 schedule_set_pri (e
);
293 /* msg (M_INFO, "PRI COLLISION pri=%u", e->pri); */
301 * Given an element, remove it from the btree if it's already
302 * there and re-insert it based on its current key.
305 schedule_add_modify (struct schedule
*s
, struct schedule_entry
*e
)
308 if (check_debug_level (D_SCHEDULER
))
309 schedule_entry_debug_info ("schedule_add_modify", e
);
312 /* already in tree, remove */
314 schedule_remove_node (s
, e
);
316 /* set random priority */
317 schedule_set_pri (e
);
320 schedule_insert (s
, e
); /* trivial insert into tree */
322 s
->root
= e
; /* tree was empty, we are the first element */
324 /* This is the magic of the randomized treap algorithm which
325 keeps the tree balanced. Move the node up the tree until
326 its own priority is greater than that of its parent */
327 while (e
->parent
&& e
->parent
->pri
> e
->pri
)
328 schedule_rotate_up (s
, e
);
332 * Find the earliest event to be scheduled
334 struct schedule_entry
*
335 schedule_find_least (struct schedule_entry
*e
)
349 if (check_debug_level (D_SCHEDULER
))
350 schedule_entry_debug_info ("schedule_find_least", e
);
357 * Public functions below this point
365 ALLOC_OBJ_CLEAR (s
, struct schedule
);
366 mutex_init (&s
->mutex
);
371 schedule_free (struct schedule
*s
)
373 mutex_destroy (&s
->mutex
);
378 schedule_remove_entry (struct schedule
*s
, struct schedule_entry
*e
)
380 mutex_lock (&s
->mutex
);
381 s
->earliest_wakeup
= NULL
; /* invalidate cache */
382 schedule_remove_node (s
, e
);
383 mutex_unlock (&s
->mutex
);
387 * Debug functions below this point
392 static inline struct schedule_entry
*
393 schedule_find_earliest_wakeup (struct schedule
*s
)
395 return schedule_find_least (s
->root
);
399 * Recursively check that the treap (btree) is
400 * internally consistent.
403 schedule_debug_entry (const struct schedule_entry
* e
,
406 struct timeval
*least
,
407 const struct timeval
*min
,
408 const struct timeval
*max
)
410 struct gc_arena gc
= gc_new ();
411 int maxdepth
= depth
;
418 ASSERT (e
!= e
->parent
);
419 ASSERT (!e
->parent
|| e
->parent
!= e
->lt
);
420 ASSERT (!e
->parent
|| e
->parent
!= e
->gt
);
421 ASSERT (!e
->lt
|| e
->lt
!= e
->gt
);
425 ASSERT (e
->lt
->parent
== e
);
426 ASSERT (schedule_entry_compare (e
->lt
, e
) == -1);
427 ASSERT (e
->lt
->pri
>= e
->pri
);
432 ASSERT (e
->gt
->parent
== e
);
433 ASSERT (schedule_entry_compare (e
->gt
, e
));
434 ASSERT (e
->gt
->pri
>= e
->pri
);
437 ASSERT (tv_le (min
, &e
->tv
));
438 ASSERT (tv_le (&e
->tv
, max
));
443 if (least
&& tv_lt (&e
->tv
, least
))
446 d
= schedule_debug_entry (e
->lt
, depth
+1, count
, least
, min
, &e
->tv
);
450 d
= schedule_debug_entry (e
->gt
, depth
+1, count
, least
, &e
->tv
, max
);
459 schedule_debug (struct schedule
*s
, int *count
, struct timeval
*least
)
466 max
.tv_sec
= 0x7FFFFFFF;
467 max
.tv_usec
= 0x7FFFFFFF;
471 ASSERT (s
->root
->parent
== NULL
);
473 return schedule_debug_entry (s
->root
, 0, count
, least
, &min
, &max
);
479 tv_randomize (struct timeval
*tv
)
481 tv
->tv_sec
+= random() % 100;
482 tv
->tv_usec
= random () % 100;
488 tv_randomize (struct timeval
*tv
)
490 struct gc_arena gc
= gc_new ();
491 long int choice
= get_random ();
492 if ((choice
& 0xFF) == 0)
493 tv
->tv_usec
+= ((choice
>> 8) & 0xFF);
495 prng_bytes ((uint8_t *)tv
, sizeof (struct timeval
));
502 schedule_verify (struct schedule
*s
)
504 struct gc_arena gc
= gc_new ();
505 struct timeval least
;
508 struct schedule_entry
* e
;
509 const struct status zz
= z
;
511 least
.tv_sec
= least
.tv_usec
= 0x7FFFFFFF;
515 maxlev
= schedule_debug (s
, &count
, &least
);
517 e
= schedule_find_earliest_wakeup (s
);
521 printf ("Verification Phase count=%d maxlev=%d sru=%d ins=%d coll=%d ls=%d l=%s",
528 tv_string (&e
->tv
, &gc
));
530 if (!tv_eq (&least
, &e
->tv
))
531 printf (" [COMPUTED DIFFERENT MIN VALUES!]");
541 schedule_randomize_array (struct schedule_entry
**array
, int size
)
544 for (i
= 0; i
< size
; ++i
)
546 const int src
= get_random () % size
;
547 struct schedule_entry
*tmp
= array
[i
];
550 array
[i
] = array
[src
];
557 schedule_print_work (struct schedule_entry
*e
, int indent
)
559 struct gc_arena gc
= gc_new ();
561 for (i
= 0; i
< indent
; ++i
)
565 printf ("%s [%u] e=" ptr_format
", p=" ptr_format
" lt=" ptr_format
" gt=" ptr_format
"\n",
566 tv_string (&e
->tv
, &gc
),
572 schedule_print_work (e
->lt
, indent
+1);
573 schedule_print_work (e
->gt
, indent
+1);
581 schedule_print (struct schedule
*s
)
583 printf ("*************************\n");
584 schedule_print_work (s
->root
, 0);
590 struct gc_arena gc
= gc_new ();
595 struct schedule_entry
**array
;
596 struct schedule
*s
= schedule_init ();
597 struct schedule_entry
* e
;
600 ALLOC_ARRAY (array
, struct schedule_entry
*, n
);
602 printf ("Creation/Insertion Phase\n");
604 for (i
= 0; i
< n
; ++i
)
606 ALLOC_OBJ_CLEAR (array
[i
], struct schedule_entry
);
607 tv_randomize (&array
[i
]->tv
);
608 /*schedule_print (s);*/
609 /*schedule_verify (s);*/
610 schedule_add_modify (s
, array
[i
]);
613 schedule_randomize_array (array
, n
);
615 /*schedule_print (s);*/
618 for (j
= 1; j
<= n_mod
; ++j
)
620 printf ("Modification Phase Pass %d\n", j
);
622 for (i
= 0; i
< n
; ++i
)
624 e
= schedule_find_earliest_wakeup (s
);
625 /*printf ("BEFORE %s\n", tv_string (&e->tv, &gc));*/
626 tv_randomize (&e
->tv
);
627 /*printf ("AFTER %s\n", tv_string (&e->tv, &gc));*/
628 schedule_add_modify (s
, e
);
629 /*schedule_verify (s);*/
630 /*schedule_print (s);*/
633 /*schedule_print (s);*/
636 /*printf ("INS=%d\n", z.ins);*/
638 while ((e
= schedule_find_earliest_wakeup (s
)))
640 schedule_remove_node (s
, e
);
641 /*schedule_verify (s);*/
645 printf ("S->ROOT is %s\n", s
->root
? "NOT NULL" : "NULL");
647 for (i
= 0; i
< n
; ++i
)