/*
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)radix.c	8.4 (Berkeley) 11/2/94
 * $FreeBSD: src/sys/net/radix.c,v 1.20.2.3 2002/04/28 05:40:25 suz Exp $
 */
/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/globaldata.h>
#include <sys/thread.h>

#include <sys/syslog.h>

#include <net/radix.h>
/*
 * The arguments to the radix functions are really counted byte arrays with
 * the length in the first byte.  struct sockaddr's fit this type structurally.
 */
#define clen(c)	(*(u_char *)(c))
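/*
 * A minimal illustration (not part of the original source): a
 * struct sockaddr_in stores its length in its first byte (sin_len),
 * so it is structurally a counted byte array and can be handed to
 * these routines directly.  Sketch, assuming <netinet/in.h>:
 */
#if 0
	struct sockaddr_in sin;

	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(sin);	/* the count occupies the first byte */
	KKASSERT(clen(&sin) == sizeof(sin));
#endif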
static int rn_walktree_from(struct radix_node_head *h, char *a, char *m,
			    walktree_f_t *f, void *w);
static int rn_walktree(struct radix_node_head *, walktree_f_t *, void *);
static int rn_walktree_at(struct radix_node_head *h, const char *a,
			  const char *m, walktree_f_t *f, void *w);
static struct radix_node
    *rn_insert(char *, struct radix_node_head *, boolean_t *,
	       struct radix_node [2]),
    *rn_newpair(char *, int, struct radix_node [2]),
    *rn_search(const char *, struct radix_node *),
    *rn_search_m(const char *, struct radix_node *, const char *);
static struct radix_mask *rn_mkfreelist[MAXCPU];
static struct radix_node_head *mask_rnheads[MAXCPU];

static char rn_zeros[RN_MAXKEYLEN];
static char rn_ones[RN_MAXKEYLEN] = RN_MAXKEYONES;
static boolean_t rn_lexobetter(char *m, char *n);
static struct radix_mask *
	rn_new_radix_mask(struct radix_node *tt, struct radix_mask *nextmask);
static boolean_t
	rn_satisfies_leaf(char *trial, struct radix_node *leaf, int skip);
static __inline struct radix_mask *
MKGet(struct radix_mask **l)
{
	struct radix_mask *m;

	if (*l != NULL) {
		m = *l;
		*l = m->rm_next;
	} else {
		R_Malloc(m, struct radix_mask *, sizeof *m);
	}
	return m;
}
static __inline void
MKFree(struct radix_mask **l, struct radix_mask *m)
{
	m->rm_next = *l;
	*l = m;
}
/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_bit at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_bit - 1.
 * (We say the index of n is rn_bit.)
 *
 * There is at least one descendant which has a one bit at position rn_bit,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bitwise logical and of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit is 0 past the index of the mask.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_bit,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_bit, this implies the last few bits of k
 * before bit rn_bit are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask-and-compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */
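/*
 * Worked sketch (not part of the original code): the index of a mask is
 * found by skipping 0xff bytes and then counting leading 1 bits, exactly
 * as rn_addmask() does below.  The helper name mask_index() is
 * hypothetical.
 */
#if 0
static int
mask_index(char *mask, int skip)
{
	char *cp, *cplim = mask + clen(mask);
	int b = 0, j;

	for (cp = mask + skip; cp < cplim && clen(cp) == 0xff;)
		cp++;
	if (cp < cplim)
		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
	/* e.g. 255.255.240.0 stored at a 4-byte offset gives 52 here,
	 * i.e. bit 20 of the address itself */
	return (b + ((cp - mask) << 3));
}
#endif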
static struct radix_node *
rn_search(const char *v, struct radix_node *head)
{
	struct radix_node *x;

	x = head;
	while (x->rn_bit >= 0) {
		if (x->rn_bmask & v[x->rn_offset])
			x = x->rn_right;
		else
			x = x->rn_left;
	}
	return (x);
}
static struct radix_node *
rn_search_m(const char *v, struct radix_node *head, const char *m)
{
	struct radix_node *x;

	for (x = head; x->rn_bit >= 0;) {
		if ((x->rn_bmask & m[x->rn_offset]) &&
		    (x->rn_bmask & v[x->rn_offset]))
			x = x->rn_right;
		else
			x = x->rn_left;
	}
	return x;
}
boolean_t
rn_refines(char *m, char *n)
{
	char *lim, *lim2;
	int longer = clen(n++) - clen(m++);
	boolean_t masks_are_equal = TRUE;

	lim2 = lim = n + clen(n);
	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return FALSE;
		if (*n++ != *m++)
			masks_are_equal = FALSE;
	}
	while (n < lim2)
		if (*n++)
			return FALSE;
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return TRUE;
	return (!masks_are_equal);
}
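/*
 * Example (a sketch, not in the original): a /24 IPv4 mask refines a
 * /16 one -- every bit set in the /16 mask is also set in the /24 mask
 * and the masks differ -- so rn_refines(mask24, mask16) is TRUE while
 * rn_refines(mask16, mask24) is FALSE.
 */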
struct radix_node *
rn_lookup(char *key, char *mask, struct radix_node_head *head)
{
	struct radix_node *x;
	char *netmask = NULL;

	if (mask != NULL) {
		x = rn_addmask(mask, TRUE, head->rnh_treetop->rn_offset,
			       head->rnh_maskhead);
		if (x == NULL)
			return NULL;
		netmask = x->rn_key;
	}

	x = rn_match(key, head);
	if (x != NULL && netmask != NULL) {
		while (x != NULL && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}

	return x;
}
static boolean_t
rn_satisfies_leaf(char *trial, struct radix_node *leaf, int skip)
{
	char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
	char *cplim;
	int length = min(clen(cp), clen(cp2));

	if (cp3 == NULL)
		cp3 = rn_ones;
	else
		length = min(length, clen(cp3));
	cplim = cp + length; cp3 += skip; cp2 += skip;
	for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
		if ((*cp ^ *cp2) & *cp3)
			return FALSE;
	return TRUE;
}
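/*
 * Example (a sketch, not in the original): with leaf key 10.1.0.0 and
 * mask 255.255.0.0, trial key 10.1.200.7 satisfies the leaf because
 * (*cp ^ *cp2) & *cp3 is zero for every masked byte; 10.2.0.1 fails on
 * the second address byte.
 */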
struct radix_node *
rn_match(char *key, struct radix_node_head *head)
{
	struct radix_node *t, *x;
	char *cp = key, *cp2;
	char *cplim;
	struct radix_node *saved_t, *top = head->rnh_treetop;
	int off = top->rn_offset, klen, matched_off;
	int test, b, rn_bit;

	t = rn_search(key, top);
	/*
	 * See if we match exactly as a host destination
	 * or at least learn how many bits match, for normal mask finesse.
	 *
	 * It doesn't hurt us to limit how many bytes to check
	 * to the length of the mask, since if it matches we had a genuine
	 * match and the leaf we have is the most specific one anyway;
	 * if it didn't match with a shorter length it would fail
	 * with a long one.  This wins big for class B&C netmasks which
	 * are probably the most common case...
	 */
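	/*
	 * (Illustrative note, not in the original: an IPv4 /16 mask is
	 * stored with its trailing zero bytes trimmed by rn_addmask(), so
	 * klen below covers just the header and the two network bytes and
	 * the byte-compare loop never touches the host part of the key.)
	 */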
	if (t->rn_mask != NULL)
		klen = clen(t->rn_mask);
	else
		klen = clen(key);
	cp += off; cp2 = t->rn_key + off; cplim = key + klen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 *
	 * Never return the root node itself, it seems to cause a
	 * lot of confusion.
	 */
	if (t->rn_flags & RNF_ROOT)
		t = t->rn_dupedkey;
	return t;
on1:
	test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
	for (b = 7; (test >>= 1) > 0;)
		b--;
	matched_off = cp - key;
	b += matched_off << 3;
	rn_bit = -1 - b;
	/*
	 * If there is a host route in a duped-key chain, it will be first.
	 */
	if ((saved_t = t)->rn_mask == NULL)
		t = t->rn_dupedkey;
	for (; t; t = t->rn_dupedkey) {
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		if (t->rn_flags & RNF_NORMAL) {
			if (rn_bit <= t->rn_bit)
				return t;
		} else if (rn_satisfies_leaf(key, t, matched_off))
			return t;
	}
	t = saved_t;
	/* start searching up the tree */
	do {
		struct radix_mask *m;

		t = t->rn_parent;
		m = t->rn_mklist;
		/*
		 * If non-contiguous masks ever become important
		 * we can restore the masking and open coding of
		 * the search and satisfaction test and put the
		 * calculation of "off" back before the "do".
		 */
		while (m != NULL) {
			if (m->rm_flags & RNF_NORMAL) {
				if (rn_bit <= m->rm_bit)
					return (m->rm_leaf);
			} else {
				off = min(t->rn_offset, matched_off);
				x = rn_search_m(key, t, m->rm_mask);
				while (x != NULL && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x && rn_satisfies_leaf(key, x, off))
					return x;
			}
			m = m->rm_next;
		}
	} while (t != top);
	return NULL;
}
#ifdef RN_DEBUG
int rn_nodenum;
struct radix_node *rn_clist;
int rn_saveinfo;
boolean_t rn_debug = TRUE;
#endif
static struct radix_node *
rn_newpair(char *key, int indexbit, struct radix_node nodes[2])
{
	struct radix_node *leaf = &nodes[0], *interior = &nodes[1];

	interior->rn_bit = indexbit;
	interior->rn_bmask = 0x80 >> (indexbit & 0x7);
	interior->rn_offset = indexbit >> 3;
	interior->rn_left = leaf;
	interior->rn_mklist = NULL;

	leaf->rn_bit = -1;
	leaf->rn_key = key;
	leaf->rn_parent = interior;
	leaf->rn_flags = interior->rn_flags = RNF_ACTIVE;
	leaf->rn_mklist = NULL;

#ifdef RN_DEBUG
	leaf->rn_info = rn_nodenum++;
	interior->rn_info = rn_nodenum++;
	leaf->rn_twin = interior;
	leaf->rn_ybro = rn_clist;
	rn_clist = leaf;
#endif
	return interior;
}
static struct radix_node *
rn_insert(char *key, struct radix_node_head *head, boolean_t *dupentry,
	  struct radix_node nodes[2])
{
	struct radix_node *top = head->rnh_treetop;
	int head_off = top->rn_offset, klen = clen(key);
	struct radix_node *t = rn_search(key, top);
	char *cp = key + head_off;
	int b;
	struct radix_node *tt;

	/*
	 * Find first bit at which the key and t->rn_key differ
	 */
    {
	char *cp2 = t->rn_key + head_off;
	int cmp_res;
	char *cplim = key + klen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = TRUE;
	return t;
on1:
	*dupentry = FALSE;
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - key) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	struct radix_node *p, *x = top;

	cp = key;
	do {
		p = x;
		if (cp[x->rn_offset] & x->rn_bmask)
			x = x->rn_right;
		else
			x = x->rn_left;
	} while (b > (unsigned) x->rn_bit);
				/* x->rn_bit < b && x->rn_bit >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
#endif
	t = rn_newpair(key, b, nodes);
	tt = t->rn_left;
	if ((cp[p->rn_offset] & p->rn_bmask) == 0)
		p->rn_left = t;
	else
		p->rn_right = t;
	x->rn_parent = t;
	t->rn_parent = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_offset] & t->rn_bmask) == 0) {
		t->rn_right = x;
	} else {
		t->rn_right = tt;
		t->rn_left = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
#endif
    }
	return (tt);
}
struct radix_node *
rn_addmask(char *netmask, boolean_t search, int skip,
	   struct radix_node_head *mask_rnh)
{
	struct radix_node *x, *saved_x;
	char *cp, *cplim;
	int b = 0, mlen, m0, j;
	boolean_t maskduplicated, isnormal;
	char *addmask_key;

	if ((mlen = clen(netmask)) > RN_MAXKEYLEN)
		mlen = RN_MAXKEYLEN;
	if (skip == 0)
		skip = 1;
	if (mlen <= skip)
		return (mask_rnh->rnh_nodes);
	R_Malloc(addmask_key, char *, RN_MAXKEYLEN);
	if (addmask_key == NULL)
		return NULL;
	if (skip > 1)
		bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
	if ((m0 = mlen) > skip)
		bcopy(netmask + skip, addmask_key + skip, mlen - skip);
	/*
	 * Trim trailing zeroes.
	 */
	for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
		cp--;
	mlen = cp - addmask_key;
	if (mlen <= skip) {
		if (m0 >= mask_rnh->rnh_last_zeroed)
			mask_rnh->rnh_last_zeroed = mlen;
		Free(addmask_key);
		return (mask_rnh->rnh_nodes);
	}
	if (m0 < mask_rnh->rnh_last_zeroed)
		bzero(addmask_key + m0, mask_rnh->rnh_last_zeroed - m0);
	*addmask_key = mask_rnh->rnh_last_zeroed = mlen;
	x = rn_search(addmask_key, mask_rnh->rnh_treetop);
	if (x->rn_key == NULL) {
		kprintf("WARNING: radix_node->rn_key is NULL rn=%p\n", x);
		x = NULL;
	} else if (bcmp(addmask_key, x->rn_key, mlen) != 0) {
		x = NULL;
	}
	if (x != NULL || search)
		goto out;
	R_Malloc(x, struct radix_node *, RN_MAXKEYLEN + 2 * (sizeof *x));
	if ((saved_x = x) == NULL)
		goto out;
	bzero(x, RN_MAXKEYLEN + 2 * (sizeof *x));
	netmask = cp = (char *)(x + 2);
	bcopy(addmask_key, cp, mlen);
	x = rn_insert(cp, mask_rnh, &maskduplicated, x);
	if (maskduplicated) {
		log(LOG_ERR, "rn_addmask: mask impossibly already in tree");
		Free(saved_x);
		goto out;
	}
	/*
	 * Calculate index of mask, and check for normalcy.
	 */
	isnormal = TRUE;
	cplim = netmask + mlen;
	for (cp = netmask + skip; cp < cplim && clen(cp) == 0xff;)
		cp++;
	if (cp != cplim) {
		static const char normal_chars[] = {
			0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1
		};

		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
		if (*cp != normal_chars[b] || cp != (cplim - 1))
			isnormal = FALSE;
	}
	b += (cp - netmask) << 3;
	x->rn_bit = -1 - b;
	if (isnormal)
		x->rn_flags |= RNF_NORMAL;
out:
	Free(addmask_key);
	return (x);
}
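/*
 * Example (a sketch, not in the original): a contiguous mask ends in at
 * most one partial byte drawn from normal_chars[], e.g. 255.255.240.0
 * ends in 0xf0 == normal_chars[4] and is normal, while a final mask
 * byte of 0xa0 matches no entry and marks the mask non-normal.
 */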
/* XXX: arbitrary ordering for non-contiguous masks */
static boolean_t
rn_lexobetter(char *mp, char *np)
{
	char *lim;

	if ((unsigned) *mp > (unsigned) *np)
		return TRUE; /* not really, but need to check longer one first */
	if (*mp == *np)
		for (lim = mp + clen(mp); mp < lim;)
			if (*mp++ > *np++)
				return TRUE;
	return FALSE;
}
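/*
 * Example (a sketch, not in the original): rn_lexobetter() only breaks
 * ties among non-contiguous masks that rn_refines() cannot order, e.g.
 * 0xf0.0x0f.0.0 versus 0x0f.0xf0.0.0; the longer, then lexically
 * greater, mask sorts first.
 */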
static struct radix_mask *
rn_new_radix_mask(struct radix_node *tt, struct radix_mask *nextmask)
{
	struct radix_mask *m;

	m = MKGet(&rn_mkfreelist[mycpuid]);
	if (m == NULL) {
		log(LOG_ERR, "Mask for route not entered\n");
		return NULL;
	}
	bzero(m, sizeof *m);
	m->rm_bit = tt->rn_bit;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_next = nextmask;
	tt->rn_mklist = m;
	return m;
}
struct radix_node *
rn_addroute(char *key, char *netmask, struct radix_node_head *head,
	    struct radix_node treenodes[2])
{
	struct radix_node *t, *x = NULL, *tt;
	struct radix_node *saved_tt, *top = head->rnh_treetop;
	short b = 0, b_leaf = 0;
	boolean_t keyduplicated;
	char *mmask;
	struct radix_mask *m, **mp;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
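	/*
	 * (Sketch, not in the original: because rn_addmask() interns each
	 * mask in the shared mask tree, two routes with equal masks share
	 * one rn_key pointer, which is why pointer comparisons such as
	 * tt->rn_mask == netmask suffice throughout this file.)
	 */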
	if (netmask != NULL) {
		if ((x = rn_addmask(netmask, FALSE, top->rn_offset,
				    head->rnh_maskhead)) == NULL)
			return NULL;
		b_leaf = x->rn_bit;
		b = -1 - x->rn_bit;
		netmask = x->rn_key;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(key, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
			if (tt->rn_mask == netmask)
				return NULL;
			if (netmask == NULL ||
			    (tt->rn_mask != NULL &&
			     ((b_leaf < tt->rn_bit) /* index(netmask) > node */
			      || rn_refines(netmask, tt->rn_mask)
			      || rn_lexobetter(netmask, tt->rn_mask))))
				break;
		}
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 */
		if (tt == saved_tt) {
			struct radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_parent = x = t->rn_parent;
			t->rn_parent = tt;			/* parent */
			if (x->rn_left == t)
				x->rn_left = tt;
			else
				x->rn_right = tt;
			saved_tt = tt; x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
			tt->rn_parent = t;			/* parent */
			if (tt->rn_dupedkey != NULL)		/* parent */
				tt->rn_dupedkey->rn_parent = tt; /* parent */
		}
#ifdef RN_DEBUG
		t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		tt->rn_key = key;
		tt->rn_bit = -1;
		tt->rn_flags = RNF_ACTIVE;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask != NULL) {
		tt->rn_mask = netmask;
		tt->rn_bit = x->rn_bit;
		tt->rn_flags |= x->rn_flags & RNF_NORMAL;
	}
	t = saved_tt->rn_parent;
	if (keyduplicated)
		goto on2;
	b_leaf = -1 - t->rn_bit;
	if (t->rn_right == saved_tt)
		x = t->rn_left;
	else
		x = t->rn_right;
	/* Promote general routes from below */
	if (x->rn_bit < 0) {
		mp = &t->rn_mklist;
		for (; x; x = x->rn_dupedkey) {
			if (x->rn_mask != NULL &&
			    x->rn_bit >= b_leaf &&
			    x->rn_mklist == NULL) {
				*mp = m = rn_new_radix_mask(x, NULL);
				if (m != NULL)
					mp = &m->rm_next;
			}
		}
	} else if (x->rn_mklist != NULL) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_next)
			if (m->rm_bit >= b_leaf)
				break;
		t->rn_mklist = m;
		*mp = NULL;
	}
on2:
	/* Add new route to highest possible ancestor's list */
	if ((netmask == NULL) || (b > t->rn_bit))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_bit;
	do {
		x = t;
		t = t->rn_parent;
	} while (b <= t->rn_bit && x != top);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * Need same criteria as when sorting dupedkeys to avoid
	 * double loop on deletion.
	 */
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_next) {
		if (m->rm_bit < b_leaf)
			continue;
		if (m->rm_bit > b_leaf)
			break;
		if (m->rm_flags & RNF_NORMAL) {
			mmask = m->rm_leaf->rn_mask;
			if (tt->rn_flags & RNF_NORMAL) {
				log(LOG_ERR,
				    "Non-unique normal route, mask not entered\n");
				return tt;
			}
		} else
			mmask = m->rm_mask;
		if (mmask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
			break;
	}
	*mp = rn_new_radix_mask(tt, *mp);
	return tt;
}
struct radix_node *
rn_delete(char *key, char *netmask, struct radix_node_head *head)
{
	struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	int b, head_off, klen;
	int cpu = mycpuid;

	x = head->rnh_treetop;
	tt = rn_search(key, x);
	head_off = x->rn_offset;
	klen = clen(key);
	saved_tt = tt;
	top = x;
	if (tt == NULL ||
	    bcmp(key + head_off, tt->rn_key + head_off, klen - head_off))
		return (NULL);
	/*
	 * Delete our route from mask lists.
	 */
	if (netmask != NULL) {
		if ((x = rn_addmask(netmask, TRUE, head_off,
				    head->rnh_maskhead)) == NULL)
			return (NULL);
		netmask = x->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == NULL)
				return (NULL);
	}
	if (tt->rn_mask == NULL || (saved_m = m = tt->rn_mklist) == NULL)
		goto on1;
	if (tt->rn_flags & RNF_NORMAL) {
		if (m->rm_leaf != tt || m->rm_refs > 0) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			return (NULL);  /* dangling ref could cause disaster */
		}
	} else {
		if (m->rm_mask != tt->rn_mask) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			goto on1;
		}
		if (--m->rm_refs >= 0)
			goto on1;
	}
	b = -1 - tt->rn_bit;
	t = saved_tt->rn_parent;
	if (b > t->rn_bit)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_parent;
	} while (b <= t->rn_bit && x != top);
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_next)
		if (m == saved_m) {
			*mp = m->rm_next;
			MKFree(&rn_mkfreelist[cpu], m);
			break;
		}
	if (m == NULL) {
		log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
		if (tt->rn_flags & RNF_NORMAL)
			return (NULL); /* Dangling ref to us */
	}
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (NULL);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif
	t = tt->rn_parent;
	dupedkey = saved_tt->rn_dupedkey;
	if (dupedkey != NULL) {
		/*
		 * at this point, tt is the deletion target and saved_tt
		 * is the head of the dupekey chain
		 */
		if (tt == saved_tt) {
			/* remove from head of chain */
			x = dupedkey; x->rn_parent = t;
			if (t->rn_left == tt)
				t->rn_left = x;
			else
				t->rn_right = x;
		} else {
			/* find node in front of tt on the chain */
			for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) {
				p->rn_dupedkey = tt->rn_dupedkey;
				if (tt->rn_dupedkey)		/* parent */
					tt->rn_dupedkey->rn_parent = p;
			} else log(LOG_ERR, "rn_delete: couldn't find us\n");
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t;
			p = t->rn_parent;
#else
			b = t->rn_info;
			*++x = *t;
			t->rn_info = b;
			p = t->rn_parent;
#endif
			if (p->rn_left == t)
				p->rn_left = x;
			else
				p->rn_right = x;
			x->rn_left->rn_parent = x;
			x->rn_right->rn_parent = x;
		}
		goto out;
	}
	if (t->rn_left == tt)
		x = t->rn_right;
	else
		x = t->rn_left;
	p = t->rn_parent;
	if (p->rn_right == t)
		p->rn_right = x;
	else
		p->rn_left = x;
	x->rn_parent = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist != NULL) {
		if (x->rn_bit >= 0) {
			for (mp = &x->rn_mklist; (m = *mp);)
				mp = &m->rm_next;
			*mp = t->rn_mklist;
		} else {
			/*
			 * If there are any (key, mask) pairs in a sibling
			 * duped-key chain, some subset will appear sorted
			 * in the same order attached to our mklist.
			 */
			for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
				if (m == x->rn_mklist) {
					struct radix_mask *mm = m->rm_next;

					x->rn_mklist = NULL;
					if (--(m->rm_refs) < 0)
						MKFree(&rn_mkfreelist[cpu], m);
					m = mm;
				}
			if (m) {
				log(LOG_ERR,
				    "rn_delete: Orphaned Mask %p at %p\n",
				    (void *)m, (void *)x);
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info;
		*t = *x;
		t->rn_info = b;
#endif
		t->rn_left->rn_parent = t;
		t->rn_right->rn_parent = t;
		p = x->rn_parent;
		if (p->rn_left == x)
			p->rn_left = t;
		else
			p->rn_right = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}
/*
 * This is the same as rn_walktree() except for the parameters and the
 * exit.
 */
static int
rn_walktree_from(struct radix_node_head *h, char *xa, char *xm,
		 walktree_f_t *f, void *w)
{
	struct radix_node *base, *next;
	struct radix_node *rn, *last = NULL /* shut up gcc */;
	boolean_t stopping = FALSE;
	int lastb, error;

	/*
	 * rn_search_m is sort-of-open-coded here.
	 */
	/* kprintf("about to search\n"); */
	for (rn = h->rnh_treetop; rn->rn_bit >= 0; ) {
		last = rn;
		/* kprintf("rn_bit %d, rn_bmask %x, xm[rn_offset] %x\n",
		       rn->rn_bit, rn->rn_bmask, xm[rn->rn_offset]); */
		if (!(rn->rn_bmask & xm[rn->rn_offset])) {
			break;
		}
		if (rn->rn_bmask & xa[rn->rn_offset]) {
			rn = rn->rn_right;
		} else {
			rn = rn->rn_left;
		}
	}
	/* kprintf("done searching\n"); */

	/*
	 * Two cases: either we stepped off the end of our mask,
	 * in which case last == rn, or we reached a leaf, in which
	 * case we want to start from the last node we looked at.
	 * Either way, last is the node we want to start from.
	 */
	rn = last;
	lastb = rn->rn_bit;

	/* kprintf("rn %p, lastb %d\n", rn, lastb);*/

	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	while (rn->rn_bit >= 0)
		rn = rn->rn_left;

	while (!stopping) {
		/* kprintf("node %p (%d)\n", rn, rn->rn_bit); */
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_parent->rn_right == rn &&
		    !(rn->rn_flags & RNF_ROOT)) {
			rn = rn->rn_parent;

			/* if went up beyond last, stop */
			if (rn->rn_bit < lastb) {
				stopping = TRUE;
				/* kprintf("up too far\n"); */
			}
		}

		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;)
			rn = rn->rn_left;
		next = rn;
		/* Process leaves */
		while ((rn = base) != NULL) {
			base = rn->rn_dupedkey;
			/* kprintf("leaf %p\n", rn); */
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;

		if (rn->rn_flags & RNF_ROOT) {
			/* kprintf("root, stopping"); */
			stopping = TRUE;
		}
	}
	return 0;
}
static int
rn_walktree_at(struct radix_node_head *h, const char *a, const char *m,
	       walktree_f_t *f, void *w)
{
	struct radix_node *base, *next;
	struct radix_node *rn = h->rnh_treetop;
	int error;

	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	if (a == NULL) {
		/* First time through node, go left */
		while (rn->rn_bit >= 0)
			rn = rn->rn_left;
	} else if (m != NULL) {
		rn = rn_search_m(a, rn, m);
	} else {
		rn = rn_search(a, rn);
	}
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_parent->rn_right == rn &&
		    !(rn->rn_flags & RNF_ROOT))
			rn = rn->rn_parent;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;)
			rn = rn->rn_left;
		next = rn;
		/* Process leaves */
		while ((rn = base)) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
	/* NOTREACHED */
}
static int
rn_walktree(struct radix_node_head *h, walktree_f_t *f, void *w)
{
	return rn_walktree_at(h, NULL, NULL, f, w);
}
int
rn_inithead(void **head, struct radix_node_head *maskhead, int off)
{
	struct radix_node_head *rnh;
	struct radix_node *root, *left, *right;

	if (*head != NULL)	/* already initialized */
		return (1);

	R_Malloc(rnh, struct radix_node_head *, sizeof *rnh);
	if (rnh == NULL)
		return (0);
	bzero(rnh, sizeof *rnh);
	*head = rnh;

	root = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	right = &rnh->rnh_nodes[2];
	root->rn_parent = root;
	root->rn_flags = RNF_ROOT | RNF_ACTIVE;
	root->rn_right = right;

	left = root->rn_left;
	left->rn_bit = -1 - off;
	left->rn_flags = RNF_ROOT | RNF_ACTIVE;

	*right = *left;
	right->rn_key = rn_ones;

	rnh->rnh_treetop = root;
	rnh->rnh_maskhead = maskhead;

	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_lookup = rn_lookup;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_walktree_from = rn_walktree_from;
	rnh->rnh_walktree_at = rn_walktree_at;

	return (1);
}
void
rn_init(void)
{
	struct domain *dom;
	int cpu;

	SLIST_FOREACH(dom, &domains, dom_next) {
		if (dom->dom_maxrtkey > RN_MAXKEYLEN) {
			panic("domain %s maxkey too big %d/%d",
			      dom->dom_name, dom->dom_maxrtkey, RN_MAXKEYLEN);
		}
	}

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if (rn_inithead((void **)&mask_rnheads[cpu], NULL, 0) == 0)
			panic("rn_init 2");
	}
}
struct radix_node_head *
rn_cpumaskhead(int cpu)
{
	KKASSERT(mask_rnheads[cpu] != NULL);
	return mask_rnheads[cpu];
}
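/*
 * Usage sketch (not part of the original file; keys, values, and the
 * function name are illustrative).  A caller initializes a tree over a
 * per-cpu mask head and then inserts and looks up counted byte arrays,
 * where byte 0 is the length and the key proper starts at bit 8:
 */
#if 0
static void
radix_example(void)
{
	static struct radix_node nodes[2];
	/* length byte first, then four address bytes (10.1.0.0/16) */
	static char key[5]  = { 5, 10, 1, 0, 0 };
	static char mask[5] = { 5, 0xff, 0xff, 0, 0 };
	static char dst[5]  = { 5, 10, 1, 200, 7 };
	struct radix_node_head *rnh = NULL;
	struct radix_node *rn;

	if (rn_inithead((void **)&rnh, rn_cpumaskhead(mycpuid), 8) == 0)
		return;
	rn = rnh->rnh_addaddr(key, mask, rnh, nodes);
	rn = rnh->rnh_matchaddr(dst, rnh);	/* finds the /16 route */
}
#endif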