/*	$FreeBSD: src/sys/contrib/pf/net/pf_table.c,v 1.5 2004/07/28 06:14:44 kan Exp $	*/
/*	$OpenBSD: pf_table.c,v 1.47 2004/03/09 21:44:41 mcbride Exp $	*/
/*	$DragonFly: src/sys/net/pf/pf_table.c,v 1.5 2006/12/22 23:44:57 swildner Exp $	*/

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 2002 Cedric Berger
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <vm/vm_zone.h>

#include <net/route.h>
#include <netinet/in.h>
#include <net/pf/pfvar.h>
#define ACCEPT_FLAGS(oklist)			\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)
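/*
 * COPYIN/COPYOUT cross the user/kernel boundary for ioctl callers
 * (PFR_FLAG_USERIOCTL set) and fall back to a plain bcopy() for
 * in-kernel callers; both expand to an expression that yields 0 on
 * success, matching copyin()/copyout().
 */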
#define COPYIN(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size)			\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)
#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)
#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
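/*
 * A pfr_walktree context is handed to the radix-tree walker below;
 * pfrw_op selects the action applied to each entry (mark, sweep,
 * enqueue, copy out, pool lookup, or dynaddr update) and the union
 * carries the op-specific destination.
 */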
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free
#define senderr(e)	do { rv = (e); goto _bad; } while (0)
vm_zone_t		 pfr_ktable_pl;
vm_zone_t		 pfr_kentry_pl;
struct sockaddr_in	 pfr_sin;
struct sockaddr_in6	 pfr_sin6;
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;
void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *);
int			 pfr_validate_table(struct pfr_table *, int, int);
void			 pfr_commit_ktable(struct pfr_ktable *, long);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, long, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, long, int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);
RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;
void
pfr_initialize(void)
{
	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
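/*
 * The ioctl backends below share a flag convention: PFR_FLAG_DUMMY
 * computes the would-be counters without committing any change,
 * PFR_FLAG_ATOMIC wraps the commit in a critical section, and
 * PFR_FLAG_FEEDBACK reports a per-address result code to the caller.
 */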
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		if (kt->pfrkt_cnt) {
			kprintf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &workq, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_remove_kentries(kt, &workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			rv = 0;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		kprintf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		kprintf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad)))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_kentries(&workq, 0, 0);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		crit_enter(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup((char *)&sa, (char *)&mask,
		    head);
		crit_exit();
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match((char *)&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}
void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	pool_put(&pfr_kentry_pl, ke);
}
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			kprintf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}
void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}
void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		crit_enter();
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		crit_exit();
		p->pfrke_tzero = tzero;
	}
}
void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad)))
			break;
	}
}
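/*
 * Build a sockaddr holding the netmask for a prefix of 'net' bits in
 * the given address family; used both for radix-tree inserts/lookups
 * and for computing dynaddr masks.
 */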
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
	} else {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    htonl(-1 << (32-net));
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute((char *)&ke->pfrke_sa, (char *)&mask, head,
		    ke->pfrke_node);
	} else
		rn = rn_addroute((char *)&ke->pfrke_sa, NULL, head,
		    ke->pfrke_node);
	crit_exit();

	return (rn == NULL ? -1 : 0);
}
int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else
		head = kt->pfrkt_ip6;

	crit_enter();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete((char *)&ke->pfrke_sa, (char *)&mask, head);
	} else
		rn = rn_delete((char *)&ke->pfrke_sa, NULL, head);
	crit_exit();

	if (rn == NULL) {
		kprintf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}
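/*
 * Radix-tree walker callback; dispatches on pfrw_op.  Returning
 * nonzero aborts the walk: that stops the search at the idx-th entry
 * for PFRW_POOL_GET and propagates copyout failures for the GET ops.
 */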
int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			crit_enter();
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			crit_exit();
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			bzero(key.pfrkt_ruleset, sizeof(key.pfrkt_ruleset));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
			return (EFAULT);
	}
	if (n) {
		kprintf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
					/* XXX PFR_FLAG_CLSTATS disabled */
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	if (flags & PFR_FLAG_ATOMIC)
		crit_enter();
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_enter();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
			crit_exit();
			return (EFAULT);
		}
		if (!(flags & PFR_FLAG_ATOMIC))
			crit_exit();
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (flags & PFR_FLAG_ATOMIC)
		crit_exit();
	if (n) {
		kprintf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		pfr_setflags_ktables(&workq);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor, tbl->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof(ad)))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor, trs->pfrt_ruleset);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		if (flags & PFR_FLAG_ATOMIC)
			crit_enter();
		SLIST_FOREACH(p, &workq, pfrkt_workq)
			pfr_commit_ktable(p, tzero);
		if (flags & PFR_FLAG_ATOMIC)
			crit_exit();
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
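/*
 * Fold a shadow table into its active counterpart: an address-less
 * shadow only clears stats, a populated shadow against an active table
 * is merged entry by entry, and otherwise the radix heads are simply
 * swapped in wholesale.
 */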
void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
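/*
 * Reject table specs with an empty or non-NUL-terminated name, a
 * reserved anchor (when no_reserved is set), or flags outside
 * 'allowedflags'.
 */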
int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}
int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;
	struct pf_anchor *ac;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_ruleset[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor,
		    filter->pfrt_ruleset);
		return ((rs != NULL) ? rs->tables : -1);
	}
	if (filter->pfrt_anchor[0]) {
		ac = pf_find_anchor(filter->pfrt_anchor);
		return ((ac != NULL) ? ac->tables : -1);
	}
	return (pf_main_ruleset.tables);
}
int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strncmp(filter->pfrt_anchor, kt->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE))
		return (1);
	if (!filter->pfrt_ruleset[0])
		return (0);
	if (strncmp(filter->pfrt_ruleset, kt->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE))
		return (1);
	return (0);
}
void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_setflags_ktable(p, p->pfrkt_nflags);
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	crit_enter();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	crit_exit();
	kt->pfrkt_tzero = tzero;
}
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor,
		    tbl->pfrt_ruleset);
		if (rs == NULL) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
		if (rs->anchor != NULL)
			rs->anchor->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		kfree((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		kfree((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		if (kt->pfrkt_rs->anchor != NULL)
			kt->pfrkt_rs->anchor->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	if ((d = strncmp(p->pfrkt_anchor, q->pfrkt_anchor,
	    PF_ANCHOR_NAME_SIZE)))
		return (d);
	return (strncmp(p->pfrkt_ruleset, q->pfrkt_ruleset,
	    PF_RULESET_NAME_SIZE));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin,
		    kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match((char *)&pfr_sin6,
		    kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			kprintf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->name, sizeof(tbl.pfrt_anchor));
		strlcpy(tbl.pfrt_ruleset, rs->name, sizeof(tbl.pfrt_ruleset));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			bzero(tbl.pfrt_ruleset, sizeof(tbl.pfrt_ruleset));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		kprintf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}
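/*
 * Pick the next address for a round-robin pool: walk the table block
 * by block (pidx/counter carry the position between calls), skipping
 * nested sub-blocks so each address is handed out once per cycle.
 */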
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	addr = (af == AF_INET) ? (struct pf_addr *)&pfr_sin.sin_addr :
	    (struct pf_addr *)&pfr_sin6.sin6_addr;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL)
		return (1);
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		ke2 = (struct pfr_kentry *)(af == AF_INET ?
		    rn_match((char *)&pfr_sin, kt->pfrkt_ip4) :
		    rn_match((char *)&pfr_sin6, kt->pfrkt_ip6));
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
	default:
		return (NULL);
	}
}
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	crit_enter();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	crit_exit();
}