1 /* $OpenBSD: pf_table.c,v 1.78 2008/06/14 03:50:14 art Exp $ */
4 * Copyright (c) 2010 The DragonFly Project. All rights reserved.
6 * Copyright (c) 2002 Cedric Berger
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
36 #include "opt_inet6.h"
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/socket.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/thread2.h>
47 #include <net/route.h>
48 #include <netinet/in.h>
49 #include <net/pf/pfvar.h>
51 #define ACCEPT_FLAGS(flags, oklist) \
53 if ((flags & ~(oklist)) & \
58 #define COPYIN(from, to, size, flags) \
59 ((flags & PFR_FLAG_USERIOCTL) ? \
60 copyin((from), (to), (size)) : \
61 (bcopy((from), (to), (size)), 0))
63 #define COPYOUT(from, to, size, flags) \
64 ((flags & PFR_FLAG_USERIOCTL) ? \
65 copyout((from), (to), (size)) : \
66 (bcopy((from), (to), (size)), 0))
68 #define FILLIN_SIN(sin, addr) \
70 (sin).sin_len = sizeof(sin); \
71 (sin).sin_family = AF_INET; \
72 (sin).sin_addr = (addr); \
75 #define FILLIN_SIN6(sin6, addr) \
77 (sin6).sin6_len = sizeof(sin6); \
78 (sin6).sin6_family = AF_INET6; \
79 (sin6).sin6_addr = (addr); \
82 #define SWAP(type, a1, a2) \
89 #define SUNION2PF(su, af) (((af)==AF_INET) ? \
90 (struct pf_addr *)&(su)->sin.sin_addr : \
91 (struct pf_addr *)&(su)->sin6.sin6_addr)
93 #define AF_BITS(af) (((af)==AF_INET)?32:128)
94 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
95 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
96 #define KENTRY_RNF_ROOT(ke) \
97 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
99 #define NO_ADDRESSES (-1)
100 #define ENQUEUE_UNMARKED_ONLY (1)
101 #define INVERT_NEG_FLAG (1)
103 static MALLOC_DEFINE(M_PFRKTABLEPL
, "pfrktable", "pf radix table pool list");
104 static MALLOC_DEFINE(M_PFRKENTRYPL
, "pfrkentry", "pf radix entry pool list");
105 static MALLOC_DEFINE(M_PFRKENTRYPL2
, "pfrkentry2", "pf radix entry 2 pool list");
106 static MALLOC_DEFINE(M_PFRKCOUNTERSPL
, "pfrkcounters", "pf radix counters");
108 struct pfr_walktree
{
119 struct pfr_addr
*pfrw1_addr
;
120 struct pfr_astats
*pfrw1_astats
;
121 struct pfr_kentryworkq
*pfrw1_workq
;
122 struct pfr_kentry
*pfrw1_kentry
;
123 struct pfi_dynaddr
*pfrw1_dyn
;
128 #define pfrw_addr pfrw_1.pfrw1_addr
129 #define pfrw_astats pfrw_1.pfrw1_astats
130 #define pfrw_workq pfrw_1.pfrw1_workq
131 #define pfrw_kentry pfrw_1.pfrw1_kentry
132 #define pfrw_dyn pfrw_1.pfrw1_dyn
133 #define pfrw_cnt pfrw_free
135 #define senderr(e) do { rv = (e); goto _bad; } while (0)
136 struct malloc_type
*pfr_ktable_pl
;
137 struct malloc_type
*pfr_kentry_pl
;
138 struct malloc_type
*pfr_kentry_pl2
;
139 static struct pf_addr pfr_ffaddr
; /* constant after setup */
141 void pfr_copyout_addr(struct pfr_addr
*,
142 struct pfr_kentry
*ke
);
143 int pfr_validate_addr(struct pfr_addr
*);
144 void pfr_enqueue_addrs(struct pfr_ktable
*,
145 struct pfr_kentryworkq
*, int *, int);
146 void pfr_mark_addrs(struct pfr_ktable
*);
147 struct pfr_kentry
*pfr_lookup_addr(struct pfr_ktable
*,
148 struct pfr_addr
*, int);
149 struct pfr_kentry
*pfr_create_kentry(struct pfr_addr
*, int);
150 void pfr_destroy_kentries(struct pfr_kentryworkq
*);
151 void pfr_destroy_kentry(struct pfr_kentry
*);
152 void pfr_insert_kentries(struct pfr_ktable
*,
153 struct pfr_kentryworkq
*, long);
154 void pfr_remove_kentries(struct pfr_ktable
*,
155 struct pfr_kentryworkq
*);
156 void pfr_clstats_kentries(struct pfr_kentryworkq
*, long,
158 void pfr_reset_feedback(struct pfr_addr
*, int, int);
159 void pfr_prepare_network(union sockaddr_union
*, int, int);
160 int pfr_route_kentry(struct pfr_ktable
*,
161 struct pfr_kentry
*);
162 int pfr_unroute_kentry(struct pfr_ktable
*,
163 struct pfr_kentry
*);
164 int pfr_walktree(struct radix_node
*, void *);
165 int pfr_validate_table(struct pfr_table
*, int, int);
166 int pfr_fix_anchor(char *);
167 void pfr_commit_ktable(struct pfr_ktable
*, long);
168 void pfr_insert_ktables(struct pfr_ktableworkq
*);
169 void pfr_insert_ktable(struct pfr_ktable
*);
170 void pfr_setflags_ktables(struct pfr_ktableworkq
*);
171 void pfr_setflags_ktable(struct pfr_ktable
*, int);
172 void pfr_clstats_ktables(struct pfr_ktableworkq
*, long,
174 void pfr_clstats_ktable(struct pfr_ktable
*, long, int);
175 struct pfr_ktable
*pfr_create_ktable(struct pfr_table
*, long, int);
176 void pfr_destroy_ktables(struct pfr_ktableworkq
*, int);
177 void pfr_destroy_ktable(struct pfr_ktable
*, int);
178 int pfr_ktable_compare(struct pfr_ktable
*,
179 struct pfr_ktable
*);
180 struct pfr_ktable
*pfr_lookup_table(struct pfr_table
*);
181 void pfr_clean_node_mask(struct pfr_ktable
*,
182 struct pfr_kentryworkq
*);
183 int pfr_table_count(struct pfr_table
*, int);
184 int pfr_skip_table(struct pfr_table
*,
185 struct pfr_ktable
*, int);
186 struct pfr_kentry
*pfr_kentry_byidx(struct pfr_ktable
*, int, int);
188 RB_PROTOTYPE(pfr_ktablehead
, pfr_ktable
, pfrkt_tree
, pfr_ktable_compare
);
189 RB_GENERATE(pfr_ktablehead
, pfr_ktable
, pfrkt_tree
, pfr_ktable_compare
);
191 struct pfr_ktablehead pfr_ktables
;
192 struct pfr_table pfr_nulltable
;
198 memset(&pfr_ffaddr
, 0xff, sizeof(pfr_ffaddr
));
202 pfr_clr_addrs(struct pfr_table
*tbl
, int *ndel
, int flags
)
204 struct pfr_ktable
*kt
;
205 struct pfr_kentryworkq workq
;
207 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
208 if (pfr_validate_table(tbl
, 0, flags
& PFR_FLAG_USERIOCTL
))
210 kt
= pfr_lookup_table(tbl
);
211 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
213 if (kt
->pfrkt_flags
& PFR_TFLAG_CONST
)
215 pfr_enqueue_addrs(kt
, &workq
, ndel
, 0);
217 if (!(flags
& PFR_FLAG_DUMMY
)) {
218 if (flags
& PFR_FLAG_ATOMIC
)
220 pfr_remove_kentries(kt
, &workq
);
221 if (flags
& PFR_FLAG_ATOMIC
)
224 kprintf("pfr_clr_addrs: corruption detected (%d).\n",
233 pfr_add_addrs(struct pfr_table
*tbl
, struct pfr_addr
*addr
, int size
,
234 int *nadd
, int flags
)
236 struct pfr_ktable
*kt
, *tmpkt
;
237 struct pfr_kentryworkq workq
;
238 struct pfr_kentry
*p
, *q
;
241 long tzero
= time_second
;
243 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
245 if (pfr_validate_table(tbl
, 0, flags
& PFR_FLAG_USERIOCTL
))
247 kt
= pfr_lookup_table(tbl
);
248 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
250 if (kt
->pfrkt_flags
& PFR_TFLAG_CONST
)
252 tmpkt
= pfr_create_ktable(&pfr_nulltable
, 0, 0);
256 for (i
= 0; i
< size
; i
++) {
257 if (COPYIN(addr
+i
, &ad
, sizeof(ad
), flags
))
259 if (pfr_validate_addr(&ad
))
261 p
= pfr_lookup_addr(kt
, &ad
, 1);
262 q
= pfr_lookup_addr(tmpkt
, &ad
, 1);
263 if (flags
& PFR_FLAG_FEEDBACK
) {
265 ad
.pfra_fback
= PFR_FB_DUPLICATE
;
267 ad
.pfra_fback
= PFR_FB_ADDED
;
268 else if (p
->pfrke_not
!= ad
.pfra_not
)
269 ad
.pfra_fback
= PFR_FB_CONFLICT
;
271 ad
.pfra_fback
= PFR_FB_NONE
;
273 if (p
== NULL
&& q
== NULL
) {
274 p
= pfr_create_kentry(&ad
,
275 !(flags
& PFR_FLAG_USERIOCTL
));
278 if (pfr_route_kentry(tmpkt
, p
)) {
279 pfr_destroy_kentry(p
);
280 ad
.pfra_fback
= PFR_FB_NONE
;
282 SLIST_INSERT_HEAD(&workq
, p
, pfrke_workq
);
286 if (flags
& PFR_FLAG_FEEDBACK
)
287 if (COPYOUT(&ad
, addr
+i
, sizeof(ad
), flags
))
290 pfr_clean_node_mask(tmpkt
, &workq
);
291 if (!(flags
& PFR_FLAG_DUMMY
)) {
292 if (flags
& PFR_FLAG_ATOMIC
)
294 pfr_insert_kentries(kt
, &workq
, tzero
);
295 if (flags
& PFR_FLAG_ATOMIC
)
298 pfr_destroy_kentries(&workq
);
301 pfr_destroy_ktable(tmpkt
, 0);
304 pfr_clean_node_mask(tmpkt
, &workq
);
305 pfr_destroy_kentries(&workq
);
306 if (flags
& PFR_FLAG_FEEDBACK
)
307 pfr_reset_feedback(addr
, size
, flags
);
308 pfr_destroy_ktable(tmpkt
, 0);
313 pfr_del_addrs(struct pfr_table
*tbl
, struct pfr_addr
*addr
, int size
,
314 int *ndel
, int flags
)
316 struct pfr_ktable
*kt
;
317 struct pfr_kentryworkq workq
;
318 struct pfr_kentry
*p
;
320 int i
, rv
, xdel
= 0, log
= 1;
322 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
324 if (pfr_validate_table(tbl
, 0, flags
& PFR_FLAG_USERIOCTL
))
326 kt
= pfr_lookup_table(tbl
);
327 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
329 if (kt
->pfrkt_flags
& PFR_TFLAG_CONST
)
332 * there are two algorithms to choose from here.
334 * n: number of addresses to delete
335 * N: number of addresses in the table
337 * one is O(N) and is better for large 'n'
338 * one is O(n*LOG(N)) and is better for small 'n'
340 * following code try to decide which one is best.
342 for (i
= kt
->pfrkt_cnt
; i
> 0; i
>>= 1)
344 if (size
> kt
->pfrkt_cnt
/log
) {
345 /* full table scan */
348 /* iterate over addresses to delete */
349 for (i
= 0; i
< size
; i
++) {
350 if (COPYIN(addr
+i
, &ad
, sizeof(ad
), flags
))
352 if (pfr_validate_addr(&ad
))
354 p
= pfr_lookup_addr(kt
, &ad
, 1);
360 for (i
= 0; i
< size
; i
++) {
361 if (COPYIN(addr
+i
, &ad
, sizeof(ad
), flags
))
363 if (pfr_validate_addr(&ad
))
365 p
= pfr_lookup_addr(kt
, &ad
, 1);
366 if (flags
& PFR_FLAG_FEEDBACK
) {
368 ad
.pfra_fback
= PFR_FB_NONE
;
369 else if (p
->pfrke_not
!= ad
.pfra_not
)
370 ad
.pfra_fback
= PFR_FB_CONFLICT
;
371 else if (p
->pfrke_mark
)
372 ad
.pfra_fback
= PFR_FB_DUPLICATE
;
374 ad
.pfra_fback
= PFR_FB_DELETED
;
376 if (p
!= NULL
&& p
->pfrke_not
== ad
.pfra_not
&&
379 SLIST_INSERT_HEAD(&workq
, p
, pfrke_workq
);
382 if (flags
& PFR_FLAG_FEEDBACK
)
383 if (COPYOUT(&ad
, addr
+i
, sizeof(ad
), flags
))
386 if (!(flags
& PFR_FLAG_DUMMY
)) {
387 if (flags
& PFR_FLAG_ATOMIC
)
389 pfr_remove_kentries(kt
, &workq
);
390 if (flags
& PFR_FLAG_ATOMIC
)
397 if (flags
& PFR_FLAG_FEEDBACK
)
398 pfr_reset_feedback(addr
, size
, flags
);
403 pfr_set_addrs(struct pfr_table
*tbl
, struct pfr_addr
*addr
, int size
,
404 int *size2
, int *nadd
, int *ndel
, int *nchange
, int flags
,
405 u_int32_t ignore_pfrt_flags
)
407 struct pfr_ktable
*kt
, *tmpkt
;
408 struct pfr_kentryworkq addq
, delq
, changeq
;
409 struct pfr_kentry
*p
, *q
;
411 int i
, rv
, xadd
= 0, xdel
= 0, xchange
= 0;
412 long tzero
= time_second
;
414 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
416 if (pfr_validate_table(tbl
, ignore_pfrt_flags
, flags
&
419 kt
= pfr_lookup_table(tbl
);
420 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
422 if (kt
->pfrkt_flags
& PFR_TFLAG_CONST
)
424 tmpkt
= pfr_create_ktable(&pfr_nulltable
, 0, 0);
430 SLIST_INIT(&changeq
);
431 for (i
= 0; i
< size
; i
++) {
432 if (COPYIN(addr
+i
, &ad
, sizeof(ad
), flags
))
434 if (pfr_validate_addr(&ad
))
436 ad
.pfra_fback
= PFR_FB_NONE
;
437 p
= pfr_lookup_addr(kt
, &ad
, 1);
440 ad
.pfra_fback
= PFR_FB_DUPLICATE
;
444 if (p
->pfrke_not
!= ad
.pfra_not
) {
445 SLIST_INSERT_HEAD(&changeq
, p
, pfrke_workq
);
446 ad
.pfra_fback
= PFR_FB_CHANGED
;
450 q
= pfr_lookup_addr(tmpkt
, &ad
, 1);
452 ad
.pfra_fback
= PFR_FB_DUPLICATE
;
455 p
= pfr_create_kentry(&ad
,
456 !(flags
& PFR_FLAG_USERIOCTL
));
459 if (pfr_route_kentry(tmpkt
, p
)) {
460 pfr_destroy_kentry(p
);
461 ad
.pfra_fback
= PFR_FB_NONE
;
463 SLIST_INSERT_HEAD(&addq
, p
, pfrke_workq
);
464 ad
.pfra_fback
= PFR_FB_ADDED
;
469 if (flags
& PFR_FLAG_FEEDBACK
)
470 if (COPYOUT(&ad
, addr
+i
, sizeof(ad
), flags
))
473 pfr_enqueue_addrs(kt
, &delq
, &xdel
, ENQUEUE_UNMARKED_ONLY
);
474 if ((flags
& PFR_FLAG_FEEDBACK
) && *size2
) {
475 if (*size2
< size
+xdel
) {
480 SLIST_FOREACH(p
, &delq
, pfrke_workq
) {
481 pfr_copyout_addr(&ad
, p
);
482 ad
.pfra_fback
= PFR_FB_DELETED
;
483 if (COPYOUT(&ad
, addr
+size
+i
, sizeof(ad
), flags
))
488 pfr_clean_node_mask(tmpkt
, &addq
);
489 if (!(flags
& PFR_FLAG_DUMMY
)) {
490 if (flags
& PFR_FLAG_ATOMIC
)
492 pfr_insert_kentries(kt
, &addq
, tzero
);
493 pfr_remove_kentries(kt
, &delq
);
494 pfr_clstats_kentries(&changeq
, tzero
, INVERT_NEG_FLAG
);
495 if (flags
& PFR_FLAG_ATOMIC
)
498 pfr_destroy_kentries(&addq
);
505 if ((flags
& PFR_FLAG_FEEDBACK
) && size2
)
507 pfr_destroy_ktable(tmpkt
, 0);
510 pfr_clean_node_mask(tmpkt
, &addq
);
511 pfr_destroy_kentries(&addq
);
512 if (flags
& PFR_FLAG_FEEDBACK
)
513 pfr_reset_feedback(addr
, size
, flags
);
514 pfr_destroy_ktable(tmpkt
, 0);
519 pfr_tst_addrs(struct pfr_table
*tbl
, struct pfr_addr
*addr
, int size
,
520 int *nmatch
, int flags
)
522 struct pfr_ktable
*kt
;
523 struct pfr_kentry
*p
;
527 ACCEPT_FLAGS(flags
, PFR_FLAG_REPLACE
);
528 if (pfr_validate_table(tbl
, 0, 0))
530 kt
= pfr_lookup_table(tbl
);
531 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
534 for (i
= 0; i
< size
; i
++) {
535 if (COPYIN(addr
+i
, &ad
, sizeof(ad
), flags
))
537 if (pfr_validate_addr(&ad
))
539 if (ADDR_NETWORK(&ad
))
541 p
= pfr_lookup_addr(kt
, &ad
, 0);
542 if (flags
& PFR_FLAG_REPLACE
)
543 pfr_copyout_addr(&ad
, p
);
544 ad
.pfra_fback
= (p
== NULL
) ? PFR_FB_NONE
:
545 (p
->pfrke_not
? PFR_FB_NOTMATCH
: PFR_FB_MATCH
);
546 if (p
!= NULL
&& !p
->pfrke_not
)
548 if (COPYOUT(&ad
, addr
+i
, sizeof(ad
), flags
))
557 pfr_get_addrs(struct pfr_table
*tbl
, struct pfr_addr
*addr
, int *size
,
560 struct pfr_ktable
*kt
;
561 struct pfr_walktree w
;
564 ACCEPT_FLAGS(flags
, 0);
565 if (pfr_validate_table(tbl
, 0, 0))
567 kt
= pfr_lookup_table(tbl
);
568 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
570 if (kt
->pfrkt_cnt
> *size
) {
571 *size
= kt
->pfrkt_cnt
;
575 bzero(&w
, sizeof(w
));
576 w
.pfrw_op
= PFRW_GET_ADDRS
;
578 w
.pfrw_free
= kt
->pfrkt_cnt
;
579 w
.pfrw_flags
= flags
;
580 rv
= kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
, pfr_walktree
, &w
);
582 rv
= kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
, pfr_walktree
, &w
);
587 kprintf("pfr_get_addrs: corruption detected (%d).\n",
591 *size
= kt
->pfrkt_cnt
;
596 pfr_get_astats(struct pfr_table
*tbl
, struct pfr_astats
*addr
, int *size
,
599 struct pfr_ktable
*kt
;
600 struct pfr_walktree w
;
601 struct pfr_kentryworkq workq
;
603 long tzero
= time_second
;
605 /* XXX PFR_FLAG_CLSTATS disabled */
606 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
);
607 if (pfr_validate_table(tbl
, 0, 0))
609 kt
= pfr_lookup_table(tbl
);
610 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
612 if (kt
->pfrkt_cnt
> *size
) {
613 *size
= kt
->pfrkt_cnt
;
617 bzero(&w
, sizeof(w
));
618 w
.pfrw_op
= PFRW_GET_ASTATS
;
619 w
.pfrw_astats
= addr
;
620 w
.pfrw_free
= kt
->pfrkt_cnt
;
621 w
.pfrw_flags
= flags
;
622 if (flags
& PFR_FLAG_ATOMIC
)
624 rv
= kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
, pfr_walktree
, &w
);
626 rv
= kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
, pfr_walktree
, &w
);
627 if (!rv
&& (flags
& PFR_FLAG_CLSTATS
)) {
628 pfr_enqueue_addrs(kt
, &workq
, NULL
, 0);
629 pfr_clstats_kentries(&workq
, tzero
, 0);
631 if (flags
& PFR_FLAG_ATOMIC
)
637 kprintf("pfr_get_astats: corruption detected (%d).\n",
641 *size
= kt
->pfrkt_cnt
;
646 pfr_clr_astats(struct pfr_table
*tbl
, struct pfr_addr
*addr
, int size
,
647 int *nzero
, int flags
)
649 struct pfr_ktable
*kt
;
650 struct pfr_kentryworkq workq
;
651 struct pfr_kentry
*p
;
653 int i
, rv
, xzero
= 0;
655 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
657 if (pfr_validate_table(tbl
, 0, 0))
659 kt
= pfr_lookup_table(tbl
);
660 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
663 for (i
= 0; i
< size
; i
++) {
664 if (COPYIN(addr
+i
, &ad
, sizeof(ad
), flags
))
666 if (pfr_validate_addr(&ad
))
668 p
= pfr_lookup_addr(kt
, &ad
, 1);
669 if (flags
& PFR_FLAG_FEEDBACK
) {
670 ad
.pfra_fback
= (p
!= NULL
) ?
671 PFR_FB_CLEARED
: PFR_FB_NONE
;
672 if (COPYOUT(&ad
, addr
+i
, sizeof(ad
), flags
))
676 SLIST_INSERT_HEAD(&workq
, p
, pfrke_workq
);
681 if (!(flags
& PFR_FLAG_DUMMY
)) {
682 if (flags
& PFR_FLAG_ATOMIC
)
684 pfr_clstats_kentries(&workq
, 0, 0);
685 if (flags
& PFR_FLAG_ATOMIC
)
692 if (flags
& PFR_FLAG_FEEDBACK
)
693 pfr_reset_feedback(addr
, size
, flags
);
698 pfr_validate_addr(struct pfr_addr
*ad
)
702 switch (ad
->pfra_af
) {
705 if (ad
->pfra_net
> 32)
711 if (ad
->pfra_net
> 128)
718 if (ad
->pfra_net
< 128 &&
719 (((caddr_t
)ad
)[ad
->pfra_net
/8] & (0xFF >> (ad
->pfra_net
%8))))
721 for (i
= (ad
->pfra_net
+7)/8; i
< sizeof(ad
->pfra_u
); i
++)
722 if (((caddr_t
)ad
)[i
])
724 if (ad
->pfra_not
&& ad
->pfra_not
!= 1)
732 pfr_enqueue_addrs(struct pfr_ktable
*kt
, struct pfr_kentryworkq
*workq
,
733 int *naddr
, int sweep
)
735 struct pfr_walktree w
;
738 bzero(&w
, sizeof(w
));
739 w
.pfrw_op
= sweep
? PFRW_SWEEP
: PFRW_ENQUEUE
;
740 w
.pfrw_workq
= workq
;
741 if (kt
->pfrkt_ip4
!= NULL
)
742 if (kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
, pfr_walktree
, &w
))
743 kprintf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
744 if (kt
->pfrkt_ip6
!= NULL
)
745 if (kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
, pfr_walktree
, &w
))
746 kprintf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
752 pfr_mark_addrs(struct pfr_ktable
*kt
)
754 struct pfr_walktree w
;
756 bzero(&w
, sizeof(w
));
757 w
.pfrw_op
= PFRW_MARK
;
758 if (kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
, pfr_walktree
, &w
))
759 kprintf("pfr_mark_addrs: IPv4 walktree failed.\n");
760 if (kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
, pfr_walktree
, &w
))
761 kprintf("pfr_mark_addrs: IPv6 walktree failed.\n");
766 pfr_lookup_addr(struct pfr_ktable
*kt
, struct pfr_addr
*ad
, int exact
)
768 union sockaddr_union sa
, mask
;
769 struct radix_node_head
*head
= NULL
;
770 struct pfr_kentry
*ke
;
772 bzero(&sa
, sizeof(sa
));
773 if (ad
->pfra_af
== AF_INET
) {
774 FILLIN_SIN(sa
.sin
, ad
->pfra_ip4addr
);
775 head
= kt
->pfrkt_ip4
;
776 } else if ( ad
->pfra_af
== AF_INET6
) {
777 FILLIN_SIN6(sa
.sin6
, ad
->pfra_ip6addr
);
778 head
= kt
->pfrkt_ip6
;
780 if (ADDR_NETWORK(ad
)) {
781 pfr_prepare_network(&mask
, ad
->pfra_af
, ad
->pfra_net
);
782 crit_enter(); /* rn_lookup makes use of globals */
783 ke
= (struct pfr_kentry
*)rn_lookup((char *)&sa
, (char *)&mask
,
786 if (ke
&& KENTRY_RNF_ROOT(ke
))
789 ke
= (struct pfr_kentry
*)rn_match((char *)&sa
, head
);
790 if (ke
&& KENTRY_RNF_ROOT(ke
))
792 if (exact
&& ke
&& KENTRY_NETWORK(ke
))
799 pfr_create_kentry(struct pfr_addr
*ad
, int intr
)
801 struct pfr_kentry
*ke
;
804 ke
= kmalloc(sizeof(struct pfr_kentry
), M_PFRKENTRYPL2
, M_NOWAIT
|M_ZERO
);
806 ke
= kmalloc(sizeof(struct pfr_kentry
), M_PFRKENTRYPL
, M_NOWAIT
|M_ZERO
|M_NULLOK
);
810 if (ad
->pfra_af
== AF_INET
)
811 FILLIN_SIN(ke
->pfrke_sa
.sin
, ad
->pfra_ip4addr
);
812 else if (ad
->pfra_af
== AF_INET6
)
813 FILLIN_SIN6(ke
->pfrke_sa
.sin6
, ad
->pfra_ip6addr
);
814 ke
->pfrke_af
= ad
->pfra_af
;
815 ke
->pfrke_net
= ad
->pfra_net
;
816 ke
->pfrke_not
= ad
->pfra_not
;
817 ke
->pfrke_intrpool
= intr
;
822 pfr_destroy_kentries(struct pfr_kentryworkq
*workq
)
824 struct pfr_kentry
*p
, *q
;
826 for (p
= SLIST_FIRST(workq
); p
!= NULL
; p
= q
) {
827 q
= SLIST_NEXT(p
, pfrke_workq
);
828 pfr_destroy_kentry(p
);
833 pfr_destroy_kentry(struct pfr_kentry
*ke
)
835 if (ke
->pfrke_counters
)
836 kfree(ke
->pfrke_counters
, M_PFRKCOUNTERSPL
);
837 if (ke
->pfrke_intrpool
)
838 kfree(ke
, M_PFRKENTRYPL2
);
840 kfree(ke
, M_PFRKENTRYPL
);
844 pfr_insert_kentries(struct pfr_ktable
*kt
,
845 struct pfr_kentryworkq
*workq
, long tzero
)
847 struct pfr_kentry
*p
;
850 SLIST_FOREACH(p
, workq
, pfrke_workq
) {
851 rv
= pfr_route_kentry(kt
, p
);
853 kprintf("pfr_insert_kentries: cannot route entry "
857 p
->pfrke_tzero
= tzero
;
864 pfr_insert_kentry(struct pfr_ktable
*kt
, struct pfr_addr
*ad
, long tzero
)
866 struct pfr_kentry
*p
;
869 p
= pfr_lookup_addr(kt
, ad
, 1);
872 p
= pfr_create_kentry(ad
, 1);
876 rv
= pfr_route_kentry(kt
, p
);
880 p
->pfrke_tzero
= tzero
;
887 pfr_remove_kentries(struct pfr_ktable
*kt
,
888 struct pfr_kentryworkq
*workq
)
890 struct pfr_kentry
*p
;
893 SLIST_FOREACH(p
, workq
, pfrke_workq
) {
894 pfr_unroute_kentry(kt
, p
);
898 pfr_destroy_kentries(workq
);
902 pfr_clean_node_mask(struct pfr_ktable
*kt
,
903 struct pfr_kentryworkq
*workq
)
905 struct pfr_kentry
*p
;
907 SLIST_FOREACH(p
, workq
, pfrke_workq
)
908 pfr_unroute_kentry(kt
, p
);
912 pfr_clstats_kentries(struct pfr_kentryworkq
*workq
, long tzero
, int negchange
)
914 struct pfr_kentry
*p
;
916 SLIST_FOREACH(p
, workq
, pfrke_workq
) {
919 p
->pfrke_not
= !p
->pfrke_not
;
920 if (p
->pfrke_counters
) {
921 kfree(p
->pfrke_counters
, M_PFRKCOUNTERSPL
);
922 p
->pfrke_counters
= NULL
;
925 p
->pfrke_tzero
= tzero
;
930 pfr_reset_feedback(struct pfr_addr
*addr
, int size
, int flags
)
935 for (i
= 0; i
< size
; i
++) {
936 if (COPYIN(addr
+i
, &ad
, sizeof(ad
), flags
))
938 ad
.pfra_fback
= PFR_FB_NONE
;
939 if (COPYOUT(&ad
, addr
+i
, sizeof(ad
), flags
))
945 pfr_prepare_network(union sockaddr_union
*sa
, int af
, int net
)
949 bzero(sa
, sizeof(*sa
));
951 sa
->sin
.sin_len
= sizeof(sa
->sin
);
952 sa
->sin
.sin_family
= AF_INET
;
953 sa
->sin
.sin_addr
.s_addr
= net
? htonl(-1 << (32-net
)) : 0;
954 } else if (af
== AF_INET6
) {
955 sa
->sin6
.sin6_len
= sizeof(sa
->sin6
);
956 sa
->sin6
.sin6_family
= AF_INET6
;
957 for (i
= 0; i
< 4; i
++) {
959 sa
->sin6
.sin6_addr
.s6_addr32
[i
] =
960 net
? htonl(-1 << (32-net
)) : 0;
963 sa
->sin6
.sin6_addr
.s6_addr32
[i
] = 0xFFFFFFFF;
970 pfr_route_kentry(struct pfr_ktable
*kt
, struct pfr_kentry
*ke
)
972 union sockaddr_union mask
;
973 struct radix_node
*rn
;
974 struct radix_node_head
*head
= NULL
;
976 bzero(ke
->pfrke_node
, sizeof(ke
->pfrke_node
));
977 if (ke
->pfrke_af
== AF_INET
)
978 head
= kt
->pfrkt_ip4
;
979 else if (ke
->pfrke_af
== AF_INET6
)
980 head
= kt
->pfrkt_ip6
;
983 if (KENTRY_NETWORK(ke
)) {
984 pfr_prepare_network(&mask
, ke
->pfrke_af
, ke
->pfrke_net
);
985 rn
= rn_addroute((char *)&ke
->pfrke_sa
, (char *)&mask
, head
,
988 rn
= rn_addroute((char *)&ke
->pfrke_sa
, NULL
, head
,
992 return (rn
== NULL
? -1 : 0);
996 pfr_unroute_kentry(struct pfr_ktable
*kt
, struct pfr_kentry
*ke
)
998 union sockaddr_union mask
;
999 struct radix_node
*rn
;
1000 struct radix_node_head
*head
= NULL
;
1002 if (ke
->pfrke_af
== AF_INET
)
1003 head
= kt
->pfrkt_ip4
;
1004 else if (ke
->pfrke_af
== AF_INET6
)
1005 head
= kt
->pfrkt_ip6
;
1008 if (KENTRY_NETWORK(ke
)) {
1009 pfr_prepare_network(&mask
, ke
->pfrke_af
, ke
->pfrke_net
);
1010 rn
= rn_delete((char *)&ke
->pfrke_sa
, (char *)&mask
, head
);
1012 rn
= rn_delete((char *)&ke
->pfrke_sa
, NULL
, head
);
1016 kprintf("pfr_unroute_kentry: delete failed.\n");
1023 pfr_copyout_addr(struct pfr_addr
*ad
, struct pfr_kentry
*ke
)
1025 bzero(ad
, sizeof(*ad
));
1028 ad
->pfra_af
= ke
->pfrke_af
;
1029 ad
->pfra_net
= ke
->pfrke_net
;
1030 ad
->pfra_not
= ke
->pfrke_not
;
1031 if (ad
->pfra_af
== AF_INET
)
1032 ad
->pfra_ip4addr
= ke
->pfrke_sa
.sin
.sin_addr
;
1033 else if (ad
->pfra_af
== AF_INET6
)
1034 ad
->pfra_ip6addr
= ke
->pfrke_sa
.sin6
.sin6_addr
;
1038 pfr_walktree(struct radix_node
*rn
, void *arg
)
1040 struct pfr_kentry
*ke
= (struct pfr_kentry
*)rn
;
1041 struct pfr_walktree
*w
= arg
;
1042 union sockaddr_union pfr_mask
;
1043 int flags
= w
->pfrw_flags
;
1045 switch (w
->pfrw_op
) {
1054 SLIST_INSERT_HEAD(w
->pfrw_workq
, ke
, pfrke_workq
);
1057 case PFRW_GET_ADDRS
:
1058 if (w
->pfrw_free
-- > 0) {
1061 pfr_copyout_addr(&ad
, ke
);
1062 if (copyout(&ad
, w
->pfrw_addr
, sizeof(ad
)))
1067 case PFRW_GET_ASTATS
:
1068 if (w
->pfrw_free
-- > 0) {
1069 struct pfr_astats as
;
1071 pfr_copyout_addr(&as
.pfras_a
, ke
);
1074 if (ke
->pfrke_counters
) {
1075 bcopy(ke
->pfrke_counters
->pfrkc_packets
,
1076 as
.pfras_packets
, sizeof(as
.pfras_packets
));
1077 bcopy(ke
->pfrke_counters
->pfrkc_bytes
,
1078 as
.pfras_bytes
, sizeof(as
.pfras_bytes
));
1080 bzero(as
.pfras_packets
, sizeof(as
.pfras_packets
));
1081 bzero(as
.pfras_bytes
, sizeof(as
.pfras_bytes
));
1082 as
.pfras_a
.pfra_fback
= PFR_FB_NOCOUNT
;
1085 as
.pfras_tzero
= ke
->pfrke_tzero
;
1087 if (COPYOUT(&as
, w
->pfrw_astats
, sizeof(as
), flags
))
1094 break; /* negative entries are ignored */
1095 if (!w
->pfrw_cnt
--) {
1096 w
->pfrw_kentry
= ke
;
1097 return (1); /* finish search */
1100 case PFRW_DYNADDR_UPDATE
:
1101 if (ke
->pfrke_af
== AF_INET
) {
1102 if (w
->pfrw_dyn
->pfid_acnt4
++ > 0)
1104 pfr_prepare_network(&pfr_mask
, AF_INET
, ke
->pfrke_net
);
1105 w
->pfrw_dyn
->pfid_addr4
= *SUNION2PF(
1106 &ke
->pfrke_sa
, AF_INET
);
1107 w
->pfrw_dyn
->pfid_mask4
= *SUNION2PF(
1108 &pfr_mask
, AF_INET
);
1109 } else if (ke
->pfrke_af
== AF_INET6
){
1110 if (w
->pfrw_dyn
->pfid_acnt6
++ > 0)
1112 pfr_prepare_network(&pfr_mask
, AF_INET6
, ke
->pfrke_net
);
1113 w
->pfrw_dyn
->pfid_addr6
= *SUNION2PF(
1114 &ke
->pfrke_sa
, AF_INET6
);
1115 w
->pfrw_dyn
->pfid_mask6
= *SUNION2PF(
1116 &pfr_mask
, AF_INET6
);
1124 pfr_clr_tables(struct pfr_table
*filter
, int *ndel
, int flags
)
1126 struct pfr_ktableworkq workq
;
1127 struct pfr_ktable
*p
;
1130 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
1132 if (pfr_fix_anchor(filter
->pfrt_anchor
))
1134 if (pfr_table_count(filter
, flags
) < 0)
1138 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1139 if (pfr_skip_table(filter
, p
, flags
))
1141 if (!strcmp(p
->pfrkt_anchor
, PF_RESERVED_ANCHOR
))
1143 if (!(p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
1145 p
->pfrkt_nflags
= p
->pfrkt_flags
& ~PFR_TFLAG_ACTIVE
;
1146 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1149 if (!(flags
& PFR_FLAG_DUMMY
)) {
1150 if (flags
& PFR_FLAG_ATOMIC
)
1152 pfr_setflags_ktables(&workq
);
1153 if (flags
& PFR_FLAG_ATOMIC
)
1162 pfr_add_tables(struct pfr_table
*tbl
, int size
, int *nadd
, int flags
)
1164 struct pfr_ktableworkq addq
, changeq
;
1165 struct pfr_ktable
*p
, *q
, *r
, key
;
1166 int i
, rv
, xadd
= 0;
1167 long tzero
= time_second
;
1169 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
1171 SLIST_INIT(&changeq
);
1172 for (i
= 0; i
< size
; i
++) {
1173 if (COPYIN(tbl
+i
, &key
.pfrkt_t
, sizeof(key
.pfrkt_t
), flags
))
1175 if (pfr_validate_table(&key
.pfrkt_t
, PFR_TFLAG_USRMASK
,
1176 flags
& PFR_FLAG_USERIOCTL
))
1178 key
.pfrkt_flags
|= PFR_TFLAG_ACTIVE
;
1179 p
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1181 p
= pfr_create_ktable(&key
.pfrkt_t
, tzero
, 1);
1184 SLIST_FOREACH(q
, &addq
, pfrkt_workq
) {
1185 if (!pfr_ktable_compare(p
, q
))
1188 SLIST_INSERT_HEAD(&addq
, p
, pfrkt_workq
);
1190 if (!key
.pfrkt_anchor
[0])
1193 /* find or create root table */
1194 bzero(key
.pfrkt_anchor
, sizeof(key
.pfrkt_anchor
));
1195 r
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1200 SLIST_FOREACH(q
, &addq
, pfrkt_workq
) {
1201 if (!pfr_ktable_compare(&key
, q
)) {
1206 key
.pfrkt_flags
= 0;
1207 r
= pfr_create_ktable(&key
.pfrkt_t
, 0, 1);
1210 SLIST_INSERT_HEAD(&addq
, r
, pfrkt_workq
);
1212 } else if (!(p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
1213 SLIST_FOREACH(q
, &changeq
, pfrkt_workq
)
1214 if (!pfr_ktable_compare(&key
, q
))
1216 p
->pfrkt_nflags
= (p
->pfrkt_flags
&
1217 ~PFR_TFLAG_USRMASK
) | key
.pfrkt_flags
;
1218 SLIST_INSERT_HEAD(&changeq
, p
, pfrkt_workq
);
1224 if (!(flags
& PFR_FLAG_DUMMY
)) {
1225 if (flags
& PFR_FLAG_ATOMIC
)
1227 pfr_insert_ktables(&addq
);
1228 pfr_setflags_ktables(&changeq
);
1229 if (flags
& PFR_FLAG_ATOMIC
)
1232 pfr_destroy_ktables(&addq
, 0);
1237 pfr_destroy_ktables(&addq
, 0);
1242 pfr_del_tables(struct pfr_table
*tbl
, int size
, int *ndel
, int flags
)
1244 struct pfr_ktableworkq workq
;
1245 struct pfr_ktable
*p
, *q
, key
;
1248 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
1250 for (i
= 0; i
< size
; i
++) {
1251 if (COPYIN(tbl
+i
, &key
.pfrkt_t
, sizeof(key
.pfrkt_t
), flags
))
1253 if (pfr_validate_table(&key
.pfrkt_t
, 0,
1254 flags
& PFR_FLAG_USERIOCTL
))
1256 p
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1257 if (p
!= NULL
&& (p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
1258 SLIST_FOREACH(q
, &workq
, pfrkt_workq
)
1259 if (!pfr_ktable_compare(p
, q
))
1261 p
->pfrkt_nflags
= p
->pfrkt_flags
& ~PFR_TFLAG_ACTIVE
;
1262 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1269 if (!(flags
& PFR_FLAG_DUMMY
)) {
1270 if (flags
& PFR_FLAG_ATOMIC
)
1272 pfr_setflags_ktables(&workq
);
1273 if (flags
& PFR_FLAG_ATOMIC
)
1282 pfr_get_tables(struct pfr_table
*filter
, struct pfr_table
*tbl
, int *size
,
1285 struct pfr_ktable
*p
;
1288 ACCEPT_FLAGS(flags
, PFR_FLAG_ALLRSETS
);
1289 if (pfr_fix_anchor(filter
->pfrt_anchor
))
1291 n
= nn
= pfr_table_count(filter
, flags
);
1298 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1299 if (pfr_skip_table(filter
, p
, flags
))
1303 if (COPYOUT(&p
->pfrkt_t
, tbl
++, sizeof(*tbl
), flags
))
1307 kprintf("pfr_get_tables: corruption detected (%d).\n", n
);
1315 pfr_get_tstats(struct pfr_table
*filter
, struct pfr_tstats
*tbl
, int *size
,
1318 struct pfr_ktable
*p
;
1319 struct pfr_ktableworkq workq
;
1321 long tzero
= time_second
;
1323 /* XXX PFR_FLAG_CLSTATS disabled */
1324 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_ALLRSETS
);
1325 if (pfr_fix_anchor(filter
->pfrt_anchor
))
1327 n
= nn
= pfr_table_count(filter
, flags
);
1335 if (flags
& PFR_FLAG_ATOMIC
)
1337 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1338 if (pfr_skip_table(filter
, p
, flags
))
1342 if (!(flags
& PFR_FLAG_ATOMIC
))
1344 if (COPYOUT(&p
->pfrkt_ts
, tbl
++, sizeof(*tbl
), flags
)) {
1348 if (!(flags
& PFR_FLAG_ATOMIC
))
1350 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1352 if (flags
& PFR_FLAG_CLSTATS
)
1353 pfr_clstats_ktables(&workq
, tzero
,
1354 flags
& PFR_FLAG_ADDRSTOO
);
1355 if (flags
& PFR_FLAG_ATOMIC
)
1358 kprintf("pfr_get_tstats: corruption detected (%d).\n", n
);
1366 pfr_clr_tstats(struct pfr_table
*tbl
, int size
, int *nzero
, int flags
)
1368 struct pfr_ktableworkq workq
;
1369 struct pfr_ktable
*p
, key
;
1371 long tzero
= time_second
;
1373 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
1376 for (i
= 0; i
< size
; i
++) {
1377 if (COPYIN(tbl
+i
, &key
.pfrkt_t
, sizeof(key
.pfrkt_t
), flags
))
1379 if (pfr_validate_table(&key
.pfrkt_t
, 0, 0))
1381 p
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1383 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1387 if (!(flags
& PFR_FLAG_DUMMY
)) {
1388 if (flags
& PFR_FLAG_ATOMIC
)
1390 pfr_clstats_ktables(&workq
, tzero
, flags
& PFR_FLAG_ADDRSTOO
);
1391 if (flags
& PFR_FLAG_ATOMIC
)
/*
 * pfr_set_tflags() - set and/or clear user-settable flags on the
 * tables named in tbl[0..size-1].
 *
 * setflag/clrflag must lie within PFR_TFLAG_USRMASK and must not
 * overlap.  For each active matching table a new flag word is staged
 * in pfrkt_nflags (duplicates on the queue are skipped via
 * pfr_ktable_compare); unless PFR_FLAG_DUMMY is set the queue is then
 * handed to pfr_setflags_ktables().
 *
 * NOTE(review): extraction gaps hide the error returns, the mask term
 * completing the pfrkt_nflags expression (line 1422) and the body of
 * the PERSIST-clear case (1429-1431) — presumably that case counts a
 * deletion into xdel; verify against the full file.
 */
1400 pfr_set_tflags(struct pfr_table
*tbl
, int size
, int setflag
, int clrflag
,
1401 int *nchange
, int *ndel
, int flags
)
1403 struct pfr_ktableworkq workq
;
1404 struct pfr_ktable
*p
, *q
, key
;
1405 int i
, xchange
= 0, xdel
= 0;
1407 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
/* reject flag bits outside the user mask, or set/clear overlap */
1408 if ((setflag
& ~PFR_TFLAG_USRMASK
) ||
1409 (clrflag
& ~PFR_TFLAG_USRMASK
) ||
1410 (setflag
& clrflag
))
1413 for (i
= 0; i
< size
; i
++) {
1414 if (COPYIN(tbl
+i
, &key
.pfrkt_t
, sizeof(key
.pfrkt_t
), flags
))
1416 if (pfr_validate_table(&key
.pfrkt_t
, 0,
1417 flags
& PFR_FLAG_USERIOCTL
))
1419 p
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1420 if (p
!= NULL
&& (p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
/* stage the new flag word; commit happens below */
1421 p
->pfrkt_nflags
= (p
->pfrkt_flags
| setflag
) &
1423 if (p
->pfrkt_nflags
== p
->pfrkt_flags
)
/* skip tables already queued (user may list one twice) */
1425 SLIST_FOREACH(q
, &workq
, pfrkt_workq
)
1426 if (!pfr_ktable_compare(p
, q
))
1428 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1429 if ((p
->pfrkt_flags
& PFR_TFLAG_PERSIST
) &&
1430 (clrflag
& PFR_TFLAG_PERSIST
) &&
1431 !(p
->pfrkt_flags
& PFR_TFLAG_REFERENCED
))
1439 if (!(flags
& PFR_FLAG_DUMMY
)) {
1440 if (flags
& PFR_FLAG_ATOMIC
)
1442 pfr_setflags_ktables(&workq
);
1443 if (flags
& PFR_FLAG_ATOMIC
)
1446 if (nchange
!= NULL
)
/*
 * pfr_ina_begin() - open a table transaction for the anchor named in
 * trs.  Any tables left INACTIVE by a previous, unfinished transaction
 * are queued to have the flag stripped, and a fresh ticket is issued
 * via *ticket = ++rs->tticket.
 *
 * NOTE(review): extraction gaps hide the NULL check on rs, the
 * deletion counter updates (*ndel) and the rs->topen bookkeeping.
 */
1454 pfr_ina_begin(struct pfr_table
*trs
, u_int32_t
*ticket
, int *ndel
, int flags
)
1456 struct pfr_ktableworkq workq
;
1457 struct pfr_ktable
*p
;
1458 struct pf_ruleset
*rs
;
1461 ACCEPT_FLAGS(flags
, PFR_FLAG_DUMMY
);
1462 rs
= pf_find_or_create_ruleset(trs
->pfrt_anchor
);
/* sweep stale INACTIVE tables belonging to this anchor */
1466 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1467 if (!(p
->pfrkt_flags
& PFR_TFLAG_INACTIVE
) ||
1468 pfr_skip_table(trs
, p
, 0))
1470 p
->pfrkt_nflags
= p
->pfrkt_flags
& ~PFR_TFLAG_INACTIVE
;
1471 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1474 if (!(flags
& PFR_FLAG_DUMMY
)) {
1475 pfr_setflags_ktables(&workq
);
/* hand the caller the ticket for this transaction */
1477 *ticket
= ++rs
->tticket
;
1480 pf_remove_if_empty_ruleset(rs
);
/*
 * pfr_ina_define() - define one table (and optionally its addresses)
 * inside an open transaction identified by ticket.
 *
 * A "shadow" table is built with the supplied address list; on commit
 * (pfr_ina_commit/pfr_commit_ktable) the shadow replaces the live
 * contents.  If the table lives in an anchor, a root table in the main
 * ruleset is found or created and linked via pfrkt_root.  All tables
 * created here are collected on tableq so they can be destroyed
 * together on any failure path.
 *
 * NOTE(review): extraction gaps hide the error returns (EBUSY for an
 * already-INACTIVE duplicate at 1534, ENOMEM paths), the `ad` local
 * declaration, and the senderr-style cleanup labels; the trailing
 * destroy sequence at 1580-1582 is presumably that error path.
 */
1487 pfr_ina_define(struct pfr_table
*tbl
, struct pfr_addr
*addr
, int size
,
1488 int *nadd
, int *naddr
, u_int32_t ticket
, int flags
)
1490 struct pfr_ktableworkq tableq
;
1491 struct pfr_kentryworkq addrq
;
1492 struct pfr_ktable
*kt
, *rt
, *shadow
, key
;
1493 struct pfr_kentry
*p
;
1495 struct pf_ruleset
*rs
;
1496 int i
, rv
, xadd
= 0, xaddr
= 0;
1498 ACCEPT_FLAGS(flags
, PFR_FLAG_DUMMY
| PFR_FLAG_ADDRSTOO
);
/* an address list only makes sense with PFR_FLAG_ADDRSTOO */
1499 if (size
&& !(flags
& PFR_FLAG_ADDRSTOO
))
1501 if (pfr_validate_table(tbl
, PFR_TFLAG_USRMASK
,
1502 flags
& PFR_FLAG_USERIOCTL
))
1504 rs
= pf_find_ruleset(tbl
->pfrt_anchor
);
/* the caller must hold the ticket of the open transaction */
1505 if (rs
== NULL
|| !rs
->topen
|| ticket
!= rs
->tticket
)
1507 tbl
->pfrt_flags
|= PFR_TFLAG_INACTIVE
;
1508 SLIST_INIT(&tableq
);
1509 kt
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, (struct pfr_ktable
*)tbl
);
1511 kt
= pfr_create_ktable(tbl
, 0, 1);
1514 SLIST_INSERT_HEAD(&tableq
, kt
, pfrkt_workq
);
1516 if (!tbl
->pfrt_anchor
[0])
1519 /* find or create root table */
1520 bzero(&key
, sizeof(key
));
1521 strlcpy(key
.pfrkt_name
, tbl
->pfrt_name
, sizeof(key
.pfrkt_name
));
1522 rt
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1524 kt
->pfrkt_root
= rt
;
1527 rt
= pfr_create_ktable(&key
.pfrkt_t
, 0, 1);
1529 pfr_destroy_ktables(&tableq
, 0);
1532 SLIST_INSERT_HEAD(&tableq
, rt
, pfrkt_workq
);
1533 kt
->pfrkt_root
= rt
;
1534 } else if (!(kt
->pfrkt_flags
& PFR_TFLAG_INACTIVE
))
/* build the shadow table holding the new address set */
1537 shadow
= pfr_create_ktable(tbl
, 0, 0);
1538 if (shadow
== NULL
) {
1539 pfr_destroy_ktables(&tableq
, 0);
1543 for (i
= 0; i
< size
; i
++) {
1544 if (COPYIN(addr
+i
, &ad
, sizeof(ad
), flags
))
1546 if (pfr_validate_addr(&ad
))
/* silently skip duplicates within the supplied list */
1548 if (pfr_lookup_addr(shadow
, &ad
, 1) != NULL
)
1550 p
= pfr_create_kentry(&ad
, 0);
1553 if (pfr_route_kentry(shadow
, p
)) {
1554 pfr_destroy_kentry(p
);
1557 SLIST_INSERT_HEAD(&addrq
, p
, pfrke_workq
);
1560 if (!(flags
& PFR_FLAG_DUMMY
)) {
/* replace any shadow left over from an earlier define */
1561 if (kt
->pfrkt_shadow
!= NULL
)
1562 pfr_destroy_ktable(kt
->pfrkt_shadow
, 1);
1563 kt
->pfrkt_flags
|= PFR_TFLAG_INACTIVE
;
1564 pfr_insert_ktables(&tableq
);
/* NO_ADDRESSES marks "table defined without an address list" */
1565 shadow
->pfrkt_cnt
= (flags
& PFR_FLAG_ADDRSTOO
) ?
1566 xaddr
: NO_ADDRESSES
;
1567 kt
->pfrkt_shadow
= shadow
;
/* dummy run: tear everything back down */
1569 pfr_clean_node_mask(shadow
, &addrq
);
1570 pfr_destroy_ktable(shadow
, 0);
1571 pfr_destroy_ktables(&tableq
, 0);
1572 pfr_destroy_kentries(&addrq
);
/* error path: undo all allocations made above */
1580 pfr_destroy_ktable(shadow
, 0);
1581 pfr_destroy_ktables(&tableq
, 0);
1582 pfr_destroy_kentries(&addrq
);
/*
 * pfr_ina_rollback() - abort the open transaction identified by
 * ticket: every table in this anchor carrying PFR_TFLAG_INACTIVE has
 * the flag stripped (destroying its shadow via pfr_setflags_ktable).
 *
 * NOTE(review): extraction gaps hide the *ndel accounting and the
 * rs->topen reset.
 */
1587 pfr_ina_rollback(struct pfr_table
*trs
, u_int32_t ticket
, int *ndel
, int flags
)
1589 struct pfr_ktableworkq workq
;
1590 struct pfr_ktable
*p
;
1591 struct pf_ruleset
*rs
;
1594 ACCEPT_FLAGS(flags
, PFR_FLAG_DUMMY
);
1595 rs
= pf_find_ruleset(trs
->pfrt_anchor
);
/* only a matching, open transaction may be rolled back */
1596 if (rs
== NULL
|| !rs
->topen
|| ticket
!= rs
->tticket
)
1599 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1600 if (!(p
->pfrkt_flags
& PFR_TFLAG_INACTIVE
) ||
1601 pfr_skip_table(trs
, p
, 0))
1603 p
->pfrkt_nflags
= p
->pfrkt_flags
& ~PFR_TFLAG_INACTIVE
;
1604 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1607 if (!(flags
& PFR_FLAG_DUMMY
)) {
1608 pfr_setflags_ktables(&workq
);
1610 pf_remove_if_empty_ruleset(rs
);
/*
 * pfr_ina_commit() - commit the open transaction identified by ticket.
 * All INACTIVE tables in this anchor are queued; pfr_commit_ktable()
 * then swaps each table's shadow contents into place.  Tables that
 * were already ACTIVE count as changes (xchange), the rest as
 * additions (xadd).
 *
 * NOTE(review): extraction gaps hide the EBUSY/EINVAL returns, the
 * xadd/xchange increments and the rs->topen reset.
 */
1618 pfr_ina_commit(struct pfr_table
*trs
, u_int32_t ticket
, int *nadd
,
1619 int *nchange
, int flags
)
1621 struct pfr_ktable
*p
, *q
;
1622 struct pfr_ktableworkq workq
;
1623 struct pf_ruleset
*rs
;
1624 int xadd
= 0, xchange
= 0;
1625 long tzero
= time_second
;
1627 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
1628 rs
= pf_find_ruleset(trs
->pfrt_anchor
);
1629 if (rs
== NULL
|| !rs
->topen
|| ticket
!= rs
->tticket
)
/* queue every INACTIVE table belonging to this anchor */
1633 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1634 if (!(p
->pfrkt_flags
& PFR_TFLAG_INACTIVE
) ||
1635 pfr_skip_table(trs
, p
, 0))
1637 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1638 if (p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)
1644 if (!(flags
& PFR_FLAG_DUMMY
)) {
1645 if (flags
& PFR_FLAG_ATOMIC
)
/* manual walk: pfr_commit_ktable() may unlink the entry */
1647 for (p
= SLIST_FIRST(&workq
); p
!= NULL
; p
= q
) {
1648 q
= SLIST_NEXT(p
, pfrkt_workq
);
1649 pfr_commit_ktable(p
, tzero
);
1651 if (flags
& PFR_FLAG_ATOMIC
)
1654 pf_remove_if_empty_ruleset(rs
);
1658 if (nchange
!= NULL
)
/*
 * pfr_commit_ktable() - make a table's shadow contents live.
 *
 * Three cases are visible:
 *   1. shadow->pfrkt_cnt == NO_ADDRESSES: the transaction defined the
 *      table without an address list; only (re)activate and clear
 *      stats if it was not already active.
 *   2. kt is ACTIVE: merge entry-by-entry - addresses present in both
 *      trees are kept (negation changes queued on changeq), shadow
 *      duplicates discarded via garbageq, new entries added via addq,
 *      entries missing from the shadow removed via delq.
 *   3. otherwise: kt holds no addresses, so the radix heads and count
 *      are simply swapped with the shadow's.
 * Finally the shadow is destroyed and the merged flag word applied.
 *
 * NOTE(review): extraction gaps hide the `ad` and `nflags` local
 * declarations, SLIST_INIT calls for addq/delq, and several branch
 * bodies (e.g. what happens when the lookup at 1689 misses).
 */
1665 pfr_commit_ktable(struct pfr_ktable
*kt
, long tzero
)
1667 struct pfr_ktable
*shadow
= kt
->pfrkt_shadow
;
1670 if (shadow
->pfrkt_cnt
== NO_ADDRESSES
) {
1671 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
1672 pfr_clstats_ktable(kt
, tzero
, 1);
1673 } else if (kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) {
1674 /* kt might contain addresses */
1675 struct pfr_kentryworkq addrq
, addq
, changeq
, delq
, garbageq
;
1676 struct pfr_kentry
*p
, *q
, *next
;
1679 pfr_enqueue_addrs(shadow
, &addrq
, NULL
, 0);
1682 SLIST_INIT(&changeq
);
1684 SLIST_INIT(&garbageq
);
1685 pfr_clean_node_mask(shadow
, &addrq
);
1686 for (p
= SLIST_FIRST(&addrq
); p
!= NULL
; p
= next
) {
1687 next
= SLIST_NEXT(p
, pfrke_workq
); /* XXX */
1688 pfr_copyout_addr(&ad
, p
);
1689 q
= pfr_lookup_addr(kt
, &ad
, 1);
/* present in both: only the negation flag may change */
1691 if (q
->pfrke_not
!= p
->pfrke_not
)
1692 SLIST_INSERT_HEAD(&changeq
, q
,
1695 SLIST_INSERT_HEAD(&garbageq
, p
, pfrke_workq
);
/* new address: stamp and queue for insertion */
1697 p
->pfrke_tzero
= tzero
;
1698 SLIST_INSERT_HEAD(&addq
, p
, pfrke_workq
);
1701 pfr_enqueue_addrs(kt
, &delq
, NULL
, ENQUEUE_UNMARKED_ONLY
);
1702 pfr_insert_kentries(kt
, &addq
, tzero
);
1703 pfr_remove_kentries(kt
, &delq
);
1704 pfr_clstats_kentries(&changeq
, tzero
, INVERT_NEG_FLAG
);
1705 pfr_destroy_kentries(&garbageq
);
1707 /* kt cannot contain addresses */
1708 SWAP(struct radix_node_head
*, kt
->pfrkt_ip4
,
1710 SWAP(struct radix_node_head
*, kt
->pfrkt_ip6
,
1712 SWAP(int, kt
->pfrkt_cnt
, shadow
->pfrkt_cnt
);
1713 pfr_clstats_ktable(kt
, tzero
, 1);
/* merge user flags from the shadow with kt's set flags */
1715 nflags
= ((shadow
->pfrkt_flags
& PFR_TFLAG_USRMASK
) |
1716 (kt
->pfrkt_flags
& PFR_TFLAG_SETMASK
) | PFR_TFLAG_ACTIVE
)
1717 & ~PFR_TFLAG_INACTIVE
;
1718 pfr_destroy_ktable(shadow
, 0);
1719 kt
->pfrkt_shadow
= NULL
;
1720 pfr_setflags_ktable(kt
, nflags
);
/*
 * pfr_validate_table() - sanity-check a table header from userland:
 * non-empty NUL-terminated name padded with NULs, optional rejection
 * of the reserved anchor, a fixable anchor path, and no flag bits
 * outside allowedflags.
 *
 * NOTE(review): the error returns between the checks were dropped by
 * the extraction; presumably each failed check returns -1 and the
 * fall-through returns 0 - confirm against the full file.
 */
1724 pfr_validate_table(struct pfr_table
*tbl
, int allowedflags
, int no_reserved
)
1728 if (!tbl
->pfrt_name
[0])
1730 if (no_reserved
&& !strcmp(tbl
->pfrt_anchor
, PF_RESERVED_ANCHOR
))
/* name must be NUL-terminated within the fixed-size buffer */
1732 if (tbl
->pfrt_name
[PF_TABLE_NAME_SIZE
-1])
/* and padded with NULs all the way to the end */
1734 for (i
= strlen(tbl
->pfrt_name
); i
< PF_TABLE_NAME_SIZE
; i
++)
1735 if (tbl
->pfrt_name
[i
])
1737 if (pfr_fix_anchor(tbl
->pfrt_anchor
))
1739 if (tbl
->pfrt_flags
& ~allowedflags
)
1745 * Rewrite anchors referenced by tables to remove slashes
1746 * and check for validity.
/*
 * pfr_fix_anchor() - normalize an anchor path in place: strip leading
 * slashes by shifting the string left, then verify the buffer is
 * NUL-terminated and NUL-padded out to MAXPATHLEN.
 *
 * NOTE(review): the extraction dropped the `path`/`off` declarations,
 * the computation of off, and the return statements; presumably the
 * padding checks return -1 on failure and 0 on success.
 */
1749 pfr_fix_anchor(char *anchor
)
1751 size_t siz
= MAXPATHLEN
;
1754 if (anchor
[0] == '/') {
/* skip the run of leading slashes ... */
1760 while (*++path
== '/')
/* ... then slide the remainder down and re-pad with NULs */
1762 bcopy(path
, anchor
, siz
- off
);
1763 memset(anchor
+ siz
- off
, 0, off
);
1765 if (anchor
[siz
- 1])
1767 for (i
= strlen(anchor
); i
< siz
; i
++)
/*
 * pfr_table_count() - number of tables visible to the given filter:
 * all tables (PFR_FLAG_ALLRSETS), the filter's anchor's table count
 * (-1 if the anchor does not exist), or the main ruleset's count.
 */
1774 pfr_table_count(struct pfr_table
*filter
, int flags
)
1776 struct pf_ruleset
*rs
;
1778 if (flags
& PFR_FLAG_ALLRSETS
)
1779 return (pfr_ktable_cnt
);
1780 if (filter
->pfrt_anchor
[0]) {
1781 rs
= pf_find_ruleset(filter
->pfrt_anchor
);
1782 return ((rs
!= NULL
) ? rs
->tables
: -1);
1784 return (pf_main_ruleset
.tables
);
/*
 * pfr_skip_table() - decide whether a table falls outside the
 * caller's filter: with PFR_FLAG_ALLRSETS nothing is skipped,
 * otherwise tables whose anchor differs from the filter's are.
 *
 * NOTE(review): the return statements were dropped by the extraction;
 * presumably 0 = keep, non-zero = skip, matching the callers' use
 * as a boolean "skip" predicate.
 */
1788 pfr_skip_table(struct pfr_table
*filter
, struct pfr_ktable
*kt
, int flags
)
1790 if (flags
& PFR_FLAG_ALLRSETS
)
1792 if (strcmp(filter
->pfrt_anchor
, kt
->pfrkt_anchor
))
1798 pfr_insert_ktables(struct pfr_ktableworkq
*workq
)
1800 struct pfr_ktable
*p
;
1802 SLIST_FOREACH(p
, workq
, pfrkt_workq
)
1803 pfr_insert_ktable(p
);
/*
 * pfr_insert_ktable() - add one table to the global RB tree.  If the
 * table belongs to an anchor, bump the root table's anchor refcount
 * and, on the 0->1 transition, mark the root PFR_TFLAG_REFDANCHOR.
 *
 * NOTE(review): the extraction dropped a line between 1809 and 1811 -
 * presumably the global table counter increment; confirm against the
 * full file.
 */
1807 pfr_insert_ktable(struct pfr_ktable
*kt
)
1809 RB_INSERT(pfr_ktablehead
, &pfr_ktables
, kt
);
1811 if (kt
->pfrkt_root
!= NULL
)
/* first anchor reference flags the root as referenced-by-anchor */
1812 if (!kt
->pfrkt_root
->pfrkt_refcnt
[PFR_REFCNT_ANCHOR
]++)
1813 pfr_setflags_ktable(kt
->pfrkt_root
,
1814 kt
->pfrkt_root
->pfrkt_flags
|PFR_TFLAG_REFDANCHOR
);
1818 pfr_setflags_ktables(struct pfr_ktableworkq
*workq
)
1820 struct pfr_ktable
*p
, *q
;
1822 for (p
= SLIST_FIRST(workq
); p
; p
= q
) {
1823 q
= SLIST_NEXT(p
, pfrkt_workq
);
1824 pfr_setflags_ktable(p
, p
->pfrkt_nflags
);
/*
 * pfr_setflags_ktable() - apply a new flag word to a table, with the
 * derived rules:
 *   - a table that is neither REFERENCED nor PERSIST cannot stay
 *     ACTIVE;
 *   - an inactive table keeps no user flags;
 *   - if no "set" flags remain at all, the table is removed from the
 *     tree (dropping the root's anchor refcount, and clearing
 *     REFDANCHOR on the last reference) and destroyed outright;
 *   - a table leaving ACTIVE has its addresses flushed;
 *   - a table leaving INACTIVE loses its shadow.
 *
 * NOTE(review): extraction gaps hide an early return after the
 * destroy at 1845 and the closing braces; comments describe only the
 * visible fragments.
 */
1829 pfr_setflags_ktable(struct pfr_ktable
*kt
, int newf
)
1831 struct pfr_kentryworkq addrq
;
1833 if (!(newf
& PFR_TFLAG_REFERENCED
) &&
1834 !(newf
& PFR_TFLAG_PERSIST
))
1835 newf
&= ~PFR_TFLAG_ACTIVE
;
1836 if (!(newf
& PFR_TFLAG_ACTIVE
))
1837 newf
&= ~PFR_TFLAG_USRMASK
;
/* nothing keeps the table alive: unlink and destroy it */
1838 if (!(newf
& PFR_TFLAG_SETMASK
)) {
1839 RB_REMOVE(pfr_ktablehead
, &pfr_ktables
, kt
);
1840 if (kt
->pfrkt_root
!= NULL
)
1841 if (!--kt
->pfrkt_root
->pfrkt_refcnt
[PFR_REFCNT_ANCHOR
])
1842 pfr_setflags_ktable(kt
->pfrkt_root
,
1843 kt
->pfrkt_root
->pfrkt_flags
&
1844 ~PFR_TFLAG_REFDANCHOR
);
1845 pfr_destroy_ktable(kt
, 1);
/* deactivation flushes the address contents */
1849 if (!(newf
& PFR_TFLAG_ACTIVE
) && kt
->pfrkt_cnt
) {
1850 pfr_enqueue_addrs(kt
, &addrq
, NULL
, 0);
1851 pfr_remove_kentries(kt
, &addrq
);
/* leaving INACTIVE discards any pending shadow */
1853 if (!(newf
& PFR_TFLAG_INACTIVE
) && kt
->pfrkt_shadow
!= NULL
) {
1854 pfr_destroy_ktable(kt
->pfrkt_shadow
, 1);
1855 kt
->pfrkt_shadow
= NULL
;
1857 kt
->pfrkt_flags
= newf
;
1861 pfr_clstats_ktables(struct pfr_ktableworkq
*workq
, long tzero
, int recurse
)
1863 struct pfr_ktable
*p
;
1865 SLIST_FOREACH(p
, workq
, pfrkt_workq
)
1866 pfr_clstats_ktable(p
, tzero
, recurse
);
/*
 * pfr_clstats_ktable() - zero a table's packet/byte/match counters and
 * reset its timestamp to tzero; with recurse set, also clear the
 * per-address statistics.
 *
 * NOTE(review): the extraction dropped the crit section / brace lines
 * around the two visible phases.
 */
1870 pfr_clstats_ktable(struct pfr_ktable
*kt
, long tzero
, int recurse
)
1872 struct pfr_kentryworkq addrq
;
/* recurse: clear every entry's counters too */
1875 pfr_enqueue_addrs(kt
, &addrq
, NULL
, 0);
1876 pfr_clstats_kentries(&addrq
, tzero
, 0);
1879 bzero(kt
->pfrkt_packets
, sizeof(kt
->pfrkt_packets
));
1880 bzero(kt
->pfrkt_bytes
, sizeof(kt
->pfrkt_bytes
));
1881 kt
->pfrkt_match
= kt
->pfrkt_nomatch
= 0;
1883 kt
->pfrkt_tzero
= tzero
;
/*
 * pfr_create_ktable() - allocate and initialize a kernel table.
 * Copies the header from tbl, optionally attaches the table to its
 * ruleset (pfrkt_rs), and initializes the IPv4 and IPv6 radix heads
 * off the shared pf_maskhead.  Returns NULL on allocation failure
 * (M_NULLOK permits kmalloc to fail).
 *
 * NOTE(review): extraction gaps hide the NULL check after kmalloc,
 * the header copy into kt->pfrkt_t, the rs == NULL error path and the
 * final return.
 */
1887 pfr_create_ktable(struct pfr_table
*tbl
, long tzero
, int attachruleset
)
1889 struct pfr_ktable
*kt
;
1890 struct pf_ruleset
*rs
;
1892 kt
= kmalloc(sizeof(struct pfr_ktable
), M_PFRKTABLEPL
, M_NOWAIT
|M_ZERO
|M_NULLOK
);
1897 if (attachruleset
) {
1898 rs
= pf_find_or_create_ruleset(tbl
->pfrt_anchor
);
/* ruleset lookup failed: undo the allocation */
1900 pfr_destroy_ktable(kt
, 0);
1907 KKASSERT(pf_maskhead
!= NULL
);
/* one radix tree per address family, keyed on the in(6) address */
1908 if (!rn_inithead((void **)&kt
->pfrkt_ip4
, pf_maskhead
,
1909 offsetof(struct sockaddr_in
, sin_addr
) * 8) ||
1910 !rn_inithead((void **)&kt
->pfrkt_ip6
, pf_maskhead
,
1911 offsetof(struct sockaddr_in6
, sin6_addr
) * 8)) {
1912 pfr_destroy_ktable(kt
, 0);
1915 kt
->pfrkt_tzero
= tzero
;
1921 pfr_destroy_ktables(struct pfr_ktableworkq
*workq
, int flushaddr
)
1923 struct pfr_ktable
*p
, *q
;
1925 for (p
= SLIST_FIRST(workq
); p
; p
= q
) {
1926 q
= SLIST_NEXT(p
, pfrkt_workq
);
1927 pfr_destroy_ktable(p
, flushaddr
);
/*
 * pfr_destroy_ktable() - free a table and everything hanging off it:
 * optionally flush its address entries first (flushaddr), free both
 * radix heads, recursively destroy any shadow, drop the ruleset's
 * table count, and finally free the table itself.
 *
 * NOTE(review): the extraction dropped the flushaddr conditional
 * wrapper around 1937-1939 and the closing braces.
 */
1932 pfr_destroy_ktable(struct pfr_ktable
*kt
, int flushaddr
)
1934 struct pfr_kentryworkq addrq
;
/* flushaddr: release all address entries before the trees */
1937 pfr_enqueue_addrs(kt
, &addrq
, NULL
, 0);
1938 pfr_clean_node_mask(kt
, &addrq
);
1939 pfr_destroy_kentries(&addrq
);
1941 if (kt
->pfrkt_ip4
!= NULL
)
1942 kfree((caddr_t
)kt
->pfrkt_ip4
, M_RTABLE
);
1944 if (kt
->pfrkt_ip6
!= NULL
)
1945 kfree((caddr_t
)kt
->pfrkt_ip6
, M_RTABLE
);
1946 if (kt
->pfrkt_shadow
!= NULL
)
1947 pfr_destroy_ktable(kt
->pfrkt_shadow
, flushaddr
);
1948 if (kt
->pfrkt_rs
!= NULL
) {
1949 kt
->pfrkt_rs
->tables
--;
1950 pf_remove_if_empty_ruleset(kt
->pfrkt_rs
);
1952 kfree(kt
, M_PFRKTABLEPL
);
/*
 * pfr_ktable_compare() - RB-tree ordering for tables: primary key is
 * the table name, secondary key the anchor path.
 *
 * NOTE(review): the extraction dropped the declaration of the local
 * the strncmp result is assigned to (`d`) and the `return (d);` for
 * the unequal-name case.
 */
1956 pfr_ktable_compare(struct pfr_ktable
*p
, struct pfr_ktable
*q
)
1960 if ((d
= strncmp(p
->pfrkt_name
, q
->pfrkt_name
, PF_TABLE_NAME_SIZE
)))
1962 return (strcmp(p
->pfrkt_anchor
, q
->pfrkt_anchor
));
1966 pfr_lookup_table(struct pfr_table
*tbl
)
1968 /* struct pfr_ktable start like a struct pfr_table */
1969 return (RB_FIND(pfr_ktablehead
, &pfr_ktables
,
1970 (struct pfr_ktable
*)tbl
));
1974 pfr_match_addr(struct pfr_ktable
*kt
, struct pf_addr
*a
, sa_family_t af
)
1976 struct pfr_kentry
*ke
= NULL
;
1978 struct sockaddr_in pfr_sin
;
1980 struct sockaddr_in6 pfr_sin6
;
1983 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) && kt
->pfrkt_root
!= NULL
)
1984 kt
= kt
->pfrkt_root
;
1985 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
1991 bzero(&pfr_sin
, sizeof(pfr_sin
));
1992 pfr_sin
.sin_len
= sizeof(pfr_sin
);
1993 pfr_sin
.sin_family
= AF_INET
;
1994 pfr_sin
.sin_addr
.s_addr
= a
->addr32
[0];
1995 ke
= (struct pfr_kentry
*)rn_match((char *)&pfr_sin
,
1997 if (ke
&& KENTRY_RNF_ROOT(ke
))
2003 bzero(&pfr_sin6
, sizeof(pfr_sin6
));
2004 pfr_sin6
.sin6_len
= sizeof(pfr_sin6
);
2005 pfr_sin6
.sin6_family
= AF_INET6
;
2006 bcopy(a
, &pfr_sin6
.sin6_addr
, sizeof(pfr_sin6
.sin6_addr
));
2007 ke
= (struct pfr_kentry
*)rn_match((char *)&pfr_sin6
,
2009 if (ke
&& KENTRY_RNF_ROOT(ke
))
2014 match
= (ke
&& !ke
->pfrke_not
);
2018 kt
->pfrkt_nomatch
++;
/*
 * pfr_update_stats() - account one packet of `len` bytes against the
 * table (and, with PFR_TFLAG_COUNTERS, against the matched entry).
 *
 * The address is looked up exactly as in pfr_match_addr().  If the
 * lookup outcome disagrees with what the rule match recorded
 * (notrule), the packet is booked under PFR_OP_XPASS and a diagnostic
 * is printed when the rule was not a pass rule.  Per-entry counters
 * are allocated lazily (M_NOWAIT|M_ZERO, so allocation may fail and
 * is re-checked before use).
 *
 * NOTE(review): extraction gaps hide the af switch scaffolding, the
 * rn_match second arguments and the closing braces.
 */
2023 pfr_update_stats(struct pfr_ktable
*kt
, struct pf_addr
*a
, sa_family_t af
,
2024 u_int64_t len
, int dir_out
, int op_pass
, int notrule
)
2026 struct pfr_kentry
*ke
= NULL
;
2027 struct sockaddr_in pfr_sin
;
2029 struct sockaddr_in6 pfr_sin6
;
2032 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) && kt
->pfrkt_root
!= NULL
)
2033 kt
= kt
->pfrkt_root
;
2034 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
/* IPv4 lookup */
2040 bzero(&pfr_sin
, sizeof(pfr_sin
));
2041 pfr_sin
.sin_len
= sizeof(pfr_sin
);
2042 pfr_sin
.sin_family
= AF_INET
;
2043 pfr_sin
.sin_addr
.s_addr
= a
->addr32
[0];
2044 ke
= (struct pfr_kentry
*)rn_match((char *)&pfr_sin
,
2046 if (ke
&& KENTRY_RNF_ROOT(ke
))
/* IPv6 lookup */
2052 bzero(&pfr_sin6
, sizeof(pfr_sin6
));
2053 pfr_sin6
.sin6_len
= sizeof(pfr_sin6
);
2054 pfr_sin6
.sin6_family
= AF_INET6
;
2055 bcopy(a
, &pfr_sin6
.sin6_addr
, sizeof(pfr_sin6
.sin6_addr
));
2056 ke
= (struct pfr_kentry
*)rn_match((char *)&pfr_sin6
,
2058 if (ke
&& KENTRY_RNF_ROOT(ke
))
/* lookup outcome disagrees with the recorded rule match */
2065 if ((ke
== NULL
|| ke
->pfrke_not
) != notrule
) {
2066 if (op_pass
!= PFR_OP_PASS
)
2067 kprintf("pfr_update_stats: assertion failed.\n");
2068 op_pass
= PFR_OP_XPASS
;
2070 kt
->pfrkt_packets
[dir_out
][op_pass
]++;
2071 kt
->pfrkt_bytes
[dir_out
][op_pass
] += len
;
2072 if (ke
!= NULL
&& op_pass
!= PFR_OP_XPASS
&&
2073 (kt
->pfrkt_flags
& PFR_TFLAG_COUNTERS
)) {
/* per-entry counters are allocated on first use */
2074 if (ke
->pfrke_counters
== NULL
)
2075 ke
->pfrke_counters
= kmalloc(sizeof(struct pfr_kcounters
),
2076 M_PFRKCOUNTERSPL
, M_NOWAIT
|M_ZERO
);
2077 if (ke
->pfrke_counters
!= NULL
) {
2078 ke
->pfrke_counters
->pfrkc_packets
[dir_out
][op_pass
]++;
2079 ke
->pfrke_counters
->pfrkc_bytes
[dir_out
][op_pass
] += len
;
/*
 * pfr_attach_table() - resolve (or create) the table `name` for a
 * rule in ruleset rs and take a rule reference on it.
 *
 * When the ruleset is an anchor, the table is created under the
 * anchor's path and a root table with an empty anchor is found or
 * created and linked via pfrkt_root.  The first rule reference marks
 * the table PFR_TFLAG_REFERENCED.
 *
 * NOTE(review): extraction gaps hide the kt == NULL branches around
 * 2095-2097/2102-2104, the anchor conditional, the NULL-failure
 * returns and the final `return kt`-style statement.
 */
2085 pfr_attach_table(struct pf_ruleset
*rs
, char *name
)
2087 struct pfr_ktable
*kt
, *rt
;
2088 struct pfr_table tbl
;
2089 struct pf_anchor
*ac
= rs
->anchor
;
2091 bzero(&tbl
, sizeof(tbl
));
2092 strlcpy(tbl
.pfrt_name
, name
, sizeof(tbl
.pfrt_name
));
/* anchored ruleset: table lives under the anchor's path */
2094 strlcpy(tbl
.pfrt_anchor
, ac
->path
, sizeof(tbl
.pfrt_anchor
));
2095 kt
= pfr_lookup_table(&tbl
);
2097 kt
= pfr_create_ktable(&tbl
, time_second
, 1);
/* find or create the root table (same name, empty anchor) */
2101 bzero(tbl
.pfrt_anchor
, sizeof(tbl
.pfrt_anchor
));
2102 rt
= pfr_lookup_table(&tbl
);
2104 rt
= pfr_create_ktable(&tbl
, 0, 1);
2106 pfr_destroy_ktable(kt
, 0);
2109 pfr_insert_ktable(rt
);
2111 kt
->pfrkt_root
= rt
;
2113 pfr_insert_ktable(kt
);
/* first rule reference flags the table as referenced */
2115 if (!kt
->pfrkt_refcnt
[PFR_REFCNT_RULE
]++)
2116 pfr_setflags_ktable(kt
, kt
->pfrkt_flags
|PFR_TFLAG_REFERENCED
);
2121 pfr_detach_table(struct pfr_ktable
*kt
)
2123 if (kt
->pfrkt_refcnt
[PFR_REFCNT_RULE
] <= 0)
2124 kprintf("pfr_detach_table: refcount = %d.\n",
2125 kt
->pfrkt_refcnt
[PFR_REFCNT_RULE
]);
2126 else if (!--kt
->pfrkt_refcnt
[PFR_REFCNT_RULE
])
2127 pfr_setflags_ktable(kt
, kt
->pfrkt_flags
&~PFR_TFLAG_REFERENCED
);
/*
 * pfr_pool_get() - select the next address block from a table for a
 * round-robin address pool.  *pidx carries the rotation index across
 * calls; counter (when given with a valid idx) resumes inside the
 * previously used block.  Single-host entries are returned directly;
 * for network entries a nested, more-specific block is detected with
 * an exact rn_match() and skipped by advancing the counter past it
 * with PF_POOLMASK().
 *
 * FIX: the nested-block mask was prepared with a hard-coded AF_INET
 * even when af == AF_INET6, although the very next line consumes it
 * via SUNION2PF(&mask, af).  Pass the caller's af so IPv6 pools skip
 * nested blocks with a correct prefix mask (matches the later
 * upstream OpenBSD correction).
 *
 * NOTE(review): this extraction has gaps (the embedded original line
 * numbers jump); loop labels, the radix-head arguments to rn_match()
 * and several returns are not visible here.
 */
2131 pfr_pool_get(struct pfr_ktable
*kt
, int *pidx
, struct pf_addr
*counter
,
2132 struct pf_addr
**raddr
, struct pf_addr
**rmask
, sa_family_t af
)
2134 struct pfr_kentry
*ke
, *ke2
= NULL
;
2135 struct pf_addr
*addr
= NULL
;
2136 union sockaddr_union mask
;
2137 int idx
= -1, use_counter
= 0;
2138 struct sockaddr_in pfr_sin
;
2139 struct sockaddr_in6 pfr_sin6
;
2140 union sockaddr_union pfr_mask
;
/* point addr at the scratch sockaddr of the right family */
2143 addr
= (struct pf_addr
*)&pfr_sin
.sin_addr
;
2144 else if (af
== AF_INET6
)
2145 addr
= (struct pf_addr
*)&pfr_sin6
.sin6_addr
;
2146 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) && kt
->pfrkt_root
!= NULL
)
2147 kt
= kt
->pfrkt_root
;
2148 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
))
2153 if (counter
!= NULL
&& idx
>= 0)
2159 ke
= pfr_kentry_byidx(kt
, idx
, af
);
2161 kt
->pfrkt_nomatch
++;
2164 pfr_prepare_network(&pfr_mask
, af
, ke
->pfrke_net
);
2165 *raddr
= SUNION2PF(&ke
->pfrke_sa
, af
);
2166 *rmask
= SUNION2PF(&pfr_mask
, af
);
2169 /* is supplied address within block? */
2170 if (!PF_MATCHA(0, *raddr
, *rmask
, counter
, af
)) {
2171 /* no, go to next block in table */
2176 PF_ACPY(addr
, counter
, af
);
2178 /* use first address of block */
2179 PF_ACPY(addr
, *raddr
, af
);
2182 if (!KENTRY_NETWORK(ke
)) {
2183 /* this is a single IP address - no possible nested block */
2184 PF_ACPY(counter
, addr
, af
);
2190 /* we don't want to use a nested block */
2192 ke2
= (struct pfr_kentry
*)rn_match((char *)&pfr_sin
,
2194 else if (af
== AF_INET6
)
2195 ke2
= (struct pfr_kentry
*)rn_match((char *)&pfr_sin6
,
2197 /* no need to check KENTRY_RNF_ROOT() here */
2199 /* lookup return the same block - perfect */
2200 PF_ACPY(counter
, addr
, af
);
2206 /* we need to increase the counter past the nested block */
/* was AF_INET: the mask is read with SUNION2PF(&mask, af) below */
2207 pfr_prepare_network(&mask
, af
, ke2
->pfrke_net
);
2208 PF_POOLMASK(addr
, addr
, SUNION2PF(&mask
, af
), &pfr_ffaddr
, af
);
2210 if (!PF_MATCHA(0, *raddr
, *rmask
, addr
, af
)) {
2211 /* ok, we reached the end of our main block */
2212 /* go to next block in table */
/*
 * pfr_kentry_byidx() - return the idx'th entry of the table for the
 * given address family, by walking the corresponding radix tree with
 * a PFRW_POOL_GET walker that counts down to the requested index.
 *
 * NOTE(review): extraction gaps hide the walker's index setup
 * (presumably w.pfrw_cnt = idx), the af switch scaffolding and the
 * default return for unsupported families.
 */
2221 pfr_kentry_byidx(struct pfr_ktable
*kt
, int idx
, int af
)
2223 struct pfr_walktree w
;
2225 bzero(&w
, sizeof(w
));
2226 w
.pfrw_op
= PFRW_POOL_GET
;
/* AF_INET: walk the v4 tree */
2232 kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
, pfr_walktree
, &w
);
2233 return (w
.pfrw_kentry
);
/* AF_INET6: walk the v6 tree */
2237 kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
, pfr_walktree
, &w
);
2238 return (w
.pfrw_kentry
);
2246 pfr_dynaddr_update(struct pfr_ktable
*kt
, struct pfi_dynaddr
*dyn
)
2248 struct pfr_walktree w
;
2250 bzero(&w
, sizeof(w
));
2251 w
.pfrw_op
= PFRW_DYNADDR_UPDATE
;
2255 dyn
->pfid_acnt4
= 0;
2256 dyn
->pfid_acnt6
= 0;
2257 if (!dyn
->pfid_af
|| dyn
->pfid_af
== AF_INET
)
2258 kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
, pfr_walktree
, &w
);
2259 if (!dyn
->pfid_af
|| dyn
->pfid_af
== AF_INET6
)
2260 kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
, pfr_walktree
, &w
);