1 /* $FreeBSD: src/sys/contrib/pf/net/pf_ioctl.c,v 1.12 2004/08/12 14:15:42 mlaier Exp $ */
2 /* $OpenBSD: pf_ioctl.c,v 1.112.2.2 2004/07/24 18:28:12 brad Exp $ */
3 /* $DragonFly: src/sys/net/pf/pf_ioctl.c,v 1.15 2008/09/15 05:11:02 sephe Exp $ */
6 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
8 * Copyright (c) 2001 Daniel Hartmeier
9 * Copyright (c) 2002,2003 Henning Brauer
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
16 * - Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials provided
21 * with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
33 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
36 * Effort sponsored in part by the Defense Advanced Research Projects
37 * Agency (DARPA) and Air Force Research Laboratory, Air Force
38 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 #include "opt_inet6.h"
44 #include "use_pfsync.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
49 #include <sys/device.h>
51 #include <sys/filio.h>
52 #include <sys/fcntl.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/kernel.h>
56 #include <sys/thread2.h>
58 #include <sys/malloc.h>
59 #include <sys/module.h>
60 #include <vm/vm_zone.h>
63 #include <net/if_types.h>
64 #include <net/route.h>
66 #include <netinet/in.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_icmp.h>
73 #include <net/pf/pfvar.h>
76 #include <net/pf/if_pfsync.h>
77 #endif /* NPFSYNC > 0 */
80 #include <netinet/ip6.h>
81 #include <netinet/in_pcb.h>
85 #include <net/altq/altq.h>
88 #include <machine/limits.h>
91 void init_zone_var(void);
92 void cleanup_pf_zone(void);
94 struct pf_pool
*pf_get_pool(char *, char *, u_int32_t
,
95 u_int8_t
, u_int32_t
, u_int8_t
, u_int8_t
, u_int8_t
);
96 int pf_get_ruleset_number(u_int8_t
);
97 void pf_init_ruleset(struct pf_ruleset
*);
98 void pf_mv_pool(struct pf_palist
*, struct pf_palist
*);
99 void pf_empty_pool(struct pf_palist
*);
101 int pf_begin_altq(u_int32_t
*);
102 int pf_rollback_altq(u_int32_t
);
103 int pf_commit_altq(u_int32_t
);
105 int pf_begin_rules(u_int32_t
*, int, char *, char *);
106 int pf_rollback_rules(u_int32_t
, int, char *, char *);
107 int pf_commit_rules(u_int32_t
, int, char *, char *);
109 extern struct callout pf_expire_to
;
111 struct pf_rule pf_default_rule
;
113 #define TAGID_MAX 50000
114 TAILQ_HEAD(pf_tags
, pf_tagname
) pf_tags
= TAILQ_HEAD_INITIALIZER(pf_tags
),
115 pf_qids
= TAILQ_HEAD_INITIALIZER(pf_qids
);
117 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
118 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
120 static u_int16_t
tagname2tag(struct pf_tags
*, char *);
121 static void tag2tagname(struct pf_tags
*, u_int16_t
, char *);
122 static void tag_unref(struct pf_tags
*, u_int16_t
);
/*
 * Conditional debug printf: emits via kprintf only when the global
 * pf debug level is at least `n`.  Wrapped in do/while(0) so the
 * macro behaves as a single statement (no dangling-else hazard).
 */
#define DPFPRINTF(n, x) do { if (pf_status.debug >= (n)) kprintf x; } while (0)
126 static cdev_t pf_dev
;
 * XXX - These are new and need to be checked when moving to a new version
131 static void pf_clear_states(void);
132 static int pf_clear_tables(void);
133 static void pf_clear_srcnodes(void);
 * XXX - These are new and need to be checked when moving to a new version
139 * Wrapper functions for pfil(9) hooks
141 static int pf_check_in(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
,
143 static int pf_check_out(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
,
146 static int pf_check6_in(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
,
148 static int pf_check6_out(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
,
152 static int hook_pf(void);
153 static int dehook_pf(void);
154 static int shutdown_pf(void);
155 static int pf_load(void);
156 static int pf_unload(void);
162 static struct dev_ops pf_ops
= { /* XXX convert to port model */
169 static volatile int pf_pfil_hooked
= 0;
174 pf_src_tree_pl
= pf_rule_pl
= NULL
;
175 pf_state_pl
= pf_altq_pl
= pf_pooladdr_pl
= NULL
;
176 pf_frent_pl
= pf_frag_pl
= pf_cache_pl
= pf_cent_pl
= NULL
;
177 pf_state_scrub_pl
= NULL
;
178 pfr_ktable_pl
= pfr_kentry_pl
= NULL
;
182 cleanup_pf_zone(void)
184 ZONE_DESTROY(pf_src_tree_pl
);
185 ZONE_DESTROY(pf_rule_pl
);
186 ZONE_DESTROY(pf_state_pl
);
187 ZONE_DESTROY(pf_altq_pl
);
188 ZONE_DESTROY(pf_pooladdr_pl
);
189 ZONE_DESTROY(pf_frent_pl
);
190 ZONE_DESTROY(pf_frag_pl
);
191 ZONE_DESTROY(pf_cache_pl
);
192 ZONE_DESTROY(pf_cent_pl
);
193 ZONE_DESTROY(pfr_ktable_pl
);
194 ZONE_DESTROY(pfr_kentry_pl
);
195 ZONE_DESTROY(pf_state_scrub_pl
);
196 ZONE_DESTROY(pfi_addr_pl
);
202 u_int32_t
*my_timeout
= pf_default_rule
.timeout
;
206 ZONE_CREATE(pf_src_tree_pl
,struct pf_src_node
, "pfsrctrpl");
207 ZONE_CREATE(pf_rule_pl
, struct pf_rule
, "pfrulepl");
208 ZONE_CREATE(pf_state_pl
, struct pf_state
, "pfstatepl");
209 ZONE_CREATE(pf_altq_pl
, struct pf_altq
, "pfaltqpl");
210 ZONE_CREATE(pf_pooladdr_pl
,struct pf_pooladdr
, "pfpooladdrpl");
211 ZONE_CREATE(pfr_ktable_pl
, struct pfr_ktable
, "pfrktable");
212 ZONE_CREATE(pfr_kentry_pl
, struct pfr_kentry
, "pfrkentry");
213 ZONE_CREATE(pf_frent_pl
, struct pf_frent
, "pffrent");
214 ZONE_CREATE(pf_frag_pl
, struct pf_fragment
, "pffrag");
215 ZONE_CREATE(pf_cache_pl
, struct pf_fragment
, "pffrcache");
216 ZONE_CREATE(pf_cent_pl
, struct pf_frcache
, "pffrcent");
217 ZONE_CREATE(pf_state_scrub_pl
, struct pf_state_scrub
,
219 ZONE_CREATE(pfi_addr_pl
, struct pfi_dynaddr
, "pfiaddrpl");
228 error
= pf_osfp_initialize();
235 pf_pool_limits
[PF_LIMIT_STATES
].pp
= pf_state_pl
;
236 pf_pool_limits
[PF_LIMIT_STATES
].limit
= PFSTATE_HIWAT
;
237 pf_pool_limits
[PF_LIMIT_FRAGS
].pp
= pf_frent_pl
;
238 pf_pool_limits
[PF_LIMIT_FRAGS
].limit
= PFFRAG_FRENT_HIWAT
;
239 /* XXX uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
240 pf_pool_limits[PF_LIMIT_STATES].limit);
243 RB_INIT(&tree_src_tracking
);
244 TAILQ_INIT(&pf_anchors
);
245 pf_init_ruleset(&pf_main_ruleset
);
246 TAILQ_INIT(&pf_altqs
[0]);
247 TAILQ_INIT(&pf_altqs
[1]);
248 TAILQ_INIT(&pf_pabuf
);
249 pf_altqs_active
= &pf_altqs
[0];
250 pf_altqs_inactive
= &pf_altqs
[1];
251 TAILQ_INIT(&state_updates
);
253 /* default rule should never be garbage collected */
254 pf_default_rule
.entries
.tqe_prev
= &pf_default_rule
.entries
.tqe_next
;
255 pf_default_rule
.action
= PF_PASS
;
256 pf_default_rule
.nr
= (uint32_t)(-1);
258 /* initialize default timeouts */
259 my_timeout
[PFTM_TCP_FIRST_PACKET
] = 120; /* First TCP packet */
260 my_timeout
[PFTM_TCP_OPENING
] = 30; /* No response yet */
261 my_timeout
[PFTM_TCP_ESTABLISHED
] = 24*60*60; /* Established */
262 my_timeout
[PFTM_TCP_CLOSING
] = 15 * 60; /* Half closed */
263 my_timeout
[PFTM_TCP_FIN_WAIT
] = 45; /* Got both FINs */
264 my_timeout
[PFTM_TCP_CLOSED
] = 90; /* Got a RST */
265 my_timeout
[PFTM_UDP_FIRST_PACKET
] = 60; /* First UDP packet */
266 my_timeout
[PFTM_UDP_SINGLE
] = 30; /* Unidirectional */
267 my_timeout
[PFTM_UDP_MULTIPLE
] = 60; /* Bidirectional */
268 my_timeout
[PFTM_ICMP_FIRST_PACKET
] = 20; /* First ICMP packet */
269 my_timeout
[PFTM_ICMP_ERROR_REPLY
] = 10; /* Got error response */
270 my_timeout
[PFTM_OTHER_FIRST_PACKET
] = 60; /* First packet */
271 my_timeout
[PFTM_OTHER_SINGLE
] = 30; /* Unidirectional */
272 my_timeout
[PFTM_OTHER_MULTIPLE
] = 60; /* Bidirectional */
273 my_timeout
[PFTM_FRAG
] = 30; /* Fragment expire */
274 my_timeout
[PFTM_INTERVAL
] = 10; /* Expire interval */
276 callout_init(&pf_expire_to
);
277 callout_reset(&pf_expire_to
, my_timeout
[PFTM_INTERVAL
] * hz
,
278 pf_purge_timeout
, &pf_expire_to
);
281 bzero(&pf_status
, sizeof(pf_status
));
282 pf_status
.debug
= PF_DEBUG_URGENT
;
285 /* XXX do our best to avoid a conflict */
286 pf_status
.hostid
= karc4random();
292 pfopen(struct dev_open_args
*ap
)
294 cdev_t dev
= ap
->a_head
.a_dev
;
301 pfclose(struct dev_close_args
*ap
)
303 cdev_t dev
= ap
->a_head
.a_dev
;
310 pf_get_pool(char *anchorname
, char *rulesetname
, u_int32_t ticket
,
311 u_int8_t rule_action
, u_int32_t rule_number
, u_int8_t r_last
,
312 u_int8_t active
, u_int8_t check_ticket
)
314 struct pf_ruleset
*ruleset
;
315 struct pf_rule
*rule
;
318 ruleset
= pf_find_ruleset(anchorname
, rulesetname
);
321 rs_num
= pf_get_ruleset_number(rule_action
);
322 if (rs_num
>= PF_RULESET_MAX
)
325 if (check_ticket
&& ticket
!=
326 ruleset
->rules
[rs_num
].active
.ticket
)
329 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
332 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
334 if (check_ticket
&& ticket
!=
335 ruleset
->rules
[rs_num
].inactive
.ticket
)
338 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
341 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].inactive
.ptr
);
344 while ((rule
!= NULL
) && (rule
->nr
!= rule_number
))
345 rule
= TAILQ_NEXT(rule
, entries
);
350 return (&rule
->rpool
);
354 pf_get_ruleset_number(u_int8_t action
)
358 return (PF_RULESET_SCRUB
);
362 return (PF_RULESET_FILTER
);
366 return (PF_RULESET_NAT
);
370 return (PF_RULESET_BINAT
);
374 return (PF_RULESET_RDR
);
377 return (PF_RULESET_MAX
);
383 pf_init_ruleset(struct pf_ruleset
*ruleset
)
387 memset(ruleset
, 0, sizeof(struct pf_ruleset
));
388 for (i
= 0; i
< PF_RULESET_MAX
; i
++) {
389 TAILQ_INIT(&ruleset
->rules
[i
].queues
[0]);
390 TAILQ_INIT(&ruleset
->rules
[i
].queues
[1]);
391 ruleset
->rules
[i
].active
.ptr
= &ruleset
->rules
[i
].queues
[0];
392 ruleset
->rules
[i
].inactive
.ptr
= &ruleset
->rules
[i
].queues
[1];
397 pf_find_anchor(const char *anchorname
)
399 struct pf_anchor
*anchor
;
402 anchor
= TAILQ_FIRST(&pf_anchors
);
403 while (anchor
!= NULL
&& (n
= strcmp(anchor
->name
, anchorname
)) < 0)
404 anchor
= TAILQ_NEXT(anchor
, entries
);
412 pf_find_ruleset(char *anchorname
, char *rulesetname
)
414 struct pf_anchor
*anchor
;
415 struct pf_ruleset
*ruleset
;
417 if (!anchorname
[0] && !rulesetname
[0])
418 return (&pf_main_ruleset
);
419 if (!anchorname
[0] || !rulesetname
[0])
421 anchorname
[PF_ANCHOR_NAME_SIZE
-1] = 0;
422 rulesetname
[PF_RULESET_NAME_SIZE
-1] = 0;
423 anchor
= pf_find_anchor(anchorname
);
426 ruleset
= TAILQ_FIRST(&anchor
->rulesets
);
427 while (ruleset
!= NULL
&& strcmp(ruleset
->name
, rulesetname
) < 0)
428 ruleset
= TAILQ_NEXT(ruleset
, entries
);
429 if (ruleset
!= NULL
&& !strcmp(ruleset
->name
, rulesetname
))
436 pf_find_or_create_ruleset(char anchorname
[PF_ANCHOR_NAME_SIZE
],
437 char rulesetname
[PF_RULESET_NAME_SIZE
])
439 struct pf_anchor
*anchor
, *a
;
440 struct pf_ruleset
*ruleset
, *r
;
442 if (!anchorname
[0] && !rulesetname
[0])
443 return (&pf_main_ruleset
);
444 if (!anchorname
[0] || !rulesetname
[0])
446 anchorname
[PF_ANCHOR_NAME_SIZE
-1] = 0;
447 rulesetname
[PF_RULESET_NAME_SIZE
-1] = 0;
448 a
= TAILQ_FIRST(&pf_anchors
);
449 while (a
!= NULL
&& strcmp(a
->name
, anchorname
) < 0)
450 a
= TAILQ_NEXT(a
, entries
);
451 if (a
!= NULL
&& !strcmp(a
->name
, anchorname
))
454 anchor
= (struct pf_anchor
*)kmalloc(sizeof(struct pf_anchor
),
458 memset(anchor
, 0, sizeof(struct pf_anchor
));
459 bcopy(anchorname
, anchor
->name
, sizeof(anchor
->name
));
460 TAILQ_INIT(&anchor
->rulesets
);
462 TAILQ_INSERT_BEFORE(a
, anchor
, entries
);
464 TAILQ_INSERT_TAIL(&pf_anchors
, anchor
, entries
);
466 r
= TAILQ_FIRST(&anchor
->rulesets
);
467 while (r
!= NULL
&& strcmp(r
->name
, rulesetname
) < 0)
468 r
= TAILQ_NEXT(r
, entries
);
469 if (r
!= NULL
&& !strcmp(r
->name
, rulesetname
))
471 ruleset
= (struct pf_ruleset
*)kmalloc(sizeof(struct pf_ruleset
),
473 if (ruleset
!= NULL
) {
474 pf_init_ruleset(ruleset
);
475 bcopy(rulesetname
, ruleset
->name
, sizeof(ruleset
->name
));
476 ruleset
->anchor
= anchor
;
478 TAILQ_INSERT_BEFORE(r
, ruleset
, entries
);
480 TAILQ_INSERT_TAIL(&anchor
->rulesets
, ruleset
, entries
);
486 pf_remove_if_empty_ruleset(struct pf_ruleset
*ruleset
)
488 struct pf_anchor
*anchor
;
491 if (ruleset
== NULL
|| ruleset
->anchor
== NULL
|| ruleset
->tables
> 0 ||
494 for (i
= 0; i
< PF_RULESET_MAX
; ++i
)
495 if (!TAILQ_EMPTY(ruleset
->rules
[i
].active
.ptr
) ||
496 !TAILQ_EMPTY(ruleset
->rules
[i
].inactive
.ptr
) ||
497 ruleset
->rules
[i
].inactive
.open
)
500 anchor
= ruleset
->anchor
;
501 TAILQ_REMOVE(&anchor
->rulesets
, ruleset
, entries
);
502 kfree(ruleset
, M_TEMP
);
504 if (TAILQ_EMPTY(&anchor
->rulesets
)) {
505 TAILQ_REMOVE(&pf_anchors
, anchor
, entries
);
506 kfree(anchor
, M_TEMP
);
507 pf_update_anchor_rules();
512 pf_mv_pool(struct pf_palist
*poola
, struct pf_palist
*poolb
)
514 struct pf_pooladdr
*mv_pool_pa
;
516 while ((mv_pool_pa
= TAILQ_FIRST(poola
)) != NULL
) {
517 TAILQ_REMOVE(poola
, mv_pool_pa
, entries
);
518 TAILQ_INSERT_TAIL(poolb
, mv_pool_pa
, entries
);
523 pf_empty_pool(struct pf_palist
*poola
)
525 struct pf_pooladdr
*empty_pool_pa
;
527 while ((empty_pool_pa
= TAILQ_FIRST(poola
)) != NULL
) {
528 pfi_dynaddr_remove(&empty_pool_pa
->addr
);
529 pf_tbladdr_remove(&empty_pool_pa
->addr
);
530 pfi_detach_rule(empty_pool_pa
->kif
);
531 TAILQ_REMOVE(poola
, empty_pool_pa
, entries
);
532 pool_put(&pf_pooladdr_pl
, empty_pool_pa
);
537 pf_rm_rule(struct pf_rulequeue
*rulequeue
, struct pf_rule
*rule
)
539 if (rulequeue
!= NULL
) {
540 if (rule
->states
<= 0) {
542 * XXX - we need to remove the table *before* detaching
543 * the rule to make sure the table code does not delete
544 * the anchor under our feet.
546 pf_tbladdr_remove(&rule
->src
.addr
);
547 pf_tbladdr_remove(&rule
->dst
.addr
);
549 TAILQ_REMOVE(rulequeue
, rule
, entries
);
550 rule
->entries
.tqe_prev
= NULL
;
551 rule
->nr
= (uint32_t)(-1);
554 if (rule
->states
> 0 || rule
->src_nodes
> 0 ||
555 rule
->entries
.tqe_prev
!= NULL
)
557 pf_tag_unref(rule
->tag
);
558 pf_tag_unref(rule
->match_tag
);
560 if (rule
->pqid
!= rule
->qid
)
561 pf_qid_unref(rule
->pqid
);
562 pf_qid_unref(rule
->qid
);
564 pfi_dynaddr_remove(&rule
->src
.addr
);
565 pfi_dynaddr_remove(&rule
->dst
.addr
);
566 if (rulequeue
== NULL
) {
567 pf_tbladdr_remove(&rule
->src
.addr
);
568 pf_tbladdr_remove(&rule
->dst
.addr
);
570 pfi_detach_rule(rule
->kif
);
571 pf_empty_pool(&rule
->rpool
.list
);
572 pool_put(&pf_rule_pl
, rule
);
576 tagname2tag(struct pf_tags
*head
, char *tagname
)
578 struct pf_tagname
*tag
, *p
= NULL
;
579 u_int16_t new_tagid
= 1;
581 TAILQ_FOREACH(tag
, head
, entries
)
582 if (strcmp(tagname
, tag
->name
) == 0) {
588 * to avoid fragmentation, we do a linear search from the beginning
589 * and take the first free slot we find. if there is none or the list
590 * is empty, append a new entry at the end.
594 if (!TAILQ_EMPTY(head
))
595 for (p
= TAILQ_FIRST(head
); p
!= NULL
&&
596 p
->tag
== new_tagid
; p
= TAILQ_NEXT(p
, entries
))
597 new_tagid
= p
->tag
+ 1;
599 if (new_tagid
> TAGID_MAX
)
602 /* allocate and fill new struct pf_tagname */
603 tag
= (struct pf_tagname
*)kmalloc(sizeof(struct pf_tagname
),
604 M_TEMP
, M_NOWAIT
| M_ZERO
);
607 strlcpy(tag
->name
, tagname
, sizeof(tag
->name
));
608 tag
->tag
= new_tagid
;
611 if (p
!= NULL
) /* insert new entry before p */
612 TAILQ_INSERT_BEFORE(p
, tag
, entries
);
613 else /* either list empty or no free slot in between */
614 TAILQ_INSERT_TAIL(head
, tag
, entries
);
620 tag2tagname(struct pf_tags
*head
, u_int16_t tagid
, char *p
)
622 struct pf_tagname
*tag
;
624 TAILQ_FOREACH(tag
, head
, entries
)
625 if (tag
->tag
== tagid
) {
626 strlcpy(p
, tag
->name
, PF_TAG_NAME_SIZE
);
632 tag_unref(struct pf_tags
*head
, u_int16_t tag
)
634 struct pf_tagname
*p
, *next
;
639 for (p
= TAILQ_FIRST(head
); p
!= NULL
; p
= next
) {
640 next
= TAILQ_NEXT(p
, entries
);
643 TAILQ_REMOVE(head
, p
, entries
);
652 pf_tagname2tag(char *tagname
)
654 return (tagname2tag(&pf_tags
, tagname
));
658 pf_tag2tagname(u_int16_t tagid
, char *p
)
660 return (tag2tagname(&pf_tags
, tagid
, p
));
664 pf_tag_unref(u_int16_t tag
)
666 return (tag_unref(&pf_tags
, tag
));
671 pf_qname2qid(char *qname
)
673 return ((u_int32_t
)tagname2tag(&pf_qids
, qname
));
677 pf_qid2qname(u_int32_t qid
, char *p
)
679 return (tag2tagname(&pf_qids
, (u_int16_t
)qid
, p
));
683 pf_qid_unref(u_int32_t qid
)
685 return (tag_unref(&pf_qids
, (u_int16_t
)qid
));
689 pf_begin_altq(u_int32_t
*ticket
)
691 struct pf_altq
*altq
;
694 /* Purge the old altq list */
695 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
696 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
697 if (altq
->qname
[0] == 0) {
698 /* detach and destroy the discipline */
699 error
= altq_remove(altq
);
701 pf_qid_unref(altq
->qid
);
702 pool_put(&pf_altq_pl
, altq
);
706 *ticket
= ++ticket_altqs_inactive
;
707 altqs_inactive_open
= 1;
712 pf_rollback_altq(u_int32_t ticket
)
714 struct pf_altq
*altq
;
717 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
719 /* Purge the old altq list */
720 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
721 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
722 if (altq
->qname
[0] == 0) {
723 /* detach and destroy the discipline */
724 error
= altq_remove(altq
);
726 pf_qid_unref(altq
->qid
);
727 pool_put(&pf_altq_pl
, altq
);
729 altqs_inactive_open
= 0;
734 pf_commit_altq(u_int32_t ticket
)
736 struct pf_altqqueue
*old_altqs
;
737 struct pf_altq
*altq
;
740 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
743 /* swap altqs, keep the old. */
745 old_altqs
= pf_altqs_active
;
746 pf_altqs_active
= pf_altqs_inactive
;
747 pf_altqs_inactive
= old_altqs
;
748 ticket_altqs_active
= ticket_altqs_inactive
;
750 /* Attach new disciplines */
751 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
752 if (altq
->qname
[0] == 0) {
753 /* attach the discipline */
754 error
= altq_pfattach(altq
);
762 /* Purge the old altq list */
763 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
764 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
765 if (altq
->qname
[0] == 0) {
766 /* detach and destroy the discipline */
767 err
= altq_pfdetach(altq
);
768 if (err
!= 0 && error
== 0)
770 err
= altq_remove(altq
);
771 if (err
!= 0 && error
== 0)
774 pf_qid_unref(altq
->qid
);
775 pool_put(&pf_altq_pl
, altq
);
779 altqs_inactive_open
= 0;
785 pf_begin_rules(u_int32_t
*ticket
, int rs_num
, char *anchor
, char *ruleset
)
787 struct pf_ruleset
*rs
;
788 struct pf_rule
*rule
;
790 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
792 rs
= pf_find_or_create_ruleset(anchor
, ruleset
);
795 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
)
796 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
797 *ticket
= ++rs
->rules
[rs_num
].inactive
.ticket
;
798 rs
->rules
[rs_num
].inactive
.open
= 1;
803 pf_rollback_rules(u_int32_t ticket
, int rs_num
, char *anchor
, char *ruleset
)
805 struct pf_ruleset
*rs
;
806 struct pf_rule
*rule
;
808 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
810 rs
= pf_find_ruleset(anchor
, ruleset
);
811 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
812 rs
->rules
[rs_num
].inactive
.ticket
!= ticket
)
814 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
)
815 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
816 rs
->rules
[rs_num
].inactive
.open
= 0;
821 pf_commit_rules(u_int32_t ticket
, int rs_num
, char *anchor
, char *ruleset
)
823 struct pf_ruleset
*rs
;
824 struct pf_rule
*rule
;
825 struct pf_rulequeue
*old_rules
;
827 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
829 rs
= pf_find_ruleset(anchor
, ruleset
);
830 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
831 ticket
!= rs
->rules
[rs_num
].inactive
.ticket
)
834 /* Swap rules, keep the old. */
836 old_rules
= rs
->rules
[rs_num
].active
.ptr
;
837 rs
->rules
[rs_num
].active
.ptr
=
838 rs
->rules
[rs_num
].inactive
.ptr
;
839 rs
->rules
[rs_num
].inactive
.ptr
= old_rules
;
840 rs
->rules
[rs_num
].active
.ticket
=
841 rs
->rules
[rs_num
].inactive
.ticket
;
842 pf_calc_skip_steps(rs
->rules
[rs_num
].active
.ptr
);
844 /* Purge the old rule list. */
845 while ((rule
= TAILQ_FIRST(old_rules
)) != NULL
)
846 pf_rm_rule(old_rules
, rule
);
847 rs
->rules
[rs_num
].inactive
.open
= 0;
848 pf_remove_if_empty_ruleset(rs
);
849 pf_update_anchor_rules();
855 pfioctl(struct dev_ioctl_args
*ap
)
857 u_long cmd
= ap
->a_cmd
;
858 caddr_t addr
= ap
->a_data
;
859 struct pf_pooladdr
*pa
= NULL
;
860 struct pf_pool
*pool
= NULL
;
863 /* XXX keep in sync with switch() below */
871 case DIOCSETSTATUSIF
:
878 case DIOCCLRRULECTRS
:
885 case DIOCGETRULESETS
:
899 case DIOCGETSRCNODES
:
900 case DIOCCLRSRCNODES
:
909 if (((struct pfioc_table
*)addr
)->pfrio_flags
&
911 break; /* dummy operation ok */
917 if (!(ap
->a_fflag
& FWRITE
))
933 case DIOCGETRULESETS
:
941 case DIOCGETSRCNODES
:
954 if (((struct pfioc_table
*)addr
)->pfrio_flags
&
956 break; /* dummy operation ok */
965 if (pf_status
.running
)
970 DPFPRINTF(PF_DEBUG_MISC
,
971 ("pf: pfil registration fail\n"));
974 pf_status
.running
= 1;
975 pf_status
.since
= time_second
;
976 if (pf_status
.stateid
== 0) {
977 pf_status
.stateid
= time_second
;
978 pf_status
.stateid
= pf_status
.stateid
<< 32;
980 DPFPRINTF(PF_DEBUG_MISC
, ("pf: started\n"));
985 if (!pf_status
.running
)
988 pf_status
.running
= 0;
991 pf_status
.running
= 1;
992 DPFPRINTF(PF_DEBUG_MISC
,
993 ("pf: pfil unregistration failed\n"));
995 pf_status
.since
= time_second
;
996 DPFPRINTF(PF_DEBUG_MISC
, ("pf: stopped\n"));
1000 case DIOCBEGINRULES
: {
1001 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1003 error
= pf_begin_rules(&pr
->ticket
, pf_get_ruleset_number(
1004 pr
->rule
.action
), pr
->anchor
, pr
->ruleset
);
1009 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1010 struct pf_ruleset
*ruleset
;
1011 struct pf_rule
*rule
, *tail
;
1012 struct pf_pooladdr
*pa
;
1015 ruleset
= pf_find_ruleset(pr
->anchor
, pr
->ruleset
);
1016 if (ruleset
== NULL
) {
1020 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1021 if (rs_num
>= PF_RULESET_MAX
) {
1025 if (pr
->rule
.anchorname
[0] && ruleset
!= &pf_main_ruleset
) {
1029 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
1033 if (pr
->ticket
!= ruleset
->rules
[rs_num
].inactive
.ticket
) {
1037 if (pr
->pool_ticket
!= ticket_pabuf
) {
1041 rule
= pool_get(&pf_rule_pl
, PR_NOWAIT
);
1046 bcopy(&pr
->rule
, rule
, sizeof(struct pf_rule
));
1047 rule
->anchor
= NULL
;
1049 TAILQ_INIT(&rule
->rpool
.list
);
1050 /* initialize refcounting */
1052 rule
->src_nodes
= 0;
1053 rule
->entries
.tqe_prev
= NULL
;
1055 if (rule
->af
== AF_INET
) {
1056 pool_put(&pf_rule_pl
, rule
);
1057 error
= EAFNOSUPPORT
;
1062 if (rule
->af
== AF_INET6
) {
1063 pool_put(&pf_rule_pl
, rule
);
1064 error
= EAFNOSUPPORT
;
1068 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
1071 rule
->nr
= tail
->nr
+ 1;
1074 if (rule
->ifname
[0]) {
1075 rule
->kif
= pfi_attach_rule(rule
->ifname
);
1076 if (rule
->kif
== NULL
) {
1077 pool_put(&pf_rule_pl
, rule
);
1085 if (rule
->qname
[0] != 0) {
1086 if ((rule
->qid
= pf_qname2qid(rule
->qname
)) == 0)
1088 else if (rule
->pqname
[0] != 0) {
1090 pf_qname2qid(rule
->pqname
)) == 0)
1093 rule
->pqid
= rule
->qid
;
1096 if (rule
->tagname
[0])
1097 if ((rule
->tag
= pf_tagname2tag(rule
->tagname
)) == 0)
1099 if (rule
->match_tagname
[0])
1100 if ((rule
->match_tag
=
1101 pf_tagname2tag(rule
->match_tagname
)) == 0)
1103 if (rule
->rt
&& !rule
->direction
)
1105 if (pfi_dynaddr_setup(&rule
->src
.addr
, rule
->af
))
1107 if (pfi_dynaddr_setup(&rule
->dst
.addr
, rule
->af
))
1109 if (pf_tbladdr_setup(ruleset
, &rule
->src
.addr
))
1111 if (pf_tbladdr_setup(ruleset
, &rule
->dst
.addr
))
1113 TAILQ_FOREACH(pa
, &pf_pabuf
, entries
)
1114 if (pf_tbladdr_setup(ruleset
, &pa
->addr
))
1117 pf_mv_pool(&pf_pabuf
, &rule
->rpool
.list
);
1118 if (((((rule
->action
== PF_NAT
) || (rule
->action
== PF_RDR
) ||
1119 (rule
->action
== PF_BINAT
)) && !rule
->anchorname
[0]) ||
1120 (rule
->rt
> PF_FASTROUTE
)) &&
1121 (TAILQ_FIRST(&rule
->rpool
.list
) == NULL
))
1125 pf_rm_rule(NULL
, rule
);
1128 rule
->rpool
.cur
= TAILQ_FIRST(&rule
->rpool
.list
);
1129 rule
->evaluations
= rule
->packets
= rule
->bytes
= 0;
1130 TAILQ_INSERT_TAIL(ruleset
->rules
[rs_num
].inactive
.ptr
,
1135 case DIOCCOMMITRULES
: {
1136 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1138 error
= pf_commit_rules(pr
->ticket
, pf_get_ruleset_number(
1139 pr
->rule
.action
), pr
->anchor
, pr
->ruleset
);
1143 case DIOCGETRULES
: {
1144 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1145 struct pf_ruleset
*ruleset
;
1146 struct pf_rule
*tail
;
1149 ruleset
= pf_find_ruleset(pr
->anchor
, pr
->ruleset
);
1150 if (ruleset
== NULL
) {
1154 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1155 if (rs_num
>= PF_RULESET_MAX
) {
1160 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
1163 pr
->nr
= tail
->nr
+ 1;
1166 pr
->ticket
= ruleset
->rules
[rs_num
].active
.ticket
;
1172 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1173 struct pf_ruleset
*ruleset
;
1174 struct pf_rule
*rule
;
1177 ruleset
= pf_find_ruleset(pr
->anchor
, pr
->ruleset
);
1178 if (ruleset
== NULL
) {
1182 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1183 if (rs_num
>= PF_RULESET_MAX
) {
1187 if (pr
->ticket
!= ruleset
->rules
[rs_num
].active
.ticket
) {
1192 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
1193 while ((rule
!= NULL
) && (rule
->nr
!= pr
->nr
))
1194 rule
= TAILQ_NEXT(rule
, entries
);
1200 bcopy(rule
, &pr
->rule
, sizeof(struct pf_rule
));
1201 pfi_dynaddr_copyout(&pr
->rule
.src
.addr
);
1202 pfi_dynaddr_copyout(&pr
->rule
.dst
.addr
);
1203 pf_tbladdr_copyout(&pr
->rule
.src
.addr
);
1204 pf_tbladdr_copyout(&pr
->rule
.dst
.addr
);
1205 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
1206 if (rule
->skip
[i
].ptr
== NULL
)
1207 pr
->rule
.skip
[i
].nr
= (uint32_t)(-1);
1209 pr
->rule
.skip
[i
].nr
=
1210 rule
->skip
[i
].ptr
->nr
;
1215 case DIOCCHANGERULE
: {
1216 struct pfioc_rule
*pcr
= (struct pfioc_rule
*)addr
;
1217 struct pf_ruleset
*ruleset
;
1218 struct pf_rule
*oldrule
= NULL
, *newrule
= NULL
;
1222 if (!(pcr
->action
== PF_CHANGE_REMOVE
||
1223 pcr
->action
== PF_CHANGE_GET_TICKET
) &&
1224 pcr
->pool_ticket
!= ticket_pabuf
) {
1229 if (pcr
->action
< PF_CHANGE_ADD_HEAD
||
1230 pcr
->action
> PF_CHANGE_GET_TICKET
) {
1234 ruleset
= pf_find_ruleset(pcr
->anchor
, pcr
->ruleset
);
1235 if (ruleset
== NULL
) {
1239 rs_num
= pf_get_ruleset_number(pcr
->rule
.action
);
1240 if (rs_num
>= PF_RULESET_MAX
) {
1245 if (pcr
->action
== PF_CHANGE_GET_TICKET
) {
1246 pcr
->ticket
= ++ruleset
->rules
[rs_num
].active
.ticket
;
1250 ruleset
->rules
[rs_num
].active
.ticket
) {
1254 if (pcr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
1260 if (pcr
->action
!= PF_CHANGE_REMOVE
) {
1261 newrule
= pool_get(&pf_rule_pl
, PR_NOWAIT
);
1262 if (newrule
== NULL
) {
1266 bcopy(&pcr
->rule
, newrule
, sizeof(struct pf_rule
));
1267 TAILQ_INIT(&newrule
->rpool
.list
);
1268 /* initialize refcounting */
1269 newrule
->states
= 0;
1270 newrule
->entries
.tqe_prev
= NULL
;
1272 if (newrule
->af
== AF_INET
) {
1273 pool_put(&pf_rule_pl
, newrule
);
1274 error
= EAFNOSUPPORT
;
1279 if (newrule
->af
== AF_INET6
) {
1280 pool_put(&pf_rule_pl
, newrule
);
1281 error
= EAFNOSUPPORT
;
1285 if (newrule
->ifname
[0]) {
1286 newrule
->kif
= pfi_attach_rule(newrule
->ifname
);
1287 if (newrule
->kif
== NULL
) {
1288 pool_put(&pf_rule_pl
, newrule
);
1293 newrule
->kif
= NULL
;
1297 if (newrule
->qname
[0] != 0) {
1299 pf_qname2qid(newrule
->qname
)) == 0)
1301 else if (newrule
->pqname
[0] != 0) {
1302 if ((newrule
->pqid
=
1303 pf_qname2qid(newrule
->pqname
)) == 0)
1306 newrule
->pqid
= newrule
->qid
;
1309 if (newrule
->tagname
[0])
1311 pf_tagname2tag(newrule
->tagname
)) == 0)
1313 if (newrule
->match_tagname
[0])
1314 if ((newrule
->match_tag
= pf_tagname2tag(
1315 newrule
->match_tagname
)) == 0)
1318 if (newrule
->rt
&& !newrule
->direction
)
1320 if (pfi_dynaddr_setup(&newrule
->src
.addr
, newrule
->af
))
1322 if (pfi_dynaddr_setup(&newrule
->dst
.addr
, newrule
->af
))
1324 if (pf_tbladdr_setup(ruleset
, &newrule
->src
.addr
))
1326 if (pf_tbladdr_setup(ruleset
, &newrule
->dst
.addr
))
1329 pf_mv_pool(&pf_pabuf
, &newrule
->rpool
.list
);
1330 if (((((newrule
->action
== PF_NAT
) ||
1331 (newrule
->action
== PF_RDR
) ||
1332 (newrule
->action
== PF_BINAT
) ||
1333 (newrule
->rt
> PF_FASTROUTE
)) &&
1334 !newrule
->anchorname
[0])) &&
1335 (TAILQ_FIRST(&newrule
->rpool
.list
) == NULL
))
1339 pf_rm_rule(NULL
, newrule
);
1342 newrule
->rpool
.cur
= TAILQ_FIRST(&newrule
->rpool
.list
);
1343 newrule
->evaluations
= newrule
->packets
= 0;
1346 pf_empty_pool(&pf_pabuf
);
1350 if (pcr
->action
== PF_CHANGE_ADD_HEAD
)
1351 oldrule
= TAILQ_FIRST(
1352 ruleset
->rules
[rs_num
].active
.ptr
);
1353 else if (pcr
->action
== PF_CHANGE_ADD_TAIL
)
1354 oldrule
= TAILQ_LAST(
1355 ruleset
->rules
[rs_num
].active
.ptr
, pf_rulequeue
);
1357 oldrule
= TAILQ_FIRST(
1358 ruleset
->rules
[rs_num
].active
.ptr
);
1359 while ((oldrule
!= NULL
) && (oldrule
->nr
!= pcr
->nr
))
1360 oldrule
= TAILQ_NEXT(oldrule
, entries
);
1361 if (oldrule
== NULL
) {
1362 pf_rm_rule(NULL
, newrule
);
1369 if (pcr
->action
== PF_CHANGE_REMOVE
)
1370 pf_rm_rule(ruleset
->rules
[rs_num
].active
.ptr
, oldrule
);
1372 if (oldrule
== NULL
)
1374 ruleset
->rules
[rs_num
].active
.ptr
,
1376 else if (pcr
->action
== PF_CHANGE_ADD_HEAD
||
1377 pcr
->action
== PF_CHANGE_ADD_BEFORE
)
1378 TAILQ_INSERT_BEFORE(oldrule
, newrule
, entries
);
1381 ruleset
->rules
[rs_num
].active
.ptr
,
1382 oldrule
, newrule
, entries
);
1386 TAILQ_FOREACH(oldrule
,
1387 ruleset
->rules
[rs_num
].active
.ptr
, entries
)
1390 pf_calc_skip_steps(ruleset
->rules
[rs_num
].active
.ptr
);
1391 pf_remove_if_empty_ruleset(ruleset
);
1392 pf_update_anchor_rules();
1394 ruleset
->rules
[rs_num
].active
.ticket
++;
1399 case DIOCCLRSTATES
: {
1400 struct pf_state
*state
;
1401 struct pfioc_state_kill
*psk
= (struct pfioc_state_kill
*)addr
;
1405 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
1406 if (!psk
->psk_ifname
[0] || !strcmp(psk
->psk_ifname
,
1407 state
->u
.s
.kif
->pfik_name
)) {
1408 state
->timeout
= PFTM_PURGE
;
1410 /* don't send out individual delete messages */
1411 state
->sync_flags
= PFSTATE_NOSYNC
;
1416 pf_purge_expired_states();
1417 pf_status
.states
= 0;
1418 psk
->psk_af
= killed
;
1420 pfsync_clear_states(pf_status
.hostid
, psk
->psk_ifname
);
1426 case DIOCKILLSTATES
: {
1427 struct pf_state
*state
;
1428 struct pfioc_state_kill
*psk
= (struct pfioc_state_kill
*)addr
;
1432 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
1433 if ((!psk
->psk_af
|| state
->af
== psk
->psk_af
)
1434 && (!psk
->psk_proto
|| psk
->psk_proto
==
1436 PF_MATCHA(psk
->psk_src
.not,
1437 &psk
->psk_src
.addr
.v
.a
.addr
,
1438 &psk
->psk_src
.addr
.v
.a
.mask
,
1439 &state
->lan
.addr
, state
->af
) &&
1440 PF_MATCHA(psk
->psk_dst
.not,
1441 &psk
->psk_dst
.addr
.v
.a
.addr
,
1442 &psk
->psk_dst
.addr
.v
.a
.mask
,
1443 &state
->ext
.addr
, state
->af
) &&
1444 (psk
->psk_src
.port_op
== 0 ||
1445 pf_match_port(psk
->psk_src
.port_op
,
1446 psk
->psk_src
.port
[0], psk
->psk_src
.port
[1],
1447 state
->lan
.port
)) &&
1448 (psk
->psk_dst
.port_op
== 0 ||
1449 pf_match_port(psk
->psk_dst
.port_op
,
1450 psk
->psk_dst
.port
[0], psk
->psk_dst
.port
[1],
1451 state
->ext
.port
)) &&
1452 (!psk
->psk_ifname
[0] || !strcmp(psk
->psk_ifname
,
1453 state
->u
.s
.kif
->pfik_name
))) {
1454 state
->timeout
= PFTM_PURGE
;
1458 pf_purge_expired_states();
1460 psk
->psk_af
= killed
;
1464 case DIOCADDSTATE
: {
1465 struct pfioc_state
*ps
= (struct pfioc_state
*)addr
;
1466 struct pf_state
*state
;
1467 struct pfi_kif
*kif
;
1469 if (ps
->state
.timeout
>= PFTM_MAX
&&
1470 ps
->state
.timeout
!= PFTM_UNTIL_PACKET
) {
1474 state
= pool_get(&pf_state_pl
, PR_NOWAIT
);
1475 if (state
== NULL
) {
1480 kif
= pfi_lookup_create(ps
->state
.u
.ifname
);
1482 pool_put(&pf_state_pl
, state
);
1487 bcopy(&ps
->state
, state
, sizeof(struct pf_state
));
1488 bzero(&state
->u
, sizeof(state
->u
));
1489 state
->rule
.ptr
= &pf_default_rule
;
1490 state
->nat_rule
.ptr
= NULL
;
1491 state
->anchor
.ptr
= NULL
;
1492 state
->rt_kif
= NULL
;
1493 state
->creation
= time_second
;
1494 state
->pfsync_time
= 0;
1495 state
->packets
[0] = state
->packets
[1] = 0;
1496 state
->bytes
[0] = state
->bytes
[1] = 0;
1497 state
->hash
= pf_state_hash(state
);
1499 if (pf_insert_state(kif
, state
)) {
1500 pfi_maybe_destroy(kif
);
1501 pool_put(&pf_state_pl
, state
);
1508 case DIOCGETSTATE
: {
1509 struct pfioc_state
*ps
= (struct pfioc_state
*)addr
;
1510 struct pf_state
*state
;
1515 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
1520 if (state
== NULL
) {
1525 bcopy(state
, &ps
->state
, sizeof(struct pf_state
));
1526 ps
->state
.rule
.nr
= state
->rule
.ptr
->nr
;
1527 ps
->state
.nat_rule
.nr
= (state
->nat_rule
.ptr
== NULL
) ?
1528 (uint32_t)(-1) : state
->nat_rule
.ptr
->nr
;
1529 ps
->state
.anchor
.nr
= (state
->anchor
.ptr
== NULL
) ?
1530 (uint32_t)(-1) : state
->anchor
.ptr
->nr
;
1532 ps
->state
.expire
= pf_state_expires(state
);
1533 if (ps
->state
.expire
> time_second
)
1534 ps
->state
.expire
-= time_second
;
1536 ps
->state
.expire
= 0;
1540 case DIOCGETSTATES
: {
1541 struct pfioc_states
*ps
= (struct pfioc_states
*)addr
;
1542 struct pf_state
*state
;
1543 struct pf_state
*p
, pstore
;
1544 struct pfi_kif
*kif
;
1546 int space
= ps
->ps_len
;
1550 TAILQ_FOREACH(kif
, &pfi_statehead
, pfik_w_states
)
1551 nr
+= kif
->pfik_states
;
1553 ps
->ps_len
= sizeof(struct pf_state
) * nr
;
1559 TAILQ_FOREACH(kif
, &pfi_statehead
, pfik_w_states
)
1560 RB_FOREACH(state
, pf_state_tree_ext_gwy
,
1561 &kif
->pfik_ext_gwy
) {
1562 int secs
= time_second
;
1564 if ((nr
+1) * sizeof(*p
) > (unsigned)ps
->ps_len
)
1567 bcopy(state
, &pstore
, sizeof(pstore
));
1568 strlcpy(pstore
.u
.ifname
, kif
->pfik_name
,
1569 sizeof(pstore
.u
.ifname
));
1570 pstore
.rule
.nr
= state
->rule
.ptr
->nr
;
1571 pstore
.nat_rule
.nr
= (state
->nat_rule
.ptr
==
1572 NULL
) ? (uint32_t)(-1)
1573 : state
->nat_rule
.ptr
->nr
;
1574 pstore
.anchor
.nr
= (state
->anchor
.ptr
==
1575 NULL
) ? (uint32_t)(-1)
1576 : state
->anchor
.ptr
->nr
;
1577 pstore
.creation
= secs
- pstore
.creation
;
1578 pstore
.expire
= pf_state_expires(state
);
1579 if (pstore
.expire
> secs
)
1580 pstore
.expire
-= secs
;
1583 error
= copyout(&pstore
, p
, sizeof(*p
));
1591 ps
->ps_len
= sizeof(struct pf_state
) * nr
;
1596 case DIOCGETSTATUS
: {
1597 struct pf_status
*s
= (struct pf_status
*)addr
;
1598 bcopy(&pf_status
, s
, sizeof(struct pf_status
));
1599 pfi_fill_oldstatus(s
);
1603 case DIOCSETSTATUSIF
: {
1604 struct pfioc_if
*pi
= (struct pfioc_if
*)addr
;
1606 if (pi
->ifname
[0] == 0) {
1607 bzero(pf_status
.ifname
, IFNAMSIZ
);
1610 if (ifunit(pi
->ifname
) == NULL
) {
1614 strlcpy(pf_status
.ifname
, pi
->ifname
, IFNAMSIZ
);
1618 case DIOCCLRSTATUS
: {
1619 bzero(pf_status
.counters
, sizeof(pf_status
.counters
));
1620 bzero(pf_status
.fcounters
, sizeof(pf_status
.fcounters
));
1621 bzero(pf_status
.scounters
, sizeof(pf_status
.scounters
));
1622 if (*pf_status
.ifname
)
1623 pfi_clr_istats(pf_status
.ifname
, NULL
,
1629 struct pfioc_natlook
*pnl
= (struct pfioc_natlook
*)addr
;
1630 struct pf_state
*state
;
1631 struct pf_state key
;
1632 int m
= 0, direction
= pnl
->direction
;
1635 key
.proto
= pnl
->proto
;
1638 PF_AZERO(&pnl
->saddr
, pnl
->af
) ||
1639 PF_AZERO(&pnl
->daddr
, pnl
->af
) ||
1640 !pnl
->dport
|| !pnl
->sport
)
1646 * userland gives us source and dest of connection,
1647 * reverse the lookup so we ask for what happens with
1648 * the return traffic, enabling us to find it in the
1651 if (direction
== PF_IN
) {
1652 PF_ACPY(&key
.ext
.addr
, &pnl
->daddr
, pnl
->af
);
1653 key
.ext
.port
= pnl
->dport
;
1654 PF_ACPY(&key
.gwy
.addr
, &pnl
->saddr
, pnl
->af
);
1655 key
.gwy
.port
= pnl
->sport
;
1656 state
= pf_find_state_all(&key
, PF_EXT_GWY
, &m
);
1658 PF_ACPY(&key
.lan
.addr
, &pnl
->daddr
, pnl
->af
);
1659 key
.lan
.port
= pnl
->dport
;
1660 PF_ACPY(&key
.ext
.addr
, &pnl
->saddr
, pnl
->af
);
1661 key
.ext
.port
= pnl
->sport
;
1662 state
= pf_find_state_all(&key
, PF_LAN_EXT
, &m
);
1665 error
= E2BIG
; /* more than one state */
1666 else if (state
!= NULL
) {
1667 if (direction
== PF_IN
) {
1668 PF_ACPY(&pnl
->rsaddr
, &state
->lan
.addr
,
1670 pnl
->rsport
= state
->lan
.port
;
1671 PF_ACPY(&pnl
->rdaddr
, &pnl
->daddr
,
1673 pnl
->rdport
= pnl
->dport
;
1675 PF_ACPY(&pnl
->rdaddr
, &state
->gwy
.addr
,
1677 pnl
->rdport
= state
->gwy
.port
;
1678 PF_ACPY(&pnl
->rsaddr
, &pnl
->saddr
,
1680 pnl
->rsport
= pnl
->sport
;
1689 case DIOCSETTIMEOUT
: {
1690 struct pfioc_tm
*pt
= (struct pfioc_tm
*)addr
;
1693 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
||
1698 old
= pf_default_rule
.timeout
[pt
->timeout
];
1699 pf_default_rule
.timeout
[pt
->timeout
] = pt
->seconds
;
1704 case DIOCGETTIMEOUT
: {
1705 struct pfioc_tm
*pt
= (struct pfioc_tm
*)addr
;
1707 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
) {
1711 pt
->seconds
= pf_default_rule
.timeout
[pt
->timeout
];
1715 case DIOCGETLIMIT
: {
1716 struct pfioc_limit
*pl
= (struct pfioc_limit
*)addr
;
1718 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
) {
1722 pl
->limit
= pf_pool_limits
[pl
->index
].limit
;
1726 case DIOCSETLIMIT
: {
1727 struct pfioc_limit
*pl
= (struct pfioc_limit
*)addr
;
1730 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
||
1731 pf_pool_limits
[pl
->index
].pp
== NULL
) {
1736 /* XXX Get an API to set limits on the zone/pool */
1737 old_limit
= pf_pool_limits
[pl
->index
].limit
;
1738 pf_pool_limits
[pl
->index
].limit
= pl
->limit
;
1739 pl
->limit
= old_limit
;
1743 case DIOCSETDEBUG
: {
1744 u_int32_t
*level
= (u_int32_t
*)addr
;
1746 pf_status
.debug
= *level
;
1750 case DIOCCLRRULECTRS
: {
1751 struct pf_ruleset
*ruleset
= &pf_main_ruleset
;
1752 struct pf_rule
*rule
;
1756 ruleset
->rules
[PF_RULESET_FILTER
].active
.ptr
, entries
)
1757 rule
->evaluations
= rule
->packets
=
1763 case DIOCGIFSPEED
: {
1764 struct pf_ifspeed
*psp
= (struct pf_ifspeed
*)addr
;
1765 struct pf_ifspeed ps
;
1768 if (psp
->ifname
[0] != 0) {
1769 /* Can we completely trust user-land? */
1770 strlcpy(ps
.ifname
, psp
->ifname
, IFNAMSIZ
);
1771 ifp
= ifunit(ps
.ifname
);
1773 psp
->baudrate
= ifp
->if_baudrate
;
1781 case DIOCSTARTALTQ
: {
1782 struct pf_altq
*altq
;
1784 struct tb_profile tb
;
1786 /* enable all altq interfaces on active list */
1788 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
1789 if (altq
->qname
[0] == 0) {
1790 if ((ifp
= ifunit(altq
->ifname
)) == NULL
) {
1794 if (ifp
->if_snd
.altq_type
!= ALTQT_NONE
)
1795 error
= altq_enable(&ifp
->if_snd
);
1798 /* set tokenbucket regulator */
1799 tb
.rate
= altq
->ifbandwidth
;
1800 tb
.depth
= altq
->tbrsize
;
1801 error
= tbr_set(&ifp
->if_snd
, &tb
);
1807 DPFPRINTF(PF_DEBUG_MISC
, ("altq: started\n"));
1811 case DIOCSTOPALTQ
: {
1812 struct pf_altq
*altq
;
1814 struct tb_profile tb
;
1817 /* disable all altq interfaces on active list */
1819 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
1820 if (altq
->qname
[0] == 0) {
1821 if ((ifp
= ifunit(altq
->ifname
)) == NULL
) {
1825 if (ifp
->if_snd
.altq_type
!= ALTQT_NONE
) {
1826 err
= altq_disable(&ifp
->if_snd
);
1827 if (err
!= 0 && error
== 0)
1830 /* clear tokenbucket regulator */
1832 err
= tbr_set(&ifp
->if_snd
, &tb
);
1833 if (err
!= 0 && error
== 0)
1838 DPFPRINTF(PF_DEBUG_MISC
, ("altq: stopped\n"));
1842 case DIOCBEGINALTQS
: {
1843 u_int32_t
*ticket
= (u_int32_t
*)addr
;
1845 error
= pf_begin_altq(ticket
);
1850 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
1851 struct pf_altq
*altq
, *a
;
1853 if (pa
->ticket
!= ticket_altqs_inactive
) {
1857 altq
= pool_get(&pf_altq_pl
, PR_NOWAIT
);
1862 bcopy(&pa
->altq
, altq
, sizeof(struct pf_altq
));
1865 * if this is for a queue, find the discipline and
1866 * copy the necessary fields
1868 if (altq
->qname
[0] != 0) {
1869 if ((altq
->qid
= pf_qname2qid(altq
->qname
)) == 0) {
1871 pool_put(&pf_altq_pl
, altq
);
1874 TAILQ_FOREACH(a
, pf_altqs_inactive
, entries
) {
1875 if (strncmp(a
->ifname
, altq
->ifname
,
1876 IFNAMSIZ
) == 0 && a
->qname
[0] == 0) {
1877 altq
->altq_disc
= a
->altq_disc
;
1883 error
= altq_add(altq
);
1885 pool_put(&pf_altq_pl
, altq
);
1889 TAILQ_INSERT_TAIL(pf_altqs_inactive
, altq
, entries
);
1890 bcopy(altq
, &pa
->altq
, sizeof(struct pf_altq
));
1894 case DIOCCOMMITALTQS
: {
1895 u_int32_t ticket
= *(u_int32_t
*)addr
;
1897 error
= pf_commit_altq(ticket
);
1901 case DIOCGETALTQS
: {
1902 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
1903 struct pf_altq
*altq
;
1907 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
)
1909 pa
->ticket
= ticket_altqs_active
;
1915 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
1916 struct pf_altq
*altq
;
1919 if (pa
->ticket
!= ticket_altqs_active
) {
1925 altq
= TAILQ_FIRST(pf_altqs_active
);
1926 while ((altq
!= NULL
) && (nr
< pa
->nr
)) {
1927 altq
= TAILQ_NEXT(altq
, entries
);
1935 bcopy(altq
, &pa
->altq
, sizeof(struct pf_altq
));
1940 case DIOCCHANGEALTQ
:
1941 /* CHANGEALTQ not supported yet! */
1945 case DIOCGETQSTATS
: {
1946 struct pfioc_qstats
*pq
= (struct pfioc_qstats
*)addr
;
1947 struct pf_altq
*altq
;
1951 if (pq
->ticket
!= ticket_altqs_active
) {
1955 nbytes
= pq
->nbytes
;
1958 altq
= TAILQ_FIRST(pf_altqs_active
);
1959 while ((altq
!= NULL
) && (nr
< pq
->nr
)) {
1960 altq
= TAILQ_NEXT(altq
, entries
);
1968 error
= altq_getqstats(altq
, pq
->buf
, &nbytes
);
1971 pq
->scheduler
= altq
->scheduler
;
1972 pq
->nbytes
= nbytes
;
1978 case DIOCBEGINADDRS
: {
1979 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
1981 pf_empty_pool(&pf_pabuf
);
1982 pp
->ticket
= ++ticket_pabuf
;
1987 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
1990 if (pp
->af
== AF_INET
) {
1991 error
= EAFNOSUPPORT
;
1996 if (pp
->af
== AF_INET6
) {
1997 error
= EAFNOSUPPORT
;
2001 if (pp
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
2002 pp
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
2003 pp
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
2007 pa
= pool_get(&pf_pooladdr_pl
, PR_NOWAIT
);
2012 bcopy(&pp
->addr
, pa
, sizeof(struct pf_pooladdr
));
2013 if (pa
->ifname
[0]) {
2014 pa
->kif
= pfi_attach_rule(pa
->ifname
);
2015 if (pa
->kif
== NULL
) {
2016 pool_put(&pf_pooladdr_pl
, pa
);
2021 if (pfi_dynaddr_setup(&pa
->addr
, pp
->af
)) {
2022 pfi_dynaddr_remove(&pa
->addr
);
2023 pfi_detach_rule(pa
->kif
);
2024 pool_put(&pf_pooladdr_pl
, pa
);
2028 TAILQ_INSERT_TAIL(&pf_pabuf
, pa
, entries
);
2032 case DIOCGETADDRS
: {
2033 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
2037 pool
= pf_get_pool(pp
->anchor
, pp
->ruleset
, pp
->ticket
,
2038 pp
->r_action
, pp
->r_num
, 0, 1, 0);
2044 TAILQ_FOREACH(pa
, &pool
->list
, entries
)
2051 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
2055 pool
= pf_get_pool(pp
->anchor
, pp
->ruleset
, pp
->ticket
,
2056 pp
->r_action
, pp
->r_num
, 0, 1, 1);
2062 pa
= TAILQ_FIRST(&pool
->list
);
2063 while ((pa
!= NULL
) && (nr
< pp
->nr
)) {
2064 pa
= TAILQ_NEXT(pa
, entries
);
2072 bcopy(pa
, &pp
->addr
, sizeof(struct pf_pooladdr
));
2073 pfi_dynaddr_copyout(&pp
->addr
.addr
);
2074 pf_tbladdr_copyout(&pp
->addr
.addr
);
2079 case DIOCCHANGEADDR
: {
2080 struct pfioc_pooladdr
*pca
= (struct pfioc_pooladdr
*)addr
;
2081 struct pf_pooladdr
*oldpa
= NULL
, *newpa
= NULL
;
2082 struct pf_ruleset
*ruleset
;
2084 if (pca
->action
< PF_CHANGE_ADD_HEAD
||
2085 pca
->action
> PF_CHANGE_REMOVE
) {
2089 if (pca
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
2090 pca
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
2091 pca
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
2096 ruleset
= pf_find_ruleset(pca
->anchor
, pca
->ruleset
);
2097 if (ruleset
== NULL
) {
2101 pool
= pf_get_pool(pca
->anchor
, pca
->ruleset
, pca
->ticket
,
2102 pca
->r_action
, pca
->r_num
, pca
->r_last
, 1, 1);
2107 if (pca
->action
!= PF_CHANGE_REMOVE
) {
2108 newpa
= pool_get(&pf_pooladdr_pl
, PR_NOWAIT
);
2109 if (newpa
== NULL
) {
2113 bcopy(&pca
->addr
, newpa
, sizeof(struct pf_pooladdr
));
2115 if (pca
->af
== AF_INET
) {
2116 pool_put(&pf_pooladdr_pl
, newpa
);
2117 error
= EAFNOSUPPORT
;
2122 if (pca
->af
== AF_INET6
) {
2123 pool_put(&pf_pooladdr_pl
, newpa
);
2124 error
= EAFNOSUPPORT
;
2128 if (newpa
->ifname
[0]) {
2129 newpa
->kif
= pfi_attach_rule(newpa
->ifname
);
2130 if (newpa
->kif
== NULL
) {
2131 pool_put(&pf_pooladdr_pl
, newpa
);
2137 if (pfi_dynaddr_setup(&newpa
->addr
, pca
->af
) ||
2138 pf_tbladdr_setup(ruleset
, &newpa
->addr
)) {
2139 pfi_dynaddr_remove(&newpa
->addr
);
2140 pfi_detach_rule(newpa
->kif
);
2141 pool_put(&pf_pooladdr_pl
, newpa
);
2149 if (pca
->action
== PF_CHANGE_ADD_HEAD
)
2150 oldpa
= TAILQ_FIRST(&pool
->list
);
2151 else if (pca
->action
== PF_CHANGE_ADD_TAIL
)
2152 oldpa
= TAILQ_LAST(&pool
->list
, pf_palist
);
2156 oldpa
= TAILQ_FIRST(&pool
->list
);
2157 while ((oldpa
!= NULL
) && (i
< pca
->nr
)) {
2158 oldpa
= TAILQ_NEXT(oldpa
, entries
);
2161 if (oldpa
== NULL
) {
2168 if (pca
->action
== PF_CHANGE_REMOVE
) {
2169 TAILQ_REMOVE(&pool
->list
, oldpa
, entries
);
2170 pfi_dynaddr_remove(&oldpa
->addr
);
2171 pf_tbladdr_remove(&oldpa
->addr
);
2172 pfi_detach_rule(oldpa
->kif
);
2173 pool_put(&pf_pooladdr_pl
, oldpa
);
2176 TAILQ_INSERT_TAIL(&pool
->list
, newpa
, entries
);
2177 else if (pca
->action
== PF_CHANGE_ADD_HEAD
||
2178 pca
->action
== PF_CHANGE_ADD_BEFORE
)
2179 TAILQ_INSERT_BEFORE(oldpa
, newpa
, entries
);
2181 TAILQ_INSERT_AFTER(&pool
->list
, oldpa
,
2185 pool
->cur
= TAILQ_FIRST(&pool
->list
);
2186 PF_ACPY(&pool
->counter
, &pool
->cur
->addr
.v
.a
.addr
,
2192 case DIOCGETANCHORS
: {
2193 struct pfioc_anchor
*pa
= (struct pfioc_anchor
*)addr
;
2194 struct pf_anchor
*anchor
;
2197 TAILQ_FOREACH(anchor
, &pf_anchors
, entries
)
2202 case DIOCGETANCHOR
: {
2203 struct pfioc_anchor
*pa
= (struct pfioc_anchor
*)addr
;
2204 struct pf_anchor
*anchor
;
2207 anchor
= TAILQ_FIRST(&pf_anchors
);
2208 while (anchor
!= NULL
&& nr
< pa
->nr
) {
2209 anchor
= TAILQ_NEXT(anchor
, entries
);
2215 bcopy(anchor
->name
, pa
->name
, sizeof(pa
->name
));
2219 case DIOCGETRULESETS
: {
2220 struct pfioc_ruleset
*pr
= (struct pfioc_ruleset
*)addr
;
2221 struct pf_anchor
*anchor
;
2222 struct pf_ruleset
*ruleset
;
2224 pr
->anchor
[PF_ANCHOR_NAME_SIZE
-1] = 0;
2225 if ((anchor
= pf_find_anchor(pr
->anchor
)) == NULL
) {
2230 TAILQ_FOREACH(ruleset
, &anchor
->rulesets
, entries
)
2235 case DIOCGETRULESET
: {
2236 struct pfioc_ruleset
*pr
= (struct pfioc_ruleset
*)addr
;
2237 struct pf_anchor
*anchor
;
2238 struct pf_ruleset
*ruleset
;
2241 if ((anchor
= pf_find_anchor(pr
->anchor
)) == NULL
) {
2245 ruleset
= TAILQ_FIRST(&anchor
->rulesets
);
2246 while (ruleset
!= NULL
&& nr
< pr
->nr
) {
2247 ruleset
= TAILQ_NEXT(ruleset
, entries
);
2250 if (ruleset
== NULL
)
2253 bcopy(ruleset
->name
, pr
->name
, sizeof(pr
->name
));
2257 case DIOCRCLRTABLES
: {
2258 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2260 if (io
->pfrio_esize
!= 0) {
2264 error
= pfr_clr_tables(&io
->pfrio_table
, &io
->pfrio_ndel
,
2265 io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2269 case DIOCRADDTABLES
: {
2270 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2272 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2276 error
= pfr_add_tables(io
->pfrio_buffer
, io
->pfrio_size
,
2277 &io
->pfrio_nadd
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2281 case DIOCRDELTABLES
: {
2282 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2284 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2288 error
= pfr_del_tables(io
->pfrio_buffer
, io
->pfrio_size
,
2289 &io
->pfrio_ndel
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2293 case DIOCRGETTABLES
: {
2294 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2296 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2300 error
= pfr_get_tables(&io
->pfrio_table
, io
->pfrio_buffer
,
2301 &io
->pfrio_size
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2305 case DIOCRGETTSTATS
: {
2306 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2308 if (io
->pfrio_esize
!= sizeof(struct pfr_tstats
)) {
2312 error
= pfr_get_tstats(&io
->pfrio_table
, io
->pfrio_buffer
,
2313 &io
->pfrio_size
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2317 case DIOCRCLRTSTATS
: {
2318 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2320 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2324 error
= pfr_clr_tstats(io
->pfrio_buffer
, io
->pfrio_size
,
2325 &io
->pfrio_nzero
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2329 case DIOCRSETTFLAGS
: {
2330 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2332 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2336 error
= pfr_set_tflags(io
->pfrio_buffer
, io
->pfrio_size
,
2337 io
->pfrio_setflag
, io
->pfrio_clrflag
, &io
->pfrio_nchange
,
2338 &io
->pfrio_ndel
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2342 case DIOCRCLRADDRS
: {
2343 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2345 if (io
->pfrio_esize
!= 0) {
2349 error
= pfr_clr_addrs(&io
->pfrio_table
, &io
->pfrio_ndel
,
2350 io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2354 case DIOCRADDADDRS
: {
2355 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2357 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2361 error
= pfr_add_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2362 io
->pfrio_size
, &io
->pfrio_nadd
, io
->pfrio_flags
|
2363 PFR_FLAG_USERIOCTL
);
2367 case DIOCRDELADDRS
: {
2368 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2370 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2374 error
= pfr_del_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2375 io
->pfrio_size
, &io
->pfrio_ndel
, io
->pfrio_flags
|
2376 PFR_FLAG_USERIOCTL
);
2380 case DIOCRSETADDRS
: {
2381 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2383 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2387 error
= pfr_set_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2388 io
->pfrio_size
, &io
->pfrio_size2
, &io
->pfrio_nadd
,
2389 &io
->pfrio_ndel
, &io
->pfrio_nchange
, io
->pfrio_flags
|
2390 PFR_FLAG_USERIOCTL
);
2394 case DIOCRGETADDRS
: {
2395 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2397 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2401 error
= pfr_get_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2402 &io
->pfrio_size
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2406 case DIOCRGETASTATS
: {
2407 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2409 if (io
->pfrio_esize
!= sizeof(struct pfr_astats
)) {
2413 error
= pfr_get_astats(&io
->pfrio_table
, io
->pfrio_buffer
,
2414 &io
->pfrio_size
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2418 case DIOCRCLRASTATS
: {
2419 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2421 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2425 error
= pfr_clr_astats(&io
->pfrio_table
, io
->pfrio_buffer
,
2426 io
->pfrio_size
, &io
->pfrio_nzero
, io
->pfrio_flags
|
2427 PFR_FLAG_USERIOCTL
);
2431 case DIOCRTSTADDRS
: {
2432 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2434 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2438 error
= pfr_tst_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2439 io
->pfrio_size
, &io
->pfrio_nmatch
, io
->pfrio_flags
|
2440 PFR_FLAG_USERIOCTL
);
2444 case DIOCRINABEGIN
: {
2445 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2447 if (io
->pfrio_esize
!= 0) {
2451 error
= pfr_ina_begin(&io
->pfrio_table
, &io
->pfrio_ticket
,
2452 &io
->pfrio_ndel
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2456 case DIOCRINACOMMIT
: {
2457 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2459 if (io
->pfrio_esize
!= 0) {
2463 error
= pfr_ina_commit(&io
->pfrio_table
, io
->pfrio_ticket
,
2464 &io
->pfrio_nadd
, &io
->pfrio_nchange
, io
->pfrio_flags
|
2465 PFR_FLAG_USERIOCTL
);
2469 case DIOCRINADEFINE
: {
2470 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2472 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2476 error
= pfr_ina_define(&io
->pfrio_table
, io
->pfrio_buffer
,
2477 io
->pfrio_size
, &io
->pfrio_nadd
, &io
->pfrio_naddr
,
2478 io
->pfrio_ticket
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2483 struct pf_osfp_ioctl
*io
= (struct pf_osfp_ioctl
*)addr
;
2485 error
= pf_osfp_add(io
);
2491 struct pf_osfp_ioctl
*io
= (struct pf_osfp_ioctl
*)addr
;
2493 error
= pf_osfp_get(io
);
2499 struct pfioc_trans
*io
= (struct pfioc_trans
*)addr
;
2500 struct pfioc_trans_e ioe
;
2501 struct pfr_table table
;
2504 if (io
->esize
!= sizeof(ioe
)) {
2508 for (i
= 0; i
< io
->size
; i
++) {
2509 if (copyin(io
->array
+i
, &ioe
, sizeof(ioe
))) {
2513 switch (ioe
.rs_num
) {
2515 case PF_RULESET_ALTQ
:
2516 if (ioe
.anchor
[0] || ioe
.ruleset
[0]) {
2520 if ((error
= pf_begin_altq(&ioe
.ticket
)))
2524 case PF_RULESET_TABLE
:
2525 bzero(&table
, sizeof(table
));
2526 strlcpy(table
.pfrt_anchor
, ioe
.anchor
,
2527 sizeof(table
.pfrt_anchor
));
2528 strlcpy(table
.pfrt_ruleset
, ioe
.ruleset
,
2529 sizeof(table
.pfrt_ruleset
));
2530 if ((error
= pfr_ina_begin(&table
,
2531 &ioe
.ticket
, NULL
, 0)))
2535 if ((error
= pf_begin_rules(&ioe
.ticket
,
2536 ioe
.rs_num
, ioe
.anchor
, ioe
.ruleset
)))
2540 if (copyout(&ioe
, io
->array
+i
, sizeof(io
->array
[i
]))) {
2548 case DIOCXROLLBACK
: {
2549 struct pfioc_trans
*io
= (struct pfioc_trans
*)addr
;
2550 struct pfioc_trans_e ioe
;
2551 struct pfr_table table
;
2554 if (io
->esize
!= sizeof(ioe
)) {
2558 for (i
= 0; i
< io
->size
; i
++) {
2559 if (copyin(io
->array
+i
, &ioe
, sizeof(ioe
))) {
2563 switch (ioe
.rs_num
) {
2565 case PF_RULESET_ALTQ
:
2566 if (ioe
.anchor
[0] || ioe
.ruleset
[0]) {
2570 if ((error
= pf_rollback_altq(ioe
.ticket
)))
2571 goto fail
; /* really bad */
2574 case PF_RULESET_TABLE
:
2575 bzero(&table
, sizeof(table
));
2576 strlcpy(table
.pfrt_anchor
, ioe
.anchor
,
2577 sizeof(table
.pfrt_anchor
));
2578 strlcpy(table
.pfrt_ruleset
, ioe
.ruleset
,
2579 sizeof(table
.pfrt_ruleset
));
2580 if ((error
= pfr_ina_rollback(&table
,
2581 ioe
.ticket
, NULL
, 0)))
2582 goto fail
; /* really bad */
2585 if ((error
= pf_rollback_rules(ioe
.ticket
,
2586 ioe
.rs_num
, ioe
.anchor
, ioe
.ruleset
)))
2587 goto fail
; /* really bad */
2595 struct pfioc_trans
*io
= (struct pfioc_trans
*)addr
;
2596 struct pfioc_trans_e ioe
;
2597 struct pfr_table table
;
2598 struct pf_ruleset
*rs
;
2601 if (io
->esize
!= sizeof(ioe
)) {
2605 /* first makes sure everything will succeed */
2606 for (i
= 0; i
< io
->size
; i
++) {
2607 if (copyin(io
->array
+i
, &ioe
, sizeof(ioe
))) {
2611 switch (ioe
.rs_num
) {
2613 case PF_RULESET_ALTQ
:
2614 if (ioe
.anchor
[0] || ioe
.ruleset
[0]) {
2618 if (!altqs_inactive_open
|| ioe
.ticket
!=
2619 ticket_altqs_inactive
) {
2625 case PF_RULESET_TABLE
:
2626 rs
= pf_find_ruleset(ioe
.anchor
, ioe
.ruleset
);
2627 if (rs
== NULL
|| !rs
->topen
|| ioe
.ticket
!=
2634 if (ioe
.rs_num
< 0 || ioe
.rs_num
>=
2639 rs
= pf_find_ruleset(ioe
.anchor
, ioe
.ruleset
);
2641 !rs
->rules
[ioe
.rs_num
].inactive
.open
||
2642 rs
->rules
[ioe
.rs_num
].inactive
.ticket
!=
2650 /* now do the commit - no errors should happen here */
2651 for (i
= 0; i
< io
->size
; i
++) {
2652 if (copyin(io
->array
+i
, &ioe
, sizeof(ioe
))) {
2656 switch (ioe
.rs_num
) {
2658 case PF_RULESET_ALTQ
:
2659 if ((error
= pf_commit_altq(ioe
.ticket
)))
2660 goto fail
; /* really bad */
2663 case PF_RULESET_TABLE
:
2664 bzero(&table
, sizeof(table
));
2665 strlcpy(table
.pfrt_anchor
, ioe
.anchor
,
2666 sizeof(table
.pfrt_anchor
));
2667 strlcpy(table
.pfrt_ruleset
, ioe
.ruleset
,
2668 sizeof(table
.pfrt_ruleset
));
2669 if ((error
= pfr_ina_commit(&table
, ioe
.ticket
,
2671 goto fail
; /* really bad */
2674 if ((error
= pf_commit_rules(ioe
.ticket
,
2675 ioe
.rs_num
, ioe
.anchor
, ioe
.ruleset
)))
2676 goto fail
; /* really bad */
2683 case DIOCGETSRCNODES
: {
2684 struct pfioc_src_nodes
*psn
= (struct pfioc_src_nodes
*)addr
;
2685 struct pf_src_node
*n
;
2686 struct pf_src_node
*p
, pstore
;
2688 int space
= psn
->psn_len
;
2692 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
)
2695 psn
->psn_len
= sizeof(struct pf_src_node
) * nr
;
2700 p
= psn
->psn_src_nodes
;
2701 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
2702 int secs
= time_second
;
2704 if ((nr
+ 1) * sizeof(*p
) > (unsigned)psn
->psn_len
)
2707 bcopy(n
, &pstore
, sizeof(pstore
));
2708 if (n
->rule
.ptr
!= NULL
)
2709 pstore
.rule
.nr
= n
->rule
.ptr
->nr
;
2710 pstore
.creation
= secs
- pstore
.creation
;
2711 if (pstore
.expire
> secs
)
2712 pstore
.expire
-= secs
;
2715 error
= copyout(&pstore
, p
, sizeof(*p
));
2723 psn
->psn_len
= sizeof(struct pf_src_node
) * nr
;
2728 case DIOCCLRSRCNODES
: {
2729 struct pf_src_node
*n
;
2730 struct pf_state
*state
;
2733 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
2734 state
->src_node
= NULL
;
2735 state
->nat_src_node
= NULL
;
2737 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
2741 pf_purge_expired_src_nodes();
2742 pf_status
.src_nodes
= 0;
2747 case DIOCSETHOSTID
: {
2748 u_int32_t
*hostid
= (u_int32_t
*)addr
;
2754 pf_status
.hostid
= *hostid
;
2764 case DIOCIGETIFACES
: {
2765 struct pfioc_iface
*io
= (struct pfioc_iface
*)addr
;
2767 if (io
->pfiio_esize
!= sizeof(struct pfi_if
)) {
2771 error
= pfi_get_ifaces(io
->pfiio_name
, io
->pfiio_buffer
,
2772 &io
->pfiio_size
, io
->pfiio_flags
);
2776 case DIOCICLRISTATS
: {
2777 struct pfioc_iface
*io
= (struct pfioc_iface
*)addr
;
2779 error
= pfi_clr_istats(io
->pfiio_name
, &io
->pfiio_nzero
,
2793	 * XXX - Check for version mismatch!!!
2796 pf_clear_states(void)
2798 struct pf_state
*state
;
2800 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
2801 state
->timeout
= PFTM_PURGE
;
2803 /* don't send out individual delete messages */
2804 state
->sync_flags
= PFSTATE_NOSYNC
;
2807 pf_purge_expired_states();
2808 pf_status
.states
= 0;
2811 * XXX This is called on module unload, we do not want to sync that over? */
2813 pfsync_clear_states(pf_status
.hostid
, psk
->psk_ifname
);
2818 pf_clear_tables(void)
2820 struct pfioc_table io
;
2823 bzero(&io
, sizeof(io
));
2825 error
= pfr_clr_tables(&io
.pfrio_table
, &io
.pfrio_ndel
,
2832 pf_clear_srcnodes(void)
2834 struct pf_src_node
*n
;
2835 struct pf_state
*state
;
2837 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
2838 state
->src_node
= NULL
;
2839 state
->nat_src_node
= NULL
;
2841 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
2845 pf_purge_expired_src_nodes();
2846 pf_status
.src_nodes
= 0;
2849	 * XXX - Check for version mismatch!!!
2853 * Duplicate pfctl -Fa operation to get rid of as much as we can.
2862 callout_stop(&pf_expire_to
);
2864 pf_status
.running
= 0;
2866 if ((error
= pf_begin_rules(&t
[0], PF_RULESET_SCRUB
, &nn
,
2868 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: SCRUB\n"));
2871 if ((error
= pf_begin_rules(&t
[1], PF_RULESET_FILTER
, &nn
,
2873 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: FILTER\n"));
2874 break; /* XXX: rollback? */
2876 if ((error
= pf_begin_rules(&t
[2], PF_RULESET_NAT
, &nn
, &nn
))
2878 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: NAT\n"));
2879 break; /* XXX: rollback? */
2881 if ((error
= pf_begin_rules(&t
[3], PF_RULESET_BINAT
, &nn
, &nn
))
2883 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: BINAT\n"));
2884 break; /* XXX: rollback? */
2886 if ((error
= pf_begin_rules(&t
[4], PF_RULESET_RDR
, &nn
, &nn
))
2888 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: RDR\n"));
2889 break; /* XXX: rollback? */
2892 /* XXX: these should always succeed here */
2893 pf_commit_rules(t
[0], PF_RULESET_SCRUB
, &nn
, &nn
);
2894 pf_commit_rules(t
[1], PF_RULESET_FILTER
, &nn
, &nn
);
2895 pf_commit_rules(t
[2], PF_RULESET_NAT
, &nn
, &nn
);
2896 pf_commit_rules(t
[3], PF_RULESET_BINAT
, &nn
, &nn
);
2897 pf_commit_rules(t
[4], PF_RULESET_RDR
, &nn
, &nn
);
2899 if ((error
= pf_clear_tables()) != 0)
2903 if ((error
= pf_begin_altq(&t
[0])) != 0) {
2904 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: ALTQ\n"));
2907 pf_commit_altq(t
[0]);
2912 pf_clear_srcnodes();
2914 /* status does not use malloced mem so no need to cleanup */
2915 /* fingerprints and interfaces have their own cleanup code */
2922 pf_check_in(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
, int dir
)
2925 * DragonFly's version of pf uses FreeBSD's native host byte ordering
2926 * for ip_len/ip_off. This is why we don't have to change byte order
2927 * like the FreeBSD-5 version does.
2931 chk
= pf_test(PF_IN
, ifp
, m
);
2940 pf_check_out(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
, int dir
)
2943 * DragonFly's version of pf uses FreeBSD's native host byte ordering
2944 * for ip_len/ip_off. This is why we don't have to change byte order
2945 * like the FreeBSD-5 version does.
2949	/* We need a proper CSUM before we start (see OpenBSD ip_output) */
2950 if ((*m
)->m_pkthdr
.csum_flags
& CSUM_DELAY_DATA
) {
2951 in_delayed_cksum(*m
);
2952 (*m
)->m_pkthdr
.csum_flags
&= ~CSUM_DELAY_DATA
;
2954 chk
= pf_test(PF_OUT
, ifp
, m
);
2964 pf_check6_in(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
, int dir
)
2967 * IPv6 is not affected by ip_len/ip_off byte order changes.
2971 chk
= pf_test6(PF_IN
, ifp
, m
);
2980 pf_check6_out(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
, int dir
)
2983 * IPv6 is not affected by ip_len/ip_off byte order changes.
2987	/* We need a proper CSUM before we start (see OpenBSD ip_output) */
2988 if ((*m
)->m_pkthdr
.csum_flags
& CSUM_DELAY_DATA
) {
2989 in_delayed_cksum(*m
);
2990 (*m
)->m_pkthdr
.csum_flags
&= ~CSUM_DELAY_DATA
;
2992 chk
= pf_test6(PF_OUT
, ifp
, m
);
3004 struct pfil_head
*pfh_inet
;
3006 struct pfil_head
*pfh_inet6
;
3012 pfh_inet
= pfil_head_get(PFIL_TYPE_AF
, AF_INET
);
3013 if (pfh_inet
== NULL
)
3015 pfil_add_hook(pf_check_in
, NULL
, PFIL_IN
, pfh_inet
);
3016 pfil_add_hook(pf_check_out
, NULL
, PFIL_OUT
, pfh_inet
);
3018 pfh_inet6
= pfil_head_get(PFIL_TYPE_AF
, AF_INET6
);
3019 if (pfh_inet6
== NULL
) {
3020 pfil_remove_hook(pf_check_in
, NULL
, PFIL_IN
, pfh_inet
);
3021 pfil_remove_hook(pf_check_out
, NULL
, PFIL_OUT
, pfh_inet
);
3024 pfil_add_hook(pf_check6_in
, NULL
, PFIL_IN
, pfh_inet6
);
3025 pfil_add_hook(pf_check6_out
, NULL
, PFIL_OUT
, pfh_inet6
);
3035 struct pfil_head
*pfh_inet
;
3037 struct pfil_head
*pfh_inet6
;
3040 if (pf_pfil_hooked
== 0)
3043 pfh_inet
= pfil_head_get(PFIL_TYPE_AF
, AF_INET
);
3044 if (pfh_inet
== NULL
)
3046 pfil_remove_hook(pf_check_in
, NULL
, PFIL_IN
, pfh_inet
);
3047 pfil_remove_hook(pf_check_out
, NULL
, PFIL_OUT
, pfh_inet
);
3049 pfh_inet6
= pfil_head_get(PFIL_TYPE_AF
, AF_INET6
);
3050 if (pfh_inet6
== NULL
)
3052 pfil_remove_hook(pf_check6_in
, NULL
, PFIL_IN
, pfh_inet6
);
3053 pfil_remove_hook(pf_check6_out
, NULL
, PFIL_OUT
, pfh_inet6
);
3066 pf_dev
= make_dev(&pf_ops
, 0, 0, 0, 0600, PF_NAME
);
3069 dev_ops_remove_all(&pf_ops
);
3080 pf_status
.running
= 0;
3081 error
= dehook_pf();
3084 * Should not happen!
3085 * XXX Due to error code ESRCH, kldunload will show
3086 * a message like 'No such process'.
3088 kprintf("pfil unregistration fail\n");
3096 dev_ops_remove_all(&pf_ops
);
3101 pf_modevent(module_t mod
, int type
, void *data
)
3111 error
= pf_unload();
3120 static moduledata_t pf_mod
= {
3126 DECLARE_MODULE(pf
, pf_mod
, SI_SUB_PSEUDO
, SI_ORDER_FIRST
);
3127 MODULE_VERSION(pf
, PF_MODVER
);