1 /* $FreeBSD: src/sys/contrib/pf/net/pf_ioctl.c,v 1.12 2004/08/12 14:15:42 mlaier Exp $ */
2 /* $OpenBSD: pf_ioctl.c,v 1.112.2.2 2004/07/24 18:28:12 brad Exp $ */
3 /* $DragonFly: src/sys/net/pf/pf_ioctl.c,v 1.13 2008/01/05 14:02:38 swildner Exp $ */
6 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
8 * Copyright (c) 2001 Daniel Hartmeier
9 * Copyright (c) 2002,2003 Henning Brauer
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
16 * - Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials provided
21 * with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
33 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
36 * Effort sponsored in part by the Defense Advanced Research Projects
37 * Agency (DARPA) and Air Force Research Laboratory, Air Force
38 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 #include "opt_inet6.h"
44 #include "use_pfsync.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
49 #include <sys/device.h>
51 #include <sys/filio.h>
52 #include <sys/fcntl.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/kernel.h>
56 #include <sys/thread2.h>
58 #include <sys/malloc.h>
59 #include <sys/module.h>
60 #include <vm/vm_zone.h>
63 #include <net/if_types.h>
64 #include <net/route.h>
66 #include <netinet/in.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_icmp.h>
73 #include <net/pf/pfvar.h>
76 #include <net/pf/if_pfsync.h>
77 #endif /* NPFSYNC > 0 */
80 #include <netinet/ip6.h>
81 #include <netinet/in_pcb.h>
85 #include <net/altq/altq.h>
88 #include <machine/limits.h>
91 void init_zone_var(void);
92 void cleanup_pf_zone(void);
94 struct pf_pool
*pf_get_pool(char *, char *, u_int32_t
,
95 u_int8_t
, u_int32_t
, u_int8_t
, u_int8_t
, u_int8_t
);
96 int pf_get_ruleset_number(u_int8_t
);
97 void pf_init_ruleset(struct pf_ruleset
*);
98 void pf_mv_pool(struct pf_palist
*, struct pf_palist
*);
99 void pf_empty_pool(struct pf_palist
*);
101 int pf_begin_altq(u_int32_t
*);
102 int pf_rollback_altq(u_int32_t
);
103 int pf_commit_altq(u_int32_t
);
105 int pf_begin_rules(u_int32_t
*, int, char *, char *);
106 int pf_rollback_rules(u_int32_t
, int, char *, char *);
107 int pf_commit_rules(u_int32_t
, int, char *, char *);
109 extern struct callout pf_expire_to
;
111 struct pf_rule pf_default_rule
;
113 #define TAGID_MAX 50000
114 TAILQ_HEAD(pf_tags
, pf_tagname
) pf_tags
= TAILQ_HEAD_INITIALIZER(pf_tags
),
115 pf_qids
= TAILQ_HEAD_INITIALIZER(pf_qids
);
117 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
118 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
120 static u_int16_t
tagname2tag(struct pf_tags
*, char *);
121 static void tag2tagname(struct pf_tags
*, u_int16_t
, char *);
122 static void tag_unref(struct pf_tags
*, u_int16_t
);
124 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) kprintf x
126 static cdev_t pf_dev
;
129 * XXX - These are new and need to be checked when moveing to a new version
131 static void pf_clear_states(void);
132 static int pf_clear_tables(void);
133 static void pf_clear_srcnodes(void);
135 * XXX - These are new and need to be checked when moveing to a new version
139 * Wrapper functions for pfil(9) hooks
141 static int pf_check_in(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
,
143 static int pf_check_out(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
,
146 static int pf_check6_in(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
,
148 static int pf_check6_out(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
,
152 static int hook_pf(void);
153 static int dehook_pf(void);
154 static int shutdown_pf(void);
155 static int pf_load(void);
156 static int pf_unload(void);
162 static struct dev_ops pf_ops
= { /* XXX convert to port model */
169 static volatile int pf_pfil_hooked
= 0;
174 pf_src_tree_pl
= pf_rule_pl
= NULL
;
175 pf_state_pl
= pf_altq_pl
= pf_pooladdr_pl
= NULL
;
176 pf_frent_pl
= pf_frag_pl
= pf_cache_pl
= pf_cent_pl
= NULL
;
177 pf_state_scrub_pl
= NULL
;
178 pfr_ktable_pl
= pfr_kentry_pl
= NULL
;
182 cleanup_pf_zone(void)
184 ZONE_DESTROY(pf_src_tree_pl
);
185 ZONE_DESTROY(pf_rule_pl
);
186 ZONE_DESTROY(pf_state_pl
);
187 ZONE_DESTROY(pf_altq_pl
);
188 ZONE_DESTROY(pf_pooladdr_pl
);
189 ZONE_DESTROY(pf_frent_pl
);
190 ZONE_DESTROY(pf_frag_pl
);
191 ZONE_DESTROY(pf_cache_pl
);
192 ZONE_DESTROY(pf_cent_pl
);
193 ZONE_DESTROY(pfr_ktable_pl
);
194 ZONE_DESTROY(pfr_kentry_pl
);
195 ZONE_DESTROY(pf_state_scrub_pl
);
196 ZONE_DESTROY(pfi_addr_pl
);
202 u_int32_t
*my_timeout
= pf_default_rule
.timeout
;
206 ZONE_CREATE(pf_src_tree_pl
,struct pf_src_node
, "pfsrctrpl");
207 ZONE_CREATE(pf_rule_pl
, struct pf_rule
, "pfrulepl");
208 ZONE_CREATE(pf_state_pl
, struct pf_state
, "pfstatepl");
209 ZONE_CREATE(pf_altq_pl
, struct pf_altq
, "pfaltqpl");
210 ZONE_CREATE(pf_pooladdr_pl
,struct pf_pooladdr
, "pfpooladdrpl");
211 ZONE_CREATE(pfr_ktable_pl
, struct pfr_ktable
, "pfrktable");
212 ZONE_CREATE(pfr_kentry_pl
, struct pfr_kentry
, "pfrkentry");
213 ZONE_CREATE(pf_frent_pl
, struct pf_frent
, "pffrent");
214 ZONE_CREATE(pf_frag_pl
, struct pf_fragment
, "pffrag");
215 ZONE_CREATE(pf_cache_pl
, struct pf_fragment
, "pffrcache");
216 ZONE_CREATE(pf_cent_pl
, struct pf_frcache
, "pffrcent");
217 ZONE_CREATE(pf_state_scrub_pl
, struct pf_state_scrub
,
219 ZONE_CREATE(pfi_addr_pl
, struct pfi_dynaddr
, "pfiaddrpl");
228 error
= pf_osfp_initialize();
235 pf_pool_limits
[PF_LIMIT_STATES
].pp
= pf_state_pl
;
236 pf_pool_limits
[PF_LIMIT_STATES
].limit
= PFSTATE_HIWAT
;
237 pf_pool_limits
[PF_LIMIT_FRAGS
].pp
= pf_frent_pl
;
238 pf_pool_limits
[PF_LIMIT_FRAGS
].limit
= PFFRAG_FRENT_HIWAT
;
239 /* XXX uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
240 pf_pool_limits[PF_LIMIT_STATES].limit);
243 RB_INIT(&tree_src_tracking
);
244 TAILQ_INIT(&pf_anchors
);
245 pf_init_ruleset(&pf_main_ruleset
);
246 TAILQ_INIT(&pf_altqs
[0]);
247 TAILQ_INIT(&pf_altqs
[1]);
248 TAILQ_INIT(&pf_pabuf
);
249 pf_altqs_active
= &pf_altqs
[0];
250 pf_altqs_inactive
= &pf_altqs
[1];
251 TAILQ_INIT(&state_updates
);
253 /* default rule should never be garbage collected */
254 pf_default_rule
.entries
.tqe_prev
= &pf_default_rule
.entries
.tqe_next
;
255 pf_default_rule
.action
= PF_PASS
;
256 pf_default_rule
.nr
= (uint32_t)(-1);
258 /* initialize default timeouts */
259 my_timeout
[PFTM_TCP_FIRST_PACKET
] = 120; /* First TCP packet */
260 my_timeout
[PFTM_TCP_OPENING
] = 30; /* No response yet */
261 my_timeout
[PFTM_TCP_ESTABLISHED
] = 24*60*60; /* Established */
262 my_timeout
[PFTM_TCP_CLOSING
] = 15 * 60; /* Half closed */
263 my_timeout
[PFTM_TCP_FIN_WAIT
] = 45; /* Got both FINs */
264 my_timeout
[PFTM_TCP_CLOSED
] = 90; /* Got a RST */
265 my_timeout
[PFTM_UDP_FIRST_PACKET
] = 60; /* First UDP packet */
266 my_timeout
[PFTM_UDP_SINGLE
] = 30; /* Unidirectional */
267 my_timeout
[PFTM_UDP_MULTIPLE
] = 60; /* Bidirectional */
268 my_timeout
[PFTM_ICMP_FIRST_PACKET
] = 20; /* First ICMP packet */
269 my_timeout
[PFTM_ICMP_ERROR_REPLY
] = 10; /* Got error response */
270 my_timeout
[PFTM_OTHER_FIRST_PACKET
] = 60; /* First packet */
271 my_timeout
[PFTM_OTHER_SINGLE
] = 30; /* Unidirectional */
272 my_timeout
[PFTM_OTHER_MULTIPLE
] = 60; /* Bidirectional */
273 my_timeout
[PFTM_FRAG
] = 30; /* Fragment expire */
274 my_timeout
[PFTM_INTERVAL
] = 10; /* Expire interval */
276 callout_init(&pf_expire_to
);
277 callout_reset(&pf_expire_to
, my_timeout
[PFTM_INTERVAL
] * hz
,
278 pf_purge_timeout
, &pf_expire_to
);
281 bzero(&pf_status
, sizeof(pf_status
));
282 pf_status
.debug
= PF_DEBUG_URGENT
;
285 /* XXX do our best to avoid a conflict */
286 pf_status
.hostid
= karc4random();
292 pfopen(struct dev_open_args
*ap
)
294 cdev_t dev
= ap
->a_head
.a_dev
;
301 pfclose(struct dev_close_args
*ap
)
303 cdev_t dev
= ap
->a_head
.a_dev
;
310 pf_get_pool(char *anchorname
, char *rulesetname
, u_int32_t ticket
,
311 u_int8_t rule_action
, u_int32_t rule_number
, u_int8_t r_last
,
312 u_int8_t active
, u_int8_t check_ticket
)
314 struct pf_ruleset
*ruleset
;
315 struct pf_rule
*rule
;
318 ruleset
= pf_find_ruleset(anchorname
, rulesetname
);
321 rs_num
= pf_get_ruleset_number(rule_action
);
322 if (rs_num
>= PF_RULESET_MAX
)
325 if (check_ticket
&& ticket
!=
326 ruleset
->rules
[rs_num
].active
.ticket
)
329 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
332 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
334 if (check_ticket
&& ticket
!=
335 ruleset
->rules
[rs_num
].inactive
.ticket
)
338 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
341 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].inactive
.ptr
);
344 while ((rule
!= NULL
) && (rule
->nr
!= rule_number
))
345 rule
= TAILQ_NEXT(rule
, entries
);
350 return (&rule
->rpool
);
354 pf_get_ruleset_number(u_int8_t action
)
358 return (PF_RULESET_SCRUB
);
362 return (PF_RULESET_FILTER
);
366 return (PF_RULESET_NAT
);
370 return (PF_RULESET_BINAT
);
374 return (PF_RULESET_RDR
);
377 return (PF_RULESET_MAX
);
383 pf_init_ruleset(struct pf_ruleset
*ruleset
)
387 memset(ruleset
, 0, sizeof(struct pf_ruleset
));
388 for (i
= 0; i
< PF_RULESET_MAX
; i
++) {
389 TAILQ_INIT(&ruleset
->rules
[i
].queues
[0]);
390 TAILQ_INIT(&ruleset
->rules
[i
].queues
[1]);
391 ruleset
->rules
[i
].active
.ptr
= &ruleset
->rules
[i
].queues
[0];
392 ruleset
->rules
[i
].inactive
.ptr
= &ruleset
->rules
[i
].queues
[1];
397 pf_find_anchor(const char *anchorname
)
399 struct pf_anchor
*anchor
;
402 anchor
= TAILQ_FIRST(&pf_anchors
);
403 while (anchor
!= NULL
&& (n
= strcmp(anchor
->name
, anchorname
)) < 0)
404 anchor
= TAILQ_NEXT(anchor
, entries
);
412 pf_find_ruleset(char *anchorname
, char *rulesetname
)
414 struct pf_anchor
*anchor
;
415 struct pf_ruleset
*ruleset
;
417 if (!anchorname
[0] && !rulesetname
[0])
418 return (&pf_main_ruleset
);
419 if (!anchorname
[0] || !rulesetname
[0])
421 anchorname
[PF_ANCHOR_NAME_SIZE
-1] = 0;
422 rulesetname
[PF_RULESET_NAME_SIZE
-1] = 0;
423 anchor
= pf_find_anchor(anchorname
);
426 ruleset
= TAILQ_FIRST(&anchor
->rulesets
);
427 while (ruleset
!= NULL
&& strcmp(ruleset
->name
, rulesetname
) < 0)
428 ruleset
= TAILQ_NEXT(ruleset
, entries
);
429 if (ruleset
!= NULL
&& !strcmp(ruleset
->name
, rulesetname
))
436 pf_find_or_create_ruleset(char anchorname
[PF_ANCHOR_NAME_SIZE
],
437 char rulesetname
[PF_RULESET_NAME_SIZE
])
439 struct pf_anchor
*anchor
, *a
;
440 struct pf_ruleset
*ruleset
, *r
;
442 if (!anchorname
[0] && !rulesetname
[0])
443 return (&pf_main_ruleset
);
444 if (!anchorname
[0] || !rulesetname
[0])
446 anchorname
[PF_ANCHOR_NAME_SIZE
-1] = 0;
447 rulesetname
[PF_RULESET_NAME_SIZE
-1] = 0;
448 a
= TAILQ_FIRST(&pf_anchors
);
449 while (a
!= NULL
&& strcmp(a
->name
, anchorname
) < 0)
450 a
= TAILQ_NEXT(a
, entries
);
451 if (a
!= NULL
&& !strcmp(a
->name
, anchorname
))
454 anchor
= (struct pf_anchor
*)kmalloc(sizeof(struct pf_anchor
),
458 memset(anchor
, 0, sizeof(struct pf_anchor
));
459 bcopy(anchorname
, anchor
->name
, sizeof(anchor
->name
));
460 TAILQ_INIT(&anchor
->rulesets
);
462 TAILQ_INSERT_BEFORE(a
, anchor
, entries
);
464 TAILQ_INSERT_TAIL(&pf_anchors
, anchor
, entries
);
466 r
= TAILQ_FIRST(&anchor
->rulesets
);
467 while (r
!= NULL
&& strcmp(r
->name
, rulesetname
) < 0)
468 r
= TAILQ_NEXT(r
, entries
);
469 if (r
!= NULL
&& !strcmp(r
->name
, rulesetname
))
471 ruleset
= (struct pf_ruleset
*)kmalloc(sizeof(struct pf_ruleset
),
473 if (ruleset
!= NULL
) {
474 pf_init_ruleset(ruleset
);
475 bcopy(rulesetname
, ruleset
->name
, sizeof(ruleset
->name
));
476 ruleset
->anchor
= anchor
;
478 TAILQ_INSERT_BEFORE(r
, ruleset
, entries
);
480 TAILQ_INSERT_TAIL(&anchor
->rulesets
, ruleset
, entries
);
486 pf_remove_if_empty_ruleset(struct pf_ruleset
*ruleset
)
488 struct pf_anchor
*anchor
;
491 if (ruleset
== NULL
|| ruleset
->anchor
== NULL
|| ruleset
->tables
> 0 ||
494 for (i
= 0; i
< PF_RULESET_MAX
; ++i
)
495 if (!TAILQ_EMPTY(ruleset
->rules
[i
].active
.ptr
) ||
496 !TAILQ_EMPTY(ruleset
->rules
[i
].inactive
.ptr
) ||
497 ruleset
->rules
[i
].inactive
.open
)
500 anchor
= ruleset
->anchor
;
501 TAILQ_REMOVE(&anchor
->rulesets
, ruleset
, entries
);
502 kfree(ruleset
, M_TEMP
);
504 if (TAILQ_EMPTY(&anchor
->rulesets
)) {
505 TAILQ_REMOVE(&pf_anchors
, anchor
, entries
);
506 kfree(anchor
, M_TEMP
);
507 pf_update_anchor_rules();
512 pf_mv_pool(struct pf_palist
*poola
, struct pf_palist
*poolb
)
514 struct pf_pooladdr
*mv_pool_pa
;
516 while ((mv_pool_pa
= TAILQ_FIRST(poola
)) != NULL
) {
517 TAILQ_REMOVE(poola
, mv_pool_pa
, entries
);
518 TAILQ_INSERT_TAIL(poolb
, mv_pool_pa
, entries
);
523 pf_empty_pool(struct pf_palist
*poola
)
525 struct pf_pooladdr
*empty_pool_pa
;
527 while ((empty_pool_pa
= TAILQ_FIRST(poola
)) != NULL
) {
528 pfi_dynaddr_remove(&empty_pool_pa
->addr
);
529 pf_tbladdr_remove(&empty_pool_pa
->addr
);
530 pfi_detach_rule(empty_pool_pa
->kif
);
531 TAILQ_REMOVE(poola
, empty_pool_pa
, entries
);
532 pool_put(&pf_pooladdr_pl
, empty_pool_pa
);
537 pf_rm_rule(struct pf_rulequeue
*rulequeue
, struct pf_rule
*rule
)
539 if (rulequeue
!= NULL
) {
540 if (rule
->states
<= 0) {
542 * XXX - we need to remove the table *before* detaching
543 * the rule to make sure the table code does not delete
544 * the anchor under our feet.
546 pf_tbladdr_remove(&rule
->src
.addr
);
547 pf_tbladdr_remove(&rule
->dst
.addr
);
549 TAILQ_REMOVE(rulequeue
, rule
, entries
);
550 rule
->entries
.tqe_prev
= NULL
;
551 rule
->nr
= (uint32_t)(-1);
554 if (rule
->states
> 0 || rule
->src_nodes
> 0 ||
555 rule
->entries
.tqe_prev
!= NULL
)
557 pf_tag_unref(rule
->tag
);
558 pf_tag_unref(rule
->match_tag
);
560 if (rule
->pqid
!= rule
->qid
)
561 pf_qid_unref(rule
->pqid
);
562 pf_qid_unref(rule
->qid
);
564 pfi_dynaddr_remove(&rule
->src
.addr
);
565 pfi_dynaddr_remove(&rule
->dst
.addr
);
566 if (rulequeue
== NULL
) {
567 pf_tbladdr_remove(&rule
->src
.addr
);
568 pf_tbladdr_remove(&rule
->dst
.addr
);
570 pfi_detach_rule(rule
->kif
);
571 pf_empty_pool(&rule
->rpool
.list
);
572 pool_put(&pf_rule_pl
, rule
);
576 tagname2tag(struct pf_tags
*head
, char *tagname
)
578 struct pf_tagname
*tag
, *p
= NULL
;
579 u_int16_t new_tagid
= 1;
581 TAILQ_FOREACH(tag
, head
, entries
)
582 if (strcmp(tagname
, tag
->name
) == 0) {
588 * to avoid fragmentation, we do a linear search from the beginning
589 * and take the first free slot we find. if there is none or the list
590 * is empty, append a new entry at the end.
594 if (!TAILQ_EMPTY(head
))
595 for (p
= TAILQ_FIRST(head
); p
!= NULL
&&
596 p
->tag
== new_tagid
; p
= TAILQ_NEXT(p
, entries
))
597 new_tagid
= p
->tag
+ 1;
599 if (new_tagid
> TAGID_MAX
)
602 /* allocate and fill new struct pf_tagname */
603 tag
= (struct pf_tagname
*)kmalloc(sizeof(struct pf_tagname
),
604 M_TEMP
, M_NOWAIT
| M_ZERO
);
607 strlcpy(tag
->name
, tagname
, sizeof(tag
->name
));
608 tag
->tag
= new_tagid
;
611 if (p
!= NULL
) /* insert new entry before p */
612 TAILQ_INSERT_BEFORE(p
, tag
, entries
);
613 else /* either list empty or no free slot in between */
614 TAILQ_INSERT_TAIL(head
, tag
, entries
);
620 tag2tagname(struct pf_tags
*head
, u_int16_t tagid
, char *p
)
622 struct pf_tagname
*tag
;
624 TAILQ_FOREACH(tag
, head
, entries
)
625 if (tag
->tag
== tagid
) {
626 strlcpy(p
, tag
->name
, PF_TAG_NAME_SIZE
);
632 tag_unref(struct pf_tags
*head
, u_int16_t tag
)
634 struct pf_tagname
*p
, *next
;
639 for (p
= TAILQ_FIRST(head
); p
!= NULL
; p
= next
) {
640 next
= TAILQ_NEXT(p
, entries
);
643 TAILQ_REMOVE(head
, p
, entries
);
652 pf_tagname2tag(char *tagname
)
654 return (tagname2tag(&pf_tags
, tagname
));
658 pf_tag2tagname(u_int16_t tagid
, char *p
)
660 return (tag2tagname(&pf_tags
, tagid
, p
));
664 pf_tag_unref(u_int16_t tag
)
666 return (tag_unref(&pf_tags
, tag
));
671 pf_qname2qid(char *qname
)
673 return ((u_int32_t
)tagname2tag(&pf_qids
, qname
));
677 pf_qid2qname(u_int32_t qid
, char *p
)
679 return (tag2tagname(&pf_qids
, (u_int16_t
)qid
, p
));
683 pf_qid_unref(u_int32_t qid
)
685 return (tag_unref(&pf_qids
, (u_int16_t
)qid
));
689 pf_begin_altq(u_int32_t
*ticket
)
691 struct pf_altq
*altq
;
694 /* Purge the old altq list */
695 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
696 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
697 if (altq
->qname
[0] == 0) {
698 /* detach and destroy the discipline */
699 error
= altq_remove(altq
);
701 pf_qid_unref(altq
->qid
);
702 pool_put(&pf_altq_pl
, altq
);
706 *ticket
= ++ticket_altqs_inactive
;
707 altqs_inactive_open
= 1;
712 pf_rollback_altq(u_int32_t ticket
)
714 struct pf_altq
*altq
;
717 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
719 /* Purge the old altq list */
720 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
721 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
722 if (altq
->qname
[0] == 0) {
723 /* detach and destroy the discipline */
724 error
= altq_remove(altq
);
726 pf_qid_unref(altq
->qid
);
727 pool_put(&pf_altq_pl
, altq
);
729 altqs_inactive_open
= 0;
734 pf_commit_altq(u_int32_t ticket
)
736 struct pf_altqqueue
*old_altqs
;
737 struct pf_altq
*altq
;
740 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
743 /* swap altqs, keep the old. */
745 old_altqs
= pf_altqs_active
;
746 pf_altqs_active
= pf_altqs_inactive
;
747 pf_altqs_inactive
= old_altqs
;
748 ticket_altqs_active
= ticket_altqs_inactive
;
750 /* Attach new disciplines */
751 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
752 if (altq
->qname
[0] == 0) {
753 /* attach the discipline */
754 error
= altq_pfattach(altq
);
762 /* Purge the old altq list */
763 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
764 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
765 if (altq
->qname
[0] == 0) {
766 /* detach and destroy the discipline */
767 err
= altq_pfdetach(altq
);
768 if (err
!= 0 && error
== 0)
770 err
= altq_remove(altq
);
771 if (err
!= 0 && error
== 0)
774 pf_qid_unref(altq
->qid
);
775 pool_put(&pf_altq_pl
, altq
);
779 altqs_inactive_open
= 0;
785 pf_begin_rules(u_int32_t
*ticket
, int rs_num
, char *anchor
, char *ruleset
)
787 struct pf_ruleset
*rs
;
788 struct pf_rule
*rule
;
790 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
792 rs
= pf_find_or_create_ruleset(anchor
, ruleset
);
795 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
)
796 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
797 *ticket
= ++rs
->rules
[rs_num
].inactive
.ticket
;
798 rs
->rules
[rs_num
].inactive
.open
= 1;
803 pf_rollback_rules(u_int32_t ticket
, int rs_num
, char *anchor
, char *ruleset
)
805 struct pf_ruleset
*rs
;
806 struct pf_rule
*rule
;
808 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
810 rs
= pf_find_ruleset(anchor
, ruleset
);
811 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
812 rs
->rules
[rs_num
].inactive
.ticket
!= ticket
)
814 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
)
815 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
816 rs
->rules
[rs_num
].inactive
.open
= 0;
821 pf_commit_rules(u_int32_t ticket
, int rs_num
, char *anchor
, char *ruleset
)
823 struct pf_ruleset
*rs
;
824 struct pf_rule
*rule
;
825 struct pf_rulequeue
*old_rules
;
827 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
829 rs
= pf_find_ruleset(anchor
, ruleset
);
830 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
831 ticket
!= rs
->rules
[rs_num
].inactive
.ticket
)
834 /* Swap rules, keep the old. */
836 old_rules
= rs
->rules
[rs_num
].active
.ptr
;
837 rs
->rules
[rs_num
].active
.ptr
=
838 rs
->rules
[rs_num
].inactive
.ptr
;
839 rs
->rules
[rs_num
].inactive
.ptr
= old_rules
;
840 rs
->rules
[rs_num
].active
.ticket
=
841 rs
->rules
[rs_num
].inactive
.ticket
;
842 pf_calc_skip_steps(rs
->rules
[rs_num
].active
.ptr
);
844 /* Purge the old rule list. */
845 while ((rule
= TAILQ_FIRST(old_rules
)) != NULL
)
846 pf_rm_rule(old_rules
, rule
);
847 rs
->rules
[rs_num
].inactive
.open
= 0;
848 pf_remove_if_empty_ruleset(rs
);
849 pf_update_anchor_rules();
855 pfioctl(struct dev_ioctl_args
*ap
)
857 u_long cmd
= ap
->a_cmd
;
858 caddr_t addr
= ap
->a_data
;
859 struct pf_pooladdr
*pa
= NULL
;
860 struct pf_pool
*pool
= NULL
;
863 /* XXX keep in sync with switch() below */
871 case DIOCSETSTATUSIF
:
878 case DIOCCLRRULECTRS
:
885 case DIOCGETRULESETS
:
899 case DIOCGETSRCNODES
:
900 case DIOCCLRSRCNODES
:
909 if (((struct pfioc_table
*)addr
)->pfrio_flags
&
911 break; /* dummy operation ok */
917 if (!(ap
->a_fflag
& FWRITE
))
933 case DIOCGETRULESETS
:
941 case DIOCGETSRCNODES
:
954 if (((struct pfioc_table
*)addr
)->pfrio_flags
&
956 break; /* dummy operation ok */
965 if (pf_status
.running
)
970 DPFPRINTF(PF_DEBUG_MISC
,
971 ("pf: pfil registration fail\n"));
974 pf_status
.running
= 1;
975 pf_status
.since
= time_second
;
976 if (pf_status
.stateid
== 0) {
977 pf_status
.stateid
= time_second
;
978 pf_status
.stateid
= pf_status
.stateid
<< 32;
980 DPFPRINTF(PF_DEBUG_MISC
, ("pf: started\n"));
985 if (!pf_status
.running
)
988 pf_status
.running
= 0;
991 pf_status
.running
= 1;
992 DPFPRINTF(PF_DEBUG_MISC
,
993 ("pf: pfil unregistration failed\n"));
995 pf_status
.since
= time_second
;
996 DPFPRINTF(PF_DEBUG_MISC
, ("pf: stopped\n"));
1000 case DIOCBEGINRULES
: {
1001 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1003 error
= pf_begin_rules(&pr
->ticket
, pf_get_ruleset_number(
1004 pr
->rule
.action
), pr
->anchor
, pr
->ruleset
);
1009 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1010 struct pf_ruleset
*ruleset
;
1011 struct pf_rule
*rule
, *tail
;
1012 struct pf_pooladdr
*pa
;
1015 ruleset
= pf_find_ruleset(pr
->anchor
, pr
->ruleset
);
1016 if (ruleset
== NULL
) {
1020 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1021 if (rs_num
>= PF_RULESET_MAX
) {
1025 if (pr
->rule
.anchorname
[0] && ruleset
!= &pf_main_ruleset
) {
1029 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
1033 if (pr
->ticket
!= ruleset
->rules
[rs_num
].inactive
.ticket
) {
1037 if (pr
->pool_ticket
!= ticket_pabuf
) {
1041 rule
= pool_get(&pf_rule_pl
, PR_NOWAIT
);
1046 bcopy(&pr
->rule
, rule
, sizeof(struct pf_rule
));
1047 rule
->anchor
= NULL
;
1049 TAILQ_INIT(&rule
->rpool
.list
);
1050 /* initialize refcounting */
1052 rule
->src_nodes
= 0;
1053 rule
->entries
.tqe_prev
= NULL
;
1055 if (rule
->af
== AF_INET
) {
1056 pool_put(&pf_rule_pl
, rule
);
1057 error
= EAFNOSUPPORT
;
1062 if (rule
->af
== AF_INET6
) {
1063 pool_put(&pf_rule_pl
, rule
);
1064 error
= EAFNOSUPPORT
;
1068 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
1071 rule
->nr
= tail
->nr
+ 1;
1074 if (rule
->ifname
[0]) {
1075 rule
->kif
= pfi_attach_rule(rule
->ifname
);
1076 if (rule
->kif
== NULL
) {
1077 pool_put(&pf_rule_pl
, rule
);
1085 if (rule
->qname
[0] != 0) {
1086 if ((rule
->qid
= pf_qname2qid(rule
->qname
)) == 0)
1088 else if (rule
->pqname
[0] != 0) {
1090 pf_qname2qid(rule
->pqname
)) == 0)
1093 rule
->pqid
= rule
->qid
;
1096 if (rule
->tagname
[0])
1097 if ((rule
->tag
= pf_tagname2tag(rule
->tagname
)) == 0)
1099 if (rule
->match_tagname
[0])
1100 if ((rule
->match_tag
=
1101 pf_tagname2tag(rule
->match_tagname
)) == 0)
1103 if (rule
->rt
&& !rule
->direction
)
1105 if (pfi_dynaddr_setup(&rule
->src
.addr
, rule
->af
))
1107 if (pfi_dynaddr_setup(&rule
->dst
.addr
, rule
->af
))
1109 if (pf_tbladdr_setup(ruleset
, &rule
->src
.addr
))
1111 if (pf_tbladdr_setup(ruleset
, &rule
->dst
.addr
))
1113 TAILQ_FOREACH(pa
, &pf_pabuf
, entries
)
1114 if (pf_tbladdr_setup(ruleset
, &pa
->addr
))
1117 pf_mv_pool(&pf_pabuf
, &rule
->rpool
.list
);
1118 if (((((rule
->action
== PF_NAT
) || (rule
->action
== PF_RDR
) ||
1119 (rule
->action
== PF_BINAT
)) && !rule
->anchorname
[0]) ||
1120 (rule
->rt
> PF_FASTROUTE
)) &&
1121 (TAILQ_FIRST(&rule
->rpool
.list
) == NULL
))
1125 pf_rm_rule(NULL
, rule
);
1128 rule
->rpool
.cur
= TAILQ_FIRST(&rule
->rpool
.list
);
1129 rule
->evaluations
= rule
->packets
= rule
->bytes
= 0;
1130 TAILQ_INSERT_TAIL(ruleset
->rules
[rs_num
].inactive
.ptr
,
1135 case DIOCCOMMITRULES
: {
1136 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1138 error
= pf_commit_rules(pr
->ticket
, pf_get_ruleset_number(
1139 pr
->rule
.action
), pr
->anchor
, pr
->ruleset
);
1143 case DIOCGETRULES
: {
1144 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1145 struct pf_ruleset
*ruleset
;
1146 struct pf_rule
*tail
;
1149 ruleset
= pf_find_ruleset(pr
->anchor
, pr
->ruleset
);
1150 if (ruleset
== NULL
) {
1154 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1155 if (rs_num
>= PF_RULESET_MAX
) {
1160 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
1163 pr
->nr
= tail
->nr
+ 1;
1166 pr
->ticket
= ruleset
->rules
[rs_num
].active
.ticket
;
1172 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1173 struct pf_ruleset
*ruleset
;
1174 struct pf_rule
*rule
;
1177 ruleset
= pf_find_ruleset(pr
->anchor
, pr
->ruleset
);
1178 if (ruleset
== NULL
) {
1182 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1183 if (rs_num
>= PF_RULESET_MAX
) {
1187 if (pr
->ticket
!= ruleset
->rules
[rs_num
].active
.ticket
) {
1192 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
1193 while ((rule
!= NULL
) && (rule
->nr
!= pr
->nr
))
1194 rule
= TAILQ_NEXT(rule
, entries
);
1200 bcopy(rule
, &pr
->rule
, sizeof(struct pf_rule
));
1201 pfi_dynaddr_copyout(&pr
->rule
.src
.addr
);
1202 pfi_dynaddr_copyout(&pr
->rule
.dst
.addr
);
1203 pf_tbladdr_copyout(&pr
->rule
.src
.addr
);
1204 pf_tbladdr_copyout(&pr
->rule
.dst
.addr
);
1205 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
1206 if (rule
->skip
[i
].ptr
== NULL
)
1207 pr
->rule
.skip
[i
].nr
= (uint32_t)(-1);
1209 pr
->rule
.skip
[i
].nr
=
1210 rule
->skip
[i
].ptr
->nr
;
1215 case DIOCCHANGERULE
: {
1216 struct pfioc_rule
*pcr
= (struct pfioc_rule
*)addr
;
1217 struct pf_ruleset
*ruleset
;
1218 struct pf_rule
*oldrule
= NULL
, *newrule
= NULL
;
1222 if (!(pcr
->action
== PF_CHANGE_REMOVE
||
1223 pcr
->action
== PF_CHANGE_GET_TICKET
) &&
1224 pcr
->pool_ticket
!= ticket_pabuf
) {
1229 if (pcr
->action
< PF_CHANGE_ADD_HEAD
||
1230 pcr
->action
> PF_CHANGE_GET_TICKET
) {
1234 ruleset
= pf_find_ruleset(pcr
->anchor
, pcr
->ruleset
);
1235 if (ruleset
== NULL
) {
1239 rs_num
= pf_get_ruleset_number(pcr
->rule
.action
);
1240 if (rs_num
>= PF_RULESET_MAX
) {
1245 if (pcr
->action
== PF_CHANGE_GET_TICKET
) {
1246 pcr
->ticket
= ++ruleset
->rules
[rs_num
].active
.ticket
;
1250 ruleset
->rules
[rs_num
].active
.ticket
) {
1254 if (pcr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
1260 if (pcr
->action
!= PF_CHANGE_REMOVE
) {
1261 newrule
= pool_get(&pf_rule_pl
, PR_NOWAIT
);
1262 if (newrule
== NULL
) {
1266 bcopy(&pcr
->rule
, newrule
, sizeof(struct pf_rule
));
1267 TAILQ_INIT(&newrule
->rpool
.list
);
1268 /* initialize refcounting */
1269 newrule
->states
= 0;
1270 newrule
->entries
.tqe_prev
= NULL
;
1272 if (newrule
->af
== AF_INET
) {
1273 pool_put(&pf_rule_pl
, newrule
);
1274 error
= EAFNOSUPPORT
;
1279 if (newrule
->af
== AF_INET6
) {
1280 pool_put(&pf_rule_pl
, newrule
);
1281 error
= EAFNOSUPPORT
;
1285 if (newrule
->ifname
[0]) {
1286 newrule
->kif
= pfi_attach_rule(newrule
->ifname
);
1287 if (newrule
->kif
== NULL
) {
1288 pool_put(&pf_rule_pl
, newrule
);
1293 newrule
->kif
= NULL
;
1297 if (newrule
->qname
[0] != 0) {
1299 pf_qname2qid(newrule
->qname
)) == 0)
1301 else if (newrule
->pqname
[0] != 0) {
1302 if ((newrule
->pqid
=
1303 pf_qname2qid(newrule
->pqname
)) == 0)
1306 newrule
->pqid
= newrule
->qid
;
1309 if (newrule
->tagname
[0])
1311 pf_tagname2tag(newrule
->tagname
)) == 0)
1313 if (newrule
->match_tagname
[0])
1314 if ((newrule
->match_tag
= pf_tagname2tag(
1315 newrule
->match_tagname
)) == 0)
1318 if (newrule
->rt
&& !newrule
->direction
)
1320 if (pfi_dynaddr_setup(&newrule
->src
.addr
, newrule
->af
))
1322 if (pfi_dynaddr_setup(&newrule
->dst
.addr
, newrule
->af
))
1324 if (pf_tbladdr_setup(ruleset
, &newrule
->src
.addr
))
1326 if (pf_tbladdr_setup(ruleset
, &newrule
->dst
.addr
))
1329 pf_mv_pool(&pf_pabuf
, &newrule
->rpool
.list
);
1330 if (((((newrule
->action
== PF_NAT
) ||
1331 (newrule
->action
== PF_RDR
) ||
1332 (newrule
->action
== PF_BINAT
) ||
1333 (newrule
->rt
> PF_FASTROUTE
)) &&
1334 !newrule
->anchorname
[0])) &&
1335 (TAILQ_FIRST(&newrule
->rpool
.list
) == NULL
))
1339 pf_rm_rule(NULL
, newrule
);
1342 newrule
->rpool
.cur
= TAILQ_FIRST(&newrule
->rpool
.list
);
1343 newrule
->evaluations
= newrule
->packets
= 0;
1346 pf_empty_pool(&pf_pabuf
);
1350 if (pcr
->action
== PF_CHANGE_ADD_HEAD
)
1351 oldrule
= TAILQ_FIRST(
1352 ruleset
->rules
[rs_num
].active
.ptr
);
1353 else if (pcr
->action
== PF_CHANGE_ADD_TAIL
)
1354 oldrule
= TAILQ_LAST(
1355 ruleset
->rules
[rs_num
].active
.ptr
, pf_rulequeue
);
1357 oldrule
= TAILQ_FIRST(
1358 ruleset
->rules
[rs_num
].active
.ptr
);
1359 while ((oldrule
!= NULL
) && (oldrule
->nr
!= pcr
->nr
))
1360 oldrule
= TAILQ_NEXT(oldrule
, entries
);
1361 if (oldrule
== NULL
) {
1362 pf_rm_rule(NULL
, newrule
);
1369 if (pcr
->action
== PF_CHANGE_REMOVE
)
1370 pf_rm_rule(ruleset
->rules
[rs_num
].active
.ptr
, oldrule
);
1372 if (oldrule
== NULL
)
1374 ruleset
->rules
[rs_num
].active
.ptr
,
1376 else if (pcr
->action
== PF_CHANGE_ADD_HEAD
||
1377 pcr
->action
== PF_CHANGE_ADD_BEFORE
)
1378 TAILQ_INSERT_BEFORE(oldrule
, newrule
, entries
);
1381 ruleset
->rules
[rs_num
].active
.ptr
,
1382 oldrule
, newrule
, entries
);
1386 TAILQ_FOREACH(oldrule
,
1387 ruleset
->rules
[rs_num
].active
.ptr
, entries
)
1390 pf_calc_skip_steps(ruleset
->rules
[rs_num
].active
.ptr
);
1391 pf_remove_if_empty_ruleset(ruleset
);
1392 pf_update_anchor_rules();
1394 ruleset
->rules
[rs_num
].active
.ticket
++;
1399 case DIOCCLRSTATES
: {
1400 struct pf_state
*state
;
1401 struct pfioc_state_kill
*psk
= (struct pfioc_state_kill
*)addr
;
1405 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
1406 if (!psk
->psk_ifname
[0] || !strcmp(psk
->psk_ifname
,
1407 state
->u
.s
.kif
->pfik_name
)) {
1408 state
->timeout
= PFTM_PURGE
;
1410 /* don't send out individual delete messages */
1411 state
->sync_flags
= PFSTATE_NOSYNC
;
1416 pf_purge_expired_states();
1417 pf_status
.states
= 0;
1418 psk
->psk_af
= killed
;
1420 pfsync_clear_states(pf_status
.hostid
, psk
->psk_ifname
);
1426 case DIOCKILLSTATES
: {
1427 struct pf_state
*state
;
1428 struct pfioc_state_kill
*psk
= (struct pfioc_state_kill
*)addr
;
1432 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
1433 if ((!psk
->psk_af
|| state
->af
== psk
->psk_af
)
1434 && (!psk
->psk_proto
|| psk
->psk_proto
==
1436 PF_MATCHA(psk
->psk_src
.not,
1437 &psk
->psk_src
.addr
.v
.a
.addr
,
1438 &psk
->psk_src
.addr
.v
.a
.mask
,
1439 &state
->lan
.addr
, state
->af
) &&
1440 PF_MATCHA(psk
->psk_dst
.not,
1441 &psk
->psk_dst
.addr
.v
.a
.addr
,
1442 &psk
->psk_dst
.addr
.v
.a
.mask
,
1443 &state
->ext
.addr
, state
->af
) &&
1444 (psk
->psk_src
.port_op
== 0 ||
1445 pf_match_port(psk
->psk_src
.port_op
,
1446 psk
->psk_src
.port
[0], psk
->psk_src
.port
[1],
1447 state
->lan
.port
)) &&
1448 (psk
->psk_dst
.port_op
== 0 ||
1449 pf_match_port(psk
->psk_dst
.port_op
,
1450 psk
->psk_dst
.port
[0], psk
->psk_dst
.port
[1],
1451 state
->ext
.port
)) &&
1452 (!psk
->psk_ifname
[0] || !strcmp(psk
->psk_ifname
,
1453 state
->u
.s
.kif
->pfik_name
))) {
1454 state
->timeout
= PFTM_PURGE
;
1458 pf_purge_expired_states();
1460 psk
->psk_af
= killed
;
1464 case DIOCADDSTATE
: {
1465 struct pfioc_state
*ps
= (struct pfioc_state
*)addr
;
1466 struct pf_state
*state
;
1467 struct pfi_kif
*kif
;
1469 if (ps
->state
.timeout
>= PFTM_MAX
&&
1470 ps
->state
.timeout
!= PFTM_UNTIL_PACKET
) {
1474 state
= pool_get(&pf_state_pl
, PR_NOWAIT
);
1475 if (state
== NULL
) {
1480 kif
= pfi_lookup_create(ps
->state
.u
.ifname
);
1482 pool_put(&pf_state_pl
, state
);
1487 bcopy(&ps
->state
, state
, sizeof(struct pf_state
));
1488 bzero(&state
->u
, sizeof(state
->u
));
1489 state
->rule
.ptr
= &pf_default_rule
;
1490 state
->nat_rule
.ptr
= NULL
;
1491 state
->anchor
.ptr
= NULL
;
1492 state
->rt_kif
= NULL
;
1493 state
->creation
= time_second
;
1494 state
->pfsync_time
= 0;
1495 state
->packets
[0] = state
->packets
[1] = 0;
1496 state
->bytes
[0] = state
->bytes
[1] = 0;
1498 if (pf_insert_state(kif
, state
)) {
1499 pfi_maybe_destroy(kif
);
1500 pool_put(&pf_state_pl
, state
);
1507 case DIOCGETSTATE
: {
1508 struct pfioc_state
*ps
= (struct pfioc_state
*)addr
;
1509 struct pf_state
*state
;
1514 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
1519 if (state
== NULL
) {
1524 bcopy(state
, &ps
->state
, sizeof(struct pf_state
));
1525 ps
->state
.rule
.nr
= state
->rule
.ptr
->nr
;
1526 ps
->state
.nat_rule
.nr
= (state
->nat_rule
.ptr
== NULL
) ?
1527 (uint32_t)(-1) : state
->nat_rule
.ptr
->nr
;
1528 ps
->state
.anchor
.nr
= (state
->anchor
.ptr
== NULL
) ?
1529 (uint32_t)(-1) : state
->anchor
.ptr
->nr
;
1531 ps
->state
.expire
= pf_state_expires(state
);
1532 if (ps
->state
.expire
> time_second
)
1533 ps
->state
.expire
-= time_second
;
1535 ps
->state
.expire
= 0;
1539 case DIOCGETSTATES
: {
1540 struct pfioc_states
*ps
= (struct pfioc_states
*)addr
;
1541 struct pf_state
*state
;
1542 struct pf_state
*p
, pstore
;
1543 struct pfi_kif
*kif
;
1545 int space
= ps
->ps_len
;
1549 TAILQ_FOREACH(kif
, &pfi_statehead
, pfik_w_states
)
1550 nr
+= kif
->pfik_states
;
1552 ps
->ps_len
= sizeof(struct pf_state
) * nr
;
1558 TAILQ_FOREACH(kif
, &pfi_statehead
, pfik_w_states
)
1559 RB_FOREACH(state
, pf_state_tree_ext_gwy
,
1560 &kif
->pfik_ext_gwy
) {
1561 int secs
= time_second
;
1563 if ((nr
+1) * sizeof(*p
) > (unsigned)ps
->ps_len
)
1566 bcopy(state
, &pstore
, sizeof(pstore
));
1567 strlcpy(pstore
.u
.ifname
, kif
->pfik_name
,
1568 sizeof(pstore
.u
.ifname
));
1569 pstore
.rule
.nr
= state
->rule
.ptr
->nr
;
1570 pstore
.nat_rule
.nr
= (state
->nat_rule
.ptr
==
1571 NULL
) ? (uint32_t)(-1)
1572 : state
->nat_rule
.ptr
->nr
;
1573 pstore
.anchor
.nr
= (state
->anchor
.ptr
==
1574 NULL
) ? (uint32_t)(-1)
1575 : state
->anchor
.ptr
->nr
;
1576 pstore
.creation
= secs
- pstore
.creation
;
1577 pstore
.expire
= pf_state_expires(state
);
1578 if (pstore
.expire
> secs
)
1579 pstore
.expire
-= secs
;
1582 error
= copyout(&pstore
, p
, sizeof(*p
));
1590 ps
->ps_len
= sizeof(struct pf_state
) * nr
;
1595 case DIOCGETSTATUS
: {
1596 struct pf_status
*s
= (struct pf_status
*)addr
;
1597 bcopy(&pf_status
, s
, sizeof(struct pf_status
));
1598 pfi_fill_oldstatus(s
);
1602 case DIOCSETSTATUSIF
: {
1603 struct pfioc_if
*pi
= (struct pfioc_if
*)addr
;
1605 if (pi
->ifname
[0] == 0) {
1606 bzero(pf_status
.ifname
, IFNAMSIZ
);
1609 if (ifunit(pi
->ifname
) == NULL
) {
1613 strlcpy(pf_status
.ifname
, pi
->ifname
, IFNAMSIZ
);
1617 case DIOCCLRSTATUS
: {
1618 bzero(pf_status
.counters
, sizeof(pf_status
.counters
));
1619 bzero(pf_status
.fcounters
, sizeof(pf_status
.fcounters
));
1620 bzero(pf_status
.scounters
, sizeof(pf_status
.scounters
));
1621 if (*pf_status
.ifname
)
1622 pfi_clr_istats(pf_status
.ifname
, NULL
,
1628 struct pfioc_natlook
*pnl
= (struct pfioc_natlook
*)addr
;
1629 struct pf_state
*state
;
1630 struct pf_state key
;
1631 int m
= 0, direction
= pnl
->direction
;
1634 key
.proto
= pnl
->proto
;
1637 PF_AZERO(&pnl
->saddr
, pnl
->af
) ||
1638 PF_AZERO(&pnl
->daddr
, pnl
->af
) ||
1639 !pnl
->dport
|| !pnl
->sport
)
1645 * userland gives us source and dest of connection,
1646 * reverse the lookup so we ask for what happens with
1647 * the return traffic, enabling us to find it in the
1650 if (direction
== PF_IN
) {
1651 PF_ACPY(&key
.ext
.addr
, &pnl
->daddr
, pnl
->af
);
1652 key
.ext
.port
= pnl
->dport
;
1653 PF_ACPY(&key
.gwy
.addr
, &pnl
->saddr
, pnl
->af
);
1654 key
.gwy
.port
= pnl
->sport
;
1655 state
= pf_find_state_all(&key
, PF_EXT_GWY
, &m
);
1657 PF_ACPY(&key
.lan
.addr
, &pnl
->daddr
, pnl
->af
);
1658 key
.lan
.port
= pnl
->dport
;
1659 PF_ACPY(&key
.ext
.addr
, &pnl
->saddr
, pnl
->af
);
1660 key
.ext
.port
= pnl
->sport
;
1661 state
= pf_find_state_all(&key
, PF_LAN_EXT
, &m
);
1664 error
= E2BIG
; /* more than one state */
1665 else if (state
!= NULL
) {
1666 if (direction
== PF_IN
) {
1667 PF_ACPY(&pnl
->rsaddr
, &state
->lan
.addr
,
1669 pnl
->rsport
= state
->lan
.port
;
1670 PF_ACPY(&pnl
->rdaddr
, &pnl
->daddr
,
1672 pnl
->rdport
= pnl
->dport
;
1674 PF_ACPY(&pnl
->rdaddr
, &state
->gwy
.addr
,
1676 pnl
->rdport
= state
->gwy
.port
;
1677 PF_ACPY(&pnl
->rsaddr
, &pnl
->saddr
,
1679 pnl
->rsport
= pnl
->sport
;
1688 case DIOCSETTIMEOUT
: {
1689 struct pfioc_tm
*pt
= (struct pfioc_tm
*)addr
;
1692 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
||
1697 old
= pf_default_rule
.timeout
[pt
->timeout
];
1698 pf_default_rule
.timeout
[pt
->timeout
] = pt
->seconds
;
1703 case DIOCGETTIMEOUT
: {
1704 struct pfioc_tm
*pt
= (struct pfioc_tm
*)addr
;
1706 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
) {
1710 pt
->seconds
= pf_default_rule
.timeout
[pt
->timeout
];
1714 case DIOCGETLIMIT
: {
1715 struct pfioc_limit
*pl
= (struct pfioc_limit
*)addr
;
1717 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
) {
1721 pl
->limit
= pf_pool_limits
[pl
->index
].limit
;
1725 case DIOCSETLIMIT
: {
1726 struct pfioc_limit
*pl
= (struct pfioc_limit
*)addr
;
1729 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
||
1730 pf_pool_limits
[pl
->index
].pp
== NULL
) {
1735 /* XXX Get an API to set limits on the zone/pool */
1736 old_limit
= pf_pool_limits
[pl
->index
].limit
;
1737 pf_pool_limits
[pl
->index
].limit
= pl
->limit
;
1738 pl
->limit
= old_limit
;
1742 case DIOCSETDEBUG
: {
1743 u_int32_t
*level
= (u_int32_t
*)addr
;
1745 pf_status
.debug
= *level
;
1749 case DIOCCLRRULECTRS
: {
1750 struct pf_ruleset
*ruleset
= &pf_main_ruleset
;
1751 struct pf_rule
*rule
;
1755 ruleset
->rules
[PF_RULESET_FILTER
].active
.ptr
, entries
)
1756 rule
->evaluations
= rule
->packets
=
1762 case DIOCGIFSPEED
: {
1763 struct pf_ifspeed
*psp
= (struct pf_ifspeed
*)addr
;
1764 struct pf_ifspeed ps
;
1767 if (psp
->ifname
[0] != 0) {
1768 /* Can we completely trust user-land? */
1769 strlcpy(ps
.ifname
, psp
->ifname
, IFNAMSIZ
);
1770 ifp
= ifunit(ps
.ifname
);
1772 psp
->baudrate
= ifp
->if_baudrate
;
1780 case DIOCSTARTALTQ
: {
1781 struct pf_altq
*altq
;
1783 struct tb_profile tb
;
1785 /* enable all altq interfaces on active list */
1787 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
1788 if (altq
->qname
[0] == 0) {
1789 if ((ifp
= ifunit(altq
->ifname
)) == NULL
) {
1793 if (ifp
->if_snd
.altq_type
!= ALTQT_NONE
)
1794 error
= altq_enable(&ifp
->if_snd
);
1797 /* set tokenbucket regulator */
1798 tb
.rate
= altq
->ifbandwidth
;
1799 tb
.depth
= altq
->tbrsize
;
1800 error
= tbr_set(&ifp
->if_snd
, &tb
);
1806 DPFPRINTF(PF_DEBUG_MISC
, ("altq: started\n"));
1810 case DIOCSTOPALTQ
: {
1811 struct pf_altq
*altq
;
1813 struct tb_profile tb
;
1816 /* disable all altq interfaces on active list */
1818 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
1819 if (altq
->qname
[0] == 0) {
1820 if ((ifp
= ifunit(altq
->ifname
)) == NULL
) {
1824 if (ifp
->if_snd
.altq_type
!= ALTQT_NONE
) {
1825 err
= altq_disable(&ifp
->if_snd
);
1826 if (err
!= 0 && error
== 0)
1829 /* clear tokenbucket regulator */
1831 err
= tbr_set(&ifp
->if_snd
, &tb
);
1832 if (err
!= 0 && error
== 0)
1837 DPFPRINTF(PF_DEBUG_MISC
, ("altq: stopped\n"));
1841 case DIOCBEGINALTQS
: {
1842 u_int32_t
*ticket
= (u_int32_t
*)addr
;
1844 error
= pf_begin_altq(ticket
);
1849 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
1850 struct pf_altq
*altq
, *a
;
1852 if (pa
->ticket
!= ticket_altqs_inactive
) {
1856 altq
= pool_get(&pf_altq_pl
, PR_NOWAIT
);
1861 bcopy(&pa
->altq
, altq
, sizeof(struct pf_altq
));
1864 * if this is for a queue, find the discipline and
1865 * copy the necessary fields
1867 if (altq
->qname
[0] != 0) {
1868 if ((altq
->qid
= pf_qname2qid(altq
->qname
)) == 0) {
1870 pool_put(&pf_altq_pl
, altq
);
1873 TAILQ_FOREACH(a
, pf_altqs_inactive
, entries
) {
1874 if (strncmp(a
->ifname
, altq
->ifname
,
1875 IFNAMSIZ
) == 0 && a
->qname
[0] == 0) {
1876 altq
->altq_disc
= a
->altq_disc
;
1882 error
= altq_add(altq
);
1884 pool_put(&pf_altq_pl
, altq
);
1888 TAILQ_INSERT_TAIL(pf_altqs_inactive
, altq
, entries
);
1889 bcopy(altq
, &pa
->altq
, sizeof(struct pf_altq
));
1893 case DIOCCOMMITALTQS
: {
1894 u_int32_t ticket
= *(u_int32_t
*)addr
;
1896 error
= pf_commit_altq(ticket
);
1900 case DIOCGETALTQS
: {
1901 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
1902 struct pf_altq
*altq
;
1906 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
)
1908 pa
->ticket
= ticket_altqs_active
;
1914 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
1915 struct pf_altq
*altq
;
1918 if (pa
->ticket
!= ticket_altqs_active
) {
1924 altq
= TAILQ_FIRST(pf_altqs_active
);
1925 while ((altq
!= NULL
) && (nr
< pa
->nr
)) {
1926 altq
= TAILQ_NEXT(altq
, entries
);
1934 bcopy(altq
, &pa
->altq
, sizeof(struct pf_altq
));
1939 case DIOCCHANGEALTQ
:
1940 /* CHANGEALTQ not supported yet! */
1944 case DIOCGETQSTATS
: {
1945 struct pfioc_qstats
*pq
= (struct pfioc_qstats
*)addr
;
1946 struct pf_altq
*altq
;
1950 if (pq
->ticket
!= ticket_altqs_active
) {
1954 nbytes
= pq
->nbytes
;
1957 altq
= TAILQ_FIRST(pf_altqs_active
);
1958 while ((altq
!= NULL
) && (nr
< pq
->nr
)) {
1959 altq
= TAILQ_NEXT(altq
, entries
);
1967 error
= altq_getqstats(altq
, pq
->buf
, &nbytes
);
1970 pq
->scheduler
= altq
->scheduler
;
1971 pq
->nbytes
= nbytes
;
1977 case DIOCBEGINADDRS
: {
1978 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
1980 pf_empty_pool(&pf_pabuf
);
1981 pp
->ticket
= ++ticket_pabuf
;
1986 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
1989 if (pp
->af
== AF_INET
) {
1990 error
= EAFNOSUPPORT
;
1995 if (pp
->af
== AF_INET6
) {
1996 error
= EAFNOSUPPORT
;
2000 if (pp
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
2001 pp
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
2002 pp
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
2006 pa
= pool_get(&pf_pooladdr_pl
, PR_NOWAIT
);
2011 bcopy(&pp
->addr
, pa
, sizeof(struct pf_pooladdr
));
2012 if (pa
->ifname
[0]) {
2013 pa
->kif
= pfi_attach_rule(pa
->ifname
);
2014 if (pa
->kif
== NULL
) {
2015 pool_put(&pf_pooladdr_pl
, pa
);
2020 if (pfi_dynaddr_setup(&pa
->addr
, pp
->af
)) {
2021 pfi_dynaddr_remove(&pa
->addr
);
2022 pfi_detach_rule(pa
->kif
);
2023 pool_put(&pf_pooladdr_pl
, pa
);
2027 TAILQ_INSERT_TAIL(&pf_pabuf
, pa
, entries
);
2031 case DIOCGETADDRS
: {
2032 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
2036 pool
= pf_get_pool(pp
->anchor
, pp
->ruleset
, pp
->ticket
,
2037 pp
->r_action
, pp
->r_num
, 0, 1, 0);
2043 TAILQ_FOREACH(pa
, &pool
->list
, entries
)
2050 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
2054 pool
= pf_get_pool(pp
->anchor
, pp
->ruleset
, pp
->ticket
,
2055 pp
->r_action
, pp
->r_num
, 0, 1, 1);
2061 pa
= TAILQ_FIRST(&pool
->list
);
2062 while ((pa
!= NULL
) && (nr
< pp
->nr
)) {
2063 pa
= TAILQ_NEXT(pa
, entries
);
2071 bcopy(pa
, &pp
->addr
, sizeof(struct pf_pooladdr
));
2072 pfi_dynaddr_copyout(&pp
->addr
.addr
);
2073 pf_tbladdr_copyout(&pp
->addr
.addr
);
2078 case DIOCCHANGEADDR
: {
2079 struct pfioc_pooladdr
*pca
= (struct pfioc_pooladdr
*)addr
;
2080 struct pf_pooladdr
*oldpa
= NULL
, *newpa
= NULL
;
2081 struct pf_ruleset
*ruleset
;
2083 if (pca
->action
< PF_CHANGE_ADD_HEAD
||
2084 pca
->action
> PF_CHANGE_REMOVE
) {
2088 if (pca
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
2089 pca
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
2090 pca
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
2095 ruleset
= pf_find_ruleset(pca
->anchor
, pca
->ruleset
);
2096 if (ruleset
== NULL
) {
2100 pool
= pf_get_pool(pca
->anchor
, pca
->ruleset
, pca
->ticket
,
2101 pca
->r_action
, pca
->r_num
, pca
->r_last
, 1, 1);
2106 if (pca
->action
!= PF_CHANGE_REMOVE
) {
2107 newpa
= pool_get(&pf_pooladdr_pl
, PR_NOWAIT
);
2108 if (newpa
== NULL
) {
2112 bcopy(&pca
->addr
, newpa
, sizeof(struct pf_pooladdr
));
2114 if (pca
->af
== AF_INET
) {
2115 pool_put(&pf_pooladdr_pl
, newpa
);
2116 error
= EAFNOSUPPORT
;
2121 if (pca
->af
== AF_INET6
) {
2122 pool_put(&pf_pooladdr_pl
, newpa
);
2123 error
= EAFNOSUPPORT
;
2127 if (newpa
->ifname
[0]) {
2128 newpa
->kif
= pfi_attach_rule(newpa
->ifname
);
2129 if (newpa
->kif
== NULL
) {
2130 pool_put(&pf_pooladdr_pl
, newpa
);
2136 if (pfi_dynaddr_setup(&newpa
->addr
, pca
->af
) ||
2137 pf_tbladdr_setup(ruleset
, &newpa
->addr
)) {
2138 pfi_dynaddr_remove(&newpa
->addr
);
2139 pfi_detach_rule(newpa
->kif
);
2140 pool_put(&pf_pooladdr_pl
, newpa
);
2148 if (pca
->action
== PF_CHANGE_ADD_HEAD
)
2149 oldpa
= TAILQ_FIRST(&pool
->list
);
2150 else if (pca
->action
== PF_CHANGE_ADD_TAIL
)
2151 oldpa
= TAILQ_LAST(&pool
->list
, pf_palist
);
2155 oldpa
= TAILQ_FIRST(&pool
->list
);
2156 while ((oldpa
!= NULL
) && (i
< pca
->nr
)) {
2157 oldpa
= TAILQ_NEXT(oldpa
, entries
);
2160 if (oldpa
== NULL
) {
2167 if (pca
->action
== PF_CHANGE_REMOVE
) {
2168 TAILQ_REMOVE(&pool
->list
, oldpa
, entries
);
2169 pfi_dynaddr_remove(&oldpa
->addr
);
2170 pf_tbladdr_remove(&oldpa
->addr
);
2171 pfi_detach_rule(oldpa
->kif
);
2172 pool_put(&pf_pooladdr_pl
, oldpa
);
2175 TAILQ_INSERT_TAIL(&pool
->list
, newpa
, entries
);
2176 else if (pca
->action
== PF_CHANGE_ADD_HEAD
||
2177 pca
->action
== PF_CHANGE_ADD_BEFORE
)
2178 TAILQ_INSERT_BEFORE(oldpa
, newpa
, entries
);
2180 TAILQ_INSERT_AFTER(&pool
->list
, oldpa
,
2184 pool
->cur
= TAILQ_FIRST(&pool
->list
);
2185 PF_ACPY(&pool
->counter
, &pool
->cur
->addr
.v
.a
.addr
,
2191 case DIOCGETANCHORS
: {
2192 struct pfioc_anchor
*pa
= (struct pfioc_anchor
*)addr
;
2193 struct pf_anchor
*anchor
;
2196 TAILQ_FOREACH(anchor
, &pf_anchors
, entries
)
2201 case DIOCGETANCHOR
: {
2202 struct pfioc_anchor
*pa
= (struct pfioc_anchor
*)addr
;
2203 struct pf_anchor
*anchor
;
2206 anchor
= TAILQ_FIRST(&pf_anchors
);
2207 while (anchor
!= NULL
&& nr
< pa
->nr
) {
2208 anchor
= TAILQ_NEXT(anchor
, entries
);
2214 bcopy(anchor
->name
, pa
->name
, sizeof(pa
->name
));
2218 case DIOCGETRULESETS
: {
2219 struct pfioc_ruleset
*pr
= (struct pfioc_ruleset
*)addr
;
2220 struct pf_anchor
*anchor
;
2221 struct pf_ruleset
*ruleset
;
2223 pr
->anchor
[PF_ANCHOR_NAME_SIZE
-1] = 0;
2224 if ((anchor
= pf_find_anchor(pr
->anchor
)) == NULL
) {
2229 TAILQ_FOREACH(ruleset
, &anchor
->rulesets
, entries
)
2234 case DIOCGETRULESET
: {
2235 struct pfioc_ruleset
*pr
= (struct pfioc_ruleset
*)addr
;
2236 struct pf_anchor
*anchor
;
2237 struct pf_ruleset
*ruleset
;
2240 if ((anchor
= pf_find_anchor(pr
->anchor
)) == NULL
) {
2244 ruleset
= TAILQ_FIRST(&anchor
->rulesets
);
2245 while (ruleset
!= NULL
&& nr
< pr
->nr
) {
2246 ruleset
= TAILQ_NEXT(ruleset
, entries
);
2249 if (ruleset
== NULL
)
2252 bcopy(ruleset
->name
, pr
->name
, sizeof(pr
->name
));
2256 case DIOCRCLRTABLES
: {
2257 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2259 if (io
->pfrio_esize
!= 0) {
2263 error
= pfr_clr_tables(&io
->pfrio_table
, &io
->pfrio_ndel
,
2264 io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2268 case DIOCRADDTABLES
: {
2269 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2271 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2275 error
= pfr_add_tables(io
->pfrio_buffer
, io
->pfrio_size
,
2276 &io
->pfrio_nadd
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2280 case DIOCRDELTABLES
: {
2281 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2283 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2287 error
= pfr_del_tables(io
->pfrio_buffer
, io
->pfrio_size
,
2288 &io
->pfrio_ndel
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2292 case DIOCRGETTABLES
: {
2293 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2295 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2299 error
= pfr_get_tables(&io
->pfrio_table
, io
->pfrio_buffer
,
2300 &io
->pfrio_size
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2304 case DIOCRGETTSTATS
: {
2305 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2307 if (io
->pfrio_esize
!= sizeof(struct pfr_tstats
)) {
2311 error
= pfr_get_tstats(&io
->pfrio_table
, io
->pfrio_buffer
,
2312 &io
->pfrio_size
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2316 case DIOCRCLRTSTATS
: {
2317 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2319 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2323 error
= pfr_clr_tstats(io
->pfrio_buffer
, io
->pfrio_size
,
2324 &io
->pfrio_nzero
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2328 case DIOCRSETTFLAGS
: {
2329 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2331 if (io
->pfrio_esize
!= sizeof(struct pfr_table
)) {
2335 error
= pfr_set_tflags(io
->pfrio_buffer
, io
->pfrio_size
,
2336 io
->pfrio_setflag
, io
->pfrio_clrflag
, &io
->pfrio_nchange
,
2337 &io
->pfrio_ndel
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2341 case DIOCRCLRADDRS
: {
2342 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2344 if (io
->pfrio_esize
!= 0) {
2348 error
= pfr_clr_addrs(&io
->pfrio_table
, &io
->pfrio_ndel
,
2349 io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2353 case DIOCRADDADDRS
: {
2354 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2356 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2360 error
= pfr_add_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2361 io
->pfrio_size
, &io
->pfrio_nadd
, io
->pfrio_flags
|
2362 PFR_FLAG_USERIOCTL
);
2366 case DIOCRDELADDRS
: {
2367 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2369 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2373 error
= pfr_del_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2374 io
->pfrio_size
, &io
->pfrio_ndel
, io
->pfrio_flags
|
2375 PFR_FLAG_USERIOCTL
);
2379 case DIOCRSETADDRS
: {
2380 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2382 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2386 error
= pfr_set_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2387 io
->pfrio_size
, &io
->pfrio_size2
, &io
->pfrio_nadd
,
2388 &io
->pfrio_ndel
, &io
->pfrio_nchange
, io
->pfrio_flags
|
2389 PFR_FLAG_USERIOCTL
);
2393 case DIOCRGETADDRS
: {
2394 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2396 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2400 error
= pfr_get_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2401 &io
->pfrio_size
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2405 case DIOCRGETASTATS
: {
2406 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2408 if (io
->pfrio_esize
!= sizeof(struct pfr_astats
)) {
2412 error
= pfr_get_astats(&io
->pfrio_table
, io
->pfrio_buffer
,
2413 &io
->pfrio_size
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2417 case DIOCRCLRASTATS
: {
2418 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2420 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2424 error
= pfr_clr_astats(&io
->pfrio_table
, io
->pfrio_buffer
,
2425 io
->pfrio_size
, &io
->pfrio_nzero
, io
->pfrio_flags
|
2426 PFR_FLAG_USERIOCTL
);
2430 case DIOCRTSTADDRS
: {
2431 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2433 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2437 error
= pfr_tst_addrs(&io
->pfrio_table
, io
->pfrio_buffer
,
2438 io
->pfrio_size
, &io
->pfrio_nmatch
, io
->pfrio_flags
|
2439 PFR_FLAG_USERIOCTL
);
2443 case DIOCRINABEGIN
: {
2444 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2446 if (io
->pfrio_esize
!= 0) {
2450 error
= pfr_ina_begin(&io
->pfrio_table
, &io
->pfrio_ticket
,
2451 &io
->pfrio_ndel
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2455 case DIOCRINACOMMIT
: {
2456 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2458 if (io
->pfrio_esize
!= 0) {
2462 error
= pfr_ina_commit(&io
->pfrio_table
, io
->pfrio_ticket
,
2463 &io
->pfrio_nadd
, &io
->pfrio_nchange
, io
->pfrio_flags
|
2464 PFR_FLAG_USERIOCTL
);
2468 case DIOCRINADEFINE
: {
2469 struct pfioc_table
*io
= (struct pfioc_table
*)addr
;
2471 if (io
->pfrio_esize
!= sizeof(struct pfr_addr
)) {
2475 error
= pfr_ina_define(&io
->pfrio_table
, io
->pfrio_buffer
,
2476 io
->pfrio_size
, &io
->pfrio_nadd
, &io
->pfrio_naddr
,
2477 io
->pfrio_ticket
, io
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2482 struct pf_osfp_ioctl
*io
= (struct pf_osfp_ioctl
*)addr
;
2484 error
= pf_osfp_add(io
);
2490 struct pf_osfp_ioctl
*io
= (struct pf_osfp_ioctl
*)addr
;
2492 error
= pf_osfp_get(io
);
2498 struct pfioc_trans
*io
= (struct pfioc_trans
*)addr
;
2499 struct pfioc_trans_e ioe
;
2500 struct pfr_table table
;
2503 if (io
->esize
!= sizeof(ioe
)) {
2507 for (i
= 0; i
< io
->size
; i
++) {
2508 if (copyin(io
->array
+i
, &ioe
, sizeof(ioe
))) {
2512 switch (ioe
.rs_num
) {
2514 case PF_RULESET_ALTQ
:
2515 if (ioe
.anchor
[0] || ioe
.ruleset
[0]) {
2519 if ((error
= pf_begin_altq(&ioe
.ticket
)))
2523 case PF_RULESET_TABLE
:
2524 bzero(&table
, sizeof(table
));
2525 strlcpy(table
.pfrt_anchor
, ioe
.anchor
,
2526 sizeof(table
.pfrt_anchor
));
2527 strlcpy(table
.pfrt_ruleset
, ioe
.ruleset
,
2528 sizeof(table
.pfrt_ruleset
));
2529 if ((error
= pfr_ina_begin(&table
,
2530 &ioe
.ticket
, NULL
, 0)))
2534 if ((error
= pf_begin_rules(&ioe
.ticket
,
2535 ioe
.rs_num
, ioe
.anchor
, ioe
.ruleset
)))
2539 if (copyout(&ioe
, io
->array
+i
, sizeof(io
->array
[i
]))) {
2547 case DIOCXROLLBACK
: {
2548 struct pfioc_trans
*io
= (struct pfioc_trans
*)addr
;
2549 struct pfioc_trans_e ioe
;
2550 struct pfr_table table
;
2553 if (io
->esize
!= sizeof(ioe
)) {
2557 for (i
= 0; i
< io
->size
; i
++) {
2558 if (copyin(io
->array
+i
, &ioe
, sizeof(ioe
))) {
2562 switch (ioe
.rs_num
) {
2564 case PF_RULESET_ALTQ
:
2565 if (ioe
.anchor
[0] || ioe
.ruleset
[0]) {
2569 if ((error
= pf_rollback_altq(ioe
.ticket
)))
2570 goto fail
; /* really bad */
2573 case PF_RULESET_TABLE
:
2574 bzero(&table
, sizeof(table
));
2575 strlcpy(table
.pfrt_anchor
, ioe
.anchor
,
2576 sizeof(table
.pfrt_anchor
));
2577 strlcpy(table
.pfrt_ruleset
, ioe
.ruleset
,
2578 sizeof(table
.pfrt_ruleset
));
2579 if ((error
= pfr_ina_rollback(&table
,
2580 ioe
.ticket
, NULL
, 0)))
2581 goto fail
; /* really bad */
2584 if ((error
= pf_rollback_rules(ioe
.ticket
,
2585 ioe
.rs_num
, ioe
.anchor
, ioe
.ruleset
)))
2586 goto fail
; /* really bad */
2594 struct pfioc_trans
*io
= (struct pfioc_trans
*)addr
;
2595 struct pfioc_trans_e ioe
;
2596 struct pfr_table table
;
2597 struct pf_ruleset
*rs
;
2600 if (io
->esize
!= sizeof(ioe
)) {
2604 /* first makes sure everything will succeed */
2605 for (i
= 0; i
< io
->size
; i
++) {
2606 if (copyin(io
->array
+i
, &ioe
, sizeof(ioe
))) {
2610 switch (ioe
.rs_num
) {
2612 case PF_RULESET_ALTQ
:
2613 if (ioe
.anchor
[0] || ioe
.ruleset
[0]) {
2617 if (!altqs_inactive_open
|| ioe
.ticket
!=
2618 ticket_altqs_inactive
) {
2624 case PF_RULESET_TABLE
:
2625 rs
= pf_find_ruleset(ioe
.anchor
, ioe
.ruleset
);
2626 if (rs
== NULL
|| !rs
->topen
|| ioe
.ticket
!=
2633 if (ioe
.rs_num
< 0 || ioe
.rs_num
>=
2638 rs
= pf_find_ruleset(ioe
.anchor
, ioe
.ruleset
);
2640 !rs
->rules
[ioe
.rs_num
].inactive
.open
||
2641 rs
->rules
[ioe
.rs_num
].inactive
.ticket
!=
2649 /* now do the commit - no errors should happen here */
2650 for (i
= 0; i
< io
->size
; i
++) {
2651 if (copyin(io
->array
+i
, &ioe
, sizeof(ioe
))) {
2655 switch (ioe
.rs_num
) {
2657 case PF_RULESET_ALTQ
:
2658 if ((error
= pf_commit_altq(ioe
.ticket
)))
2659 goto fail
; /* really bad */
2662 case PF_RULESET_TABLE
:
2663 bzero(&table
, sizeof(table
));
2664 strlcpy(table
.pfrt_anchor
, ioe
.anchor
,
2665 sizeof(table
.pfrt_anchor
));
2666 strlcpy(table
.pfrt_ruleset
, ioe
.ruleset
,
2667 sizeof(table
.pfrt_ruleset
));
2668 if ((error
= pfr_ina_commit(&table
, ioe
.ticket
,
2670 goto fail
; /* really bad */
2673 if ((error
= pf_commit_rules(ioe
.ticket
,
2674 ioe
.rs_num
, ioe
.anchor
, ioe
.ruleset
)))
2675 goto fail
; /* really bad */
2682 case DIOCGETSRCNODES
: {
2683 struct pfioc_src_nodes
*psn
= (struct pfioc_src_nodes
*)addr
;
2684 struct pf_src_node
*n
;
2685 struct pf_src_node
*p
, pstore
;
2687 int space
= psn
->psn_len
;
2691 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
)
2694 psn
->psn_len
= sizeof(struct pf_src_node
) * nr
;
2699 p
= psn
->psn_src_nodes
;
2700 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
2701 int secs
= time_second
;
2703 if ((nr
+ 1) * sizeof(*p
) > (unsigned)psn
->psn_len
)
2706 bcopy(n
, &pstore
, sizeof(pstore
));
2707 if (n
->rule
.ptr
!= NULL
)
2708 pstore
.rule
.nr
= n
->rule
.ptr
->nr
;
2709 pstore
.creation
= secs
- pstore
.creation
;
2710 if (pstore
.expire
> secs
)
2711 pstore
.expire
-= secs
;
2714 error
= copyout(&pstore
, p
, sizeof(*p
));
2722 psn
->psn_len
= sizeof(struct pf_src_node
) * nr
;
2727 case DIOCCLRSRCNODES
: {
2728 struct pf_src_node
*n
;
2729 struct pf_state
*state
;
2732 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
2733 state
->src_node
= NULL
;
2734 state
->nat_src_node
= NULL
;
2736 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
2740 pf_purge_expired_src_nodes();
2741 pf_status
.src_nodes
= 0;
2746 case DIOCSETHOSTID
: {
2747 u_int32_t
*hostid
= (u_int32_t
*)addr
;
2753 pf_status
.hostid
= *hostid
;
2763 case DIOCIGETIFACES
: {
2764 struct pfioc_iface
*io
= (struct pfioc_iface
*)addr
;
2766 if (io
->pfiio_esize
!= sizeof(struct pfi_if
)) {
2770 error
= pfi_get_ifaces(io
->pfiio_name
, io
->pfiio_buffer
,
2771 &io
->pfiio_size
, io
->pfiio_flags
);
2775 case DIOCICLRISTATS
: {
2776 struct pfioc_iface
*io
= (struct pfioc_iface
*)addr
;
2778 error
= pfi_clr_istats(io
->pfiio_name
, &io
->pfiio_nzero
,
2792 * XXX - Check for version mismatch!!!
2795 pf_clear_states(void)
2797 struct pf_state
*state
;
2799 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
2800 state
->timeout
= PFTM_PURGE
;
2802 /* don't send out individual delete messages */
2803 state
->sync_flags
= PFSTATE_NOSYNC
;
2806 pf_purge_expired_states();
2807 pf_status
.states
= 0;
2810 * XXX This is called on module unload, we do not want to sync that over? */
2812 pfsync_clear_states(pf_status
.hostid
, psk
->psk_ifname
);
2817 pf_clear_tables(void)
2819 struct pfioc_table io
;
2822 bzero(&io
, sizeof(io
));
2824 error
= pfr_clr_tables(&io
.pfrio_table
, &io
.pfrio_ndel
,
2831 pf_clear_srcnodes(void)
2833 struct pf_src_node
*n
;
2834 struct pf_state
*state
;
2836 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
2837 state
->src_node
= NULL
;
2838 state
->nat_src_node
= NULL
;
2840 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
2844 pf_purge_expired_src_nodes();
2845 pf_status
.src_nodes
= 0;
2848 * XXX - Check for version mismatch!!!
2852 * Duplicate pfctl -Fa operation to get rid of as much as we can.
2861 callout_stop(&pf_expire_to
);
2863 pf_status
.running
= 0;
2865 if ((error
= pf_begin_rules(&t
[0], PF_RULESET_SCRUB
, &nn
,
2867 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: SCRUB\n"));
2870 if ((error
= pf_begin_rules(&t
[1], PF_RULESET_FILTER
, &nn
,
2872 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: FILTER\n"));
2873 break; /* XXX: rollback? */
2875 if ((error
= pf_begin_rules(&t
[2], PF_RULESET_NAT
, &nn
, &nn
))
2877 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: NAT\n"));
2878 break; /* XXX: rollback? */
2880 if ((error
= pf_begin_rules(&t
[3], PF_RULESET_BINAT
, &nn
, &nn
))
2882 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: BINAT\n"));
2883 break; /* XXX: rollback? */
2885 if ((error
= pf_begin_rules(&t
[4], PF_RULESET_RDR
, &nn
, &nn
))
2887 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: RDR\n"));
2888 break; /* XXX: rollback? */
2891 /* XXX: these should always succeed here */
2892 pf_commit_rules(t
[0], PF_RULESET_SCRUB
, &nn
, &nn
);
2893 pf_commit_rules(t
[1], PF_RULESET_FILTER
, &nn
, &nn
);
2894 pf_commit_rules(t
[2], PF_RULESET_NAT
, &nn
, &nn
);
2895 pf_commit_rules(t
[3], PF_RULESET_BINAT
, &nn
, &nn
);
2896 pf_commit_rules(t
[4], PF_RULESET_RDR
, &nn
, &nn
);
2898 if ((error
= pf_clear_tables()) != 0)
2902 if ((error
= pf_begin_altq(&t
[0])) != 0) {
2903 DPFPRINTF(PF_DEBUG_MISC
, ("shutdown_pf: ALTQ\n"));
2906 pf_commit_altq(t
[0]);
2911 pf_clear_srcnodes();
2913 /* status does not use malloced mem so no need to cleanup */
2914 /* fingerprints and interfaces have their own cleanup code */
2921 pf_check_in(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
, int dir
)
2924 * DragonFly's version of pf uses FreeBSD's native host byte ordering
2925 * for ip_len/ip_off. This is why we don't have to change byte order
2926 * like the FreeBSD-5 version does.
2930 chk
= pf_test(PF_IN
, ifp
, m
);
2939 pf_check_out(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
, int dir
)
2942 * DragonFly's version of pf uses FreeBSD's native host byte ordering
2943 * for ip_len/ip_off. This is why we don't have to change byte order
2944 * like the FreeBSD-5 version does.
2948 /* We need a proper CSUM befor we start (s. OpenBSD ip_output) */
2949 if ((*m
)->m_pkthdr
.csum_flags
& CSUM_DELAY_DATA
) {
2950 in_delayed_cksum(*m
);
2951 (*m
)->m_pkthdr
.csum_flags
&= ~CSUM_DELAY_DATA
;
2953 chk
= pf_test(PF_OUT
, ifp
, m
);
2963 pf_check6_in(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
, int dir
)
2966 * IPv6 is not affected by ip_len/ip_off byte order changes.
2970 chk
= pf_test6(PF_IN
, ifp
, m
);
2979 pf_check6_out(void *arg
, struct mbuf
**m
, struct ifnet
*ifp
, int dir
)
2982 * IPv6 is not affected by ip_len/ip_off byte order changes.
2986 /* We need a proper CSUM before we start (s. OpenBSD ip_output) */
2987 if ((*m
)->m_pkthdr
.csum_flags
& CSUM_DELAY_DATA
) {
2988 in_delayed_cksum(*m
);
2989 (*m
)->m_pkthdr
.csum_flags
&= ~CSUM_DELAY_DATA
;
2991 chk
= pf_test6(PF_OUT
, ifp
, m
);
3003 struct pfil_head
*pfh_inet
;
3005 struct pfil_head
*pfh_inet6
;
3011 pfh_inet
= pfil_head_get(PFIL_TYPE_AF
, AF_INET
);
3012 if (pfh_inet
== NULL
)
3014 pfil_add_hook(pf_check_in
, NULL
, PFIL_IN
| PFIL_WAITOK
, pfh_inet
);
3015 pfil_add_hook(pf_check_out
, NULL
, PFIL_OUT
| PFIL_WAITOK
, pfh_inet
);
3017 pfh_inet6
= pfil_head_get(PFIL_TYPE_AF
, AF_INET6
);
3018 if (pfh_inet6
== NULL
) {
3019 pfil_remove_hook(pf_check_in
, NULL
, PFIL_IN
| PFIL_WAITOK
,
3021 pfil_remove_hook(pf_check_out
, NULL
, PFIL_OUT
| PFIL_WAITOK
,
3025 pfil_add_hook(pf_check6_in
, NULL
, PFIL_IN
| PFIL_WAITOK
, pfh_inet6
);
3026 pfil_add_hook(pf_check6_out
, NULL
, PFIL_OUT
| PFIL_WAITOK
, pfh_inet6
);
3036 struct pfil_head
*pfh_inet
;
3038 struct pfil_head
*pfh_inet6
;
3041 if (pf_pfil_hooked
== 0)
3044 pfh_inet
= pfil_head_get(PFIL_TYPE_AF
, AF_INET
);
3045 if (pfh_inet
== NULL
)
3047 pfil_remove_hook(pf_check_in
, NULL
, PFIL_IN
| PFIL_WAITOK
,
3049 pfil_remove_hook(pf_check_out
, NULL
, PFIL_OUT
| PFIL_WAITOK
,
3052 pfh_inet6
= pfil_head_get(PFIL_TYPE_AF
, AF_INET6
);
3053 if (pfh_inet6
== NULL
)
3055 pfil_remove_hook(pf_check6_in
, NULL
, PFIL_IN
| PFIL_WAITOK
,
3057 pfil_remove_hook(pf_check6_out
, NULL
, PFIL_OUT
| PFIL_WAITOK
,
3071 error
= dev_ops_add(&pf_ops
, 0, 0);
3074 pf_dev
= make_dev(&pf_ops
, 0, 0, 0, 0600, PF_NAME
);
3077 dev_ops_remove(&pf_ops
, 0, 0);
3088 pf_status
.running
= 0;
3089 error
= dehook_pf();
3092 * Should not happen!
3093 * XXX Due to error code ESRCH, kldunload will show
3094 * a message like 'No such process'.
3096 kprintf("pfil unregistration fail\n");
3104 dev_ops_remove(&pf_ops
, 0, 0);
3109 pf_modevent(module_t mod
, int type
, void *data
)
3119 error
= pf_unload();
3128 static moduledata_t pf_mod
= {
3134 DECLARE_MODULE(pf
, pf_mod
, SI_SUB_PSEUDO
, SI_ORDER_FIRST
);
3135 MODULE_VERSION(pf
, PF_MODVER
);