2 * Copyright (c) 1993 Daniel Boulet
3 * Copyright (c) 1994 Ugen J.S.Antsilevich
4 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
5 * Copyright (c) 2015 - 2016 The DragonFly Project. All rights reserved.
7 * This code is derived from software contributed to The DragonFly Project
8 * by Bill Yuan <bycn82@dragonflybsd.org>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42 #error IPFIREWALL3 requires INET.
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
49 #include <sys/kernel.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/ucred.h>
56 #include <sys/in_cksum.h>
58 #include <sys/thread2.h>
59 #include <sys/mplock2.h>
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_var.h>
67 #include <netinet/ip_icmp.h>
68 #include <netinet/tcp.h>
69 #include <netinet/tcp_timer.h>
70 #include <netinet/tcp_var.h>
71 #include <netinet/tcpip.h>
72 #include <netinet/udp.h>
73 #include <netinet/udp_var.h>
74 #include <netinet/ip_divert.h>
75 #include <netinet/if_ether.h>
78 #include <net/radix.h>
79 #include <net/route.h>
81 #include <net/netmsg2.h>
83 #include <net/ipfw3/ip_fw.h>
84 #include <net/ipfw3/ip_fw3_log.h>
85 #include <net/ipfw3/ip_fw3_table.h>
86 #include <net/ipfw3/ip_fw3_sync.h>
87 #include <net/ipfw3_basic/ip_fw3_basic.h>
88 #include <net/ipfw3_nat/ip_fw3_nat.h>
89 #include <net/dummynet3/ip_dummynet3.h>
91 MALLOC_DEFINE(M_IPFW3
, "IPFW3", "ip_fw3 default module");
93 #ifdef IPFIREWALL_DEBUG
94 #define DPRINTF(fmt, ...) \
97 kprintf(fmt, __VA_ARGS__); \
100 #define DPRINTF(fmt, ...) ((void)0)
103 #define MAX_MODULE 10
104 #define MAX_OPCODE_PER_MODULE 100
106 #define IPFW_AUTOINC_STEP_MIN 1
107 #define IPFW_AUTOINC_STEP_MAX 1000
108 #define IPFW_AUTOINC_STEP_DEF 100
112 struct netmsg_base base
;
113 const struct ipfw_ioc_rule
*ioc_rule
;
115 struct ip_fw
*next_rule
;
116 struct ip_fw
*prev_rule
;
117 struct ip_fw
*sibling
; /* sibling in prevous CPU */
121 struct netmsg_base base
;
123 struct ip_fw
*start_rule
;
124 struct ip_fw
*prev_rule
;
125 struct ipfw_ioc_state
*ioc_state
;
132 struct netmsg_base base
;
133 struct ip_fw
*start_rule
;
138 ip_fw_ctl_t
*ipfw_ctl_nat_ptr
= NULL
;
140 /* handlers which implemented in ipfw_basic module */
141 ipfw_basic_delete_state_t
*ipfw_basic_flush_state_prt
= NULL
;
142 ipfw_basic_append_state_t
*ipfw_basic_append_state_prt
= NULL
;
144 extern int ip_fw_loaded
;
145 static uint32_t static_count
; /* # of static rules */
146 static uint32_t static_ioc_len
; /* bytes of static rules */
147 static int ipfw_flushing
;
150 static int autoinc_step
= IPFW_AUTOINC_STEP_DEF
;
152 static int ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS
);
153 static int ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS
);
155 SYSCTL_NODE(_net_inet_ip
, OID_AUTO
, fw3
, CTLFLAG_RW
, 0, "Firewall");
156 SYSCTL_PROC(_net_inet_ip_fw3
, OID_AUTO
, enable
, CTLTYPE_INT
| CTLFLAG_RW
,
157 &fw3_enable
, 0, ipfw_sysctl_enable
, "I", "Enable ipfw");
158 SYSCTL_PROC(_net_inet_ip_fw3
, OID_AUTO
, autoinc_step
, CTLTYPE_INT
| CTLFLAG_RW
,
159 &autoinc_step
, 0, ipfw_sysctl_autoinc_step
, "I",
160 "Rule number autincrement step");
161 SYSCTL_INT(_net_inet_ip_fw3
, OID_AUTO
,one_pass
,CTLFLAG_RW
,
163 "Only do a single pass through ipfw when using dummynet(4)");
164 SYSCTL_INT(_net_inet_ip_fw3
, OID_AUTO
, debug
, CTLFLAG_RW
,
165 &fw_debug
, 0, "Enable printing of debug ip_fw statements");
166 SYSCTL_INT(_net_inet_ip_fw3
, OID_AUTO
, verbose
, CTLFLAG_RW
,
167 &fw_verbose
, 0, "Log matches to ipfw rules");
168 SYSCTL_INT(_net_inet_ip_fw3
, OID_AUTO
, static_count
, CTLFLAG_RD
,
169 &static_count
, 0, "Number of static rules");
171 filter_func filter_funcs
[MAX_MODULE
][MAX_OPCODE_PER_MODULE
];
172 struct ipfw_module ipfw_modules
[MAX_MODULE
];
173 struct ipfw_context
*ipfw_ctx
[MAXCPU
];
174 struct ipfw_sync_context sync_ctx
;
175 static int ipfw_ctl(struct sockopt
*sopt
);
179 check_accept(int *cmd_ctl
, int *cmd_val
, struct ip_fw_args
**args
,
180 struct ip_fw
**f
, ipfw_insn
*cmd
, uint16_t ip_len
);
182 check_deny(int *cmd_ctl
, int *cmd_val
, struct ip_fw_args
**args
,
183 struct ip_fw
**f
, ipfw_insn
*cmd
, uint16_t ip_len
);
184 void init_module(void);
188 register_ipfw_module(int module_id
,char *module_name
)
190 struct ipfw_module
*tmp
;
194 for (i
=0; i
< MAX_MODULE
; i
++) {
195 if (tmp
->type
== 0) {
198 strncpy(tmp
->name
, module_name
, strlen(module_name
));
203 kprintf("ipfw3 module %s loaded\n", module_name
);
207 unregister_ipfw_module(int module_id
)
209 struct ipfw_module
*tmp
;
212 int i
, len
, cmdlen
, found
;
216 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
217 fw
= ctx
->ipfw_rule_chain
;
218 for (; fw
; fw
= fw
->next
) {
219 for (len
= fw
->cmd_len
, cmd
= fw
->cmd
; len
> 0;
221 cmd
= (ipfw_insn
*)((uint32_t *)cmd
+ cmdlen
)) {
223 if (cmd
->module
== 0 &&
224 (cmd
->opcode
== 0 || cmd
->opcode
== 1)) {
225 //action accept or deny
226 } else if (cmd
->module
== module_id
) {
236 for (i
= 0; i
< MAX_MODULE
; i
++) {
237 if (tmp
->type
== 1 && tmp
->id
== module_id
) {
239 kprintf("ipfw3 module %s unloaded\n",
246 for (i
= 0; i
< MAX_OPCODE_PER_MODULE
; i
++) {
247 if (module_id
== 0) {
248 if (i
==0 || i
== 1) {
252 filter_funcs
[module_id
][i
] = NULL
;
259 register_ipfw_filter_funcs(int module
, int opcode
, filter_func func
)
261 filter_funcs
[module
][opcode
] = func
;
265 check_accept(int *cmd_ctl
, int *cmd_val
, struct ip_fw_args
**args
,
266 struct ip_fw
**f
, ipfw_insn
*cmd
, uint16_t ip_len
)
268 *cmd_val
= IP_FW_PASS
;
269 *cmd_ctl
= IP_FW_CTL_DONE
;
271 ipfw_log((*args
)->m
, (*args
)->eh
, cmd
->arg1
);
276 check_deny(int *cmd_ctl
, int *cmd_val
, struct ip_fw_args
**args
,
277 struct ip_fw
**f
, ipfw_insn
*cmd
, uint16_t ip_len
)
279 *cmd_val
= IP_FW_DENY
;
280 *cmd_ctl
= IP_FW_CTL_DONE
;
282 ipfw_log((*args
)->m
, (*args
)->eh
, cmd
->arg1
);
289 memset(ipfw_modules
, 0, sizeof(struct ipfw_module
) * MAX_MODULE
);
290 memset(filter_funcs
, 0, sizeof(filter_func
) *
291 MAX_OPCODE_PER_MODULE
* MAX_MODULE
);
292 register_ipfw_filter_funcs(0, O_BASIC_ACCEPT
,
293 (filter_func
)check_accept
);
294 register_ipfw_filter_funcs(0, O_BASIC_DENY
, (filter_func
)check_deny
);
298 ipfw_free_rule(struct ip_fw
*rule
)
300 kfree(rule
, M_IPFW3
);
305 static struct ip_fw
*
306 lookup_next_rule(struct ip_fw
*me
)
308 struct ip_fw
*rule
= NULL
;
311 /* look for action, in case it is a skipto */
312 cmd
= ACTION_PTR(me
);
313 if ((int)cmd
->module
== MODULE_BASIC_ID
&&
314 (int)cmd
->opcode
== O_BASIC_SKIPTO
) {
315 for (rule
= me
->next
; rule
; rule
= rule
->next
) {
316 if (rule
->rulenum
>= cmd
->arg1
)
320 if (rule
== NULL
) { /* failure or not a skipto */
323 me
->next_rule
= rule
;
328 * rules are stored in ctx->ipfw_rule_chain.
329 * and each rule is combination of multiple cmds.(ipfw_insn)
330 * in each rule, it begin with filter cmds. and end with action cmds.
331 * 'outer/inner loop' are looping the rules/cmds.
332 * it will invoke the cmds relatived function according to the cmd's
333 * module id and opcode id. and process according to return value.
336 ipfw_chk(struct ip_fw_args
*args
)
338 struct mbuf
*m
= args
->m
;
339 struct ip
*ip
= mtod(m
, struct ip
*);
340 struct ip_fw
*f
= NULL
; /* matching rule */
341 int cmd_val
= IP_FW_PASS
;
343 struct divert_info
*divinfo
;
346 * hlen The length of the IPv4 header.
347 * hlen >0 means we have an IPv4 packet.
349 u_int hlen
= 0; /* hlen >0 means we have an IP pkt */
352 * offset The offset of a fragment. offset != 0 means that
353 * we have a fragment at this offset of an IPv4 packet.
354 * offset == 0 means that (if this is an IPv4 packet)
355 * this is the first or only fragment.
360 uint16_t src_port
= 0, dst_port
= 0; /* NOTE: host format */
361 struct in_addr src_ip
, dst_ip
; /* NOTE: network format */
363 uint8_t prev_module
= -1, prev_opcode
= -1; /* previous module & opcode */
364 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
366 if (m
->m_pkthdr
.fw_flags
& IPFW_MBUF_GENERATED
)
367 return IP_FW_PASS
; /* accept */
369 if (args
->eh
== NULL
|| /* layer 3 packet */
370 (m
->m_pkthdr
.len
>= sizeof(struct ip
) &&
371 ntohs(args
->eh
->ether_type
) == ETHERTYPE_IP
))
372 hlen
= ip
->ip_hl
<< 2;
375 * Collect parameters into local variables for faster matching.
377 if (hlen
== 0) { /* do not grab addresses for non-ip pkts */
378 proto
= args
->f_id
.proto
= 0; /* mark f_id invalid */
379 goto after_ip_checks
;
382 proto
= args
->f_id
.proto
= ip
->ip_p
;
385 if (args
->eh
!= NULL
) { /* layer 2 packets are as on the wire */
386 offset
= ntohs(ip
->ip_off
) & IP_OFFMASK
;
387 ip_len
= ntohs(ip
->ip_len
);
389 offset
= ip
->ip_off
& IP_OFFMASK
;
393 #define PULLUP_TO(len) \
395 if (m->m_len < (len)) { \
396 args->m = m = m_pullup(m, (len)); \
398 goto pullup_failed; \
399 ip = mtod(m, struct ip *); \
409 PULLUP_TO(hlen
+ sizeof(struct tcphdr
));
410 tcp
= L3HDR(struct tcphdr
, ip
);
411 dst_port
= tcp
->th_dport
;
412 src_port
= tcp
->th_sport
;
413 args
->f_id
.flags
= tcp
->th_flags
;
421 PULLUP_TO(hlen
+ sizeof(struct udphdr
));
422 udp
= L3HDR(struct udphdr
, ip
);
423 dst_port
= udp
->uh_dport
;
424 src_port
= udp
->uh_sport
;
431 L3HDR(struct icmp
, ip
)->icmp_type
;
441 args
->f_id
.src_ip
= ntohl(src_ip
.s_addr
);
442 args
->f_id
.dst_ip
= ntohl(dst_ip
.s_addr
);
443 args
->f_id
.src_port
= src_port
= ntohs(src_port
);
444 args
->f_id
.dst_port
= dst_port
= ntohs(dst_port
);
449 * Packet has already been tagged. Look for the next rule
450 * to restart processing.
452 * If fw3_one_pass != 0 then just accept it.
453 * XXX should not happen here, but optimized out in
459 /* This rule is being/has been flushed */
463 f
= args
->rule
->next_rule
;
465 f
= lookup_next_rule(args
->rule
);
468 * Find the starting rule. It can be either the first
469 * one, or the one after divert_rule if asked so.
473 mtag
= m_tag_find(m
, PACKET_TAG_IPFW_DIVERT
, NULL
);
475 divinfo
= m_tag_data(mtag
);
476 skipto
= divinfo
->skipto
;
481 f
= ctx
->ipfw_rule_chain
;
482 if (args
->eh
== NULL
&& skipto
!= 0) {
483 /* No skipto during rule flushing */
487 if (skipto
>= IPFW_DEFAULT_RULE
) {
488 return IP_FW_DENY
; /* invalid */
490 while (f
&& f
->rulenum
<= skipto
) {
493 if (f
== NULL
) { /* drop packet */
496 } else if (ipfw_flushing
) {
497 /* Rules are being flushed; skip to default rule */
498 f
= ctx
->ipfw_default_rule
;
501 if ((mtag
= m_tag_find(m
, PACKET_TAG_IPFW_DIVERT
, NULL
)) != NULL
) {
502 m_tag_delete(m
, mtag
);
506 * Now scan the rules, and parse microinstructions for each rule.
508 int prev_val
; /* previous result of 'or' filter */
512 /* foreach rule in chain */
513 for (; f
; f
= f
->next
) {
514 again
: /* check the rule again*/
515 if (ctx
->ipfw_set_disable
& (1 << f
->set
)) {
520 /* foreach cmd in rule */
521 for (l
= f
->cmd_len
, cmd
= f
->cmd
; l
> 0; l
-= cmdlen
,
522 cmd
= (ipfw_insn
*)((uint32_t *)cmd
+ cmdlen
)) {
525 /* skip 'or' filter when already match */
526 if (cmd
->len
& F_OR
&&
527 cmd
->module
== prev_module
&&
528 cmd
->opcode
== prev_opcode
&&
533 check_body
: /* check the body of the rule again.*/
534 (filter_funcs
[cmd
->module
][cmd
->opcode
])
535 (&cmd_ctl
, &cmd_val
, &args
, &f
, cmd
, ip_len
);
538 if (prev_val
== 0) /* but 'or' failed */
541 case IP_FW_CTL_AGAIN
:
548 case IP_FW_CTL_CHK_STATE
:
549 /* update the cmd and l */
551 l
= f
->cmd_len
- f
->act_ofs
;
554 if (cmd
->len
& F_NOT
)
557 if (cmd
->len
& F_OR
) { /* has 'or' */
558 if (!cmd_val
) { /* not matched */
559 if(prev_val
== -1){ /* first 'or' */
561 prev_module
= cmd
->module
;
562 prev_opcode
= cmd
->opcode
;
563 } else if (prev_module
== cmd
->module
&&
564 prev_opcode
== cmd
->opcode
) {
565 /* continuous 'or' filter */
566 } else if (prev_module
!= cmd
->module
||
567 prev_opcode
!= cmd
->opcode
) {
568 /* 'or' filter changed */
573 prev_module
= cmd
->module
;
574 prev_opcode
= cmd
->opcode
;
577 } else { /* has 'or' and matched */
579 prev_module
= cmd
->module
;
580 prev_opcode
= cmd
->opcode
;
583 if (!cmd_val
) { /* not matched */
587 /* previous 'or' not matched */
595 } /* end of inner for, scan opcodes */
596 next_rule
:; /* try next rule */
597 } /* end of outer for, scan rules */
598 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
602 /* Update statistics */
605 f
->timestamp
= time_second
;
610 kprintf("pullup failed\n");
615 ipfw_dummynet_io(struct mbuf
*m
, int pipe_nr
, int dir
, struct ip_fw_args
*fwa
)
620 const struct ipfw_flow_id
*id
;
621 struct dn_flow_id
*fid
;
625 mtag
= m_tag_get(PACKET_TAG_DUMMYNET
, sizeof(*pkt
), M_NOWAIT
);
630 m_tag_prepend(m
, mtag
);
632 pkt
= m_tag_data(mtag
);
633 bzero(pkt
, sizeof(*pkt
));
635 cmd
= (ipfw_insn
*)((uint32_t *)fwa
->rule
->cmd
+ fwa
->rule
->act_ofs
);
636 KASSERT(cmd
->opcode
== O_DUMMYNET_PIPE
||
637 cmd
->opcode
== O_DUMMYNET_QUEUE
,
638 ("Rule is not PIPE or QUEUE, opcode %d", cmd
->opcode
));
641 pkt
->dn_flags
= (dir
& DN_FLAGS_DIR_MASK
);
643 pkt
->pipe_nr
= pipe_nr
;
645 pkt
->cpuid
= mycpuid
;
646 pkt
->msgport
= netisr_curport();
650 fid
->fid_dst_ip
= id
->dst_ip
;
651 fid
->fid_src_ip
= id
->src_ip
;
652 fid
->fid_dst_port
= id
->dst_port
;
653 fid
->fid_src_port
= id
->src_port
;
654 fid
->fid_proto
= id
->proto
;
655 fid
->fid_flags
= id
->flags
;
657 pkt
->dn_priv
= fwa
->rule
;
659 if ((int)cmd
->opcode
== O_DUMMYNET_PIPE
)
660 pkt
->dn_flags
|= DN_FLAGS_IS_PIPE
;
662 m
->m_pkthdr
.fw_flags
|= DUMMYNET_MBUF_TAGGED
;
666 ipfw_inc_static_count(struct ip_fw
*rule
)
668 /* Static rule's counts are updated only on CPU0 */
669 KKASSERT(mycpuid
== 0);
672 static_ioc_len
+= IOC_RULESIZE(rule
);
676 ipfw_dec_static_count(struct ip_fw
*rule
)
678 int l
= IOC_RULESIZE(rule
);
680 /* Static rule's counts are updated only on CPU0 */
681 KKASSERT(mycpuid
== 0);
683 KASSERT(static_count
> 0, ("invalid static count %u", static_count
));
686 KASSERT(static_ioc_len
>= l
,
687 ("invalid static len %u", static_ioc_len
));
692 ipfw_add_rule_dispatch(netmsg_t nmsg
)
694 struct netmsg_ipfw
*fwmsg
= (struct netmsg_ipfw
*)nmsg
;
695 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
696 struct ip_fw
*rule
, *prev
,*next
;
697 const struct ipfw_ioc_rule
*ioc_rule
;
699 ioc_rule
= fwmsg
->ioc_rule
;
700 // create rule by ioc_rule
701 rule
= kmalloc(RULESIZE(ioc_rule
), M_IPFW3
, M_WAITOK
| M_ZERO
);
702 rule
->act_ofs
= ioc_rule
->act_ofs
;
703 rule
->cmd_len
= ioc_rule
->cmd_len
;
704 rule
->rulenum
= ioc_rule
->rulenum
;
705 rule
->set
= ioc_rule
->set
;
706 bcopy(ioc_rule
->cmd
, rule
->cmd
, rule
->cmd_len
* 4);
708 for (prev
= NULL
, next
= ctx
->ipfw_rule_chain
;
709 next
; prev
= next
, next
= next
->next
) {
710 if (next
->rulenum
> ioc_rule
->rulenum
) {
714 KASSERT(next
!= NULL
, ("no default rule?!"));
717 * Insert rule into the pre-determined position
723 rule
->next
= ctx
->ipfw_rule_chain
;
724 ctx
->ipfw_rule_chain
= rule
;
728 * if sibiling in last CPU is exists,
729 * then it's sibling should be current rule
731 if (fwmsg
->sibling
!= NULL
) {
732 fwmsg
->sibling
->sibling
= rule
;
734 /* prepare for next CPU */
735 fwmsg
->sibling
= rule
;
738 /* Statistics only need to be updated once */
739 ipfw_inc_static_count(rule
);
741 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
745 * confirm the rulenumber
746 * call dispatch function to add rule into the list
747 * Update the statistic
750 ipfw_add_rule(struct ipfw_ioc_rule
*ioc_rule
)
752 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
753 struct netmsg_ipfw fwmsg
;
754 struct netmsg_base
*nmsg
;
757 IPFW_ASSERT_CFGPORT(&curthread
->td_msgport
);
760 * If rulenum is 0, find highest numbered rule before the
761 * default rule, and add rule number incremental step.
763 if (ioc_rule
->rulenum
== 0) {
764 int step
= autoinc_step
;
766 KKASSERT(step
>= IPFW_AUTOINC_STEP_MIN
&&
767 step
<= IPFW_AUTOINC_STEP_MAX
);
770 * Locate the highest numbered rule before default
772 for (f
= ctx
->ipfw_rule_chain
; f
; f
= f
->next
) {
773 if (f
->rulenum
== IPFW_DEFAULT_RULE
)
775 ioc_rule
->rulenum
= f
->rulenum
;
777 if (ioc_rule
->rulenum
< IPFW_DEFAULT_RULE
- step
)
778 ioc_rule
->rulenum
+= step
;
780 KASSERT(ioc_rule
->rulenum
!= IPFW_DEFAULT_RULE
&&
781 ioc_rule
->rulenum
!= 0,
782 ("invalid rule num %d", ioc_rule
->rulenum
));
784 bzero(&fwmsg
, sizeof(fwmsg
));
786 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
787 0, ipfw_add_rule_dispatch
);
788 fwmsg
.ioc_rule
= ioc_rule
;
790 netisr_domsg(nmsg
, 0);
792 DPRINTF("++ installed rule %d, static count now %d\n",
793 ioc_rule
->rulenum
, static_count
);
797 * Free storage associated with a static rule (including derived
799 * The caller is in charge of clearing rule pointers to avoid
801 * @return a pointer to the next entry.
802 * Arguments are not checked, so they better be correct.
803 * Must be called at splimp().
805 static struct ip_fw
*
806 ipfw_delete_rule(struct ipfw_context
*ctx
,
807 struct ip_fw
*prev
, struct ip_fw
*rule
)
810 ctx
->ipfw_rule_chain
= rule
->next
;
812 prev
->next
= rule
->next
;
814 if (mycpuid
== IPFW_CFGCPUID
)
815 ipfw_dec_static_count(rule
);
817 kfree(rule
, M_IPFW3
);
823 ipfw_flush_rule_dispatch(netmsg_t nmsg
)
825 struct lwkt_msg
*lmsg
= &nmsg
->lmsg
;
826 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
827 struct ip_fw
*rule
, *the_rule
;
828 int kill_default
= lmsg
->u
.ms_result
;
830 rule
= ctx
->ipfw_rule_chain
;
831 while (rule
!= NULL
) {
832 if (rule
->rulenum
== IPFW_DEFAULT_RULE
&& kill_default
== 0) {
833 ctx
->ipfw_rule_chain
= rule
;
838 if (mycpuid
== IPFW_CFGCPUID
)
839 ipfw_dec_static_count(the_rule
);
841 kfree(the_rule
, M_IPFW3
);
844 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
848 ipfw_append_state_dispatch(netmsg_t nmsg
)
850 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
851 struct ipfw_ioc_state
*ioc_state
= dmsg
->ioc_state
;
852 (*ipfw_basic_append_state_prt
)(ioc_state
);
853 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
857 ipfw_delete_state_dispatch(netmsg_t nmsg
)
859 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
860 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
861 struct ip_fw
*rule
= ctx
->ipfw_rule_chain
;
862 while (rule
!= NULL
) {
863 if (rule
->rulenum
== dmsg
->rulenum
) {
869 (*ipfw_basic_flush_state_prt
)(rule
);
870 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
874 * Deletes all rules from a chain (including the default rule
875 * if the second argument is set).
876 * Must be called at splimp().
879 ipfw_ctl_flush_rule(int kill_default
)
881 struct netmsg_del dmsg
;
882 struct netmsg_base nmsg
;
883 struct lwkt_msg
*lmsg
;
885 IPFW_ASSERT_CFGPORT(&curthread
->td_msgport
);
888 * If 'kill_default' then caller has done the necessary
889 * msgport syncing; unnecessary to do it again.
893 * Let ipfw_chk() know the rules are going to
894 * be flushed, so it could jump directly to
898 netmsg_service_sync();
902 * if ipfw_basic_flush_state_prt
903 * flush all states in all CPU
905 if (ipfw_basic_flush_state_prt
!= NULL
) {
906 bzero(&dmsg
, sizeof(dmsg
));
907 netmsg_init(&dmsg
.base
, NULL
, &curthread
->td_msgport
,
908 0, ipfw_delete_state_dispatch
);
909 netisr_domsg(&dmsg
.base
, 0);
912 * Press the 'flush' button
914 bzero(&nmsg
, sizeof(nmsg
));
915 netmsg_init(&nmsg
, NULL
, &curthread
->td_msgport
,
916 0, ipfw_flush_rule_dispatch
);
918 lmsg
->u
.ms_result
= kill_default
;
919 netisr_domsg(&nmsg
, 0);
922 KASSERT(static_count
== 0,
923 ("%u static rules remain", static_count
));
924 KASSERT(static_ioc_len
== 0,
925 ("%u bytes of static rules remain", static_ioc_len
));
933 ipfw_delete_rule_dispatch(netmsg_t nmsg
)
935 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
936 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
937 struct ip_fw
*rule
, *prev
= NULL
;
939 rule
= ctx
->ipfw_rule_chain
;
941 if (rule
->rulenum
== dmsg
->rulenum
) {
942 ipfw_delete_rule(ctx
, prev
, rule
);
949 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
953 ipfw_alt_delete_rule(uint16_t rulenum
)
955 struct netmsg_del dmsg
;
956 struct netmsg_base
*nmsg
;
959 * delete the state which stub is the rule
960 * which belongs to the CPU and the rulenum
962 bzero(&dmsg
, sizeof(dmsg
));
964 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
965 0, ipfw_delete_state_dispatch
);
966 dmsg
.rulenum
= rulenum
;
967 netisr_domsg(nmsg
, 0);
970 * Get rid of the rule duplications on all CPUs
972 bzero(&dmsg
, sizeof(dmsg
));
974 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
975 0, ipfw_delete_rule_dispatch
);
976 dmsg
.rulenum
= rulenum
;
977 netisr_domsg(nmsg
, 0);
982 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg
)
984 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
985 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
986 struct ip_fw
*prev
, *rule
;
992 rule
= ctx
->ipfw_rule_chain
;
993 while (rule
!= NULL
) {
994 if (rule
->set
== dmsg
->from_set
) {
995 rule
= ipfw_delete_rule(ctx
, prev
, rule
);
1004 KASSERT(del
, ("no match set?!"));
1006 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1010 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg
)
1012 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
1013 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1019 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1020 if (rule
->set
== dmsg
->from_set
) {
1026 KASSERT(cleared
, ("no match set?!"));
1028 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1032 ipfw_alt_delete_ruleset(uint8_t set
)
1034 struct netmsg_del dmsg
;
1035 struct netmsg_base
*nmsg
;
1038 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1041 * Check whether the 'set' exists. If it exists,
1042 * then check whether any rules within the set will
1043 * try to create states.
1047 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1048 if (rule
->set
== set
) {
1053 return 0; /* XXX EINVAL? */
1057 * Clear the STATE flag, so no more states will be
1058 * created based the rules in this set.
1060 bzero(&dmsg
, sizeof(dmsg
));
1062 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1063 0, ipfw_disable_ruleset_state_dispatch
);
1064 dmsg
.from_set
= set
;
1066 netisr_domsg(nmsg
, 0);
1072 bzero(&dmsg
, sizeof(dmsg
));
1074 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1075 0, ipfw_alt_delete_ruleset_dispatch
);
1076 dmsg
.from_set
= set
;
1078 netisr_domsg(nmsg
, 0);
1083 ipfw_alt_move_rule_dispatch(netmsg_t nmsg
)
1085 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
1088 rule
= dmsg
->start_rule
;
1091 * Move to the position on the next CPU
1092 * before the msg is forwarded.
1095 while (rule
&& rule
->rulenum
<= dmsg
->rulenum
) {
1096 if (rule
->rulenum
== dmsg
->rulenum
)
1097 rule
->set
= dmsg
->to_set
;
1100 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1104 ipfw_alt_move_rule(uint16_t rulenum
, uint8_t set
)
1106 struct netmsg_del dmsg
;
1107 struct netmsg_base
*nmsg
;
1109 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1112 * Locate first rule to move
1114 for (rule
= ctx
->ipfw_rule_chain
;
1115 rule
&& rule
->rulenum
<= rulenum
; rule
= rule
->next
) {
1116 if (rule
->rulenum
== rulenum
&& rule
->set
!= set
)
1119 if (rule
== NULL
|| rule
->rulenum
> rulenum
)
1120 return 0; /* XXX error? */
1122 bzero(&dmsg
, sizeof(dmsg
));
1124 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1125 0, ipfw_alt_move_rule_dispatch
);
1126 dmsg
.start_rule
= rule
;
1127 dmsg
.rulenum
= rulenum
;
1130 netisr_domsg(nmsg
, 0);
1131 KKASSERT(dmsg
.start_rule
== NULL
);
1136 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg
)
1138 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
1139 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1142 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1143 if (rule
->set
== dmsg
->from_set
)
1144 rule
->set
= dmsg
->to_set
;
1146 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1150 ipfw_alt_move_ruleset(uint8_t from_set
, uint8_t to_set
)
1152 struct netmsg_del dmsg
;
1153 struct netmsg_base
*nmsg
;
1155 bzero(&dmsg
, sizeof(dmsg
));
1157 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1158 0, ipfw_alt_move_ruleset_dispatch
);
1159 dmsg
.from_set
= from_set
;
1160 dmsg
.to_set
= to_set
;
1162 netisr_domsg(nmsg
, 0);
1167 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg
)
1169 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
1170 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1173 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1174 if (rule
->set
== dmsg
->from_set
)
1175 rule
->set
= dmsg
->to_set
;
1176 else if (rule
->set
== dmsg
->to_set
)
1177 rule
->set
= dmsg
->from_set
;
1179 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1183 ipfw_alt_swap_ruleset(uint8_t set1
, uint8_t set2
)
1185 struct netmsg_del dmsg
;
1186 struct netmsg_base
*nmsg
;
1188 bzero(&dmsg
, sizeof(dmsg
));
1190 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1191 0, ipfw_alt_swap_ruleset_dispatch
);
1192 dmsg
.from_set
= set1
;
1195 netisr_domsg(nmsg
, 0);
1201 ipfw_ctl_alter(uint32_t arg
)
1204 uint8_t cmd
, new_set
;
1207 rulenum
= arg
& 0xffff;
1208 cmd
= (arg
>> 24) & 0xff;
1209 new_set
= (arg
>> 16) & 0xff;
1213 if (new_set
>= IPFW_DEFAULT_SET
)
1215 if (cmd
== 0 || cmd
== 2) {
1216 if (rulenum
== IPFW_DEFAULT_RULE
)
1219 if (rulenum
>= IPFW_DEFAULT_SET
)
1224 case 0: /* delete rules with given number */
1225 error
= ipfw_alt_delete_rule(rulenum
);
1228 case 1: /* delete all rules with given set number */
1229 error
= ipfw_alt_delete_ruleset(rulenum
);
1232 case 2: /* move rules with given number to new set */
1233 error
= ipfw_alt_move_rule(rulenum
, new_set
);
1236 case 3: /* move rules with given set number to new set */
1237 error
= ipfw_alt_move_ruleset(rulenum
, new_set
);
1240 case 4: /* swap two sets */
1241 error
= ipfw_alt_swap_ruleset(rulenum
, new_set
);
1248 * Clear counters for a specific rule.
1251 clear_counters(struct ip_fw
*rule
)
1253 rule
->bcnt
= rule
->pcnt
= 0;
1254 rule
->timestamp
= 0;
1258 ipfw_zero_entry_dispatch(netmsg_t nmsg
)
1260 struct netmsg_zent
*zmsg
= (struct netmsg_zent
*)nmsg
;
1261 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1264 if (zmsg
->rulenum
== 0) {
1265 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1266 clear_counters(rule
);
1269 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1270 if (rule
->rulenum
== zmsg
->rulenum
) {
1271 clear_counters(rule
);
1275 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1279 * Reset some or all counters on firewall rules.
1280 * @arg frwl is null to clear all entries, or contains a specific
1282 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
1285 ipfw_ctl_zero_entry(int rulenum
, int log_only
)
1287 struct netmsg_zent zmsg
;
1288 struct netmsg_base
*nmsg
;
1290 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1292 bzero(&zmsg
, sizeof(zmsg
));
1294 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1295 0, ipfw_zero_entry_dispatch
);
1296 zmsg
.log_only
= log_only
;
1299 msg
= log_only
? "ipfw: All logging counts reset.\n"
1300 : "ipfw: Accounting cleared.\n";
1305 * Locate the first rule with 'rulenum'
1307 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1308 if (rule
->rulenum
== rulenum
)
1311 if (rule
== NULL
) /* we did not find any matching rules */
1313 zmsg
.start_rule
= rule
;
1314 zmsg
.rulenum
= rulenum
;
1316 msg
= log_only
? "ipfw: Entry %d logging count reset.\n"
1317 : "ipfw: Entry %d cleared.\n";
1319 netisr_domsg(nmsg
, 0);
1320 KKASSERT(zmsg
.start_rule
== NULL
);
1323 log(LOG_SECURITY
| LOG_NOTICE
, msg
, rulenum
);
1328 ipfw_ctl_add_state(struct sockopt
*sopt
)
1330 struct ipfw_ioc_state
*ioc_state
;
1331 ioc_state
= sopt
->sopt_val
;
1332 if (ipfw_basic_append_state_prt
!= NULL
) {
1333 struct netmsg_del dmsg
;
1334 bzero(&dmsg
, sizeof(dmsg
));
1335 netmsg_init(&dmsg
.base
, NULL
, &curthread
->td_msgport
,
1336 0, ipfw_append_state_dispatch
);
1337 (&dmsg
)->ioc_state
= ioc_state
;
1338 netisr_domsg(&dmsg
.base
, 0);
1344 ipfw_ctl_delete_state(struct sockopt
*sopt
)
1346 int rulenum
= 0, error
;
1347 if (sopt
->sopt_valsize
!= 0) {
1348 error
= soopt_to_kbuf(sopt
, &rulenum
, sizeof(int), sizeof(int));
1353 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1354 struct ip_fw
*rule
= ctx
->ipfw_rule_chain
;
1356 while (rule
!=NULL
) {
1357 if (rule
->rulenum
== rulenum
) {
1366 struct netmsg_del dmsg
;
1367 struct netmsg_base
*nmsg
;
1369 * delete the state which stub is the rule
1370 * which belongs to the CPU and the rulenum
1372 bzero(&dmsg
, sizeof(dmsg
));
1374 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1375 0, ipfw_delete_state_dispatch
);
1376 dmsg
.rulenum
= rulenum
;
1377 netisr_domsg(nmsg
, 0);
1382 ipfw_ctl_flush_state(struct sockopt
*sopt
)
1384 struct netmsg_del dmsg
;
1385 struct netmsg_base
*nmsg
;
1387 * delete the state which stub is the rule
1388 * which belongs to the CPU and the rulenum
1390 bzero(&dmsg
, sizeof(dmsg
));
1392 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1393 0, ipfw_delete_state_dispatch
);
1395 netisr_domsg(nmsg
, 0);
1400 * Get the ioc_rule from the sopt
1401 * call ipfw_add_rule to add the rule
1404 ipfw_ctl_add_rule(struct sockopt
*sopt
)
1406 struct ipfw_ioc_rule
*ioc_rule
;
1409 size
= sopt
->sopt_valsize
;
1410 if (size
> (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX
) ||
1411 size
< sizeof(*ioc_rule
)) {
1414 if (size
!= (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX
)) {
1415 sopt
->sopt_val
= krealloc(sopt
->sopt_val
, sizeof(uint32_t) *
1416 IPFW_RULE_SIZE_MAX
, M_TEMP
, M_WAITOK
);
1418 ioc_rule
= sopt
->sopt_val
;
1420 ipfw_add_rule(ioc_rule
);
1425 ipfw_copy_state(struct ip_fw_state
*state
, struct ipfw_ioc_state
*ioc_state
, int cpuid
)
1427 ioc_state
->pcnt
= state
->pcnt
;
1428 ioc_state
->bcnt
= state
->bcnt
;
1429 ioc_state
->lifetime
= state
->lifetime
;
1430 ioc_state
->timestamp
= state
->timestamp
;
1431 ioc_state
->cpuid
= cpuid
;
1432 ioc_state
->expiry
= state
->expiry
;
1433 ioc_state
->rulenum
= state
->stub
->rulenum
;
1435 bcopy(&state
->flow_id
, &ioc_state
->flow_id
, sizeof(struct ipfw_flow_id
));
1436 return ioc_state
+ 1;
/*
 * Export one static rule (and the aggregate counters of all of its per-CPU
 * siblings) into the userland ioc representation.
 *
 * NOTE(review): lines lost in extraction include local declarations (an
 * `i` counter incremented in the sibling loop, judging by the KASSERT),
 * closing braces, and what appears to be a preprocessor conditional
 * separating the two `timestamp = 0` assignments below -- verify against
 * the pristine source.
 */
ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
    const struct ip_fw *sibling;
    ioc_rule->act_ofs = rule->act_ofs;
    ioc_rule->cmd_len = rule->cmd_len;
    ioc_rule->rulenum = rule->rulenum;
    ioc_rule->set = rule->set;
    /* Global bookkeeping snapshotted from this CPU's context. */
    ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
    ioc_rule->static_count = static_count;
    ioc_rule->static_len = static_ioc_len;
    ioc_rule->timestamp = 0;
    ioc_rule->timestamp = 0;
    /*
     * Sum packet/byte counters across every CPU's replica of this rule
     * and keep the most recent timestamp.
     */
    for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
        ioc_rule->pcnt += sibling->pcnt;
        ioc_rule->bcnt += sibling->bcnt;
        if (sibling->timestamp > ioc_rule->timestamp)
            ioc_rule->timestamp = sibling->timestamp;
    /* Static rules must be replicated once per CPU. */
    KASSERT(i == ncpus, ("static rule is not duplicated on every cpu"));
    /* cmd_len is in 32-bit words, hence the * 4. */
    bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
    /* Return the byte position of the next record in the buffer. */
    return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
/*
 * Sockopt handler: return a comma-separated list of loaded module names.
 *
 * NOTE(review): lines lost in extraction include the declaration of `i`,
 * braces, and likely a condition guarding the "," separator (otherwise the
 * list would always begin with a comma).  The visible strcat() calls are
 * unbounded writes into module_str[1024] -- presumably safe given
 * MAX_MODULE and the module-name length limit, but verify.
 */
ipfw_ctl_get_modules(struct sockopt *sopt)
    struct ipfw_module *mod;
    char module_str[1024];
    memset(module_str,0,1024);
    /* Walk the module table and append each registered module's name. */
    for (i = 0, mod = ipfw_modules; i < MAX_MODULE; i++, mod++) {
        if (mod->type != 0) {
            strcat(module_str,",");
            strcat(module_str,mod->name);
    /*
     * Copy the string (without its NUL) into the caller's buffer and
     * report its length.  NOTE(review): no check that sopt_valsize is
     * large enough for module_str -- confirm the caller guarantees it.
     */
    bzero(sopt->sopt_val, sopt->sopt_valsize);
    bcopy(module_str, sopt->sopt_val, strlen(module_str));
    sopt->sopt_valsize = strlen(module_str);
/*
 * Copy all static rules and states on all CPU
 * into the caller's sockopt buffer.
 *
 * NOTE(review): braces, the early-return of the "buffer too small" branch,
 * and the final return were lost in extraction.
 */
ipfw_ctl_get_rules(struct sockopt *sopt)
    struct ipfw_context *ctx = ipfw_ctx[mycpuid];
    struct ipfw_state_context *state_ctx;
    struct ip_fw_state *state;
    int i, j, state_count = 0;
    /* First pass: compute the total export size (rules + states). */
    size = static_ioc_len;
    for (i = 0; i < ncpus; i++) {
        /*
         * NOTE(review): the hash size is read from *this* CPU's ctx while
         * the table being walked belongs to ipfw_ctx[i].  Presumably all
         * CPUs share one state_hash_size -- confirm, else this misreads
         * remote tables.
         */
        for (j = 0; j < ctx->state_hash_size; j++) {
            state_ctx = &ipfw_ctx[i]->state_ctx[j];
            state_count += state_ctx->count;
    if (state_count > 0) {
        size += state_count * sizeof(struct ipfw_ioc_state);
    if (sopt->sopt_valsize < size) {
        /* XXX TODO sopt_val is not big enough */
        bzero(sopt->sopt_val, sopt->sopt_valsize);
    sopt->sopt_valsize = size;
    /* Second pass: serialize rules, then states, back to back. */
    bp = sopt->sopt_val;
    for (rule = ctx->ipfw_rule_chain; rule; rule = rule->next) {
        bp = ipfw_copy_rule(rule, bp);
    if (state_count > 0 ) {
        for (i = 0; i < ncpus; i++) {
            for (j = 0; j < ctx->state_hash_size; j++) {
                state_ctx = &ipfw_ctx[i]->state_ctx[j];
                state = state_ctx->state;
                /* Walk the per-bucket chain of states. */
                while (state != NULL) {
                    bp = ipfw_copy_state(state, bp, i);
                    state = state->next;
1556 ipfw_set_disable_dispatch(netmsg_t nmsg
)
1558 struct lwkt_msg
*lmsg
= &nmsg
->lmsg
;
1559 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1561 ctx
->ipfw_set_disable
= lmsg
->u
.ms_result32
;
1563 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
/*
 * Atomically disable/enable rule sets; the resulting mask is pushed to
 * every CPU via ipfw_set_disable_dispatch.
 *
 * NOTE(review): braces and an "lmsg = &nmsg.lmsg;" assignment appear to
 * have been lost in extraction -- in the visible lines lmsg is used
 * uninitialized; verify against the pristine source.
 */
ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
    struct netmsg_base nmsg;
    struct lwkt_msg *lmsg;
    uint32_t set_disable;
    /* IPFW_DEFAULT_SET is always enabled */
    enable |= (1 << IPFW_DEFAULT_SET);
    /* New mask: current mask plus `disable` bits, minus `enable` bits. */
    set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
    bzero(&nmsg, sizeof(nmsg));
    netmsg_init(&nmsg, NULL, &curthread->td_msgport,
        0, ipfw_set_disable_dispatch);
    lmsg->u.ms_result32 = set_disable;
    /* Synchronously circulate the mask through all netisr CPUs. */
    netisr_domsg(&nmsg, 0);
1588 * ipfw_ctl_x - extended version of ipfw_ctl
1589 * remove the x_header, and adjust the sopt_name,sopt_val and sopt_valsize.
1592 ipfw_ctl_x(struct sockopt
*sopt
)
1594 ip_fw_x_header
*x_header
;
1595 x_header
= (ip_fw_x_header
*)(sopt
->sopt_val
);
1596 sopt
->sopt_name
= x_header
->opcode
;
1597 sopt
->sopt_valsize
-= sizeof(ip_fw_x_header
);
1598 bcopy(++x_header
, sopt
->sopt_val
, sopt
->sopt_valsize
);
1599 return ipfw_ctl(sopt
);
/*
 * {set|get}sockopt parser.
 *
 * Central dispatcher for every ipfw3 sockopt.  NOTE(review): most `case`
 * labels (IP_FW_GET, IP_FW_FLUSH, IP_FW_ADD, IP_FW_DEL, ...), `break`
 * statements, local declarations (error, rulenum, masks, size) and the
 * final return were lost in extraction; the statements below are the
 * surviving bodies in their original order.
 */
ipfw_ctl(struct sockopt *sopt)
    switch (sopt->sopt_name) {
        /* IP_FW_GET: dump all rules and states. */
        error = ipfw_ctl_get_rules(sopt);
        /* IP_FW_MODULE: list loaded modules. */
        error = ipfw_ctl_get_modules(sopt);
        /* IP_FW_FLUSH: drop all rules but keep the default rule (arg 0). */
        ipfw_ctl_flush_rule(0);
        /* IP_FW_ADD: install a new static rule. */
        error = ipfw_ctl_add_rule(sopt);
        /*
         * IP_FW_DEL is used for deleting single rules or sets,
         * and (ab)used to atomically manipulate sets.
         * Argument size is used to distinguish between the two:
         *    delete single rule or set of rules,
         *    or reassign rules (or sets) to a different set.
         * 2 * sizeof(uint32_t)
         *    atomic disable/enable sets.
         *    first uint32_t contains sets to be disabled,
         *    second uint32_t contains sets to be enabled.
         */
        masks = sopt->sopt_val;
        size = sopt->sopt_valsize;
        if (size == sizeof(*masks)) {
            /*
             * Delete or reassign static rule
             */
            error = ipfw_ctl_alter(masks[0]);
        } else if (size == (2 * sizeof(*masks))) {
            /*
             * Set enable/disable
             */
            ipfw_ctl_set_disable(masks[0], masks[1]);
    /* IP_FW_ZERO falls through here too (lost label); RESETLOG differs
     * only in the flag passed to ipfw_ctl_zero_entry(). */
    case IP_FW_RESETLOG: /* argument is an int, the rule number */
        if (sopt->sopt_valsize != 0) {
            error = soopt_to_kbuf(sopt, &rulenum,
                sizeof(int), sizeof(int));
        error = ipfw_ctl_zero_entry(rulenum,
            sopt->sopt_name == IP_FW_RESETLOG);
    /* NAT sockopts are delegated to the (optional) NAT module. */
    case IP_FW_NAT_FLUSH:
    case IP_FW_NAT_GET_RECORD:
        if (ipfw_ctl_nat_ptr != NULL) {
            error = ipfw_ctl_nat_ptr(sopt);
    /* dummynet sockopts are delegated to dummynet. */
    case IP_DUMMYNET_GET:
    case IP_DUMMYNET_CONFIGURE:
    case IP_DUMMYNET_DEL:
    case IP_DUMMYNET_FLUSH:
        error = ip_dn_sockopt(sopt);
    case IP_FW_STATE_ADD:
        error = ipfw_ctl_add_state(sopt);
    case IP_FW_STATE_DEL:
        error = ipfw_ctl_delete_state(sopt);
    case IP_FW_STATE_FLUSH:
        error = ipfw_ctl_flush_state(sopt);
    /* table sockopts are delegated to the table module. */
    case IP_FW_TABLE_CREATE:
    case IP_FW_TABLE_DELETE:
    case IP_FW_TABLE_APPEND:
    case IP_FW_TABLE_REMOVE:
    case IP_FW_TABLE_LIST:
    case IP_FW_TABLE_FLUSH:
    case IP_FW_TABLE_SHOW:
    case IP_FW_TABLE_TEST:
    case IP_FW_TABLE_RENAME:
        error = ipfw_ctl_table_sockopt(sopt);
    /* state-sync sockopts are delegated to the sync module. */
    case IP_FW_SYNC_SHOW_CONF:
    case IP_FW_SYNC_SHOW_STATUS:
    case IP_FW_SYNC_EDGE_CONF:
    case IP_FW_SYNC_EDGE_START:
    case IP_FW_SYNC_EDGE_STOP:
    case IP_FW_SYNC_EDGE_TEST:
    case IP_FW_SYNC_EDGE_CLEAR:
    case IP_FW_SYNC_CENTRE_CONF:
    case IP_FW_SYNC_CENTRE_START:
    case IP_FW_SYNC_CENTRE_STOP:
    case IP_FW_SYNC_CENTRE_TEST:
    case IP_FW_SYNC_CENTRE_CLEAR:
        error = ipfw_ctl_sync_sockopt(sopt);
    /* default: unknown opcode. */
        kprintf("ipfw_ctl invalid option %d\n",
/*
 * pfil(9) input hook: run every inbound IPv4 packet through the firewall.
 *
 * NOTE(review): many lines were lost in extraction -- the mtag
 * declaration, the args setup before ipfw_chk() (args.m/args.eh/args.oif),
 * the switch head and the IP_FW_PASS/DENY/DIVERT/TEE case labels, braces
 * and the final return/back label.  Surviving tokens are unchanged.
 */
ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
    struct ip_fw_args args;
    struct mbuf *m = *m0;
    int tee = 0, error = 0, ret;
    /* Packet re-injected by dummynet: resume at the recorded rule. */
    if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
        /* Extract info from dummynet tag */
        mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
        KKASSERT(mtag != NULL);
        args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
        KKASSERT(args.rule != NULL);
        m_tag_delete(m, mtag);
        m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
    /* Classify the packet against the rule chain. */
    ret = ipfw_chk(&args);
    case IP_FW_DUMMYNET:
        /* Send packet to the appropriate pipe */
        ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
    /*
     * Must clear bridge tag when changing
     */
    m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
    /* Divert/tee: pass to the divert socket if one is attached. */
    if (ip_divert_p != NULL) {
        m = ip_divert_p(m, tee, 1);
    /* not sure this is the right error msg */
    panic("unknown ipfw return value: %d", ret);
/*
 * pfil(9) output hook: run every outbound IPv4 packet through the
 * firewall.  Mirror image of ipfw_check_in (DN_TO_IP_OUT, divert arg 0).
 *
 * NOTE(review): the same interior lines are missing as in ipfw_check_in
 * (mtag declaration, args setup, switch head and case labels, braces,
 * final return).  Surviving tokens are unchanged.
 */
ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
    struct ip_fw_args args;
    struct mbuf *m = *m0;
    int tee = 0, error = 0, ret;
    /* Packet re-injected by dummynet: resume at the recorded rule. */
    if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
        /* Extract info from dummynet tag */
        mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
        KKASSERT(mtag != NULL);
        args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
        KKASSERT(args.rule != NULL);
        m_tag_delete(m, mtag);
        m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
    /* Classify the packet against the rule chain. */
    ret = ipfw_chk(&args);
    case IP_FW_DUMMYNET:
        ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
    /* Divert/tee: pass to the divert socket if one is attached. */
    if (ip_divert_p != NULL) {
        m = ip_divert_p(m, tee, 0);
    /* not sure this is the right error msg */
    panic("unknown ipfw return value: %d", ret);
    /*
     * Attach the firewall to the IPv4 pfil(9) head.
     * NOTE(review): the function signature (presumably ipfw_hook) and a
     * NULL check on pfh were lost in extraction.
     */
    struct pfil_head *pfh;
    /* Must run on the configuration port's thread. */
    IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
    pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
    pfil_add_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
    pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
    /*
     * Detach the firewall from the IPv4 pfil(9) head (inverse of the
     * attach routine above).  NOTE(review): the function signature
     * (presumably ipfw_dehook) and a NULL check on pfh were lost in
     * extraction.
     */
    struct pfil_head *pfh;
    /* Must run on the configuration port's thread. */
    IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
    pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
    pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
    pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
/*
 * Dispatch (on the config port) for the net.inet.ip.fw3.enable sysctl:
 * toggle the global enable flag.
 *
 * NOTE(review): lines lost in extraction include braces, an early-exit
 * jump when the value is unchanged, and -- judging by the pfil attach/
 * detach helpers above -- the hook/dehook calls that make the new value
 * take effect; verify against the pristine source.
 */
ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
    struct lwkt_msg *lmsg = &nmsg->lmsg;
    int enable = lmsg->u.ms_result;
    /* No change requested: nothing to do. */
    if (fw3_enable == enable)
    fw3_enable = enable;
    lwkt_replymsg(lmsg, 0);
/*
 * Sysctl handler for net.inet.ip.fw3.enable: read back the current value,
 * and on write forward the new value to the config port for serialized
 * application.
 *
 * NOTE(review): lines lost in extraction include the declarations of
 * `enable`/`error`, the read-only early return, and an
 * "lmsg = &nmsg.lmsg;" assignment -- in the visible lines lmsg is used
 * uninitialized; verify against the pristine source.
 */
ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
    struct netmsg_base nmsg;
    struct lwkt_msg *lmsg;
    enable = fw3_enable;
    error = sysctl_handle_int(oidp, &enable, 0, req);
    /* Error, or a pure read (no new value): stop here. */
    if (error || req->newptr == NULL)
    netmsg_init(&nmsg, NULL, &curthread->td_msgport,
        0, ipfw_sysctl_enable_dispatch);
    lmsg->u.ms_result = enable;
    /* Apply the change on the configuration port, synchronously. */
    return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
1951 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS
)
1953 return sysctl_int_range(oidp
, arg1
, arg2
, req
,
1954 IPFW_AUTOINC_STEP_MIN
, IPFW_AUTOINC_STEP_MAX
);
/*
 * Per-CPU initialization dispatch: allocate this CPU's firewall context
 * and install its replica of the default rule, chaining each replica to
 * the previous CPU's via the sibling pointer.
 *
 * NOTE(review): lines lost in extraction include braces, the `#else` /
 * plain-`else` / `#endif` around the default-opcode selection (the two
 * O_BASIC_ACCEPT assignments below clearly sit on opposite sides of the
 * IPFIREWALL_DEFAULT_TO_ACCEPT conditional), and presumably a
 * "mycpuid == 0" guard before ipfw_inc_static_count().
 */
ipfw_ctx_init_dispatch(netmsg_t nmsg)
    struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
    struct ipfw_context *ctx;
    struct ip_fw *def_rule;
    ctx = kmalloc(sizeof(struct ipfw_context), M_IPFW3, M_WAITOK | M_ZERO);
    ipfw_ctx[mycpuid] = ctx;
    /* Build this CPU's copy of the catch-all default rule. */
    def_rule = kmalloc(sizeof(struct ip_fw), M_IPFW3, M_WAITOK | M_ZERO);
    def_rule->act_ofs = 0;
    def_rule->rulenum = IPFW_DEFAULT_RULE;
    def_rule->cmd_len = 2;
    def_rule->set = IPFW_DEFAULT_SET;
    def_rule->cmd[0].len = LEN_OF_IPFWINSN;
    def_rule->cmd[0].module = MODULE_BASIC_ID;
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
    def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
    /* (runtime-tunable branch of the lost #else) */
    if (filters_default_to_accept)
        def_rule->cmd[0].opcode = O_BASIC_ACCEPT;
        def_rule->cmd[0].opcode = O_BASIC_DENY;
    /* Install the default rule */
    ctx->ipfw_default_rule = def_rule;
    ctx->ipfw_rule_chain = def_rule;
    /*
     * if sibiling in last CPU is exists,
     * then it's sibling should be current rule
     */
    if (fwmsg->sibling != NULL) {
        fwmsg->sibling->sibling = def_rule;
    /* prepare for next CPU */
    fwmsg->sibling = def_rule;
    /* Statistics only need to be updated once */
    ipfw_inc_static_count(def_rule);
    /* Continue initialization on the next netisr CPU. */
    netisr_forwardmsg_all(&nmsg->base, mycpuid + 1);
/*
 * Config-port dispatch for module load: create per-CPU contexts, then
 * publish the firewall entry points.
 *
 * NOTE(review): lines lost in extraction include the `error` declaration,
 * the already-loaded guard around the kprintf below, braces, and possibly
 * an initial hook/enable step; verify against the pristine source.
 */
ipfw_init_dispatch(netmsg_t nmsg)
    struct netmsg_ipfw fwmsg;
    /* (body of the lost already-loaded check) */
    kprintf("ipfw3 already loaded\n");
    /* Initialize every CPU's context, starting from CPU 0. */
    bzero(&fwmsg, sizeof(fwmsg));
    netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
        0, ipfw_ctx_init_dispatch);
    netisr_domsg(&fwmsg.base, 0);
    /* Publish the global entry points used by the IP stack. */
    ip_fw_chk_ptr = ipfw_chk;
    ip_fw_ctl_x_ptr = ipfw_ctl_x;
    ip_fw_dn_io_ptr = ipfw_dummynet_io;
    kprintf("ipfw3 initialized, default to %s\n",
        filters_default_to_accept ? "accept" : "deny");
    lwkt_replymsg(&nmsg->lmsg, error);
    /*
     * Module-load entry (signature, presumably ipfw_init, lost in
     * extraction): bring up the log and sync sub-modules, then run the
     * main and table initialization on the configuration port.
     */
    struct netmsg_base smsg;
    ipfw3_log_modevent(MOD_LOAD);
    ipfw3_sync_modevent(MOD_LOAD);
    netmsg_init(&smsg, NULL, &curthread->td_msgport,
        0, ipfw_init_dispatch);
    error = lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
    /*
     * NOTE(review): the error from the first domsg is overwritten by the
     * second -- a failure of ipfw_init_dispatch would be silently lost;
     * verify this is intentional.
     */
    netmsg_init(&smsg, NULL, &curthread->td_msgport,
        0, table_init_dispatch);
    error = lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
/*
 * Config-port dispatch for module unload: unpublish the entry points,
 * drain in-flight packets, then free all rules and per-CPU contexts.
 *
 * NOTE(review): lines lost in extraction include the declarations of
 * `error`/`cpu`, a refcount/busy check, a dehook step, braces, and the
 * final reply label.
 */
ipfw_fini_dispatch(netmsg_t nmsg)
    /* Wait for any packet still inside the old hooks to drain. */
    netmsg_service_sync();
    ip_fw_chk_ptr = NULL;
    ip_fw_ctl_x_ptr = NULL;
    ip_fw_dn_io_ptr = NULL;
    ipfw_ctl_flush_rule(1 /* kill default rule */);
    /* Free per-CPU context */
    for (cpu = 0; cpu < ncpus; ++cpu) {
        if (ipfw_ctx[cpu] != NULL) {
            kfree(ipfw_ctx[cpu], M_IPFW3);
            ipfw_ctx[cpu] = NULL;
    kprintf("ipfw3 unloaded\n");
    lwkt_replymsg(&nmsg->lmsg, error);
    /*
     * Module-unload entry (signature, presumably ipfw_fini, lost in
     * extraction): tear down the log and sync sub-modules, then run the
     * main teardown on the configuration port.
     */
    struct netmsg_base smsg;
    ipfw3_log_modevent(MOD_UNLOAD);
    ipfw3_sync_modevent(MOD_UNLOAD);
    netmsg_init(&smsg, NULL, &curthread->td_msgport,
        0, ipfw_fini_dispatch);
    return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
2096 #endif /* KLD_MODULE */
/*
 * Module event handler (MOD_LOAD / MOD_UNLOAD).  NOTE(review): almost the
 * entire body (the switch over `type`, the load/unload calls, the return)
 * was lost in extraction; only the non-KLD unload refusal survives.
 */
ipfw3_modevent(module_t mod, int type, void *unused)
    kprintf("ipfw statically compiled, cannot unload\n");
/*
 * Module glue: register ipfw3 with the kernel module system at
 * SI_SUB_PROTO_END.  NOTE(review): the initializer fields of ipfw3mod
 * (name, event handler, closing brace) were lost in extraction.
 */
static moduledata_t ipfw3mod = {
DECLARE_MODULE(ipfw3, ipfw3mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
MODULE_VERSION(ipfw3, 1);