2 * Copyright (c) 1993 Daniel Boulet
3 * Copyright (c) 1994 Ugen J.S.Antsilevich
4 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
5 * Copyright (c) 2015 - 2016 The DragonFly Project. All rights reserved.
7 * This code is derived from software contributed to The DragonFly Project
8 * by Bill Yuan <bycn82@dragonflybsd.org>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42 #error IPFIREWALL3 requires INET.
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
49 #include <sys/kernel.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/ucred.h>
56 #include <sys/in_cksum.h>
58 #include <sys/thread2.h>
59 #include <sys/mplock2.h>
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_var.h>
67 #include <netinet/ip_icmp.h>
68 #include <netinet/tcp.h>
69 #include <netinet/tcp_timer.h>
70 #include <netinet/tcp_var.h>
71 #include <netinet/tcpip.h>
72 #include <netinet/udp.h>
73 #include <netinet/udp_var.h>
74 #include <netinet/ip_divert.h>
75 #include <netinet/if_ether.h>
78 #include <net/radix.h>
79 #include <net/route.h>
81 #include <net/netmsg2.h>
83 #include <net/ipfw3/ip_fw.h>
84 #include <net/ipfw3/ip_fw3_log.h>
85 #include <net/ipfw3/ip_fw3_table.h>
86 #include <net/ipfw3/ip_fw3_sync.h>
87 #include <net/ipfw3_basic/ip_fw3_basic.h>
88 #include <net/ipfw3_nat/ip_fw3_nat.h>
89 #include <net/dummynet3/ip_dummynet3.h>
91 MALLOC_DEFINE(M_IPFW3
, "IPFW3", "ip_fw3 default module");
93 #ifdef IPFIREWALL_DEBUG
94 #define DPRINTF(fmt, ...) \
97 kprintf(fmt, __VA_ARGS__); \
100 #define DPRINTF(fmt, ...) ((void)0)
103 #define MAX_MODULE 10
104 #define MAX_OPCODE_PER_MODULE 100
106 #define IPFW_AUTOINC_STEP_MIN 1
107 #define IPFW_AUTOINC_STEP_MAX 1000
108 #define IPFW_AUTOINC_STEP_DEF 100
112 struct netmsg_base base
;
113 const struct ipfw_ioc_rule
*ioc_rule
;
115 struct ip_fw
*next_rule
;
116 struct ip_fw
*prev_rule
;
117 struct ip_fw
*sibling
; /* sibling in prevous CPU */
121 struct netmsg_base base
;
123 struct ip_fw
*start_rule
;
124 struct ip_fw
*prev_rule
;
125 struct ipfw_ioc_state
*ioc_state
;
132 struct netmsg_base base
;
133 struct ip_fw
*start_rule
;
138 ip_fw_ctl_t
*ipfw_ctl_nat_ptr
= NULL
;
140 /* handlers which implemented in ipfw_basic module */
141 ipfw_basic_delete_state_t
*ipfw_basic_flush_state_prt
= NULL
;
142 ipfw_basic_append_state_t
*ipfw_basic_append_state_prt
= NULL
;
144 extern int ip_fw_loaded
;
145 static uint32_t static_count
; /* # of static rules */
146 static uint32_t static_ioc_len
; /* bytes of static rules */
147 static int ipfw_flushing
;
150 static int autoinc_step
= IPFW_AUTOINC_STEP_DEF
;
152 static int ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS
);
153 static int ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS
);
155 SYSCTL_NODE(_net_inet_ip
, OID_AUTO
, fw3
, CTLFLAG_RW
, 0, "Firewall");
156 SYSCTL_PROC(_net_inet_ip_fw3
, OID_AUTO
, enable
, CTLTYPE_INT
| CTLFLAG_RW
,
157 &fw3_enable
, 0, ipfw_sysctl_enable
, "I", "Enable ipfw");
158 SYSCTL_PROC(_net_inet_ip_fw3
, OID_AUTO
, autoinc_step
, CTLTYPE_INT
| CTLFLAG_RW
,
159 &autoinc_step
, 0, ipfw_sysctl_autoinc_step
, "I",
160 "Rule number autincrement step");
161 SYSCTL_INT(_net_inet_ip_fw3
, OID_AUTO
,one_pass
,CTLFLAG_RW
,
163 "Only do a single pass through ipfw when using dummynet(4)");
164 SYSCTL_INT(_net_inet_ip_fw3
, OID_AUTO
, debug
, CTLFLAG_RW
,
165 &fw_debug
, 0, "Enable printing of debug ip_fw statements");
166 SYSCTL_INT(_net_inet_ip_fw3
, OID_AUTO
, verbose
, CTLFLAG_RW
,
167 &fw_verbose
, 0, "Log matches to ipfw rules");
168 SYSCTL_INT(_net_inet_ip_fw3
, OID_AUTO
, static_count
, CTLFLAG_RD
,
169 &static_count
, 0, "Number of static rules");
171 filter_func filter_funcs
[MAX_MODULE
][MAX_OPCODE_PER_MODULE
];
172 struct ipfw_module ipfw_modules
[MAX_MODULE
];
173 struct ipfw_context
*ipfw_ctx
[MAXCPU
];
174 struct ipfw_sync_context sync_ctx
;
175 static int ipfw_ctl(struct sockopt
*sopt
);
179 check_accept(int *cmd_ctl
, int *cmd_val
, struct ip_fw_args
**args
,
180 struct ip_fw
**f
, ipfw_insn
*cmd
, uint16_t ip_len
);
182 check_deny(int *cmd_ctl
, int *cmd_val
, struct ip_fw_args
**args
,
183 struct ip_fw
**f
, ipfw_insn
*cmd
, uint16_t ip_len
);
184 void init_module(void);
188 register_ipfw_module(int module_id
,char *module_name
)
190 struct ipfw_module
*tmp
;
194 for (i
=0; i
< MAX_MODULE
; i
++) {
195 if (tmp
->type
== 0) {
198 strncpy(tmp
->name
, module_name
, strlen(module_name
));
203 kprintf("ipfw3 module %s loaded\n", module_name
);
207 unregister_ipfw_module(int module_id
)
209 struct ipfw_module
*tmp
;
212 int i
, len
, cmdlen
, found
;
216 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
217 fw
= ctx
->ipfw_rule_chain
;
218 for (; fw
; fw
= fw
->next
) {
219 for (len
= fw
->cmd_len
, cmd
= fw
->cmd
; len
> 0;
221 cmd
= (ipfw_insn
*)((uint32_t *)cmd
+ cmdlen
)) {
223 if (cmd
->module
== 0 &&
224 (cmd
->opcode
== 0 || cmd
->opcode
== 1)) {
225 //action accept or deny
226 } else if (cmd
->module
== module_id
) {
236 for (i
= 0; i
< MAX_MODULE
; i
++) {
237 if (tmp
->type
== 1 && tmp
->id
== module_id
) {
239 kprintf("ipfw3 module %s unloaded\n",
246 for (i
= 0; i
< MAX_OPCODE_PER_MODULE
; i
++) {
247 if (module_id
== 0) {
248 if (i
==0 || i
== 1) {
252 filter_funcs
[module_id
][i
] = NULL
;
/*
 * register_ipfw_filter_funcs:
 * Install 'func' into the global filter_funcs[module][opcode] dispatch
 * table; ipfw_chk() invokes the registered function when it evaluates a
 * rule instruction carrying that module/opcode pair.
 * NOTE(review): no bounds check on module/opcode is visible in this
 * extraction -- presumably callers stay within MAX_MODULE /
 * MAX_OPCODE_PER_MODULE; confirm against upstream.
 */
259 register_ipfw_filter_funcs(int module
, int opcode
, filter_func func
)
261 filter_funcs
[module
][opcode
] = func
;
/*
 * check_accept:
 * Built-in handler for the basic "accept" action opcode.
 * Sets *cmd_val to IP_FW_PASS (packet verdict) and *cmd_ctl to
 * IP_FW_CTL_DONE so the rule scan in ipfw_chk() stops here.
 * NOTE(review): the ipfw_log() call is presumably guarded by a
 * per-rule logging test on the original line 270, which is missing
 * from this extraction -- confirm against upstream before assuming
 * every accepted packet is logged.
 */
265 check_accept(int *cmd_ctl
, int *cmd_val
, struct ip_fw_args
**args
,
266 struct ip_fw
**f
, ipfw_insn
*cmd
, uint16_t ip_len
)
268 *cmd_val
= IP_FW_PASS
;
269 *cmd_ctl
= IP_FW_CTL_DONE
;
271 ipfw_log((*args
)->m
, (*args
)->eh
, cmd
->arg1
);
/*
 * check_deny:
 * Built-in handler for the basic "deny" action opcode.
 * Mirrors check_accept() but sets *cmd_val to IP_FW_DENY; *cmd_ctl is
 * set to IP_FW_CTL_DONE so rule evaluation terminates at this action.
 * NOTE(review): as with check_accept(), a logging guard around
 * ipfw_log() appears to be missing from this extraction (orig line
 * 281) -- confirm against upstream.
 */
276 check_deny(int *cmd_ctl
, int *cmd_val
, struct ip_fw_args
**args
,
277 struct ip_fw
**f
, ipfw_insn
*cmd
, uint16_t ip_len
)
279 *cmd_val
= IP_FW_DENY
;
280 *cmd_ctl
= IP_FW_CTL_DONE
;
282 ipfw_log((*args
)->m
, (*args
)->eh
, cmd
->arg1
);
289 memset(ipfw_modules
, 0, sizeof(struct ipfw_module
) * MAX_MODULE
);
290 memset(filter_funcs
, 0, sizeof(filter_func
) *
291 MAX_OPCODE_PER_MODULE
* MAX_MODULE
);
292 register_ipfw_filter_funcs(0, O_BASIC_ACCEPT
,
293 (filter_func
)check_accept
);
294 register_ipfw_filter_funcs(0, O_BASIC_DENY
, (filter_func
)check_deny
);
/*
 * ipfw_free_rule:
 * Release the storage of a single rule back to the M_IPFW3 malloc zone.
 * Caller owns clearing any pointers that still reference 'rule'.
 */
298 ipfw_free_rule(struct ip_fw
*rule
)
300 kfree(rule
, M_IPFW3
);
/*
 * lookup_next_rule:
 * Compute and cache me->next_rule, the rule at which evaluation should
 * resume after 'me'. If the rule's action is a basic "skipto", scan
 * forward for the first rule whose rulenum >= the skipto target
 * (cmd->arg1); otherwise fall through to the next rule in the chain.
 * NOTE(review): several interior lines are missing from this
 * extraction (e.g. the non-skipto fallback and the loop break) --
 * code below kept byte-identical; consult upstream for the full body.
 */
305 static struct ip_fw
*
306 lookup_next_rule(struct ip_fw
*me
)
308 struct ip_fw
*rule
= NULL
;
311 /* look for action, in case it is a skipto */
312 cmd
= ACTION_PTR(me
);
313 if ((int)cmd
->module
== MODULE_BASIC_ID
&&
314 (int)cmd
->opcode
== O_BASIC_SKIPTO
) {
315 for (rule
= me
->next
; rule
; rule
= rule
->next
) {
316 if (rule
->rulenum
>= cmd
->arg1
)
320 if (rule
== NULL
) { /* failure or not a skipto */
323 me
->next_rule
= rule
;
328 * rules are stored in ctx->ipfw_rule_chain.
329 * and each rule is combination of multiple cmds.(ipfw_insn)
330 * in each rule, it begin with filter cmds. and end with action cmds.
331 * 'outer/inner loop' are looping the rules/cmds.
332 * it will invoke the cmds relatived function according to the cmd's
333 * module id and opcode id. and process according to return value.
336 ipfw_chk(struct ip_fw_args
*args
)
338 struct mbuf
*m
= args
->m
;
339 struct ip
*ip
= mtod(m
, struct ip
*);
340 struct ip_fw
*f
= NULL
; /* matching rule */
341 int cmd_val
= IP_FW_PASS
;
343 struct divert_info
*divinfo
;
346 * hlen The length of the IPv4 header.
347 * hlen >0 means we have an IPv4 packet.
349 u_int hlen
= 0; /* hlen >0 means we have an IP pkt */
352 * offset The offset of a fragment. offset != 0 means that
353 * we have a fragment at this offset of an IPv4 packet.
354 * offset == 0 means that (if this is an IPv4 packet)
355 * this is the first or only fragment.
360 uint16_t src_port
= 0, dst_port
= 0; /* NOTE: host format */
361 struct in_addr src_ip
, dst_ip
; /* NOTE: network format */
363 uint8_t prev_module
= -1, prev_opcode
= -1; /* previous module & opcode */
364 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
366 if (m
->m_pkthdr
.fw_flags
& IPFW_MBUF_GENERATED
)
367 return IP_FW_PASS
; /* accept */
369 if (args
->eh
== NULL
|| /* layer 3 packet */
370 (m
->m_pkthdr
.len
>= sizeof(struct ip
) &&
371 ntohs(args
->eh
->ether_type
) == ETHERTYPE_IP
))
372 hlen
= ip
->ip_hl
<< 2;
375 * Collect parameters into local variables for faster matching.
377 if (hlen
== 0) { /* do not grab addresses for non-ip pkts */
378 proto
= args
->f_id
.proto
= 0; /* mark f_id invalid */
379 goto after_ip_checks
;
382 proto
= args
->f_id
.proto
= ip
->ip_p
;
385 if (args
->eh
!= NULL
) { /* layer 2 packets are as on the wire */
386 offset
= ntohs(ip
->ip_off
) & IP_OFFMASK
;
387 ip_len
= ntohs(ip
->ip_len
);
389 offset
= ip
->ip_off
& IP_OFFMASK
;
393 #define PULLUP_TO(len) \
395 if (m->m_len < (len)) { \
396 args->m = m = m_pullup(m, (len)); \
398 goto pullup_failed; \
399 ip = mtod(m, struct ip *); \
409 PULLUP_TO(hlen
+ sizeof(struct tcphdr
));
410 tcp
= L3HDR(struct tcphdr
, ip
);
411 dst_port
= tcp
->th_dport
;
412 src_port
= tcp
->th_sport
;
413 args
->f_id
.flags
= tcp
->th_flags
;
421 PULLUP_TO(hlen
+ sizeof(struct udphdr
));
422 udp
= L3HDR(struct udphdr
, ip
);
423 dst_port
= udp
->uh_dport
;
424 src_port
= udp
->uh_sport
;
431 L3HDR(struct icmp
, ip
)->icmp_type
;
441 args
->f_id
.src_ip
= ntohl(src_ip
.s_addr
);
442 args
->f_id
.dst_ip
= ntohl(dst_ip
.s_addr
);
443 args
->f_id
.src_port
= src_port
= ntohs(src_port
);
444 args
->f_id
.dst_port
= dst_port
= ntohs(dst_port
);
449 * Packet has already been tagged. Look for the next rule
450 * to restart processing.
452 * If fw3_one_pass != 0 then just accept it.
453 * XXX should not happen here, but optimized out in
459 /* This rule is being/has been flushed */
463 f
= args
->rule
->next_rule
;
465 f
= lookup_next_rule(args
->rule
);
468 * Find the starting rule. It can be either the first
469 * one, or the one after divert_rule if asked so.
473 mtag
= m_tag_find(m
, PACKET_TAG_IPFW_DIVERT
, NULL
);
475 divinfo
= m_tag_data(mtag
);
476 skipto
= divinfo
->skipto
;
481 f
= ctx
->ipfw_rule_chain
;
482 if (args
->eh
== NULL
&& skipto
!= 0) {
483 /* No skipto during rule flushing */
487 if (skipto
>= IPFW_DEFAULT_RULE
) {
488 return IP_FW_DENY
; /* invalid */
490 while (f
&& f
->rulenum
<= skipto
) {
493 if (f
== NULL
) { /* drop packet */
496 } else if (ipfw_flushing
) {
497 /* Rules are being flushed; skip to default rule */
498 f
= ctx
->ipfw_default_rule
;
501 if ((mtag
= m_tag_find(m
, PACKET_TAG_IPFW_DIVERT
, NULL
)) != NULL
) {
502 m_tag_delete(m
, mtag
);
506 * Now scan the rules, and parse microinstructions for each rule.
508 int prev_val
; /* previous result of 'or' filter */
512 /* foreach rule in chain */
513 for (; f
; f
= f
->next
) {
514 again
: /* check the rule again*/
515 if (ctx
->ipfw_set_disable
& (1 << f
->set
)) {
520 /* foreach cmd in rule */
521 for (l
= f
->cmd_len
, cmd
= f
->cmd
; l
> 0; l
-= cmdlen
,
522 cmd
= (ipfw_insn
*)((uint32_t *)cmd
+ cmdlen
)) {
525 /* skip 'or' filter when already match */
526 if (cmd
->len
& F_OR
&&
527 cmd
->module
== prev_module
&&
528 cmd
->opcode
== prev_opcode
&&
533 check_body
: /* check the body of the rule again.*/
534 (filter_funcs
[cmd
->module
][cmd
->opcode
])
535 (&cmd_ctl
, &cmd_val
, &args
, &f
, cmd
, ip_len
);
538 if (prev_val
== 0) /* but 'or' failed */
541 case IP_FW_CTL_AGAIN
:
548 case IP_FW_CTL_CHK_STATE
:
549 /* update the cmd and l */
551 l
= f
->cmd_len
- f
->act_ofs
;
554 if (cmd
->len
& F_NOT
)
557 if (cmd
->len
& F_OR
) { /* has 'or' */
558 if (!cmd_val
) { /* not matched */
559 if(prev_val
== -1){ /* first 'or' */
561 prev_module
= cmd
->module
;
562 prev_opcode
= cmd
->opcode
;
563 } else if (prev_module
== cmd
->module
&&
564 prev_opcode
== cmd
->opcode
) {
565 /* continuous 'or' filter */
566 } else if (prev_module
!= cmd
->module
||
567 prev_opcode
!= cmd
->opcode
) {
568 /* 'or' filter changed */
573 prev_module
= cmd
->module
;
574 prev_opcode
= cmd
->opcode
;
577 } else { /* has 'or' and matched */
579 prev_module
= cmd
->module
;
580 prev_opcode
= cmd
->opcode
;
583 if (!cmd_val
) { /* not matched */
587 /* previous 'or' not matched */
595 } /* end of inner for, scan opcodes */
596 next_rule
:; /* try next rule */
597 } /* end of outer for, scan rules */
598 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
602 /* Update statistics */
605 f
->timestamp
= time_second
;
610 kprintf("pullup failed\n");
615 ipfw_dummynet_io(struct mbuf
*m
, int pipe_nr
, int dir
, struct ip_fw_args
*fwa
)
620 const struct ipfw_flow_id
*id
;
621 struct dn_flow_id
*fid
;
625 mtag
= m_tag_get(PACKET_TAG_DUMMYNET
, sizeof(*pkt
),
626 M_INTWAIT
| M_NULLOK
);
631 m_tag_prepend(m
, mtag
);
633 pkt
= m_tag_data(mtag
);
634 bzero(pkt
, sizeof(*pkt
));
636 cmd
= (ipfw_insn
*)((uint32_t *)fwa
->rule
->cmd
+ fwa
->rule
->act_ofs
);
637 KASSERT(cmd
->opcode
== O_DUMMYNET_PIPE
||
638 cmd
->opcode
== O_DUMMYNET_QUEUE
,
639 ("Rule is not PIPE or QUEUE, opcode %d", cmd
->opcode
));
642 pkt
->dn_flags
= (dir
& DN_FLAGS_DIR_MASK
);
644 pkt
->pipe_nr
= pipe_nr
;
646 pkt
->cpuid
= mycpuid
;
647 pkt
->msgport
= netisr_curport();
651 fid
->fid_dst_ip
= id
->dst_ip
;
652 fid
->fid_src_ip
= id
->src_ip
;
653 fid
->fid_dst_port
= id
->dst_port
;
654 fid
->fid_src_port
= id
->src_port
;
655 fid
->fid_proto
= id
->proto
;
656 fid
->fid_flags
= id
->flags
;
658 pkt
->dn_priv
= fwa
->rule
;
660 if ((int)cmd
->opcode
== O_DUMMYNET_PIPE
)
661 pkt
->dn_flags
|= DN_FLAGS_IS_PIPE
;
663 m
->m_pkthdr
.fw_flags
|= DUMMYNET_MBUF_TAGGED
;
/*
 * ipfw_inc_static_count:
 * Account for a newly installed static rule. Must run on CPU0 only,
 * since the global static-rule statistics are maintained there.
 * NOTE(review): the increment of 'static_count' itself (orig line
 * ~673) is missing from this extraction; only the ioc-length update
 * is visible -- confirm against upstream.
 */
668 ipfw_inc_static_count(struct ip_fw
*rule
)
670 /* Static rule's counts are updated only on CPU0 */
671 KKASSERT(mycpuid
== 0);
674 static_ioc_len
+= IOC_RULESIZE(rule
);
/*
 * ipfw_dec_static_count:
 * Undo the accounting done by ipfw_inc_static_count() when a static
 * rule is deleted. CPU0 only. The KASSERTs guard against the counters
 * underflowing, which would indicate an add/delete imbalance.
 * NOTE(review): the actual decrements (static_count--, static_ioc_len
 * -= l) are missing from this extraction -- only the assertions are
 * visible; confirm against upstream.
 */
678 ipfw_dec_static_count(struct ip_fw
*rule
)
680 int l
= IOC_RULESIZE(rule
);
682 /* Static rule's counts are updated only on CPU0 */
683 KKASSERT(mycpuid
== 0);
685 KASSERT(static_count
> 0, ("invalid static count %u", static_count
));
688 KASSERT(static_ioc_len
>= l
,
689 ("invalid static len %u", static_ioc_len
));
694 ipfw_add_rule_dispatch(netmsg_t nmsg
)
696 struct netmsg_ipfw
*fwmsg
= (struct netmsg_ipfw
*)nmsg
;
697 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
698 struct ip_fw
*rule
, *prev
,*next
;
699 const struct ipfw_ioc_rule
*ioc_rule
;
701 ioc_rule
= fwmsg
->ioc_rule
;
702 // create rule by ioc_rule
703 rule
= kmalloc(RULESIZE(ioc_rule
), M_IPFW3
, M_WAITOK
| M_ZERO
);
704 rule
->act_ofs
= ioc_rule
->act_ofs
;
705 rule
->cmd_len
= ioc_rule
->cmd_len
;
706 rule
->rulenum
= ioc_rule
->rulenum
;
707 rule
->set
= ioc_rule
->set
;
708 bcopy(ioc_rule
->cmd
, rule
->cmd
, rule
->cmd_len
* 4);
710 for (prev
= NULL
, next
= ctx
->ipfw_rule_chain
;
711 next
; prev
= next
, next
= next
->next
) {
712 if (next
->rulenum
> ioc_rule
->rulenum
) {
716 KASSERT(next
!= NULL
, ("no default rule?!"));
719 * Insert rule into the pre-determined position
725 rule
->next
= ctx
->ipfw_rule_chain
;
726 ctx
->ipfw_rule_chain
= rule
;
730 * if sibiling in last CPU is exists,
731 * then it's sibling should be current rule
733 if (fwmsg
->sibling
!= NULL
) {
734 fwmsg
->sibling
->sibling
= rule
;
736 /* prepare for next CPU */
737 fwmsg
->sibling
= rule
;
740 /* Statistics only need to be updated once */
741 ipfw_inc_static_count(rule
);
743 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
747 * confirm the rulenumber
748 * call dispatch function to add rule into the list
749 * Update the statistic
752 ipfw_add_rule(struct ipfw_ioc_rule
*ioc_rule
)
754 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
755 struct netmsg_ipfw fwmsg
;
756 struct netmsg_base
*nmsg
;
759 IPFW_ASSERT_CFGPORT(&curthread
->td_msgport
);
762 * If rulenum is 0, find highest numbered rule before the
763 * default rule, and add rule number incremental step.
765 if (ioc_rule
->rulenum
== 0) {
766 int step
= autoinc_step
;
768 KKASSERT(step
>= IPFW_AUTOINC_STEP_MIN
&&
769 step
<= IPFW_AUTOINC_STEP_MAX
);
772 * Locate the highest numbered rule before default
774 for (f
= ctx
->ipfw_rule_chain
; f
; f
= f
->next
) {
775 if (f
->rulenum
== IPFW_DEFAULT_RULE
)
777 ioc_rule
->rulenum
= f
->rulenum
;
779 if (ioc_rule
->rulenum
< IPFW_DEFAULT_RULE
- step
)
780 ioc_rule
->rulenum
+= step
;
782 KASSERT(ioc_rule
->rulenum
!= IPFW_DEFAULT_RULE
&&
783 ioc_rule
->rulenum
!= 0,
784 ("invalid rule num %d", ioc_rule
->rulenum
));
786 bzero(&fwmsg
, sizeof(fwmsg
));
788 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
789 0, ipfw_add_rule_dispatch
);
790 fwmsg
.ioc_rule
= ioc_rule
;
792 netisr_domsg(nmsg
, 0);
794 DPRINTF("++ installed rule %d, static count now %d\n",
795 ioc_rule
->rulenum
, static_count
);
799 * Free storage associated with a static rule (including derived
801 * The caller is in charge of clearing rule pointers to avoid
803 * @return a pointer to the next entry.
804 * Arguments are not checked, so they better be correct.
805 * Must be called at splimp().
807 static struct ip_fw
*
808 ipfw_delete_rule(struct ipfw_context
*ctx
,
809 struct ip_fw
*prev
, struct ip_fw
*rule
)
812 ctx
->ipfw_rule_chain
= rule
->next
;
814 prev
->next
= rule
->next
;
816 if (mycpuid
== IPFW_CFGCPUID
)
817 ipfw_dec_static_count(rule
);
819 kfree(rule
, M_IPFW3
);
825 ipfw_flush_rule_dispatch(netmsg_t nmsg
)
827 struct lwkt_msg
*lmsg
= &nmsg
->lmsg
;
828 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
829 struct ip_fw
*rule
, *the_rule
;
830 int kill_default
= lmsg
->u
.ms_result
;
832 rule
= ctx
->ipfw_rule_chain
;
833 while (rule
!= NULL
) {
834 if (rule
->rulenum
== IPFW_DEFAULT_RULE
&& kill_default
== 0) {
835 ctx
->ipfw_rule_chain
= rule
;
840 if (mycpuid
== IPFW_CFGCPUID
)
841 ipfw_dec_static_count(the_rule
);
843 kfree(the_rule
, M_IPFW3
);
846 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
850 ipfw_append_state_dispatch(netmsg_t nmsg
)
852 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
853 struct ipfw_ioc_state
*ioc_state
= dmsg
->ioc_state
;
854 (*ipfw_basic_append_state_prt
)(ioc_state
);
855 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
859 ipfw_delete_state_dispatch(netmsg_t nmsg
)
861 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
862 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
863 struct ip_fw
*rule
= ctx
->ipfw_rule_chain
;
864 while (rule
!= NULL
) {
865 if (rule
->rulenum
== dmsg
->rulenum
) {
871 (*ipfw_basic_flush_state_prt
)(rule
);
872 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
876 * Deletes all rules from a chain (including the default rule
877 * if the second argument is set).
878 * Must be called at splimp().
881 ipfw_ctl_flush_rule(int kill_default
)
883 struct netmsg_del dmsg
;
884 struct netmsg_base nmsg
;
885 struct lwkt_msg
*lmsg
;
887 IPFW_ASSERT_CFGPORT(&curthread
->td_msgport
);
890 * If 'kill_default' then caller has done the necessary
891 * msgport syncing; unnecessary to do it again.
895 * Let ipfw_chk() know the rules are going to
896 * be flushed, so it could jump directly to
900 netmsg_service_sync();
904 * if ipfw_basic_flush_state_prt
905 * flush all states in all CPU
907 if (ipfw_basic_flush_state_prt
!= NULL
) {
908 bzero(&dmsg
, sizeof(dmsg
));
909 netmsg_init(&dmsg
.base
, NULL
, &curthread
->td_msgport
,
910 0, ipfw_delete_state_dispatch
);
911 netisr_domsg(&dmsg
.base
, 0);
914 * Press the 'flush' button
916 bzero(&nmsg
, sizeof(nmsg
));
917 netmsg_init(&nmsg
, NULL
, &curthread
->td_msgport
,
918 0, ipfw_flush_rule_dispatch
);
920 lmsg
->u
.ms_result
= kill_default
;
921 netisr_domsg(&nmsg
, 0);
924 KASSERT(static_count
== 0,
925 ("%u static rules remain", static_count
));
926 KASSERT(static_ioc_len
== 0,
927 ("%u bytes of static rules remain", static_ioc_len
));
935 ipfw_delete_rule_dispatch(netmsg_t nmsg
)
937 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
938 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
939 struct ip_fw
*rule
, *prev
= NULL
;
941 rule
= ctx
->ipfw_rule_chain
;
943 if (rule
->rulenum
== dmsg
->rulenum
) {
944 ipfw_delete_rule(ctx
, prev
, rule
);
951 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
955 ipfw_alt_delete_rule(uint16_t rulenum
)
957 struct netmsg_del dmsg
;
958 struct netmsg_base
*nmsg
;
961 * delete the state which stub is the rule
962 * which belongs to the CPU and the rulenum
964 bzero(&dmsg
, sizeof(dmsg
));
966 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
967 0, ipfw_delete_state_dispatch
);
968 dmsg
.rulenum
= rulenum
;
969 netisr_domsg(nmsg
, 0);
972 * Get rid of the rule duplications on all CPUs
974 bzero(&dmsg
, sizeof(dmsg
));
976 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
977 0, ipfw_delete_rule_dispatch
);
978 dmsg
.rulenum
= rulenum
;
979 netisr_domsg(nmsg
, 0);
984 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg
)
986 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
987 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
988 struct ip_fw
*prev
, *rule
;
994 rule
= ctx
->ipfw_rule_chain
;
995 while (rule
!= NULL
) {
996 if (rule
->set
== dmsg
->from_set
) {
997 rule
= ipfw_delete_rule(ctx
, prev
, rule
);
1006 KASSERT(del
, ("no match set?!"));
1008 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1012 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg
)
1014 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
1015 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1021 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1022 if (rule
->set
== dmsg
->from_set
) {
1028 KASSERT(cleared
, ("no match set?!"));
1030 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1034 ipfw_alt_delete_ruleset(uint8_t set
)
1036 struct netmsg_del dmsg
;
1037 struct netmsg_base
*nmsg
;
1040 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1043 * Check whether the 'set' exists. If it exists,
1044 * then check whether any rules within the set will
1045 * try to create states.
1049 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1050 if (rule
->set
== set
) {
1055 return 0; /* XXX EINVAL? */
1059 * Clear the STATE flag, so no more states will be
1060 * created based the rules in this set.
1062 bzero(&dmsg
, sizeof(dmsg
));
1064 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1065 0, ipfw_disable_ruleset_state_dispatch
);
1066 dmsg
.from_set
= set
;
1068 netisr_domsg(nmsg
, 0);
1074 bzero(&dmsg
, sizeof(dmsg
));
1076 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1077 0, ipfw_alt_delete_ruleset_dispatch
);
1078 dmsg
.from_set
= set
;
1080 netisr_domsg(nmsg
, 0);
1085 ipfw_alt_move_rule_dispatch(netmsg_t nmsg
)
1087 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
1090 rule
= dmsg
->start_rule
;
1093 * Move to the position on the next CPU
1094 * before the msg is forwarded.
1097 while (rule
&& rule
->rulenum
<= dmsg
->rulenum
) {
1098 if (rule
->rulenum
== dmsg
->rulenum
)
1099 rule
->set
= dmsg
->to_set
;
1102 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1106 ipfw_alt_move_rule(uint16_t rulenum
, uint8_t set
)
1108 struct netmsg_del dmsg
;
1109 struct netmsg_base
*nmsg
;
1111 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1114 * Locate first rule to move
1116 for (rule
= ctx
->ipfw_rule_chain
;
1117 rule
&& rule
->rulenum
<= rulenum
; rule
= rule
->next
) {
1118 if (rule
->rulenum
== rulenum
&& rule
->set
!= set
)
1121 if (rule
== NULL
|| rule
->rulenum
> rulenum
)
1122 return 0; /* XXX error? */
1124 bzero(&dmsg
, sizeof(dmsg
));
1126 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1127 0, ipfw_alt_move_rule_dispatch
);
1128 dmsg
.start_rule
= rule
;
1129 dmsg
.rulenum
= rulenum
;
1132 netisr_domsg(nmsg
, 0);
1133 KKASSERT(dmsg
.start_rule
== NULL
);
1138 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg
)
1140 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
1141 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1144 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1145 if (rule
->set
== dmsg
->from_set
)
1146 rule
->set
= dmsg
->to_set
;
1148 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
/*
 * ipfw_alt_move_ruleset:
 * Move every rule in 'from_set' into 'to_set' on all CPUs, by sending
 * a netmsg_del carrying both set numbers through the netisr chain; the
 * per-CPU work is done in ipfw_alt_move_ruleset_dispatch().
 * NOTE(review): the line binding 'nmsg' to &dmsg.base is missing from
 * this extraction (orig ~1158) -- kept byte-identical.
 */
1152 ipfw_alt_move_ruleset(uint8_t from_set
, uint8_t to_set
)
1154 struct netmsg_del dmsg
;
1155 struct netmsg_base
*nmsg
;
1157 bzero(&dmsg
, sizeof(dmsg
));
1159 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1160 0, ipfw_alt_move_ruleset_dispatch
);
1161 dmsg
.from_set
= from_set
;
1162 dmsg
.to_set
= to_set
;
1164 netisr_domsg(nmsg
, 0);
1169 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg
)
1171 struct netmsg_del
*dmsg
= (struct netmsg_del
*)nmsg
;
1172 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1175 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1176 if (rule
->set
== dmsg
->from_set
)
1177 rule
->set
= dmsg
->to_set
;
1178 else if (rule
->set
== dmsg
->to_set
)
1179 rule
->set
= dmsg
->from_set
;
1181 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1185 ipfw_alt_swap_ruleset(uint8_t set1
, uint8_t set2
)
1187 struct netmsg_del dmsg
;
1188 struct netmsg_base
*nmsg
;
1190 bzero(&dmsg
, sizeof(dmsg
));
1192 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1193 0, ipfw_alt_swap_ruleset_dispatch
);
1194 dmsg
.from_set
= set1
;
1197 netisr_domsg(nmsg
, 0);
1203 ipfw_ctl_alter(uint32_t arg
)
1206 uint8_t cmd
, new_set
;
1209 rulenum
= arg
& 0xffff;
1210 cmd
= (arg
>> 24) & 0xff;
1211 new_set
= (arg
>> 16) & 0xff;
1215 if (new_set
>= IPFW_DEFAULT_SET
)
1217 if (cmd
== 0 || cmd
== 2) {
1218 if (rulenum
== IPFW_DEFAULT_RULE
)
1221 if (rulenum
>= IPFW_DEFAULT_SET
)
1226 case 0: /* delete rules with given number */
1227 error
= ipfw_alt_delete_rule(rulenum
);
1230 case 1: /* delete all rules with given set number */
1231 error
= ipfw_alt_delete_ruleset(rulenum
);
1234 case 2: /* move rules with given number to new set */
1235 error
= ipfw_alt_move_rule(rulenum
, new_set
);
1238 case 3: /* move rules with given set number to new set */
1239 error
= ipfw_alt_move_ruleset(rulenum
, new_set
);
1242 case 4: /* swap two sets */
1243 error
= ipfw_alt_swap_ruleset(rulenum
, new_set
);
1250 * Clear counters for a specific rule.
/*
 * clear_counters:
 * Reset the byte/packet counters and the last-match timestamp of a
 * single rule. Used by the zero-entry sysctl/ioctl path.
 */
1253 clear_counters(struct ip_fw
*rule
)
1255 rule
->bcnt
= rule
->pcnt
= 0;
1256 rule
->timestamp
= 0;
1260 ipfw_zero_entry_dispatch(netmsg_t nmsg
)
1262 struct netmsg_zent
*zmsg
= (struct netmsg_zent
*)nmsg
;
1263 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1266 if (zmsg
->rulenum
== 0) {
1267 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1268 clear_counters(rule
);
1271 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1272 if (rule
->rulenum
== zmsg
->rulenum
) {
1273 clear_counters(rule
);
1277 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
1281 * Reset some or all counters on firewall rules.
1282 * @arg frwl is null to clear all entries, or contains a specific
1284 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
1287 ipfw_ctl_zero_entry(int rulenum
, int log_only
)
1289 struct netmsg_zent zmsg
;
1290 struct netmsg_base
*nmsg
;
1292 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1294 bzero(&zmsg
, sizeof(zmsg
));
1296 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1297 0, ipfw_zero_entry_dispatch
);
1298 zmsg
.log_only
= log_only
;
1301 msg
= log_only
? "ipfw: All logging counts reset.\n"
1302 : "ipfw: Accounting cleared.\n";
1307 * Locate the first rule with 'rulenum'
1309 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1310 if (rule
->rulenum
== rulenum
)
1313 if (rule
== NULL
) /* we did not find any matching rules */
1315 zmsg
.start_rule
= rule
;
1316 zmsg
.rulenum
= rulenum
;
1318 msg
= log_only
? "ipfw: Entry %d logging count reset.\n"
1319 : "ipfw: Entry %d cleared.\n";
1321 netisr_domsg(nmsg
, 0);
1322 KKASSERT(zmsg
.start_rule
== NULL
);
1325 log(LOG_SECURITY
| LOG_NOTICE
, msg
, rulenum
);
/*
 * ipfw_ctl_add_state:
 * Sockopt handler that installs a dynamic state entry supplied by
 * userland. The ioc_state is taken directly from sopt->sopt_val and,
 * if the basic module has registered its append-state hook
 * (ipfw_basic_append_state_prt), is distributed to every CPU via
 * ipfw_append_state_dispatch().
 * NOTE(review): no validation of sopt_valsize against
 * sizeof(struct ipfw_ioc_state) is visible in this extraction --
 * confirm the full source validates the buffer before use.
 */
1330 ipfw_ctl_add_state(struct sockopt
*sopt
)
1332 struct ipfw_ioc_state
*ioc_state
;
1333 ioc_state
= sopt
->sopt_val
;
1334 if (ipfw_basic_append_state_prt
!= NULL
) {
1335 struct netmsg_del dmsg
;
1336 bzero(&dmsg
, sizeof(dmsg
));
1337 netmsg_init(&dmsg
.base
, NULL
, &curthread
->td_msgport
,
1338 0, ipfw_append_state_dispatch
);
1339 (&dmsg
)->ioc_state
= ioc_state
;
1340 netisr_domsg(&dmsg
.base
, 0);
1346 ipfw_ctl_delete_state(struct sockopt
*sopt
)
1348 int rulenum
= 0, error
;
1349 if (sopt
->sopt_valsize
!= 0) {
1350 error
= soopt_to_kbuf(sopt
, &rulenum
, sizeof(int), sizeof(int));
1355 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1356 struct ip_fw
*rule
= ctx
->ipfw_rule_chain
;
1358 while (rule
!=NULL
) {
1359 if (rule
->rulenum
== rulenum
) {
1368 struct netmsg_del dmsg
;
1369 struct netmsg_base
*nmsg
;
1371 * delete the state which stub is the rule
1372 * which belongs to the CPU and the rulenum
1374 bzero(&dmsg
, sizeof(dmsg
));
1376 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1377 0, ipfw_delete_state_dispatch
);
1378 dmsg
.rulenum
= rulenum
;
1379 netisr_domsg(nmsg
, 0);
/*
 * ipfw_ctl_flush_state:
 * Sockopt handler that flushes dynamic state on all CPUs by running
 * ipfw_delete_state_dispatch() through the netisr chain with a zeroed
 * netmsg_del (rulenum 0 -- presumably meaning "all rules"; confirm
 * against ipfw_delete_state_dispatch()).
 * NOTE(review): the line binding 'nmsg' to &dmsg.base is missing from
 * this extraction (orig ~1393) -- kept byte-identical.
 */
1384 ipfw_ctl_flush_state(struct sockopt
*sopt
)
1386 struct netmsg_del dmsg
;
1387 struct netmsg_base
*nmsg
;
1389 * delete the state which stub is the rule
1390 * which belongs to the CPU and the rulenum
1392 bzero(&dmsg
, sizeof(dmsg
));
1394 netmsg_init(nmsg
, NULL
, &curthread
->td_msgport
,
1395 0, ipfw_delete_state_dispatch
);
1397 netisr_domsg(nmsg
, 0);
1402 * Get the ioc_rule from the sopt
1403 * call ipfw_add_rule to add the rule
1406 ipfw_ctl_add_rule(struct sockopt
*sopt
)
1408 struct ipfw_ioc_rule
*ioc_rule
;
1411 size
= sopt
->sopt_valsize
;
1412 if (size
> (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX
) ||
1413 size
< sizeof(*ioc_rule
)) {
1416 if (size
!= (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX
)) {
1417 sopt
->sopt_val
= krealloc(sopt
->sopt_val
, sizeof(uint32_t) *
1418 IPFW_RULE_SIZE_MAX
, M_TEMP
, M_WAITOK
);
1420 ioc_rule
= sopt
->sopt_val
;
1422 ipfw_add_rule(ioc_rule
);
/*
 * ipfw_copy_state:
 * Serialize one kernel dynamic-state entry into the userland ioctl
 * representation (struct ipfw_ioc_state): counters, lifetime,
 * timestamps, owning CPU, expiry, the parent rule number (via the
 * state's stub rule), and the flow id. Returns a pointer one element
 * past the record just written, so callers can stream consecutive
 * entries into a flat output buffer.
 */
1427 ipfw_copy_state(struct ip_fw_state
*state
, struct ipfw_ioc_state
*ioc_state
, int cpuid
)
1429 ioc_state
->pcnt
= state
->pcnt
;
1430 ioc_state
->bcnt
= state
->bcnt
;
1431 ioc_state
->lifetime
= state
->lifetime
;
1432 ioc_state
->timestamp
= state
->timestamp
;
1433 ioc_state
->cpuid
= cpuid
;
1434 ioc_state
->expiry
= state
->expiry
;
1435 ioc_state
->rulenum
= state
->stub
->rulenum
;
1437 bcopy(&state
->flow_id
, &ioc_state
->flow_id
, sizeof(struct ipfw_flow_id
));
1438 return ioc_state
+ 1;
/*
 * ipfw_copy_rule - export one static rule into the ipfw_ioc_rule format.
 *
 * Copies the rule header fields, the current CPU's set_disable mask and
 * the global static rule counters, then aggregates pcnt/bcnt and the
 * latest timestamp across the rule's per-CPU siblings (static rules are
 * replicated on every CPU; the KASSERT checks the sibling chain length
 * equals ncpus).  Finally copies the instruction stream (cmd_len is in
 * 32-bit words, hence the "* 4" byte conversion marked XXX upstream).
 *
 * Returns a pointer just past the written ioc_rule (IOC_RULESIZE), for
 * use as the next output position.
 *
 * NOTE(review): line-mangled capture; the loop counting siblings into i
 * is missing from view - verify against upstream before editing.
 */
1442 ipfw_copy_rule(const struct ip_fw
*rule
, struct ipfw_ioc_rule
*ioc_rule
)
1444 const struct ip_fw
*sibling
;
1449 ioc_rule
->act_ofs
= rule
->act_ofs
;
1450 ioc_rule
->cmd_len
= rule
->cmd_len
;
1451 ioc_rule
->rulenum
= rule
->rulenum
;
1452 ioc_rule
->set
= rule
->set
;
1454 ioc_rule
->set_disable
= ipfw_ctx
[mycpuid
]->ipfw_set_disable
;
1455 ioc_rule
->static_count
= static_count
;
1456 ioc_rule
->static_len
= static_ioc_len
;
1460 ioc_rule
->timestamp
= 0;
1467 ioc_rule
->timestamp
= 0;
1468 for (sibling
= rule
; sibling
!= NULL
; sibling
= sibling
->sibling
) {
1469 ioc_rule
->pcnt
+= sibling
->pcnt
;
1470 ioc_rule
->bcnt
+= sibling
->bcnt
;
1471 if (sibling
->timestamp
> ioc_rule
->timestamp
)
1472 ioc_rule
->timestamp
= sibling
->timestamp
;
1478 KASSERT(i
== ncpus
, ("static rule is not duplicated on every cpu"));
1480 bcopy(rule
->cmd
, ioc_rule
->cmd
, ioc_rule
->cmd_len
* 4 /* XXX */);
1482 return ((uint8_t *)ioc_rule
+ IOC_RULESIZE(ioc_rule
));
/*
 * ipfw_ctl_get_modules - sockopt handler that returns the names of all
 * registered ipfw3 modules as a single comma-separated string.
 *
 * Walks the ipfw_modules[] table (MAX_MODULE entries), appending each
 * in-use entry's name to a fixed 1024-byte buffer, then copies the
 * string into sopt->sopt_val and sets sopt_valsize to its length.
 *
 * NOTE(review): strcat into the fixed 1024-byte module_str is unbounded
 * here, and the caller-supplied sopt_valsize is not checked against
 * strlen(module_str) before bcopy - flagged for upstream review; the
 * capture is also missing interior lines, so confirm against the full
 * source before changing.
 */
1486 ipfw_ctl_get_modules(struct sockopt
*sopt
)
1489 struct ipfw_module
*mod
;
1490 char module_str
[1024];
1491 memset(module_str
,0,1024);
1492 for (i
= 0, mod
= ipfw_modules
; i
< MAX_MODULE
; i
++, mod
++) {
1493 if (mod
->type
!= 0) {
1495 strcat(module_str
,",");
1496 strcat(module_str
,mod
->name
);
1499 bzero(sopt
->sopt_val
, sopt
->sopt_valsize
);
1500 bcopy(module_str
, sopt
->sopt_val
, strlen(module_str
));
1501 sopt
->sopt_valsize
= strlen(module_str
);
/*
 * ipfw_ctl_get_rules - sockopt handler exporting all static rules plus
 * all dynamic states from every CPU into the caller's buffer.
 *
 * Sizing pass: starts from static_ioc_len, then adds one
 * ipfw_ioc_state per state counted across all CPUs' state hash
 * buckets.  If sopt_valsize is too small the buffer is only zeroed
 * (the XXX TODO notes the missing proper handling).  Fill pass: walks
 * the current CPU's rule chain through ipfw_copy_rule(), then every
 * CPU's state buckets through ipfw_copy_state(), advancing bp.
 *
 * NOTE(review): both passes index ipfw_ctx[i] but bound j by the
 * *local* ctx->state_hash_size - assumes the hash size is identical on
 * every CPU; confirm upstream.  Capture dropped braces/return lines.
 */
1506 * Copy all static rules and states on all CPU
1509 ipfw_ctl_get_rules(struct sockopt
*sopt
)
1511 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1512 struct ipfw_state_context
*state_ctx
;
1514 struct ip_fw_state
*state
;
1517 int i
, j
, state_count
= 0;
1519 size
= static_ioc_len
;
1520 for (i
= 0; i
< ncpus
; i
++) {
1521 for (j
= 0; j
< ctx
->state_hash_size
; j
++) {
1522 state_ctx
= &ipfw_ctx
[i
]->state_ctx
[j
];
1523 state_count
+= state_ctx
->count
;
1526 if (state_count
> 0) {
1527 size
+= state_count
* sizeof(struct ipfw_ioc_state
);
1530 if (sopt
->sopt_valsize
< size
) {
1531 /* XXX TODO sopt_val is not big enough */
1532 bzero(sopt
->sopt_val
, sopt
->sopt_valsize
);
1536 sopt
->sopt_valsize
= size
;
1537 bp
= sopt
->sopt_val
;
1539 for (rule
= ctx
->ipfw_rule_chain
; rule
; rule
= rule
->next
) {
1540 bp
= ipfw_copy_rule(rule
, bp
);
1542 if (state_count
> 0 ) {
1543 for (i
= 0; i
< ncpus
; i
++) {
1544 for (j
= 0; j
< ctx
->state_hash_size
; j
++) {
1545 state_ctx
= &ipfw_ctx
[i
]->state_ctx
[j
];
1546 state
= state_ctx
->state
;
1547 while (state
!= NULL
) {
1548 bp
= ipfw_copy_state(state
, bp
, i
);
1549 state
= state
->next
;
/*
 * ipfw_set_disable_dispatch - per-CPU netmsg handler that installs the
 * new set_disable mask (carried in lmsg->u.ms_result32) into this CPU's
 * ipfw_context, then forwards the message to the next CPU so every CPU
 * ends up with the same mask.
 */
1558 ipfw_set_disable_dispatch(netmsg_t nmsg
)
1560 struct lwkt_msg
*lmsg
= &nmsg
->lmsg
;
1561 struct ipfw_context
*ctx
= ipfw_ctx
[mycpuid
];
1563 ctx
->ipfw_set_disable
= lmsg
->u
.ms_result32
;
1565 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
/*
 * ipfw_ctl_set_disable - compute the new rule-set disable mask from the
 * caller's disable/enable bitmasks (IPFW_DEFAULT_SET is forced enabled)
 * and broadcast it to all CPUs via ipfw_set_disable_dispatch.
 *
 * NOTE(review): capture dropped the line binding lmsg to &nmsg.lmsg
 * before line 1583 uses it - verify against upstream ip_fw3.c.
 */
1569 ipfw_ctl_set_disable(uint32_t disable
, uint32_t enable
)
1571 struct netmsg_base nmsg
;
1572 struct lwkt_msg
*lmsg
;
1573 uint32_t set_disable
;
1575 /* IPFW_DEFAULT_SET is always enabled */
1576 enable
|= (1 << IPFW_DEFAULT_SET
);
1577 set_disable
= (ipfw_ctx
[mycpuid
]->ipfw_set_disable
| disable
) & ~enable
;
1579 bzero(&nmsg
, sizeof(nmsg
));
1580 netmsg_init(&nmsg
, NULL
, &curthread
->td_msgport
,
1581 0, ipfw_set_disable_dispatch
);
1583 lmsg
->u
.ms_result32
= set_disable
;
1585 netisr_domsg(&nmsg
, 0);
/*
 * ipfw_ctl_x - extended ipfw_ctl entry point.
 *
 * The extended protocol prefixes the payload with an ip_fw_x_header
 * carrying the real opcode: strip it by copying the opcode into
 * sopt_name, shrinking sopt_valsize, sliding the remaining payload to
 * the front of sopt_val (note ++x_header points just past the header),
 * then delegate to the normal ipfw_ctl().
 *
 * NOTE(review): bcopy with overlapping src/dst regions - presumably
 * safe on this platform's bcopy (memmove semantics); confirm.
 */
1590 * ipfw_ctl_x - extended version of ipfw_ctl
1591 * remove the x_header, and adjust the sopt_name,sopt_val and sopt_valsize.
1594 ipfw_ctl_x(struct sockopt
*sopt
)
1596 ip_fw_x_header
*x_header
;
1597 x_header
= (ip_fw_x_header
*)(sopt
->sopt_val
);
1598 sopt
->sopt_name
= x_header
->opcode
;
1599 sopt
->sopt_valsize
-= sizeof(ip_fw_x_header
);
1600 bcopy(++x_header
, sopt
->sopt_val
, sopt
->sopt_valsize
);
1601 return ipfw_ctl(sopt
);
/*
 * ipfw_ctl - central {set|get}sockopt dispatcher for ipfw3.
 *
 * Switches on sopt->sopt_name: rule/module listing, rule add/flush,
 * IP_FW_DEL (single-rule delete vs. atomic set enable/disable, chosen
 * by payload size), zero/resetlog, NAT, dummynet, state add/del/flush,
 * table ops, and sync ops - each delegated to its subsystem handler.
 *
 * NOTE(review): line-mangled capture; case labels for the early
 * opcodes, break statements, braces and the final return are missing
 * from view - verify against upstream before editing.
 */
1606 * {set|get}sockopt parser.
1609 ipfw_ctl(struct sockopt
*sopt
)
1616 switch (sopt
->sopt_name
) {
1621 error
= ipfw_ctl_get_rules(sopt
);
1624 error
= ipfw_ctl_get_modules(sopt
);
1628 ipfw_ctl_flush_rule(0);
1632 error
= ipfw_ctl_add_rule(sopt
);
1637 * IP_FW_DEL is used for deleting single rules or sets,
1638 * and (ab)used to atomically manipulate sets.
1639 * Argument size is used to distinguish between the two:
1641 * delete single rule or set of rules,
1642 * or reassign rules (or sets) to a different set.
1643 * 2 * sizeof(uint32_t)
1644 * atomic disable/enable sets.
1645 * first uint32_t contains sets to be disabled,
1646 * second uint32_t contains sets to be enabled.
1648 masks
= sopt
->sopt_val
;
1649 size
= sopt
->sopt_valsize
;
1650 if (size
== sizeof(*masks
)) {
1652 * Delete or reassign static rule
1654 error
= ipfw_ctl_alter(masks
[0]);
1655 } else if (size
== (2 * sizeof(*masks
))) {
1657 * Set enable/disable
1659 ipfw_ctl_set_disable(masks
[0], masks
[1]);
1665 case IP_FW_RESETLOG
: /* argument is an int, the rule number */
1667 if (sopt
->sopt_valsize
!= 0) {
1668 error
= soopt_to_kbuf(sopt
, &rulenum
,
1669 sizeof(int), sizeof(int));
1674 error
= ipfw_ctl_zero_entry(rulenum
,
1675 sopt
->sopt_name
== IP_FW_RESETLOG
);
1679 case IP_FW_NAT_FLUSH
:
1681 case IP_FW_NAT_GET_RECORD
:
1682 if (ipfw_ctl_nat_ptr
!= NULL
) {
1683 error
= ipfw_ctl_nat_ptr(sopt
);
1686 case IP_DUMMYNET_GET
:
1687 case IP_DUMMYNET_CONFIGURE
:
1688 case IP_DUMMYNET_DEL
:
1689 case IP_DUMMYNET_FLUSH
:
1690 error
= ip_dn_sockopt(sopt
);
1692 case IP_FW_STATE_ADD
:
1693 error
= ipfw_ctl_add_state(sopt
);
1695 case IP_FW_STATE_DEL
:
1696 error
= ipfw_ctl_delete_state(sopt
);
1698 case IP_FW_STATE_FLUSH
:
1699 error
= ipfw_ctl_flush_state(sopt
);
1701 case IP_FW_TABLE_CREATE
:
1702 case IP_FW_TABLE_DELETE
:
1703 case IP_FW_TABLE_APPEND
:
1704 case IP_FW_TABLE_REMOVE
:
1705 case IP_FW_TABLE_LIST
:
1706 case IP_FW_TABLE_FLUSH
:
1707 case IP_FW_TABLE_SHOW
:
1708 case IP_FW_TABLE_TEST
:
1709 case IP_FW_TABLE_RENAME
:
1710 error
= ipfw_ctl_table_sockopt(sopt
);
1712 case IP_FW_SYNC_SHOW_CONF
:
1713 case IP_FW_SYNC_SHOW_STATUS
:
1714 case IP_FW_SYNC_EDGE_CONF
:
1715 case IP_FW_SYNC_EDGE_START
:
1716 case IP_FW_SYNC_EDGE_STOP
:
1717 case IP_FW_SYNC_EDGE_TEST
:
1718 case IP_FW_SYNC_EDGE_CLEAR
:
1719 case IP_FW_SYNC_CENTRE_CONF
:
1720 case IP_FW_SYNC_CENTRE_START
:
1721 case IP_FW_SYNC_CENTRE_STOP
:
1722 case IP_FW_SYNC_CENTRE_TEST
:
1723 case IP_FW_SYNC_CENTRE_CLEAR
:
1724 error
= ipfw_ctl_sync_sockopt(sopt
);
1727 kprintf("ipfw_ctl invalid option %d\n",
/*
 * ipfw_check_in - pfil(9) input hook for IPv4.
 *
 * If the mbuf carries a dummynet tag, the packet is re-entering after
 * being delayed: recover the matching rule from the tag's dn_pkt
 * private data, delete the tag and clear DUMMYNET_MBUF_TAGGED so the
 * rule search resumes where it left off.  Then run ipfw_chk() and act
 * on its verdict: IP_FW_DUMMYNET hands the packet to the appropriate
 * pipe via ipfw_dummynet_io(); the divert path clears the bridge tag
 * and passes the mbuf to ip_divert_p (tee flag, incoming=1); an
 * unrecognized verdict panics.
 *
 * NOTE(review): line-mangled capture - the args setup, PASS/DENY
 * cases, frees and returns are missing from view; verify against
 * upstream ip_fw3.c before editing.
 */
1735 ipfw_check_in(void *arg
, struct mbuf
**m0
, struct ifnet
*ifp
, int dir
)
1737 struct ip_fw_args args
;
1738 struct mbuf
*m
= *m0
;
1740 int tee
= 0, error
= 0, ret
;
1742 if (m
->m_pkthdr
.fw_flags
& DUMMYNET_MBUF_TAGGED
) {
1743 /* Extract info from dummynet tag */
1744 mtag
= m_tag_find(m
, PACKET_TAG_DUMMYNET
, NULL
);
1745 KKASSERT(mtag
!= NULL
);
1746 args
.rule
= ((struct dn_pkt
*)m_tag_data(mtag
))->dn_priv
;
1747 KKASSERT(args
.rule
!= NULL
);
1749 m_tag_delete(m
, mtag
);
1750 m
->m_pkthdr
.fw_flags
&= ~DUMMYNET_MBUF_TAGGED
;
1758 ret
= ipfw_chk(&args
);
1775 case IP_FW_DUMMYNET
:
1776 /* Send packet to the appropriate pipe */
1777 m
= ipfw_dummynet_io(m
, args
.cookie
, DN_TO_IP_IN
,
1787 * Must clear bridge tag when changing
1789 m
->m_pkthdr
.fw_flags
&= ~BRIDGE_MBUF_TAGGED
;
1790 if (ip_divert_p
!= NULL
) {
1791 m
= ip_divert_p(m
, tee
, 1);
1795 /* not sure this is the right error msg */
1805 panic("unknown ipfw return value: %d", ret
);
/*
 * ipfw_check_out - pfil(9) output hook for IPv4; mirror of
 * ipfw_check_in for the outbound direction.
 *
 * Recovers the rule from a dummynet tag when re-entering after a delay
 * (then strips the tag/flag), runs ipfw_chk(), and dispatches on the
 * verdict: IP_FW_DUMMYNET queues via ipfw_dummynet_io() with
 * DN_TO_IP_OUT; the divert path calls ip_divert_p (incoming=0).
 * Unknown verdicts panic.
 *
 * NOTE(review): line-mangled capture - args setup, PASS/DENY cases and
 * returns missing from view; verify against upstream before editing.
 */
1813 ipfw_check_out(void *arg
, struct mbuf
**m0
, struct ifnet
*ifp
, int dir
)
1815 struct ip_fw_args args
;
1816 struct mbuf
*m
= *m0
;
1818 int tee
= 0, error
= 0, ret
;
1820 if (m
->m_pkthdr
.fw_flags
& DUMMYNET_MBUF_TAGGED
) {
1821 /* Extract info from dummynet tag */
1822 mtag
= m_tag_find(m
, PACKET_TAG_DUMMYNET
, NULL
);
1823 KKASSERT(mtag
!= NULL
);
1824 args
.rule
= ((struct dn_pkt
*)m_tag_data(mtag
))->dn_priv
;
1825 KKASSERT(args
.rule
!= NULL
);
1827 m_tag_delete(m
, mtag
);
1828 m
->m_pkthdr
.fw_flags
&= ~DUMMYNET_MBUF_TAGGED
;
1836 ret
= ipfw_chk(&args
);
1854 case IP_FW_DUMMYNET
:
1855 m
= ipfw_dummynet_io(m
, args
.cookie
, DN_TO_IP_OUT
,
1864 if (ip_divert_p
!= NULL
) {
1865 m
= ip_divert_p(m
, tee
, 0);
1869 /* not sure this is the right error msg */
1879 panic("unknown ipfw return value: %d", ret
);
/*
 * NOTE(review): the function signature was dropped by the capture;
 * from the body this is presumably ipfw_hook() - confirm upstream.
 *
 * Installs the firewall into the IPv4 pfil(9) chain: asserts we are on
 * the config port, looks up the AF_INET pfil head, and registers
 * ipfw_check_in / ipfw_check_out for the PFIL_IN / PFIL_OUT directions.
 */
1889 struct pfil_head
*pfh
;
1890 IPFW_ASSERT_CFGPORT(&curthread
->td_msgport
);
1892 pfh
= pfil_head_get(PFIL_TYPE_AF
, AF_INET
);
1896 pfil_add_hook(ipfw_check_in
, NULL
, PFIL_IN
, pfh
);
1897 pfil_add_hook(ipfw_check_out
, NULL
, PFIL_OUT
, pfh
);
/*
 * NOTE(review): signature dropped by the capture; presumably
 * ipfw_dehook() - confirm upstream.
 *
 * Reverse of the hook-install path: asserts the config port, looks up
 * the AF_INET pfil head, and removes the in/out firewall hooks.
 */
1903 struct pfil_head
*pfh
;
1905 IPFW_ASSERT_CFGPORT(&curthread
->td_msgport
);
1907 pfh
= pfil_head_get(PFIL_TYPE_AF
, AF_INET
);
1911 pfil_remove_hook(ipfw_check_in
, NULL
, PFIL_IN
, pfh
);
1912 pfil_remove_hook(ipfw_check_out
, NULL
, PFIL_OUT
, pfh
);
/*
 * ipfw_sysctl_enable_dispatch - config-port handler for the fw3_enable
 * sysctl.  Reads the requested value from lmsg->u.ms_result; if it
 * differs from the current fw3_enable, updates the flag (the capture
 * is missing the lines that presumably call the hook/dehook paths in
 * between - confirm upstream) and replies to the message.
 */
1916 ipfw_sysctl_enable_dispatch(netmsg_t nmsg
)
1918 struct lwkt_msg
*lmsg
= &nmsg
->lmsg
;
1919 int enable
= lmsg
->u
.ms_result
;
1921 if (fw3_enable
== enable
)
1924 fw3_enable
= enable
;
1931 lwkt_replymsg(lmsg
, 0);
/*
 * ipfw_sysctl_enable - sysctl handler for net...fw3_enable.
 *
 * Copies the current value to userland via sysctl_handle_int; on a
 * write (req->newptr set, no error) forwards the new value to the
 * config port as a netmsg so ipfw_sysctl_enable_dispatch applies it in
 * the right thread context.
 *
 * NOTE(review): capture dropped the lmsg = &nmsg.lmsg binding before
 * line 1949 - verify against upstream.
 */
1935 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS
)
1937 struct netmsg_base nmsg
;
1938 struct lwkt_msg
*lmsg
;
1941 enable
= fw3_enable
;
1942 error
= sysctl_handle_int(oidp
, &enable
, 0, req
);
1943 if (error
|| req
->newptr
== NULL
)
1946 netmsg_init(&nmsg
, NULL
, &curthread
->td_msgport
,
1947 0, ipfw_sysctl_enable_dispatch
);
1949 lmsg
->u
.ms_result
= enable
;
1951 return lwkt_domsg(IPFW_CFGPORT
, lmsg
, 0);
/*
 * ipfw_sysctl_autoinc_step - sysctl handler for the rule-number
 * auto-increment step; delegates to sysctl_int_range to clamp writes
 * into [IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX].
 */
1955 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS
)
1957 return sysctl_int_range(oidp
, arg1
, arg2
, req
,
1958 IPFW_AUTOINC_STEP_MIN
, IPFW_AUTOINC_STEP_MAX
);
/*
 * ipfw_ctx_init_dispatch - per-CPU initialization netmsg handler.
 *
 * Allocates this CPU's ipfw_context and its copy of the default rule
 * (rulenum IPFW_DEFAULT_RULE, set IPFW_DEFAULT_SET, a single 2-word
 * basic-module instruction that accepts or denies depending on
 * IPFIREWALL_DEFAULT_TO_ACCEPT / the filters_default_to_accept
 * tunable), installs it as both ipfw_default_rule and the head of the
 * rule chain, links it into the cross-CPU sibling chain carried in the
 * netmsg_ipfw (previous CPU's rule points at this one), bumps the
 * static counters once (presumably guarded to the first CPU - the
 * guard line is missing from the capture), and forwards the message to
 * the next CPU.
 */
1963 ipfw_ctx_init_dispatch(netmsg_t nmsg
)
1965 struct netmsg_ipfw
*fwmsg
= (struct netmsg_ipfw
*)nmsg
;
1966 struct ipfw_context
*ctx
;
1967 struct ip_fw
*def_rule
;
1969 ctx
= kmalloc(sizeof(struct ipfw_context
), M_IPFW3
, M_WAITOK
| M_ZERO
);
1970 ipfw_ctx
[mycpuid
] = ctx
;
1972 def_rule
= kmalloc(sizeof(struct ip_fw
), M_IPFW3
, M_WAITOK
| M_ZERO
);
1973 def_rule
->act_ofs
= 0;
1974 def_rule
->rulenum
= IPFW_DEFAULT_RULE
;
1975 def_rule
->cmd_len
= 2;
1976 def_rule
->set
= IPFW_DEFAULT_SET
;
1978 def_rule
->cmd
[0].len
= LEN_OF_IPFWINSN
;
1979 def_rule
->cmd
[0].module
= MODULE_BASIC_ID
;
1980 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
1981 def_rule
->cmd
[0].opcode
= O_BASIC_ACCEPT
;
1983 if (filters_default_to_accept
)
1984 def_rule
->cmd
[0].opcode
= O_BASIC_ACCEPT
;
1986 def_rule
->cmd
[0].opcode
= O_BASIC_DENY
;
1989 /* Install the default rule */
1990 ctx
->ipfw_default_rule
= def_rule
;
1991 ctx
->ipfw_rule_chain
= def_rule
;
1994 * if sibiling in last CPU is exists,
1995 * then it's sibling should be current rule
1997 if (fwmsg
->sibling
!= NULL
) {
1998 fwmsg
->sibling
->sibling
= def_rule
;
2000 /* prepare for next CPU */
2001 fwmsg
->sibling
= def_rule
;
2003 /* Statistics only need to be updated once */
2005 ipfw_inc_static_count(def_rule
);
2007 netisr_forwardmsg_all(&nmsg
->base
, mycpuid
+ 1);
/*
 * ipfw_init_dispatch - config-port handler that brings the firewall
 * up: bails if already loaded, broadcasts ipfw_ctx_init_dispatch to
 * every CPU to build per-CPU contexts and default rules, publishes the
 * global function pointers (packet check, extended ctl, dummynet I/O),
 * logs the default policy, and replies with the error code.
 *
 * NOTE(review): the already-loaded guard, hook installation and error
 * setup lines are missing from the capture - confirm upstream.
 */
2011 ipfw_init_dispatch(netmsg_t nmsg
)
2013 struct netmsg_ipfw fwmsg
;
2016 kprintf("ipfw3 already loaded\n");
2021 bzero(&fwmsg
, sizeof(fwmsg
));
2022 netmsg_init(&fwmsg
.base
, NULL
, &curthread
->td_msgport
,
2023 0, ipfw_ctx_init_dispatch
);
2024 netisr_domsg(&fwmsg
.base
, 0);
2026 ip_fw_chk_ptr
= ipfw_chk
;
2027 ip_fw_ctl_x_ptr
= ipfw_ctl_x
;
2028 ip_fw_dn_io_ptr
= ipfw_dummynet_io
;
2030 kprintf("ipfw3 initialized, default to %s\n",
2031 filters_default_to_accept
? "accept" : "deny");
2037 lwkt_replymsg(&nmsg
->lmsg
, error
);
/*
 * NOTE(review): signature dropped by the capture; from the body this
 * is presumably the module-load entry (ipfw_init) - confirm upstream.
 *
 * Notifies the log and sync subsystems of MOD_LOAD, then sends two
 * synchronous messages to the config port: ipfw_init_dispatch (core
 * bring-up) followed by table_init_dispatch (table subsystem).  Note
 * the first lwkt_domsg error is overwritten by the second - flagged
 * for upstream review.
 */
2043 struct netmsg_base smsg
;
2046 ipfw3_log_modevent(MOD_LOAD
);
2047 ipfw3_sync_modevent(MOD_LOAD
);
2050 netmsg_init(&smsg
, NULL
, &curthread
->td_msgport
,
2051 0, ipfw_init_dispatch
);
2052 error
= lwkt_domsg(IPFW_CFGPORT
, &smsg
.lmsg
, 0);
2053 netmsg_init(&smsg
, NULL
, &curthread
->td_msgport
,
2054 0, table_init_dispatch
);
2055 error
= lwkt_domsg(IPFW_CFGPORT
, &smsg
.lmsg
, 0);
/*
 * ipfw_fini_dispatch - config-port handler that tears the firewall
 * down: waits for in-flight netmsg traffic to drain
 * (netmsg_service_sync), clears the global function pointers, flushes
 * all rules including the default rule, frees every CPU's
 * ipfw_context (NULLing the slots), and replies with the error code.
 *
 * NOTE(review): the dehook call and busy-check presumably between the
 * visible lines were dropped by the capture - confirm upstream.
 */
2062 ipfw_fini_dispatch(netmsg_t nmsg
)
2069 netmsg_service_sync();
2070 ip_fw_chk_ptr
= NULL
;
2071 ip_fw_ctl_x_ptr
= NULL
;
2072 ip_fw_dn_io_ptr
= NULL
;
2073 ipfw_ctl_flush_rule(1 /* kill default rule */);
2075 /* Free pre-cpu context */
2076 for (cpu
= 0; cpu
< ncpus
; ++cpu
) {
2077 if (ipfw_ctx
[cpu
] != NULL
) {
2078 kfree(ipfw_ctx
[cpu
], M_IPFW3
);
2079 ipfw_ctx
[cpu
] = NULL
;
2082 kprintf("ipfw3 unloaded\n");
2084 lwkt_replymsg(&nmsg
->lmsg
, error
);
/*
 * NOTE(review): signature dropped by the capture; presumably the
 * module-unload entry (ipfw_fini) - confirm upstream.
 *
 * Notifies the log and sync subsystems of MOD_UNLOAD, then sends
 * ipfw_fini_dispatch synchronously to the config port and returns its
 * result.
 */
2090 struct netmsg_base smsg
;
2092 ipfw3_log_modevent(MOD_UNLOAD
);
2093 ipfw3_sync_modevent(MOD_UNLOAD
);
2095 netmsg_init(&smsg
, NULL
, &curthread
->td_msgport
,
2096 0, ipfw_fini_dispatch
);
2097 return lwkt_domsg(IPFW_CFGPORT
, &smsg
.lmsg
, 0);
2100 #endif /* KLD_MODULE */
/*
 * ipfw3_modevent - kernel module event handler.
 *
 * NOTE(review): almost the entire body (the type switch, MOD_LOAD /
 * MOD_UNLOAD cases, return) was dropped by the capture; only the
 * statically-compiled unload refusal message survives.  Reconstruct
 * from upstream ip_fw3.c before editing.
 */
2103 ipfw3_modevent(module_t mod
, int type
, void *unused
)
2115 kprintf("ipfw statically compiled, cannot unload\n");
/*
 * Module registration: the moduledata_t ties ipfw3_modevent to the
 * "ipfw3" module (initializer fields dropped by the capture).
 * DECLARE_MODULE orders it at SI_SUB_PROTO_END / SI_ORDER_FIRST so the
 * core loads before dependent modules such as ipfw3_basic.
 */
2127 static moduledata_t ipfw3mod
= {
2132 /* ipfw3 must init before ipfw3_basic */
2133 DECLARE_MODULE(ipfw3
, ipfw3mod
, SI_SUB_PROTO_END
, SI_ORDER_FIRST
);
2134 MODULE_VERSION(ipfw3
, 1);