ipfw: Rework states and tracks.
[dragonfly.git] / sys/net/ipfw/ip_fw2.c
1 /*
2 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
25 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
29 * Implement IP packet firewall (new version)
32 #include "opt_ipfw.h"
33 #include "opt_inet.h"
34 #ifndef INET
35 #error IPFIREWALL requires INET.
36 #endif /* INET */
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/kernel.h>
43 #include <sys/proc.h>
44 #include <sys/socket.h>
45 #include <sys/socketvar.h>
46 #include <sys/sysctl.h>
47 #include <sys/syslog.h>
48 #include <sys/ucred.h>
49 #include <sys/in_cksum.h>
50 #include <sys/limits.h>
51 #include <sys/lock.h>
52 #include <sys/tree.h>
54 #include <net/if.h>
55 #include <net/route.h>
56 #include <net/pfil.h>
57 #include <net/dummynet/ip_dummynet.h>
59 #include <sys/thread2.h>
60 #include <sys/mplock2.h>
61 #include <net/netmsg2.h>
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <netinet/in_pcb.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip_var.h>
69 #include <netinet/ip_icmp.h>
70 #include <netinet/tcp.h>
71 #include <netinet/tcp_seq.h>
72 #include <netinet/tcp_timer.h>
73 #include <netinet/tcp_var.h>
74 #include <netinet/tcpip.h>
75 #include <netinet/udp.h>
76 #include <netinet/udp_var.h>
77 #include <netinet/ip_divert.h>
78 #include <netinet/if_ether.h> /* XXX for ETHERTYPE_IP */
80 #include <net/ipfw/ip_fw2.h>
82 #ifdef IPFIREWALL_DEBUG
83 #define DPRINTF(fmt, ...) \
84 do { \
85 if (fw_debug > 0) \
86 kprintf(fmt, __VA_ARGS__); \
87 } while (0)
88 #else
89 #define DPRINTF(fmt, ...) ((void)0)
90 #endif
93 * Description of per-CPU rule duplication:
95 * Module loading/unloading and all ioctl operations are serialized
96 * by netisr0, so we don't have any ordering or locking problems.
98 * The following graph shows how operations on the per-CPU rule
99 * list are performed [2 CPU case]:
101 * CPU0 CPU1
103 * netisr0 <------------------------------------+
104 * domsg |
105 * : |
106 * :(delete/add...) |
107 * : |
108 * : netmsg | netmsg
109 * forwardmsg---------->netisr1 |
110 * : |
111 * :(delete/add...) |
112 * : |
113 * : |
114 * replymsg--------------+
118 * Rule structure [2 CPU case]
120 * CPU0 CPU1
122 * layer3_chain layer3_chain
123 * | |
124 * V V
125 * +-------+ sibling +-------+ sibling
126 * | rule1 |--------->| rule1 |--------->NULL
127 * +-------+ +-------+
128 * | |
129 * |next |next
130 * V V
131 * +-------+ sibling +-------+ sibling
132 * | rule2 |--------->| rule2 |--------->NULL
133 * +-------+ +-------+
135 * ip_fw.sibling:
136 * 1) Ease statistics calculation during IP_FW_GET. We only need to
137 * iterate layer3_chain in netisr0; the current rule's duplicates
138 * on the other CPUs can safely be accessed read-only through
139 * ip_fw.sibling.
140 * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
141 * a) In netisr0 rule3 is determined to be inserted between rule1
142 * and rule2. To make this decision we need to iterate the
143 * layer3_chain in netisr0. The netmsg, which is used to insert
144 * the rule, will contain rule1 in netisr0 as prev_rule and rule2
145 * in netisr0 as next_rule.
146 * b) After the insertion in netisr0 is done, we will move on to
147 * netisr1. But instead of relocating the rule3's position in
148 * netisr1 by iterating the layer3_chain in netisr1, we set the
149 * netmsg's prev_rule to rule1->sibling and next_rule to
150 * rule2->sibling before the netmsg is forwarded to netisr1 from
151 * netisr0.
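 *
 * For illustration, a simplified sketch (with hypothetical helper
 * names) of what the insertion netmsg handler does on each CPU:
 *
 *	rule = dup_rule(fwmsg->ioc_rule);	(this CPU's copy)
 *	rule->next = fwmsg->next_rule;
 *	fwmsg->prev_rule->next = rule;
 *	if (fwmsg->sibling != NULL)
 *		fwmsg->sibling->sibling = rule;	(link prev CPU's copy)
 *	fwmsg->sibling = rule;
 *	fwmsg->prev_rule = fwmsg->prev_rule->sibling;
 *	fwmsg->next_rule = fwmsg->next_rule->sibling;
 *	netisr_forwardmsg(&fwmsg->base, mycpuid + 1);
 *
 * so the next netisr receives prev_rule/next_rule already pointing
 * at its own copies and never has to walk its layer3_chain.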
155 * Description of states and tracks.
157 * Both states and tracks are stored in per-cpu RB trees instead of
158 * per-cpu hash tables to avoid worst-case hash degeneration.
160 * The lifetimes of states and tracks are regulated by dyn_*_lifetime,
161 * measured in seconds and depending on the flags.
163 * When a packet is received, its address fields are first masked with
164 * the mask defined for the rule, then matched against the entries in
165 * the per-cpu state RB tree. States are generated by the
166 * 'keep-state' and 'limit' options.
168 * The max number of states is ipfw_state_max. Once the maximum
169 * number of states is reached, no new states are created. This is
170 * done to avoid consuming too much memory, and also too much time
171 * when searching on each packet.
173 * Each state holds a pointer to the parent ipfw rule of the current
174 * CPU so we know what action to perform. States are removed when the
175 * parent rule is deleted. XXX we should make them survive.
177 * There are some limitations with states -- we do not obey the
178 * 'randomized match', and we do not do multiple passes through the
179 * firewall. XXX check the latter!!!
181 * States grow independently on each CPU, e.g. 2 CPU case:
183 * CPU0 CPU1
184 * ................... ...................
185 * : state RB tree : : state RB tree :
186 * : : : :
187 * : state1 state2 : : state3 :
188 * : | | : : | :
189 * :.....|....|......: :........|........:
190 * | | |
191 * | | |st_rule
192 * | | |
193 * V V V
194 * +-------+ +-------+
195 * | rule1 | | rule1 |
196 * +-------+ +-------+
198 * Tracks are used to enforce limits on the number of sessions. Tracks
199 * are generated by the 'limit' option.
201 * The max number of tracks is ipfw_track_max. Once the maximum
202 * number of tracks is reached, no new tracks are created. This is
203 * done to avoid consuming too much memory.
205 * Tracks are organized into two layers: the track counter RB tree
206 * is shared between CPUs, while the track RB tree is per-cpu. States
207 * generated by the 'limit' option are linked to the track in addition
208 * to the per-cpu state RB tree, mainly to ease expiration. e.g. 2 CPU case:
210 * ..............................
211 * : track counter RB tree :
212 * : :
213 * : +-----------+ :
214 * : | trkcnt1 | :
215 * : | | :
216 * : +--->counter<----+ :
217 * : | | | | :
218 * : | +-----------+ | :
219 * :......|................|....:
220 * | |
221 * CPU0 | | CPU1
222 * ................. |t_count | .................
223 * : track RB tree : | | : track RB tree :
224 * : : | | : :
225 * : +-->track1-------+ +--------track2 :
226 * : | A : : :
227 * : | | : : :
228 * :.|.....|.......: :...............:
229 * | +----------------+
230 * | .................... |
231 * | : state RB tree : |st_track
232 * | : : |
233 * +---state1 state2---+
234 * : | | :
235 * :.....|.......|....:
236 * | |
237 * | |st_rule
238 * V V
239 * +----------+
240 * | rule1 |
241 * +----------+
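 *
 * For illustration, a simplified sketch of how a 'limit' rule uses
 * the two layers (the real allocation path, with reaping and the
 * trkcnt token, is in ipfw_track_alloc() below):
 *
 *	t = per-cpu track for the masked flow (create if missing);
 *	    t->t_count points at the counter in the shared trkcnt
 *	if (*t->t_count >= conn_limit)
 *		deny packet;	(limit reached, no new state)
 *	else
 *		create state and atomic_add_int(t->t_count, 1);
 *
 * Because the counter lives in the shared trkcnt tree, states
 * created on any CPU count against the same limit.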
244 #define IPFW_AUTOINC_STEP_MIN 1
245 #define IPFW_AUTOINC_STEP_MAX 1000
246 #define IPFW_AUTOINC_STEP_DEF 100
248 #define IPFW_DEFAULT_RULE 65535 /* rulenum for the default rule */
249 #define IPFW_DEFAULT_SET 31 /* set number for the default rule */
251 #define MATCH_REVERSE 0
252 #define MATCH_FORWARD 1
253 #define MATCH_NONE 2
254 #define MATCH_UNKNOWN 3
256 #define IPFW_STATE_TCPFLAGS (TH_SYN | TH_FIN | TH_RST)
257 #define IPFW_STATE_TCPSTATES (IPFW_STATE_TCPFLAGS | \
258 (IPFW_STATE_TCPFLAGS << 8))
260 #define BOTH_SYN (TH_SYN | (TH_SYN << 8))
261 #define BOTH_FIN (TH_FIN | (TH_FIN << 8))
262 #define BOTH_RST (TH_RST | (TH_RST << 8))
263 /* TH_ACK here means FIN was ACKed. */
264 #define BOTH_FINACK (TH_ACK | (TH_ACK << 8))
266 #define IPFW_STATE_TCPCLOSED(s) ((s)->st_proto == IPPROTO_TCP && \
267 (((s)->st_state & BOTH_RST) || \
268 ((s)->st_state & BOTH_FINACK) == BOTH_FINACK))
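/*
 * Illustration of the encoding above: st_state keeps the TCP flags
 * seen in the forward direction in its low byte and the flags seen
 * in the reverse direction in its high byte.  An established
 * connection thus satisfies (st_state & BOTH_SYN) == BOTH_SYN,
 * while IPFW_STATE_TCPCLOSED() is true once an RST was seen in
 * either direction, or once both FINs have been ACKed
 * (BOTH_FINACK).
 */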
270 #define O_ANCHOR O_NOP
272 struct netmsg_ipfw {
273 struct netmsg_base base;
274 const struct ipfw_ioc_rule *ioc_rule;
275 struct ip_fw *next_rule;
276 struct ip_fw *prev_rule;
277 struct ip_fw *sibling;
278 uint32_t rule_flags;
281 struct netmsg_del {
282 struct netmsg_base base;
283 struct ip_fw *start_rule;
284 struct ip_fw *prev_rule;
285 uint16_t rulenum;
286 uint8_t from_set;
287 uint8_t to_set;
290 struct netmsg_zent {
291 struct netmsg_base base;
292 struct ip_fw *start_rule;
293 uint16_t rulenum;
294 uint16_t log_only;
297 struct netmsg_cpstate {
298 struct netmsg_base base;
299 struct ipfw_ioc_state *ioc_state;
300 int state_cntmax;
301 int state_cnt;
304 struct ipfw_addrs {
305 uint32_t addr1;
306 uint32_t addr2;
309 struct ipfw_ports {
310 uint16_t port1;
311 uint16_t port2;
314 struct ipfw_key {
315 union {
316 struct ipfw_addrs addrs;
317 uint64_t value;
318 } addr_u;
319 union {
320 struct ipfw_ports ports;
321 uint32_t value;
322 } port_u;
323 uint8_t proto;
324 uint8_t swap; /* IPFW_KEY_SWAP_ */
325 uint16_t rsvd2;
328 #define IPFW_KEY_SWAP_ADDRS 0x1
329 #define IPFW_KEY_SWAP_PORTS 0x2
330 #define IPFW_KEY_SWAP_ALL (IPFW_KEY_SWAP_ADDRS | IPFW_KEY_SWAP_PORTS)
332 struct ipfw_trkcnt {
333 RB_ENTRY(ipfw_trkcnt) tc_rblink;
334 struct ipfw_key tc_key;
335 uintptr_t tc_ruleid;
336 int tc_refs;
337 int tc_count;
338 time_t tc_expire; /* userland get-only */
339 uint16_t tc_rulenum; /* userland get-only */
340 } __cachealign;
342 #define tc_addrs tc_key.addr_u.value
343 #define tc_ports tc_key.port_u.value
344 #define tc_proto tc_key.proto
345 #define tc_saddr tc_key.addr_u.addrs.addr1
346 #define tc_daddr tc_key.addr_u.addrs.addr2
347 #define tc_sport tc_key.port_u.ports.port1
348 #define tc_dport tc_key.port_u.ports.port2
350 RB_HEAD(ipfw_trkcnt_tree, ipfw_trkcnt);
352 struct ipfw_state;
354 struct ipfw_track {
355 RB_ENTRY(ipfw_track) t_rblink;
356 struct ipfw_key t_key;
357 struct ip_fw *t_rule;
358 time_t t_lastexp;
359 LIST_HEAD(, ipfw_state) t_state_list;
360 time_t t_expire;
361 volatile int *t_count;
362 struct ipfw_trkcnt *t_trkcnt;
363 TAILQ_ENTRY(ipfw_track) t_link;
366 #define t_addrs t_key.addr_u.value
367 #define t_ports t_key.port_u.value
368 #define t_proto t_key.proto
369 #define t_saddr t_key.addr_u.addrs.addr1
370 #define t_daddr t_key.addr_u.addrs.addr2
371 #define t_sport t_key.port_u.ports.port1
372 #define t_dport t_key.port_u.ports.port2
374 RB_HEAD(ipfw_track_tree, ipfw_track);
375 TAILQ_HEAD(ipfw_track_list, ipfw_track);
377 struct ipfw_state {
378 RB_ENTRY(ipfw_state) st_rblink;
379 struct ipfw_key st_key;
381 time_t st_expire; /* expire time */
382 struct ip_fw *st_rule;
384 uint64_t st_pcnt; /* packets */
385 uint64_t st_bcnt; /* bytes */
388 * st_state:
389 * State of this rule, typically a combination of TCP flags.
391 * st_ack_fwd/st_ack_rev:
392 * Most recent ACKs in forward and reverse direction. They
393 * are used to generate keepalives.
395 uint32_t st_state;
396 uint32_t st_ack_fwd;
397 uint32_t st_seq_fwd;
398 uint32_t st_ack_rev;
399 uint32_t st_seq_rev;
401 uint16_t st_flags; /* IPFW_STATE_F_ */
402 uint16_t st_type; /* O_KEEP_STATE/O_LIMIT */
403 struct ipfw_track *st_track;
405 LIST_ENTRY(ipfw_state) st_trklink;
406 TAILQ_ENTRY(ipfw_state) st_link;
409 #define st_addrs st_key.addr_u.value
410 #define st_ports st_key.port_u.value
411 #define st_proto st_key.proto
412 #define st_swap st_key.swap
414 #define IPFW_STATE_F_ACKFWD 0x0001
415 #define IPFW_STATE_F_SEQFWD 0x0002
416 #define IPFW_STATE_F_ACKREV 0x0004
417 #define IPFW_STATE_F_SEQREV 0x0008
419 TAILQ_HEAD(ipfw_state_list, ipfw_state);
420 RB_HEAD(ipfw_state_tree, ipfw_state);
422 struct ipfw_context {
423 struct ip_fw *ipfw_layer3_chain; /* rules for layer3 */
424 struct ip_fw *ipfw_default_rule; /* default rule */
425 uint64_t ipfw_norule_counter; /* ipfw_log(NULL) stat*/
428 * ipfw_set_disable contains one bit per set value (0..31).
429 * If the bit is set, all rules with the corresponding set
430 * are disabled. Set IPFW_DEFAULT_SET is reserved for the
431 * default rule and CANNOT be disabled.
433 uint32_t ipfw_set_disable;
435 uint8_t ipfw_flags; /* IPFW_FLAG_ */
437 struct ipfw_state_tree ipfw_state_tree;
438 struct ipfw_state_list ipfw_state_list;
439 int ipfw_state_loosecnt;
440 int ipfw_state_cnt;
442 union {
443 struct ipfw_state state;
444 struct ipfw_track track;
445 struct ipfw_trkcnt trkcnt;
446 } ipfw_tmpkey;
448 struct ipfw_track_tree ipfw_track_tree;
449 struct ipfw_track_list ipfw_track_list;
450 struct ipfw_trkcnt *ipfw_trkcnt_spare;
452 struct callout ipfw_stateto_ch;
453 time_t ipfw_state_lastexp;
454 struct netmsg_base ipfw_stateexp_nm;
455 struct netmsg_base ipfw_stateexp_more;
456 struct ipfw_state ipfw_stateexp_anch;
458 struct callout ipfw_trackto_ch;
459 time_t ipfw_track_lastexp;
460 struct netmsg_base ipfw_trackexp_nm;
461 struct netmsg_base ipfw_trackexp_more;
462 struct ipfw_track ipfw_trackexp_anch;
464 struct callout ipfw_keepalive_ch;
465 struct netmsg_base ipfw_keepalive_nm;
466 struct netmsg_base ipfw_keepalive_more;
467 struct ipfw_state ipfw_keepalive_anch;
470 * Statistics
472 u_long ipfw_sts_reap;
473 u_long ipfw_sts_reapfailed;
474 u_long ipfw_sts_overflow;
475 u_long ipfw_sts_nomem;
476 u_long ipfw_sts_tcprecycled;
478 u_long ipfw_tks_nomem;
479 u_long ipfw_tks_reap;
480 u_long ipfw_tks_reapfailed;
481 u_long ipfw_tks_overflow;
482 u_long ipfw_tks_cntnomem;
485 #define IPFW_FLAG_KEEPALIVE 0x01
486 #define IPFW_FLAG_STATEEXP 0x02
487 #define IPFW_FLAG_TRACKEXP 0x04
488 #define IPFW_FLAG_STATEREAP 0x08
489 #define IPFW_FLAG_TRACKREAP 0x10
491 #define ipfw_state_tmpkey ipfw_tmpkey.state
492 #define ipfw_track_tmpkey ipfw_tmpkey.track
493 #define ipfw_trkcnt_tmpkey ipfw_tmpkey.trkcnt
495 struct ipfw_global {
496 int ipfw_state_loosecnt; /* cache aligned */
497 time_t ipfw_state_globexp __cachealign;
499 struct lwkt_token ipfw_trkcnt_token __cachealign;
500 struct ipfw_trkcnt_tree ipfw_trkcnt_tree;
501 int ipfw_trkcnt_cnt;
502 time_t ipfw_track_globexp;
504 #ifdef KLD_MODULE
506 * The module cannot be unloaded if there are references to
507 * certain rules of ipfw(4), e.g. from dummynet(4).
509 int ipfw_refcnt __cachealign;
510 #endif
511 } __cachealign;
513 static struct ipfw_context *ipfw_ctx[MAXCPU];
515 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chains");
518 * The following two global variables are accessed and updated
519 * only in netisr0.
521 static uint32_t static_count; /* # of static rules */
522 static uint32_t static_ioc_len; /* bytes of static rules */
525 * If 1, then ipfw static rules are being flushed,
526 * ipfw_chk() will skip to the default rule.
528 static int ipfw_flushing;
530 static int fw_verbose;
531 static int verbose_limit;
533 static int fw_debug;
534 static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
536 static int ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
537 static int ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
539 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
540 SYSCTL_NODE(_net_inet_ip_fw, OID_AUTO, stats, CTLFLAG_RW, 0,
541 "Firewall statistics");
543 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
544 &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
545 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
546 &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
547 "Rule number autincrement step");
548 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, CTLFLAG_RW,
549 &fw_one_pass, 0,
550 "Only do a single pass through ipfw when using dummynet(4)");
551 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
552 &fw_debug, 0, "Enable printing of debug ip_fw statements");
553 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
554 &fw_verbose, 0, "Log matches to ipfw rules");
555 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
556 &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
558 static int ipfw_sysctl_dyncnt(SYSCTL_HANDLER_ARGS);
559 static int ipfw_sysctl_dynmax(SYSCTL_HANDLER_ARGS);
560 static int ipfw_sysctl_statecnt(SYSCTL_HANDLER_ARGS);
561 static int ipfw_sysctl_statemax(SYSCTL_HANDLER_ARGS);
562 static int ipfw_sysctl_scancnt(SYSCTL_HANDLER_ARGS);
563 static int ipfw_sysctl_stat(SYSCTL_HANDLER_ARGS);
566 * Timeouts for various events in handling states.
568 * NOTE:
569 * 1 == 0~1 second.
570 * 2 == 1~2 second(s).
572 * We use 2 seconds for the FIN lifetime, so that the states will
573 * not be reaped prematurely.
575 static uint32_t dyn_ack_lifetime = 300;
576 static uint32_t dyn_syn_lifetime = 20;
577 static uint32_t dyn_finwait_lifetime = 20;
578 static uint32_t dyn_fin_lifetime = 2;
579 static uint32_t dyn_rst_lifetime = 2;
580 static uint32_t dyn_udp_lifetime = 10;
581 static uint32_t dyn_short_lifetime = 5; /* used by tracks too */
584 * Keepalives are sent if dyn_keepalive is set. They are sent every
585 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
586 * seconds of lifetime of a rule.
588 static uint32_t dyn_keepalive_interval = 20;
589 static uint32_t dyn_keepalive_period = 5;
590 static uint32_t dyn_keepalive = 1; /* do send keepalives */
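/*
 * Example with the defaults above: an established TCP state lives
 * dyn_ack_lifetime (300) seconds after its last packet.  Keepalives
 * are only attempted during its final dyn_keepalive_interval (20)
 * seconds, once every dyn_keepalive_period (5) seconds, i.e. at
 * roughly 280, 285, 290 and 295 seconds of idle time.  If either
 * endpoint answers, the state is refreshed; otherwise it expires.
 */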
592 static struct ipfw_global ipfw_gd;
593 static int ipfw_state_loosecnt_updthr;
594 static int ipfw_state_max = 4096; /* max # of states */
595 static int ipfw_track_max = 4096; /* max # of tracks */
597 static int ipfw_state_headroom; /* setup at module load time */
598 static int ipfw_state_reap_min = 8;
599 static int ipfw_state_expire_max = 32;
600 static int ipfw_state_scan_max = 256;
601 static int ipfw_keepalive_max = 8;
602 static int ipfw_track_reap_max = 4;
603 static int ipfw_track_expire_max = 16;
604 static int ipfw_track_scan_max = 128;
606 /* Compat */
607 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
608 CTLTYPE_INT | CTLFLAG_RD, NULL, 0, ipfw_sysctl_dyncnt, "I",
609 "Number of states and tracks");
610 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
611 CTLTYPE_INT | CTLFLAG_RW, NULL, 0, ipfw_sysctl_dynmax, "I",
612 "Max number of states and tracks");
614 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_cnt,
615 CTLTYPE_INT | CTLFLAG_RD, NULL, 0, ipfw_sysctl_statecnt, "I",
616 "Number of states");
617 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_max,
618 CTLTYPE_INT | CTLFLAG_RW, NULL, 0, ipfw_sysctl_statemax, "I",
619 "Max number of states");
620 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, state_headroom, CTLFLAG_RW,
621 &ipfw_state_headroom, 0, "headroom for state reap");
622 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, track_cnt, CTLFLAG_RD,
623 &ipfw_gd.ipfw_trkcnt_cnt, 0, "Number of tracks");
624 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, track_max, CTLFLAG_RW,
625 &ipfw_track_max, 0, "Max number of tracks");
626 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
627 &static_count, 0, "Number of static rules");
628 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
629 &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
630 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
631 &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
632 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime, CTLFLAG_RW,
633 &dyn_fin_lifetime, 0, "Lifetime of dyn. rules for fin");
634 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_finwait_lifetime, CTLFLAG_RW,
635 &dyn_finwait_lifetime, 0, "Lifetime of dyn. rules for fin wait");
636 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime, CTLFLAG_RW,
637 &dyn_rst_lifetime, 0, "Lifetime of dyn. rules for rst");
638 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
639 &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
640 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
641 &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
642 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
643 &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
644 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_scan_max,
645 CTLTYPE_INT | CTLFLAG_RW, &ipfw_state_scan_max, 0, ipfw_sysctl_scancnt,
646 "I", "# of states to scan for each expire iteration");
647 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_expire_max,
648 CTLTYPE_INT | CTLFLAG_RW, &ipfw_state_expire_max, 0, ipfw_sysctl_scancnt,
649 "I", "# of states to expire for each expire iteration");
650 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, keepalive_max,
651 CTLTYPE_INT | CTLFLAG_RW, &ipfw_keepalive_max, 0, ipfw_sysctl_scancnt,
652 "I", "# of states to expire for each expire iteration");
653 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, state_reap_min,
654 CTLTYPE_INT | CTLFLAG_RW, &ipfw_state_reap_min, 0, ipfw_sysctl_scancnt,
655 "I", "# of states to reap for state shortage");
656 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, track_scan_max,
657 CTLTYPE_INT | CTLFLAG_RW, &ipfw_track_scan_max, 0, ipfw_sysctl_scancnt,
658 "I", "# of tracks to scan for each expire iteration");
659 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, track_expire_max,
660 CTLTYPE_INT | CTLFLAG_RW, &ipfw_track_expire_max, 0, ipfw_sysctl_scancnt,
661 "I", "# of tracks to expire for each expire iteration");
662 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, track_reap_max,
663 CTLTYPE_INT | CTLFLAG_RW, &ipfw_track_reap_max, 0, ipfw_sysctl_scancnt,
664 "I", "# of tracks to reap for track shortage");
666 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_reap,
667 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
668 __offsetof(struct ipfw_context, ipfw_sts_reap), ipfw_sysctl_stat,
669 "LU", "# of state reaps due to states shortage");
670 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_reapfailed,
671 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
672 __offsetof(struct ipfw_context, ipfw_sts_reapfailed), ipfw_sysctl_stat,
673 "LU", "# of state reap failure");
674 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_overflow,
675 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
676 __offsetof(struct ipfw_context, ipfw_sts_overflow), ipfw_sysctl_stat,
677 "LU", "# of state overflow");
678 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_nomem,
679 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
680 __offsetof(struct ipfw_context, ipfw_sts_nomem), ipfw_sysctl_stat,
681 "LU", "# of state allocation failure");
682 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, state_tcprecycled,
683 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
684 __offsetof(struct ipfw_context, ipfw_sts_tcprecycled), ipfw_sysctl_stat,
685 "LU", "# of state deleted due to fast TCP port recycling");
687 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_nomem,
688 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
689 __offsetof(struct ipfw_context, ipfw_tks_nomem), ipfw_sysctl_stat,
690 "LU", "# of track allocation failure");
691 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_reap,
692 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
693 __offsetof(struct ipfw_context, ipfw_tks_reap), ipfw_sysctl_stat,
694 "LU", "# of track reap due to tracks shortage");
695 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_reapfailed,
696 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
697 __offsetof(struct ipfw_context, ipfw_tks_reapfailed), ipfw_sysctl_stat,
698 "LU", "# of track reap failure");
699 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_overflow,
700 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
701 __offsetof(struct ipfw_context, ipfw_tks_overflow), ipfw_sysctl_stat,
702 "LU", "# of track overflow");
703 SYSCTL_PROC(_net_inet_ip_fw_stats, OID_AUTO, track_cntnomem,
704 CTLTYPE_ULONG | CTLFLAG_RW, NULL,
705 __offsetof(struct ipfw_context, ipfw_tks_cntnomem), ipfw_sysctl_stat,
706 "LU", "# of track counter allocation failure");
708 static int ipfw_state_cmp(struct ipfw_state *,
709 struct ipfw_state *);
710 static int ipfw_trkcnt_cmp(struct ipfw_trkcnt *,
711 struct ipfw_trkcnt *);
712 static int ipfw_track_cmp(struct ipfw_track *,
713 struct ipfw_track *);
715 RB_PROTOTYPE(ipfw_state_tree, ipfw_state, st_rblink, ipfw_state_cmp);
716 RB_GENERATE(ipfw_state_tree, ipfw_state, st_rblink, ipfw_state_cmp);
718 RB_PROTOTYPE(ipfw_trkcnt_tree, ipfw_trkcnt, tc_rblink, ipfw_trkcnt_cmp);
719 RB_GENERATE(ipfw_trkcnt_tree, ipfw_trkcnt, tc_rblink, ipfw_trkcnt_cmp);
721 RB_PROTOTYPE(ipfw_track_tree, ipfw_track, t_rblink, ipfw_track_cmp);
722 RB_GENERATE(ipfw_track_tree, ipfw_track, t_rblink, ipfw_track_cmp);
724 static ip_fw_chk_t ipfw_chk;
725 static void ipfw_track_expire_ipifunc(void *);
726 static void ipfw_state_expire_ipifunc(void *);
727 static void ipfw_keepalive(void *);
728 static int ipfw_state_expire_start(struct ipfw_context *,
729 int, int);
731 #define IPFW_TRKCNT_TOKGET lwkt_gettoken(&ipfw_gd.ipfw_trkcnt_token)
732 #define IPFW_TRKCNT_TOKREL lwkt_reltoken(&ipfw_gd.ipfw_trkcnt_token)
733 #define IPFW_TRKCNT_TOKINIT \
734 lwkt_token_init(&ipfw_gd.ipfw_trkcnt_token, "ipfw_trkcnt");
736 static __inline void
737 ipfw_key_build(struct ipfw_key *key, in_addr_t saddr, uint16_t sport,
738 in_addr_t daddr, uint16_t dport, uint8_t proto)
741 key->proto = proto;
742 key->swap = 0;
744 if (saddr < daddr) {
745 key->addr_u.addrs.addr1 = daddr;
746 key->addr_u.addrs.addr2 = saddr;
747 key->swap |= IPFW_KEY_SWAP_ADDRS;
748 } else {
749 key->addr_u.addrs.addr1 = saddr;
750 key->addr_u.addrs.addr2 = daddr;
753 if (sport < dport) {
754 key->port_u.ports.port1 = dport;
755 key->port_u.ports.port2 = sport;
756 key->swap |= IPFW_KEY_SWAP_PORTS;
757 } else {
758 key->port_u.ports.port1 = sport;
759 key->port_u.ports.port2 = dport;
762 if (sport == dport && (key->swap & IPFW_KEY_SWAP_ADDRS))
763 key->swap |= IPFW_KEY_SWAP_PORTS;
764 if (saddr == daddr && (key->swap & IPFW_KEY_SWAP_PORTS))
765 key->swap |= IPFW_KEY_SWAP_ADDRS;
768 static __inline void
769 ipfw_key_4tuple(const struct ipfw_key *key, in_addr_t *saddr, uint16_t *sport,
770 in_addr_t *daddr, uint16_t *dport)
773 if (key->swap & IPFW_KEY_SWAP_ADDRS) {
774 *saddr = key->addr_u.addrs.addr2;
775 *daddr = key->addr_u.addrs.addr1;
776 } else {
777 *saddr = key->addr_u.addrs.addr1;
778 *daddr = key->addr_u.addrs.addr2;
781 if (key->swap & IPFW_KEY_SWAP_PORTS) {
782 *sport = key->port_u.ports.port2;
783 *dport = key->port_u.ports.port1;
784 } else {
785 *sport = key->port_u.ports.port1;
786 *dport = key->port_u.ports.port2;
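/*
 * Illustrative sketch (a hypothetical self-test, not part of the
 * original file): both directions of a flow build the same
 * canonical key, differing only in the swap flags, and
 * ipfw_key_4tuple() recovers the original 4-tuple.
 */
#if 0
static void
example_key_roundtrip(void)
{
	struct ipfw_key k1, k2;
	in_addr_t saddr, daddr;
	uint16_t sport, dport;

	/* 10.0.0.1:12345 -> 10.0.0.2:80, TCP */
	ipfw_key_build(&k1, htonl(0x0a000001), 12345,
	    htonl(0x0a000002), 80, IPPROTO_TCP);
	/* The reply direction: 10.0.0.2:80 -> 10.0.0.1:12345 */
	ipfw_key_build(&k2, htonl(0x0a000002), 80,
	    htonl(0x0a000001), 12345, IPPROTO_TCP);

	/* Same canonical addresses/ports; only the swap flags differ. */
	KKASSERT(k1.addr_u.value == k2.addr_u.value);
	KKASSERT(k1.port_u.value == k2.port_u.value);
	KKASSERT((k1.swap ^ k2.swap) == IPFW_KEY_SWAP_ALL);

	/* The original 4-tuple is recoverable from either key. */
	ipfw_key_4tuple(&k1, &saddr, &sport, &daddr, &dport);
	KKASSERT(saddr == htonl(0x0a000001) && daddr == htonl(0x0a000002));
	KKASSERT(sport == 12345 && dport == 80);
}
#endif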
790 static int
791 ipfw_state_cmp(struct ipfw_state *s1, struct ipfw_state *s2)
794 if (s1->st_proto > s2->st_proto)
795 return (1);
796 if (s1->st_proto < s2->st_proto)
797 return (-1);
799 if (s1->st_addrs > s2->st_addrs)
800 return (1);
801 if (s1->st_addrs < s2->st_addrs)
802 return (-1);
804 if (s1->st_ports > s2->st_ports)
805 return (1);
806 if (s1->st_ports < s2->st_ports)
807 return (-1);
809 if (s1->st_swap == s2->st_swap ||
810 (s1->st_swap ^ s2->st_swap) == IPFW_KEY_SWAP_ALL)
811 return (0);
813 if (s1->st_swap > s2->st_swap)
814 return (1);
815 else
816 return (-1);
819 static int
820 ipfw_trkcnt_cmp(struct ipfw_trkcnt *t1, struct ipfw_trkcnt *t2)
823 if (t1->tc_proto > t2->tc_proto)
824 return (1);
825 if (t1->tc_proto < t2->tc_proto)
826 return (-1);
828 if (t1->tc_addrs > t2->tc_addrs)
829 return (1);
830 if (t1->tc_addrs < t2->tc_addrs)
831 return (-1);
833 if (t1->tc_ports > t2->tc_ports)
834 return (1);
835 if (t1->tc_ports < t2->tc_ports)
836 return (-1);
838 if (t1->tc_ruleid > t2->tc_ruleid)
839 return (1);
840 if (t1->tc_ruleid < t2->tc_ruleid)
841 return (-1);
843 return (0);
846 static int
847 ipfw_track_cmp(struct ipfw_track *t1, struct ipfw_track *t2)
850 if (t1->t_proto > t2->t_proto)
851 return (1);
852 if (t1->t_proto < t2->t_proto)
853 return (-1);
855 if (t1->t_addrs > t2->t_addrs)
856 return (1);
857 if (t1->t_addrs < t2->t_addrs)
858 return (-1);
860 if (t1->t_ports > t2->t_ports)
861 return (1);
862 if (t1->t_ports < t2->t_ports)
863 return (-1);
865 if ((uintptr_t)t1->t_rule > (uintptr_t)t2->t_rule)
866 return (1);
867 if ((uintptr_t)t1->t_rule < (uintptr_t)t2->t_rule)
868 return (-1);
870 return (0);
873 static void
874 ipfw_state_max_set(int state_max)
877 ipfw_state_max = state_max;
878 /* Allow 5% states over-allocation. */
879 ipfw_state_loosecnt_updthr = (state_max / 20) / netisr_ncpus;
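/*
 * Example: with the default ipfw_state_max of 4096 and 4 netisr
 * cpus, the threshold is (4096 / 20) / 4 = 51, i.e. each CPU folds
 * its local loose count into the global one after ~51 new states,
 * keeping the global estimate within the 5% over-allocation budget.
 */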
882 static __inline int
883 ipfw_state_cntcoll(void)
885 int cpu, state_cnt = 0;
887 for (cpu = 0; cpu < netisr_ncpus; ++cpu)
888 state_cnt += ipfw_ctx[cpu]->ipfw_state_cnt;
889 return (state_cnt);
892 static __inline int
893 ipfw_state_cntsync(void)
895 int state_cnt;
897 state_cnt = ipfw_state_cntcoll();
898 ipfw_gd.ipfw_state_loosecnt = state_cnt;
899 return (state_cnt);
902 static __inline int
903 ipfw_free_rule(struct ip_fw *rule)
905 KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d", mycpuid));
906 KASSERT(rule->refcnt > 0, ("invalid refcnt %u", rule->refcnt));
907 rule->refcnt--;
908 if (rule->refcnt == 0) {
909 kfree(rule, M_IPFW);
910 return 1;
912 return 0;
915 static void
916 ipfw_unref_rule(void *priv)
918 ipfw_free_rule(priv);
919 #ifdef KLD_MODULE
920 atomic_subtract_int(&ipfw_gd.ipfw_refcnt, 1);
921 #endif
924 static __inline void
925 ipfw_ref_rule(struct ip_fw *rule)
927 KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d", mycpuid));
928 #ifdef KLD_MODULE
929 atomic_add_int(&ipfw_gd.ipfw_refcnt, 1);
930 #endif
931 rule->refcnt++;
935 * This macro maps an ip pointer into a layer3 header pointer of type T
937 #define L3HDR(T, ip) ((T *)((uint32_t *)(ip) + (ip)->ip_hl))
939 static __inline int
940 icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
942 int type = L3HDR(struct icmp, ip)->icmp_type;
944 return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
947 #define TT ((1 << ICMP_ECHO) | \
948 (1 << ICMP_ROUTERSOLICIT) | \
949 (1 << ICMP_TSTAMP) | \
950 (1 << ICMP_IREQ) | \
951 (1 << ICMP_MASKREQ))
953 static int
954 is_icmp_query(struct ip *ip)
956 int type = L3HDR(struct icmp, ip)->icmp_type;
958 return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
961 #undef TT
964 * The following checks use two arrays of 8 or 16 bits to store the
965 * bits that we want set or clear, respectively. They are in the
966 * low and high half of cmd->arg1 or cmd->d[0].
968 * We scan options and store the bits we find set. We succeed if
970 * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
972 * The code is sometimes optimized not to store additional variables.
974 static int
975 flags_match(ipfw_insn *cmd, uint8_t bits)
977 u_char want_clear;
978 bits = ~bits;
980 if (((cmd->arg1 & 0xff) & bits) != 0)
981 return 0; /* some bits we want set were clear */
983 want_clear = (cmd->arg1 >> 8) & 0xff;
984 if ((want_clear & bits) != want_clear)
985 return 0; /* some bits we want clear were set */
986 return 1;
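/*
 * Example: to match a TCP "setup" segment (SYN set, ACK clear), a
 * rule would encode the want-set bits in the low byte of arg1 and
 * the want-clear bits in the high byte:
 *
 *	cmd->arg1 = TH_SYN | (TH_ACK << 8);
 *
 * flags_match(cmd, TH_SYN) then returns 1, while
 * flags_match(cmd, TH_SYN | TH_ACK) returns 0.
 */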
989 static int
990 ipopts_match(struct ip *ip, ipfw_insn *cmd)
992 int optlen, bits = 0;
993 u_char *cp = (u_char *)(ip + 1);
994 int x = (ip->ip_hl << 2) - sizeof(struct ip);
996 for (; x > 0; x -= optlen, cp += optlen) {
997 int opt = cp[IPOPT_OPTVAL];
999 if (opt == IPOPT_EOL)
1000 break;
1002 if (opt == IPOPT_NOP) {
1003 optlen = 1;
1004 } else {
1005 optlen = cp[IPOPT_OLEN];
1006 if (optlen <= 0 || optlen > x)
1007 return 0; /* invalid or truncated */
1010 switch (opt) {
1011 case IPOPT_LSRR:
1012 bits |= IP_FW_IPOPT_LSRR;
1013 break;
1015 case IPOPT_SSRR:
1016 bits |= IP_FW_IPOPT_SSRR;
1017 break;
1019 case IPOPT_RR:
1020 bits |= IP_FW_IPOPT_RR;
1021 break;
1023 case IPOPT_TS:
1024 bits |= IP_FW_IPOPT_TS;
1025 break;
1027 default:
1028 break;
1031 return (flags_match(cmd, bits));
1034 static int
1035 tcpopts_match(struct ip *ip, ipfw_insn *cmd)
1037 int optlen, bits = 0;
1038 struct tcphdr *tcp = L3HDR(struct tcphdr, ip);
1039 u_char *cp = (u_char *)(tcp + 1);
1040 int x = (tcp->th_off << 2) - sizeof(struct tcphdr);
1042 for (; x > 0; x -= optlen, cp += optlen) {
1043 int opt = cp[0];
1045 if (opt == TCPOPT_EOL)
1046 break;
1048 if (opt == TCPOPT_NOP) {
1049 optlen = 1;
1050 } else {
1051 optlen = cp[1];
1052 if (optlen <= 0)
1053 break;
1056 switch (opt) {
1057 case TCPOPT_MAXSEG:
1058 bits |= IP_FW_TCPOPT_MSS;
1059 break;
1061 case TCPOPT_WINDOW:
1062 bits |= IP_FW_TCPOPT_WINDOW;
1063 break;
1065 case TCPOPT_SACK_PERMITTED:
1066 case TCPOPT_SACK:
1067 bits |= IP_FW_TCPOPT_SACK;
1068 break;
1070 case TCPOPT_TIMESTAMP:
1071 bits |= IP_FW_TCPOPT_TS;
1072 break;
1074 case TCPOPT_CC:
1075 case TCPOPT_CCNEW:
1076 case TCPOPT_CCECHO:
1077 bits |= IP_FW_TCPOPT_CC;
1078 break;
1080 default:
1081 break;
1084 return (flags_match(cmd, bits));
1087 static int
1088 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
1090 if (ifp == NULL) /* no iface with this packet, match fails */
1091 return 0;
1093 /* Check by name or by IP address */
1094 if (cmd->name[0] != '\0') { /* match by name */
1095 /* Check name */
1096 if (cmd->p.glob) {
1097 if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
1098 return(1);
1099 } else {
1100 if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
1101 return(1);
1103 } else {
1104 struct ifaddr_container *ifac;
1106 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
1107 struct ifaddr *ia = ifac->ifa;
1109 if (ia->ifa_addr == NULL)
1110 continue;
1111 if (ia->ifa_addr->sa_family != AF_INET)
1112 continue;
1113 if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
1114 (ia->ifa_addr))->sin_addr.s_addr)
1115 return(1); /* match */
1118 return(0); /* no match, fail ... */
1121 #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
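/*
 * SNPARGS() expands to the buffer-offset/space-left pair expected
 * by ksnprintf() and clamps the space to 0 once the buffer is full,
 * so chained appends below are safe even on overflow, e.g.:
 *
 *	len = ksnprintf(SNPARGS(proto, 0), "TCP %s", src);
 *	ksnprintf(SNPARGS(proto, len), ":%d", sport);
 */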
1124 * We enter here when we have a rule with O_LOG.
1125 * XXX this function alone takes about 2Kbytes of code!
1127 static void
1128 ipfw_log(struct ipfw_context *ctx, struct ip_fw *f, u_int hlen,
1129 struct ether_header *eh, struct mbuf *m, struct ifnet *oif)
1131 char *action;
1132 int limit_reached = 0;
1133 char action2[40], proto[48], fragment[28], abuf[INET_ADDRSTRLEN];
1135 fragment[0] = '\0';
1136 proto[0] = '\0';
1138 if (f == NULL) { /* bogus pkt */
1139 if (verbose_limit != 0 &&
1140 ctx->ipfw_norule_counter >= verbose_limit)
1141 return;
1142 ctx->ipfw_norule_counter++;
1143 if (ctx->ipfw_norule_counter == verbose_limit)
1144 limit_reached = verbose_limit;
1145 action = "Refuse";
1146 } else { /* O_LOG is the first action, find the real one */
1147 ipfw_insn *cmd = ACTION_PTR(f);
1148 ipfw_insn_log *l = (ipfw_insn_log *)cmd;
1150 if (l->max_log != 0 && l->log_left == 0)
1151 return;
1152 l->log_left--;
1153 if (l->log_left == 0)
1154 limit_reached = l->max_log;
1155 cmd += F_LEN(cmd); /* point to first action */
1156 if (cmd->opcode == O_PROB)
1157 cmd += F_LEN(cmd);
1159 action = action2;
1160 switch (cmd->opcode) {
1161 case O_DENY:
1162 action = "Deny";
1163 break;
1165 case O_REJECT:
1166 if (cmd->arg1 == ICMP_REJECT_RST) {
1167 action = "Reset";
1168 } else if (cmd->arg1 == ICMP_UNREACH_HOST) {
1169 action = "Reject";
1170 } else {
1171 ksnprintf(SNPARGS(action2, 0), "Unreach %d",
1172 cmd->arg1);
1174 break;
1176 case O_ACCEPT:
1177 action = "Accept";
1178 break;
1180 case O_COUNT:
1181 action = "Count";
1182 break;
1184 case O_DIVERT:
1185 ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
1186 break;
1188 case O_TEE:
1189 ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
1190 break;
1192 case O_SKIPTO:
1193 ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
1194 break;
1196 case O_PIPE:
1197 ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
1198 break;
1200 case O_QUEUE:
1201 ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
1202 break;
1204 case O_FORWARD_IP:
1206 ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
1207 int len;
1209 len = ksnprintf(SNPARGS(action2, 0),
1210 "Forward to %s",
1211 kinet_ntoa(sa->sa.sin_addr, abuf));
1212 if (sa->sa.sin_port) {
1213 ksnprintf(SNPARGS(action2, len), ":%d",
1214 sa->sa.sin_port);
1217 break;
1219 default:
1220 action = "UNKNOWN";
1221 break;
1225 if (hlen == 0) { /* non-ip */
1226 ksnprintf(SNPARGS(proto, 0), "MAC");
1227 } else {
1228 struct ip *ip = mtod(m, struct ip *);
1229 /* these three are all aliases to the same thing */
1230 struct icmp *const icmp = L3HDR(struct icmp, ip);
1231 struct tcphdr *const tcp = (struct tcphdr *)icmp;
1232 struct udphdr *const udp = (struct udphdr *)icmp;
1234 int ip_off, offset, ip_len;
1235 int len;
1237 if (eh != NULL) { /* layer 2 packets are as on the wire */
1238 ip_off = ntohs(ip->ip_off);
1239 ip_len = ntohs(ip->ip_len);
1240 } else {
1241 ip_off = ip->ip_off;
1242 ip_len = ip->ip_len;
1244 offset = ip_off & IP_OFFMASK;
1245 switch (ip->ip_p) {
1246 case IPPROTO_TCP:
1247 len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
1248 kinet_ntoa(ip->ip_src, abuf));
1249 if (offset == 0) {
1250 ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
1251 ntohs(tcp->th_sport),
1252 kinet_ntoa(ip->ip_dst, abuf),
1253 ntohs(tcp->th_dport));
1254 } else {
1255 ksnprintf(SNPARGS(proto, len), " %s",
1256 kinet_ntoa(ip->ip_dst, abuf));
1258 break;
1260 case IPPROTO_UDP:
1261 len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
1262 kinet_ntoa(ip->ip_src, abuf));
1263 if (offset == 0) {
1264 ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
1265 ntohs(udp->uh_sport),
1266 kinet_ntoa(ip->ip_dst, abuf),
1267 ntohs(udp->uh_dport));
1268 } else {
1269 ksnprintf(SNPARGS(proto, len), " %s",
1270 kinet_ntoa(ip->ip_dst, abuf));
1272 break;
1274 case IPPROTO_ICMP:
1275 if (offset == 0) {
1276 len = ksnprintf(SNPARGS(proto, 0),
1277 "ICMP:%u.%u ",
1278 icmp->icmp_type,
1279 icmp->icmp_code);
1280 } else {
1281 len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
1283 len += ksnprintf(SNPARGS(proto, len), "%s",
1284 kinet_ntoa(ip->ip_src, abuf));
1285 ksnprintf(SNPARGS(proto, len), " %s",
1286 kinet_ntoa(ip->ip_dst, abuf));
1287 break;
1289 default:
1290 len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
1291 kinet_ntoa(ip->ip_src, abuf));
1292 ksnprintf(SNPARGS(proto, len), " %s",
1293 kinet_ntoa(ip->ip_dst, abuf));
1294 break;
1297 if (ip_off & (IP_MF | IP_OFFMASK)) {
1298 ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
1299 ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
1300 offset << 3, (ip_off & IP_MF) ? "+" : "");
1304 if (oif || m->m_pkthdr.rcvif) {
1305 log(LOG_SECURITY | LOG_INFO,
1306 "ipfw: %d %s %s %s via %s%s\n",
1307 f ? f->rulenum : -1,
1308 action, proto, oif ? "out" : "in",
1309 oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
1310 fragment);
1311 } else {
1312 log(LOG_SECURITY | LOG_INFO,
1313 "ipfw: %d %s %s [no if info]%s\n",
1314 f ? f->rulenum : -1,
1315 action, proto, fragment);
1318 if (limit_reached) {
1319 log(LOG_SECURITY | LOG_NOTICE,
1320 "ipfw: limit %d reached on entry %d\n",
1321 limit_reached, f ? f->rulenum : -1);
1325 #undef SNPARGS
1327 #define TIME_LEQ(a, b) ((a) - (b) <= 0)
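/*
 * TIME_LEQ() compares times by the sign of their difference, so it
 * stays correct across counter wraparound as long as the two times
 * are within half the counter range of each other.  For example,
 * with 32-bit arithmetic, a = 0xfffffff0 and b = 0x10 give
 * a - b = -32 as a signed value, so TIME_LEQ(a, b) is true even
 * though a > b numerically.
 */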
1329 static void
1330 ipfw_state_del(struct ipfw_context *ctx, struct ipfw_state *s)
1333 KASSERT(s->st_type == O_KEEP_STATE || s->st_type == O_LIMIT,
1334 ("invalid state type %u", s->st_type));
1335 KASSERT(ctx->ipfw_state_cnt > 0,
1336 ("invalid state count %d", ctx->ipfw_state_cnt));
1338 if (s->st_track != NULL) {
1339 struct ipfw_track *t = s->st_track;
1341 KASSERT(!LIST_EMPTY(&t->t_state_list),
1342 ("track state list is empty"));
1343 LIST_REMOVE(s, st_trklink);
1345 KASSERT(*t->t_count > 0,
1346 ("invalid track count %d", *t->t_count));
1347 atomic_subtract_int(t->t_count, 1);
1350 TAILQ_REMOVE(&ctx->ipfw_state_list, s, st_link);
1351 RB_REMOVE(ipfw_state_tree, &ctx->ipfw_state_tree, s);
1352 kfree(s, M_IPFW);
1354 ctx->ipfw_state_cnt--;
1355 if (ctx->ipfw_state_loosecnt > 0)
1356 ctx->ipfw_state_loosecnt--;
1359 static int
1360 ipfw_state_reap(struct ipfw_context *ctx, int reap_max)
1362 struct ipfw_state *s, *anchor;
1363 int expired;
1365 if (reap_max < ipfw_state_reap_min)
1366 reap_max = ipfw_state_reap_min;
1368 if ((ctx->ipfw_flags & IPFW_FLAG_STATEEXP) == 0) {
1370 * Kick-start state expiration. Ignore the scan limit;
1371 * we are short of states.
1373 ctx->ipfw_flags |= IPFW_FLAG_STATEREAP;
1374 expired = ipfw_state_expire_start(ctx, INT_MAX, reap_max);
1375 ctx->ipfw_flags &= ~IPFW_FLAG_STATEREAP;
1376 return (expired);
1380 * States are being expired.
1383 if (ctx->ipfw_state_cnt == 0)
1384 return (0);
1386 expired = 0;
1387 anchor = &ctx->ipfw_stateexp_anch;
1388 while ((s = TAILQ_NEXT(anchor, st_link)) != NULL) {
1390 * Ignore scan limit; we are short of states.
1393 TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
1394 TAILQ_INSERT_AFTER(&ctx->ipfw_state_list, s, anchor, st_link);
1396 if (s->st_type == O_ANCHOR)
1397 continue;
1399 if (IPFW_STATE_TCPCLOSED(s) ||
1400 TIME_LEQ(s->st_expire, time_uptime)) {
1401 ipfw_state_del(ctx, s);
1402 if (++expired >= reap_max)
1403 break;
1404 if ((expired & 0xff) == 0 &&
1405 ipfw_state_cntcoll() + ipfw_state_headroom <=
1406 ipfw_state_max)
1407 break;
1411 * NOTE:
1412 * Leave the anchor on the list, even if the end of the list has
1413 * been reached. ipfw_state_expire_more_dispatch() will handle
1414 * the removal.
1416 return (expired);
1419 static void
1420 ipfw_state_flush(struct ipfw_context *ctx, const struct ip_fw *rule)
1422 struct ipfw_state *s, *sn;
1424 TAILQ_FOREACH_MUTABLE(s, &ctx->ipfw_state_list, st_link, sn) {
1425 if (s->st_type == O_ANCHOR)
1426 continue;
1427 if (rule != NULL && s->st_rule != rule)
1428 continue;
1429 ipfw_state_del(ctx, s);
1433 static void
1434 ipfw_state_expire_done(struct ipfw_context *ctx)
1437 KASSERT(ctx->ipfw_flags & IPFW_FLAG_STATEEXP,
1438 ("stateexp is not in progress"));
1439 ctx->ipfw_flags &= ~IPFW_FLAG_STATEEXP;
1440 callout_reset(&ctx->ipfw_stateto_ch, hz,
1441 ipfw_state_expire_ipifunc, NULL);
1444 static void
1445 ipfw_state_expire_more(struct ipfw_context *ctx)
1447 struct netmsg_base *nm = &ctx->ipfw_stateexp_more;
1449 KASSERT(ctx->ipfw_flags & IPFW_FLAG_STATEEXP,
1450 ("stateexp is not in progress"));
1451 KASSERT(nm->lmsg.ms_flags & MSGF_DONE,
1452 ("stateexp more did not finish"));
1453 netisr_sendmsg_oncpu(nm);
1456 static int
1457 ipfw_state_expire_loop(struct ipfw_context *ctx, struct ipfw_state *anchor,
1458 int scan_max, int expire_max)
1460 struct ipfw_state *s;
1461 int scanned = 0, expired = 0;
1463 KASSERT(ctx->ipfw_flags & IPFW_FLAG_STATEEXP,
1464 ("stateexp is not in progress"));
1466 while ((s = TAILQ_NEXT(anchor, st_link)) != NULL) {
1467 if (scanned++ >= scan_max) {
1468 ipfw_state_expire_more(ctx);
1469 return (expired);
1472 TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
1473 TAILQ_INSERT_AFTER(&ctx->ipfw_state_list, s, anchor, st_link);
1475 if (s->st_type == O_ANCHOR)
1476 continue;
1478 if (TIME_LEQ(s->st_expire, time_uptime) ||
1479 ((ctx->ipfw_flags & IPFW_FLAG_STATEREAP) &&
1480 IPFW_STATE_TCPCLOSED(s))) {
1481 ipfw_state_del(ctx, s);
1482 if (++expired >= expire_max) {
1483 ipfw_state_expire_more(ctx);
1484 return (expired);
1486 if ((ctx->ipfw_flags & IPFW_FLAG_STATEREAP) &&
1487 (expired & 0xff) == 0 &&
1488 ipfw_state_cntcoll() + ipfw_state_headroom <=
1489 ipfw_state_max) {
1490 ipfw_state_expire_more(ctx);
1491 return (expired);
1495 TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
1496 ipfw_state_expire_done(ctx);
1497 return (expired);
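/*
 * Note on the loop above (and ipfw_state_reap()): the expiration
 * scan threads a dummy "anchor" state (st_type == O_ANCHOR) onto
 * ipfw_state_list as a resume marker.  Each step moves the anchor
 * past the state just examined:
 *
 *	while ((s = TAILQ_NEXT(anchor, st_link)) != NULL) {
 *		TAILQ_REMOVE(list, anchor, st_link);
 *		TAILQ_INSERT_AFTER(list, s, anchor, st_link);
 *		(examine s)
 *	}
 *
 * When the scan budget runs out, the anchor stays in place and
 * ipfw_state_expire_more() queues another netmsg batch; the next
 * dispatch resumes right after the anchor, so states are neither
 * rescanned nor skipped.
 */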
1500 static void
1501 ipfw_state_expire_more_dispatch(netmsg_t nm)
1503 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1504 struct ipfw_state *anchor;
1506 ASSERT_NETISR_NCPUS(mycpuid);
1507 KASSERT(ctx->ipfw_flags & IPFW_FLAG_STATEEXP,
1508 ("statexp is not in progress"));
1510 /* Reply ASAP */
1511 netisr_replymsg(&nm->base, 0);
1513 anchor = &ctx->ipfw_stateexp_anch;
1514 if (ctx->ipfw_state_cnt == 0) {
1515 TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
1516 ipfw_state_expire_done(ctx);
1517 return;
1519 ipfw_state_expire_loop(ctx, anchor,
1520 ipfw_state_scan_max, ipfw_state_expire_max);
1523 static int
1524 ipfw_state_expire_start(struct ipfw_context *ctx, int scan_max, int expire_max)
1526 struct ipfw_state *anchor;
1528 KASSERT((ctx->ipfw_flags & IPFW_FLAG_STATEEXP) == 0,
1529 ("stateexp is in progress"));
1530 ctx->ipfw_flags |= IPFW_FLAG_STATEEXP;
1532 if (ctx->ipfw_state_cnt == 0) {
1533 ipfw_state_expire_done(ctx);
1534 return (0);
1538 * Do not expire more than once per second; it is useless.
1540 if ((ctx->ipfw_flags & IPFW_FLAG_STATEREAP) == 0 &&
1541 ctx->ipfw_state_lastexp == time_uptime) {
1542 ipfw_state_expire_done(ctx);
1543 return (0);
1545 ctx->ipfw_state_lastexp = time_uptime;
1547 anchor = &ctx->ipfw_stateexp_anch;
1548 TAILQ_INSERT_HEAD(&ctx->ipfw_state_list, anchor, st_link);
1549 return (ipfw_state_expire_loop(ctx, anchor, scan_max, expire_max));
1552 static void
1553 ipfw_state_expire_dispatch(netmsg_t nm)
1555 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
1557 ASSERT_NETISR_NCPUS(mycpuid);
1559 /* Reply ASAP */
1560 crit_enter();
1561 netisr_replymsg(&nm->base, 0);
1562 crit_exit();
1564 if (ctx->ipfw_flags & IPFW_FLAG_STATEEXP) {
1565 /* Running; done. */
1566 return;
1568 ipfw_state_expire_start(ctx,
1569 ipfw_state_scan_max, ipfw_state_expire_max);
1572 static void
1573 ipfw_state_expire_ipifunc(void *dummy __unused)
1575 struct netmsg_base *msg;
1577 KKASSERT(mycpuid < netisr_ncpus);
1578 msg = &ipfw_ctx[mycpuid]->ipfw_stateexp_nm;
1580 crit_enter();
1581 if (msg->lmsg.ms_flags & MSGF_DONE)
1582 netisr_sendmsg_oncpu(msg);
1583 crit_exit();
1586 static boolean_t
1587 ipfw_state_update_tcp(struct ipfw_state *s, int dir, const struct tcphdr *tcp)
1589 uint32_t seq = ntohl(tcp->th_seq);
1590 uint32_t ack = ntohl(tcp->th_ack);
1592 if (tcp->th_flags & TH_RST)
1593 return (TRUE);
1595 if (dir == MATCH_FORWARD) {
1596 if ((s->st_flags & IPFW_STATE_F_SEQFWD) == 0) {
1597 s->st_flags |= IPFW_STATE_F_SEQFWD;
1598 s->st_seq_fwd = seq;
1599 } else if (SEQ_GEQ(seq, s->st_seq_fwd)) {
1600 s->st_seq_fwd = seq;
1601 } else {
1602 /* Out-of-sequence; done. */
1603 return (FALSE);
1605 if (tcp->th_flags & TH_ACK) {
1606 if ((s->st_flags & IPFW_STATE_F_ACKFWD) == 0) {
1607 s->st_flags |= IPFW_STATE_F_ACKFWD;
1608 s->st_ack_fwd = ack;
1609 } else if (SEQ_GEQ(ack, s->st_ack_fwd)) {
1610 s->st_ack_fwd = ack;
1611 } else {
1612 /* Out-of-sequence; done. */
1613 return (FALSE);
1616 if ((s->st_state & ((TH_FIN | TH_ACK) << 8)) ==
1617 (TH_FIN << 8) && s->st_ack_fwd == s->st_seq_rev + 1)
1618 s->st_state |= (TH_ACK << 8);
1620 } else {
1621 if ((s->st_flags & IPFW_STATE_F_SEQREV) == 0) {
1622 s->st_flags |= IPFW_STATE_F_SEQREV;
1623 s->st_seq_rev = seq;
1624 } else if (SEQ_GEQ(seq, s->st_seq_rev)) {
1625 s->st_seq_rev = seq;
1626 } else {
1627 /* Out-of-sequence; done. */
1628 return (FALSE);
1630 if (tcp->th_flags & TH_ACK) {
1631 if ((s->st_flags & IPFW_STATE_F_ACKREV) == 0) {
1632 s->st_flags |= IPFW_STATE_F_ACKREV;
1633 s->st_ack_rev = ack;
1634 } else if (SEQ_GEQ(ack, s->st_ack_rev)) {
1635 s->st_ack_rev = ack;
1636 } else {
1637 /* Out-of-sequence; done. */
1638 return (FALSE);
1641 if ((s->st_state & (TH_FIN | TH_ACK)) == TH_FIN &&
1642 s->st_ack_rev == s->st_seq_fwd + 1)
1643 s->st_state |= TH_ACK;
1646 return (TRUE);
1649 static void
1650 ipfw_state_update(const struct ipfw_flow_id *pkt, int dir,
1651 const struct tcphdr *tcp, struct ipfw_state *s)
1654 if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
1655 u_char flags = pkt->flags & IPFW_STATE_TCPFLAGS;
1657 if (tcp != NULL && !ipfw_state_update_tcp(s, dir, tcp))
1658 return;
1660 s->st_state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
1661 switch (s->st_state & IPFW_STATE_TCPSTATES) {
1662 case TH_SYN: /* opening */
1663 s->st_expire = time_uptime + dyn_syn_lifetime;
1664 break;
1666 case BOTH_SYN: /* move to established */
1667 case BOTH_SYN | TH_FIN: /* one side tries to close */
1668 case BOTH_SYN | (TH_FIN << 8):
1669 s->st_expire = time_uptime + dyn_ack_lifetime;
1670 break;
1672 case BOTH_SYN | BOTH_FIN: /* both sides closed */
1673 if ((s->st_state & BOTH_FINACK) == BOTH_FINACK) {
1674 /* And both FINs were ACKed. */
1675 s->st_expire = time_uptime + dyn_fin_lifetime;
1676 } else {
1677 s->st_expire = time_uptime +
1678 dyn_finwait_lifetime;
1680 break;
1682 default:
1683 #if 0
1685 * reset or some invalid combination, but can also
1686 * occur if we use keep-state the wrong way.
1688 if ((s->st_state & ((TH_RST << 8) | TH_RST)) == 0)
1689 kprintf("invalid state: 0x%x\n", s->st_state);
1690 #endif
1691 s->st_expire = time_uptime + dyn_rst_lifetime;
1692 break;
1694 } else if (pkt->proto == IPPROTO_UDP) {
1695 s->st_expire = time_uptime + dyn_udp_lifetime;
1696 } else {
1697 /* other protocols */
1698 s->st_expire = time_uptime + dyn_short_lifetime;
1703 * Look up a state.
1705 static struct ipfw_state *
1706 ipfw_state_lookup(struct ipfw_context *ctx, const struct ipfw_flow_id *pkt,
1707 int *match_direction, const struct tcphdr *tcp)
1709 struct ipfw_state *key, *s;
1710 int dir = MATCH_NONE;
1712 key = &ctx->ipfw_state_tmpkey;
1713 ipfw_key_build(&key->st_key, pkt->src_ip, pkt->src_port,
1714 pkt->dst_ip, pkt->dst_port, pkt->proto);
1715 s = RB_FIND(ipfw_state_tree, &ctx->ipfw_state_tree, key);
1716 if (s == NULL)
1717 goto done; /* not found. */
1718 if (TIME_LEQ(s->st_expire, time_uptime)) {
1719 /* Expired. */
1720 ipfw_state_del(ctx, s);
1721 s = NULL;
1722 goto done;
1724 if ((pkt->flags & TH_SYN) && IPFW_STATE_TCPCLOSED(s)) {
1725 /* TCP port recycling is too fast. */
1726 ctx->ipfw_sts_tcprecycled++;
1727 ipfw_state_del(ctx, s);
1728 s = NULL;
1729 goto done;
1732 if (s->st_swap == key->st_swap) {
1733 dir = MATCH_FORWARD;
1734 } else {
1735 KASSERT((s->st_swap & key->st_swap) == 0,
1736 ("found mismatched state"));
1737 dir = MATCH_REVERSE;
1740 /* Update this state. */
1741 ipfw_state_update(pkt, dir, tcp, s);
1743 if (s->st_track != NULL) {
1744 /* This track has been used. */
1745 s->st_track->t_expire = time_uptime + dyn_short_lifetime;
1747 done:
1748 if (match_direction)
1749 *match_direction = dir;
1750 return (s);
1753 static __inline struct ip_fw *
1754 ipfw_state_lookup_rule(struct ipfw_context *ctx, const struct ipfw_flow_id *pkt,
1755 int *match_direction, const struct tcphdr *tcp, uint16_t len)
1757 struct ipfw_state *s;
1759 s = ipfw_state_lookup(ctx, pkt, match_direction, tcp);
1760 if (s == NULL)
1761 return (NULL);
1763 KASSERT(s->st_rule->cpuid == mycpuid,
1764 ("rule %p (cpu%d) does not belong to the current cpu%d",
1765 s->st_rule, s->st_rule->cpuid, mycpuid));
1767 s->st_pcnt++;
1768 s->st_bcnt += len;
1770 return (s->st_rule);
1773 static struct ipfw_state *
1774 ipfw_state_add(struct ipfw_context *ctx, const struct ipfw_flow_id *id,
1775 uint16_t type, struct ip_fw *rule, struct ipfw_track *t,
1776 const struct tcphdr *tcp)
1778 struct ipfw_state *s, *dup;
1780 KASSERT(type == O_KEEP_STATE || type == O_LIMIT,
1781 ("invalid state type %u", type));
1783 s = kmalloc(sizeof(*s), M_IPFW, M_INTWAIT | M_NULLOK | M_ZERO);
1784 if (s == NULL) {
1785 ctx->ipfw_sts_nomem++;
1786 return (NULL);
1789 ipfw_key_build(&s->st_key, id->src_ip, id->src_port,
1790 id->dst_ip, id->dst_port, id->proto);
1792 s->st_rule = rule;
1793 s->st_type = type;
1795 ctx->ipfw_state_cnt++;
1796 ctx->ipfw_state_loosecnt++;
1797 if (ctx->ipfw_state_loosecnt >= ipfw_state_loosecnt_updthr) {
1798 ipfw_gd.ipfw_state_loosecnt += ctx->ipfw_state_loosecnt;
1799 ctx->ipfw_state_loosecnt = 0;
1802 dup = RB_INSERT(ipfw_state_tree, &ctx->ipfw_state_tree, s);
1803 if (dup != NULL)
1804 panic("ipfw: state exists");
1805 TAILQ_INSERT_TAIL(&ctx->ipfw_state_list, s, st_link);
1808 * Update this state:
1809 * Set st_expire and st_state.
1811 ipfw_state_update(id, MATCH_FORWARD, tcp, s);
1813 if (t != NULL) {
1814 /* Keep the track referenced. */
1815 LIST_INSERT_HEAD(&t->t_state_list, s, st_trklink);
1816 s->st_track = t;
1818 return (s);
1821 static boolean_t
1822 ipfw_track_free(struct ipfw_context *ctx, struct ipfw_track *t)
1824 struct ipfw_trkcnt *trk;
1825 boolean_t trk_freed = FALSE;
1827 KASSERT(t->t_count != NULL, ("track anchor"));
1828 KASSERT(LIST_EMPTY(&t->t_state_list),
1829 ("invalid track is still referenced"));
1831 trk = t->t_trkcnt;
1832 KASSERT(trk != NULL, ("track has no trkcnt"));
1834 RB_REMOVE(ipfw_track_tree, &ctx->ipfw_track_tree, t);
1835 TAILQ_REMOVE(&ctx->ipfw_track_list, t, t_link);
1836 kfree(t, M_IPFW);
1839 * fdrop() style reference counting.
1840 * See kern/kern_descrip.c fdrop().
1842 for (;;) {
1843 int refs = trk->tc_refs;
1845 cpu_ccfence();
1846 KASSERT(refs > 0, ("invalid trkcnt refs %d", refs));
1847 if (refs == 1) {
1848 IPFW_TRKCNT_TOKGET;
1849 if (atomic_cmpset_int(&trk->tc_refs, refs, 0)) {
1850 KASSERT(trk->tc_count == 0,
1851 ("%d states reference this trkcnt",
1852 trk->tc_count));
1853 RB_REMOVE(ipfw_trkcnt_tree,
1854 &ipfw_gd.ipfw_trkcnt_tree, trk);
1856 KASSERT(ipfw_gd.ipfw_trkcnt_cnt > 0,
1857 ("invalid trkcnt cnt %d",
1858 ipfw_gd.ipfw_trkcnt_cnt));
1859 ipfw_gd.ipfw_trkcnt_cnt--;
1860 IPFW_TRKCNT_TOKREL;
1862 if (ctx->ipfw_trkcnt_spare == NULL)
1863 ctx->ipfw_trkcnt_spare = trk;
1864 else
1865 kfree(trk, M_IPFW);
1866 trk_freed = TRUE;
1867 break; /* done! */
1869 IPFW_TRKCNT_TOKREL;
1870 /* retry */
1871 } else if (atomic_cmpset_int(&trk->tc_refs, refs, refs - 1)) {
1872 break; /* done! */
1874 /* retry */
1876 return (trk_freed);
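/*
 * The loop above is the classic lockless reference drop: the
 * expensive shared token is only taken when this CPU might be the
 * last holder (refs == 1); for any other value a plain
 * atomic_cmpset_int() of refs -> refs - 1 suffices.  If another CPU
 * changes tc_refs in between, the cmpset fails and the loop simply
 * re-reads and retries.
 */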
1879 static void
1880 ipfw_track_flush(struct ipfw_context *ctx, struct ip_fw *rule)
1882 struct ipfw_track *t, *tn;
1884 TAILQ_FOREACH_MUTABLE(t, &ctx->ipfw_track_list, t_link, tn) {
1885 if (t->t_count == NULL) /* anchor */
1886 continue;
1887 if (rule != NULL && t->t_rule != rule)
1888 continue;
1889 ipfw_track_free(ctx, t);
1893 static boolean_t
1894 ipfw_track_state_expire(struct ipfw_context *ctx, struct ipfw_track *t,
1895 boolean_t reap)
1897 struct ipfw_state *s, *sn;
1898 boolean_t ret = FALSE;
1900 KASSERT(t->t_count != NULL, ("track anchor"));
1902 if (LIST_EMPTY(&t->t_state_list))
1903 return (FALSE);
1906 * Do not expire more than once per second; it is useless.
1908 if (t->t_lastexp == time_uptime)
1909 return (FALSE);
1910 t->t_lastexp = time_uptime;
1912 LIST_FOREACH_MUTABLE(s, &t->t_state_list, st_trklink, sn) {
1913 if (TIME_LEQ(s->st_expire, time_uptime) ||
1914 (reap && IPFW_STATE_TCPCLOSED(s))) {
1915 KASSERT(s->st_track == t,
1916 ("state track %p does not match %p",
1917 s->st_track, t));
1918 ipfw_state_del(ctx, s);
1919 ret = TRUE;
1922 return (ret);
1925 static __inline struct ipfw_trkcnt *
1926 ipfw_trkcnt_alloc(struct ipfw_context *ctx)
1928 struct ipfw_trkcnt *trk;
1930 if (ctx->ipfw_trkcnt_spare != NULL) {
1931 trk = ctx->ipfw_trkcnt_spare;
1932 ctx->ipfw_trkcnt_spare = NULL;
1933 } else {
1934 trk = kmalloc_cachealign(sizeof(*trk), M_IPFW,
1935 M_INTWAIT | M_NULLOK);
1937 return (trk);
1940 static void
1941 ipfw_track_expire_done(struct ipfw_context *ctx)
1944 KASSERT(ctx->ipfw_flags & IPFW_FLAG_TRACKEXP,
1945 ("trackexp is not in progress"));
1946 ctx->ipfw_flags &= ~IPFW_FLAG_TRACKEXP;
1947 callout_reset(&ctx->ipfw_trackto_ch, hz,
1948 ipfw_track_expire_ipifunc, NULL);
1951 static void
1952 ipfw_track_expire_more(struct ipfw_context *ctx)
1954 struct netmsg_base *nm = &ctx->ipfw_trackexp_more;
1956 KASSERT(ctx->ipfw_flags & IPFW_FLAG_TRACKEXP,
1957 ("trackexp is not in progress"));
1958 KASSERT(nm->lmsg.ms_flags & MSGF_DONE,
1959 ("trackexp more did not finish"));
1960 netisr_sendmsg_oncpu(nm);
1963 static int
1964 ipfw_track_expire_loop(struct ipfw_context *ctx, struct ipfw_track *anchor,
1965 int scan_max, int expire_max)
1967 struct ipfw_track *t;
1968 int scanned = 0, expired = 0;
1969 boolean_t reap = FALSE;
1971 KASSERT(ctx->ipfw_flags & IPFW_FLAG_TRACKEXP,
1972 ("trackexp is not in progress"));
1974 if (ctx->ipfw_flags & IPFW_FLAG_TRACKREAP)
1975 reap = TRUE;
1977 while ((t = TAILQ_NEXT(anchor, t_link)) != NULL) {
1978 if (scanned++ >= scan_max) {
1979 ipfw_track_expire_more(ctx);
1980 return (expired);
1983 TAILQ_REMOVE(&ctx->ipfw_track_list, anchor, t_link);
1984 TAILQ_INSERT_AFTER(&ctx->ipfw_track_list, t, anchor, t_link);
1986 if (t->t_count == NULL) /* anchor */
1987 continue;
1989 ipfw_track_state_expire(ctx, t, reap);
1990 if (!LIST_EMPTY(&t->t_state_list)) {
1991 /* There are states referencing this track. */
1992 continue;
1995 if (TIME_LEQ(t->t_expire, time_uptime) || reap) {
1996 /* Expired. */
1997 if (ipfw_track_free(ctx, t)) {
1998 if (++expired >= expire_max) {
1999 ipfw_track_expire_more(ctx);
2000 return (expired);
2005 TAILQ_REMOVE(&ctx->ipfw_track_list, anchor, t_link);
2006 ipfw_track_expire_done(ctx);
2007 return (expired);
2010 static int
2011 ipfw_track_expire_start(struct ipfw_context *ctx, int scan_max, int expire_max)
2013 struct ipfw_track *anchor;
2015 KASSERT((ctx->ipfw_flags & IPFW_FLAG_TRACKEXP) == 0,
2016 ("trackexp is in progress"));
2017 ctx->ipfw_flags |= IPFW_FLAG_TRACKEXP;
2019 if (RB_EMPTY(&ctx->ipfw_track_tree)) {
2020 ipfw_track_expire_done(ctx);
2021 return (0);
2025 * Do not expire more than once per second; it is useless.
2027 if ((ctx->ipfw_flags & IPFW_FLAG_TRACKREAP) == 0 &&
2028 ctx->ipfw_track_lastexp == time_uptime) {
2029 ipfw_track_expire_done(ctx);
2030 return (0);
2032 ctx->ipfw_track_lastexp = time_uptime;
2034 anchor = &ctx->ipfw_trackexp_anch;
2035 TAILQ_INSERT_HEAD(&ctx->ipfw_track_list, anchor, t_link);
2036 return (ipfw_track_expire_loop(ctx, anchor, scan_max, expire_max));
2039 static void
2040 ipfw_track_expire_more_dispatch(netmsg_t nm)
2042 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2043 struct ipfw_track *anchor;
2045 ASSERT_NETISR_NCPUS(mycpuid);
2046 KASSERT(ctx->ipfw_flags & IPFW_FLAG_TRACKEXP,
2047 ("trackexp is not in progress"));
2049 /* Reply ASAP */
2050 netisr_replymsg(&nm->base, 0);
2052 anchor = &ctx->ipfw_trackexp_anch;
2053 if (RB_EMPTY(&ctx->ipfw_track_tree)) {
2054 TAILQ_REMOVE(&ctx->ipfw_track_list, anchor, t_link);
2055 ipfw_track_expire_done(ctx);
2056 return;
2058 ipfw_track_expire_loop(ctx, anchor,
2059 ipfw_track_scan_max, ipfw_track_expire_max);
2062 static void
2063 ipfw_track_expire_dispatch(netmsg_t nm)
2065 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2067 ASSERT_NETISR_NCPUS(mycpuid);
2069 /* Reply ASAP */
2070 crit_enter();
2071 netisr_replymsg(&nm->base, 0);
2072 crit_exit();
2074 if (ctx->ipfw_flags & IPFW_FLAG_TRACKEXP) {
2075 /* Running; done. */
2076 return;
2078 ipfw_track_expire_start(ctx,
2079 ipfw_track_scan_max, ipfw_track_expire_max);
2082 static void
2083 ipfw_track_expire_ipifunc(void *dummy __unused)
2085 struct netmsg_base *msg;
2087 KKASSERT(mycpuid < netisr_ncpus);
2088 msg = &ipfw_ctx[mycpuid]->ipfw_trackexp_nm;
2090 crit_enter();
2091 if (msg->lmsg.ms_flags & MSGF_DONE)
2092 netisr_sendmsg_oncpu(msg);
2093 crit_exit();
2096 static int
2097 ipfw_track_reap(struct ipfw_context *ctx)
2099 struct ipfw_track *t, *anchor;
2100 int expired;
2102 if ((ctx->ipfw_flags & IPFW_FLAG_TRACKEXP) == 0) {
2104 * Kick-start track expiring. Ignore the scan limit;
2105 * we are short of tracks.
2107 ctx->ipfw_flags |= IPFW_FLAG_TRACKREAP;
2108 expired = ipfw_track_expire_start(ctx, INT_MAX,
2109 ipfw_track_reap_max);
2110 ctx->ipfw_flags &= ~IPFW_FLAG_TRACKREAP;
2111 return (expired);
2115 * Tracks are being expired.
2118 if (RB_EMPTY(&ctx->ipfw_track_tree))
2119 return (0);
2121 expired = 0;
2122 anchor = &ctx->ipfw_trackexp_anch;
2123 while ((t = TAILQ_NEXT(anchor, t_link)) != NULL) {
2125 * Ignore scan limit; we are short of tracks.
2128 TAILQ_REMOVE(&ctx->ipfw_track_list, anchor, t_link);
2129 TAILQ_INSERT_AFTER(&ctx->ipfw_track_list, t, anchor, t_link);
2131 if (t->t_count == NULL) /* anchor */
2132 continue;
2134 ipfw_track_state_expire(ctx, t, TRUE);
2135 if (!LIST_EMPTY(&t->t_state_list)) {
2136 /* There are states referencing this track. */
2137 continue;
2140 if (ipfw_track_free(ctx, t)) {
2141 if (++expired >= ipfw_track_reap_max) {
2142 ipfw_track_expire_more(ctx);
2143 break;
2148 * NOTE:
2149 * Leave the anchor on the list, even if the end of the list has
2150 * been reached. ipfw_track_expire_more_dispatch() will handle
2151 * the removal.
2153 return (expired);
2156 static struct ipfw_track *
2157 ipfw_track_alloc(struct ipfw_context *ctx, const struct ipfw_flow_id *id,
2158 uint16_t limit_mask, struct ip_fw *rule)
2160 struct ipfw_track *key, *t, *dup;
2161 struct ipfw_trkcnt *trk, *ret;
2162 boolean_t do_expire = FALSE;
2164 KASSERT(rule->track_ruleid != 0,
2165 ("rule %u has no track ruleid", rule->rulenum));
2167 key = &ctx->ipfw_track_tmpkey;
2168 key->t_proto = id->proto;
2169 key->t_addrs = 0;
2170 key->t_ports = 0;
2171 key->t_rule = rule;
2172 if (limit_mask & DYN_SRC_ADDR)
2173 key->t_saddr = id->src_ip;
2174 if (limit_mask & DYN_DST_ADDR)
2175 key->t_daddr = id->dst_ip;
2176 if (limit_mask & DYN_SRC_PORT)
2177 key->t_sport = id->src_port;
2178 if (limit_mask & DYN_DST_PORT)
2179 key->t_dport = id->dst_port;
2181 t = RB_FIND(ipfw_track_tree, &ctx->ipfw_track_tree, key);
2182 if (t != NULL)
2183 goto done;
2185 t = kmalloc(sizeof(*t), M_IPFW, M_INTWAIT | M_NULLOK);
2186 if (t == NULL) {
2187 ctx->ipfw_tks_nomem++;
2188 return (NULL);
2191 t->t_key = key->t_key;
2192 t->t_rule = rule;
2193 t->t_lastexp = 0;
2194 LIST_INIT(&t->t_state_list);
2196 if (ipfw_gd.ipfw_trkcnt_cnt >= ipfw_track_max) {
2197 time_t globexp, uptime;
2199 trk = NULL;
2200 do_expire = TRUE;
2203 * Do not expire globally more than once per second;
2204 * it is useless.
2206 uptime = time_uptime;
2207 globexp = ipfw_gd.ipfw_track_globexp;
2208 if (globexp != uptime &&
2209 atomic_cmpset_long(&ipfw_gd.ipfw_track_globexp,
2210 globexp, uptime)) {
2211 int cpu;
2213 /* Expire tracks on other CPUs. */
2214 for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
2215 if (cpu == mycpuid)
2216 continue;
2217 lwkt_send_ipiq(globaldata_find(cpu),
2218 ipfw_track_expire_ipifunc, NULL);
2221 } else {
2222 trk = ipfw_trkcnt_alloc(ctx);
2224 if (trk == NULL) {
2225 struct ipfw_trkcnt *tkey;
2227 tkey = &ctx->ipfw_trkcnt_tmpkey;
2228 key = NULL; /* tkey overlaps key */
2230 tkey->tc_key = t->t_key;
2231 tkey->tc_ruleid = rule->track_ruleid;
2233 IPFW_TRKCNT_TOKGET;
2234 trk = RB_FIND(ipfw_trkcnt_tree, &ipfw_gd.ipfw_trkcnt_tree,
2235 tkey);
2236 if (trk == NULL) {
2237 IPFW_TRKCNT_TOKREL;
2238 if (do_expire) {
2239 ctx->ipfw_tks_reap++;
2240 if (ipfw_track_reap(ctx) > 0) {
2241 if (ipfw_gd.ipfw_trkcnt_cnt <
2242 ipfw_track_max) {
2243 trk = ipfw_trkcnt_alloc(ctx);
2244 if (trk != NULL)
2245 goto install;
2246 ctx->ipfw_tks_cntnomem++;
2247 } else {
2248 ctx->ipfw_tks_overflow++;
2250 } else {
2251 ctx->ipfw_tks_reapfailed++;
2252 ctx->ipfw_tks_overflow++;
2254 } else {
2255 ctx->ipfw_tks_cntnomem++;
2257 kfree(t, M_IPFW);
2258 return (NULL);
2260 KASSERT(trk->tc_refs > 0 && trk->tc_refs < netisr_ncpus,
2261 ("invalid trkcnt refs %d", trk->tc_refs));
2262 atomic_add_int(&trk->tc_refs, 1);
2263 IPFW_TRKCNT_TOKREL;
2264 } else {
2265 install:
2266 trk->tc_key = t->t_key;
2267 trk->tc_ruleid = rule->track_ruleid;
2268 trk->tc_refs = 0;
2269 trk->tc_count = 0;
2270 trk->tc_expire = 0;
2271 trk->tc_rulenum = rule->rulenum;
2273 IPFW_TRKCNT_TOKGET;
2274 ret = RB_INSERT(ipfw_trkcnt_tree, &ipfw_gd.ipfw_trkcnt_tree,
2275 trk);
2276 if (ret != NULL) {
2277 KASSERT(ret->tc_refs > 0 &&
2278 ret->tc_refs < netisr_ncpus,
2279 ("invalid trkcnt refs %d", ret->tc_refs));
2280 KASSERT(ctx->ipfw_trkcnt_spare == NULL,
2281 ("trkcnt spare was installed"));
2282 ctx->ipfw_trkcnt_spare = trk;
2283 trk = ret;
2284 } else {
2285 ipfw_gd.ipfw_trkcnt_cnt++;
2287 atomic_add_int(&trk->tc_refs, 1);
2288 IPFW_TRKCNT_TOKREL;
2290 t->t_count = &trk->tc_count;
2291 t->t_trkcnt = trk;
2293 dup = RB_INSERT(ipfw_track_tree, &ctx->ipfw_track_tree, t);
2294 if (dup != NULL)
2295 panic("ipfw: track exists");
2296 TAILQ_INSERT_TAIL(&ctx->ipfw_track_list, t, t_link);
2297 done:
2298 t->t_expire = time_uptime + dyn_short_lifetime;
2299 return (t);
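/*
 * Example (sketch, ipfw(8) syntax): a rule such as
 *	ipfw add allow tcp from any to me 80 setup limit src-addr 16
 * sets DYN_SRC_ADDR in limit_mask, so only t_saddr participates in
 * the key built above and all connections from one source address
 * share a single track, i.e. a single tc_count charged against the
 * limit of 16.
 */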
2303 * Install state for rule type cmd->o.opcode
2305 * Returns 1 (failure) if state is not installed because of errors or because
2306 * state limitations are enforced.
2308 static int
2309 ipfw_state_install(struct ipfw_context *ctx, struct ip_fw *rule,
2310 ipfw_insn_limit *cmd, struct ip_fw_args *args, const struct tcphdr *tcp)
2312 struct ipfw_state *s;
2313 struct ipfw_track *t;
2314 int count, diff;
2316 if (ipfw_gd.ipfw_state_loosecnt >= ipfw_state_max &&
2317 (diff = (ipfw_state_cntsync() - ipfw_state_max)) >= 0) {
2318 boolean_t overflow = TRUE;
2320 ctx->ipfw_sts_reap++;
2321 if (ipfw_state_reap(ctx, diff) == 0)
2322 ctx->ipfw_sts_reapfailed++;
2323 if (ipfw_state_cntsync() < ipfw_state_max)
2324 overflow = FALSE;
2326 if (overflow) {
2327 time_t globexp, uptime;
2328 int cpu;
2331 * Do not expire globally more than once per second;
2332 * it is useless.
2334 uptime = time_uptime;
2335 globexp = ipfw_gd.ipfw_state_globexp;
2336 if (globexp == uptime ||
2337 !atomic_cmpset_long(&ipfw_gd.ipfw_state_globexp,
2338 globexp, uptime)) {
2339 ctx->ipfw_sts_overflow++;
2340 return (1);
2343 /* Expire states on other CPUs. */
2344 for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
2345 if (cpu == mycpuid)
2346 continue;
2347 lwkt_send_ipiq(globaldata_find(cpu),
2348 ipfw_state_expire_ipifunc, NULL);
2350 ctx->ipfw_sts_overflow++;
2351 return (1);
2355 switch (cmd->o.opcode) {
2356 case O_KEEP_STATE: /* bidir rule */
2357 s = ipfw_state_add(ctx, &args->f_id, O_KEEP_STATE, rule, NULL,
2358 tcp);
2359 if (s == NULL)
2360 return (1);
2361 break;
2363 case O_LIMIT: /* limit number of sessions */
2364 t = ipfw_track_alloc(ctx, &args->f_id, cmd->limit_mask, rule);
2365 if (t == NULL)
2366 return (1);
2368 if (*t->t_count >= cmd->conn_limit) {
2369 if (!ipfw_track_state_expire(ctx, t, TRUE))
2370 return (1);
2372 for (;;) {
2373 count = *t->t_count;
2374 if (count >= cmd->conn_limit)
2375 return (1);
2376 if (atomic_cmpset_int(t->t_count, count, count + 1))
2377 break;
2380 s = ipfw_state_add(ctx, &args->f_id, O_LIMIT, rule, t, tcp);
2381 if (s == NULL) {
2382 /* Undo damage. */
2383 atomic_subtract_int(t->t_count, 1);
2384 return (1);
2386 break;
2388 default:
2389 panic("unknown state type %u\n", cmd->o.opcode);
2391 return (0);
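/*
 * NOTE (sketch): the for (;;) loop above is the usual bounded
 * atomic-increment idiom: re-read the counter, give up once it has
 * reached conn_limit, otherwise try to bump it with a
 * compare-and-swap and retry on contention.  The
 * atomic_subtract_int() afterwards is the matching rollback for
 * the case where the state itself cannot be installed.
 */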
2395 * Transmit a TCP packet, containing either a RST or a keepalive.
2396 * When flags & TH_RST, we are sending a RST packet, because of a
2397 * "reset" action matched the packet.
2398 * Otherwise we are sending a keepalive, and flags & TH_SYN selects the direction (forward if set, reverse if clear).
2400 * Only {src,dst}_{ip,port} of "id" are used.
2402 static void
2403 send_pkt(const struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
2405 struct mbuf *m;
2406 struct ip *ip;
2407 struct tcphdr *tcp;
2408 struct route sro; /* fake route */
2410 MGETHDR(m, M_NOWAIT, MT_HEADER);
2411 if (m == NULL)
2412 return;
2413 m->m_pkthdr.rcvif = NULL;
2414 m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
2415 m->m_data += max_linkhdr;
2417 ip = mtod(m, struct ip *);
2418 bzero(ip, m->m_len);
2419 tcp = (struct tcphdr *)(ip + 1); /* no IP options */
2420 ip->ip_p = IPPROTO_TCP;
2421 tcp->th_off = 5;
2424 * Assume we are sending a RST (or a keepalive in the reverse
2425 * direction), so swap the source and destination addresses and ports.
2427 ip->ip_src.s_addr = htonl(id->dst_ip);
2428 ip->ip_dst.s_addr = htonl(id->src_ip);
2429 tcp->th_sport = htons(id->dst_port);
2430 tcp->th_dport = htons(id->src_port);
2431 if (flags & TH_RST) { /* we are sending a RST */
2432 if (flags & TH_ACK) {
2433 tcp->th_seq = htonl(ack);
2434 tcp->th_ack = htonl(0);
2435 tcp->th_flags = TH_RST;
2436 } else {
2437 if (flags & TH_SYN)
2438 seq++;
2439 tcp->th_seq = htonl(0);
2440 tcp->th_ack = htonl(seq);
2441 tcp->th_flags = TH_RST | TH_ACK;
2443 } else {
2445 * We are sending a keepalive. flags & TH_SYN determines
2446 * the direction, forward if set, reverse if clear.
2447 * NOTE: seq and ack are always assumed to be correct
2448 * as set by the caller. This may be confusing...
2450 if (flags & TH_SYN) {
2452 * Forward keepalive: rewrite the correct (unswapped) addresses.
2454 ip->ip_dst.s_addr = htonl(id->dst_ip);
2455 ip->ip_src.s_addr = htonl(id->src_ip);
2456 tcp->th_dport = htons(id->dst_port);
2457 tcp->th_sport = htons(id->src_port);
2459 tcp->th_seq = htonl(seq);
2460 tcp->th_ack = htonl(ack);
2461 tcp->th_flags = TH_ACK;
2465 * set ip_len to the payload size so we can compute
2466 * the tcp checksum on the pseudoheader
2467 * XXX check this, could save a couple of words ?
2469 ip->ip_len = htons(sizeof(struct tcphdr));
2470 tcp->th_sum = in_cksum(m, m->m_pkthdr.len);
2473 * now fill fields left out earlier
2475 ip->ip_ttl = ip_defttl;
2476 ip->ip_len = m->m_pkthdr.len;
2478 bzero(&sro, sizeof(sro));
2479 ip_rtaddr(ip->ip_dst, &sro);
2481 m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
2482 ip_output(m, NULL, &sro, 0, NULL, NULL);
2483 if (sro.ro_rt)
2484 RTFREE(sro.ro_rt);
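/*
 * Example (sketch): callers select the packet type purely through
 * "flags": TH_RST (optionally with TH_ACK) produces the RST
 * variants handled above, while 0 or TH_SYN produces a reverse- or
 * forward-direction keepalive respectively, per the address
 * swapping logic in the keepalive branch.
 */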
2488 * Send a reject message, consuming the mbuf passed as an argument.
2490 static void
2491 send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
2493 if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
2494 /* We need the IP header in host order for icmp_error(). */
2495 if (args->eh != NULL) {
2496 struct ip *ip = mtod(args->m, struct ip *);
2498 ip->ip_len = ntohs(ip->ip_len);
2499 ip->ip_off = ntohs(ip->ip_off);
2501 icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
2502 } else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
2503 struct tcphdr *const tcp =
2504 L3HDR(struct tcphdr, mtod(args->m, struct ip *));
2506 if ((tcp->th_flags & TH_RST) == 0) {
2507 send_pkt(&args->f_id, ntohl(tcp->th_seq),
2508 ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
2510 m_freem(args->m);
2511 } else {
2512 m_freem(args->m);
2514 args->m = NULL;
2518 * Given an ip_fw *, lookup_next_rule will return a pointer
2519 * to the next rule, which can be either the jump
2520 * target (for skipto instructions) or the next one in the list (in
2521 * all other cases including a missing jump target).
2522 * The result is also written in the "next_rule" field of the rule.
2523 * Backward jumps are not allowed, so start looking from the next
2524 * rule...
2526 * This never returns NULL -- in case we do not have an exact match,
2527 * the next rule is returned. When the ruleset is changed,
2528 * pointers are flushed so we are always correct.
2530 static struct ip_fw *
2531 lookup_next_rule(struct ip_fw *me)
2533 struct ip_fw *rule = NULL;
2534 ipfw_insn *cmd;
2536 /* look for action, in case it is a skipto */
2537 cmd = ACTION_PTR(me);
2538 if (cmd->opcode == O_LOG)
2539 cmd += F_LEN(cmd);
2540 if (cmd->opcode == O_SKIPTO) {
2541 for (rule = me->next; rule; rule = rule->next) {
2542 if (rule->rulenum >= cmd->arg1)
2543 break;
2546 if (rule == NULL) /* failure or not a skipto */
2547 rule = me->next;
2548 me->next_rule = rule;
2549 return rule;
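/*
 * Example (sketch, ipfw(8) syntax): for
 *	ipfw add 100 skipto 500 ip from any to any
 * the first matching packet walks the list for the first rule
 * numbered >= 500 and caches it in next_rule; later packets jump
 * there directly until the ruleset changes and the pointers are
 * flushed.
 */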
2552 static int
2553 ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
2554 enum ipfw_opcodes opcode, uid_t uid)
2556 struct in_addr src_ip, dst_ip;
2557 struct inpcbinfo *pi;
2558 boolean_t wildcard;
2559 struct inpcb *pcb;
2561 if (fid->proto == IPPROTO_TCP) {
2562 wildcard = FALSE;
2563 pi = &tcbinfo[mycpuid];
2564 } else if (fid->proto == IPPROTO_UDP) {
2565 wildcard = TRUE;
2566 pi = &udbinfo[mycpuid];
2567 } else {
2568 return 0;
2572 * Values in 'fid' are in host byte order
2574 dst_ip.s_addr = htonl(fid->dst_ip);
2575 src_ip.s_addr = htonl(fid->src_ip);
2576 if (oif) {
2577 pcb = in_pcblookup_hash(pi,
2578 dst_ip, htons(fid->dst_port),
2579 src_ip, htons(fid->src_port),
2580 wildcard, oif);
2581 } else {
2582 pcb = in_pcblookup_hash(pi,
2583 src_ip, htons(fid->src_port),
2584 dst_ip, htons(fid->dst_port),
2585 wildcard, NULL);
2587 if (pcb == NULL || pcb->inp_socket == NULL)
2588 return 0;
2590 if (opcode == O_UID) {
2591 #define socheckuid(a,b) ((a)->so_cred->cr_uid != (b))
2592 return !socheckuid(pcb->inp_socket, uid);
2593 #undef socheckuid
2594 } else {
2595 return groupmember(uid, pcb->inp_socket->so_cred);
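/*
 * Example (sketch, ipfw(8) syntax): this helper backs rules like
 *	ipfw add allow tcp from any to any uid 1001
 *	ipfw add deny ip from any to any gid 2001
 * O_UID compares the owning socket's cr_uid directly, while O_GID
 * asks groupmember() whether the owner belongs to the group.
 */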
2600 * The main check routine for the firewall.
2602 * All arguments are in args so we can modify them and return them
2603 * back to the caller.
2605 * Parameters:
2607 * args->m (in/out) The packet; we set to NULL when/if we nuke it.
2608 * Starts with the IP header.
2609 * args->eh (in) Mac header if present, or NULL for layer3 packet.
2610 * args->oif Outgoing interface, or NULL if packet is incoming.
2611 * The incoming interface is in the mbuf. (in)
2613 * args->rule Pointer to the last matching rule (in/out)
2614 * args->f_id Addresses grabbed from the packet (out)
2616 * Return value:
2618 * If the packet was denied/rejected and has been dropped, *m is equal
2619 * to NULL upon return.
2621 * IP_FW_DENY the packet must be dropped.
2622 * IP_FW_PASS The packet is to be accepted and routed normally.
2623 * IP_FW_DIVERT Divert the packet to port (args->cookie)
2624 * IP_FW_TEE Tee the packet to port (args->cookie)
2625 * IP_FW_DUMMYNET Send the packet to pipe/queue (args->cookie)
2627 static int
2628 ipfw_chk(struct ip_fw_args *args)
2631 * Local variables hold state during the processing of a packet.
2633 * IMPORTANT NOTE: to speed up the processing of rules, there
2634 * are some assumptions about the values of the variables, which
2635 * are documented here. Should you change them, please check
2636 * the implementation of the various instructions to make sure
2637 * that they still work.
2639 * args->eh The MAC header. It is non-null for a layer2
2640 * packet and NULL for a layer-3 packet.
2642 * m | args->m Pointer to the mbuf, as received from the caller.
2643 * It may change if ipfw_chk() does an m_pullup, or if it
2644 * consumes the packet because it calls send_reject().
2645 * XXX This has to change, so that ipfw_chk() never modifies
2646 * or consumes the buffer.
2647 * ip is simply an alias of the value of m, and it is kept
2648 * in sync with it (the packet is supposed to start with
2649 * the ip header).
2651 struct mbuf *m = args->m;
2652 struct ip *ip = mtod(m, struct ip *);
2655 * oif | args->oif If NULL, ipfw_chk has been called on the
2656 * inbound path (ether_input, ip_input).
2657 * If non-NULL, ipfw_chk has been called on the outbound path
2658 * (ether_output, ip_output).
2660 struct ifnet *oif = args->oif;
2662 struct ip_fw *f = NULL; /* matching rule */
2663 int retval = IP_FW_PASS;
2664 struct m_tag *mtag;
2665 struct divert_info *divinfo;
2668 * hlen The length of the IPv4 header.
2669 * hlen >0 means we have an IPv4 packet.
2671 u_int hlen = 0; /* hlen >0 means we have an IP pkt */
2674 * offset The offset of a fragment. offset != 0 means that
2675 * we have a fragment at this offset of an IPv4 packet.
2676 * offset == 0 means that (if this is an IPv4 packet)
2677 * this is the first or only fragment.
2679 u_short offset = 0;
2682 * Local copies of addresses. They are only valid if we have
2683 * an IP packet.
2685 * proto The protocol. Set to 0 for non-ip packets,
2686 * or to the protocol read from the packet otherwise.
2687 * proto != 0 means that we have an IPv4 packet.
2689 * src_port, dst_port port numbers, in HOST format. Only
2690 * valid for TCP and UDP packets.
2692 * src_ip, dst_ip ip addresses, in NETWORK format.
2693 * Only valid for IPv4 packets.
2695 uint8_t proto;
2696 uint16_t src_port = 0, dst_port = 0; /* NOTE: host format */
2697 struct in_addr src_ip, dst_ip; /* NOTE: network format */
2698 uint16_t ip_len = 0;
2701 * dyn_dir = MATCH_UNKNOWN when rules unchecked,
2702 * MATCH_NONE when checked and not matched (dyn_f = NULL),
2703 * MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
2705 int dyn_dir = MATCH_UNKNOWN;
2706 struct ip_fw *dyn_f = NULL;
2707 int cpuid = mycpuid;
2708 struct ipfw_context *ctx;
2710 ASSERT_NETISR_NCPUS(cpuid);
2711 ctx = ipfw_ctx[cpuid];
2713 if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
2714 return IP_FW_PASS; /* accept */
2716 if (args->eh == NULL || /* layer 3 packet */
2717 (m->m_pkthdr.len >= sizeof(struct ip) &&
2718 ntohs(args->eh->ether_type) == ETHERTYPE_IP))
2719 hlen = ip->ip_hl << 2;
2722 * Collect parameters into local variables for faster matching.
2724 if (hlen == 0) { /* do not grab addresses for non-ip pkts */
2725 proto = args->f_id.proto = 0; /* mark f_id invalid */
2726 goto after_ip_checks;
2729 proto = args->f_id.proto = ip->ip_p;
2730 src_ip = ip->ip_src;
2731 dst_ip = ip->ip_dst;
2732 if (args->eh != NULL) { /* layer 2 packets are as on the wire */
2733 offset = ntohs(ip->ip_off) & IP_OFFMASK;
2734 ip_len = ntohs(ip->ip_len);
2735 } else {
2736 offset = ip->ip_off & IP_OFFMASK;
2737 ip_len = ip->ip_len;
2740 #define PULLUP_TO(len) \
2741 do { \
2742 if (m->m_len < (len)) { \
2743 args->m = m = m_pullup(m, (len));\
2744 if (m == NULL) \
2745 goto pullup_failed; \
2746 ip = mtod(m, struct ip *); \
2748 } while (0)
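/*
 * NOTE (sketch): m_pullup() may return a different mbuf, or NULL on
 * failure, so the macro stores the result back into both "m" and
 * args->m and refreshes the cached "ip" pointer.
 */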
2750 if (offset == 0) {
2751 switch (proto) {
2752 case IPPROTO_TCP:
2754 struct tcphdr *tcp;
2756 PULLUP_TO(hlen + sizeof(struct tcphdr));
2757 tcp = L3HDR(struct tcphdr, ip);
2758 dst_port = tcp->th_dport;
2759 src_port = tcp->th_sport;
2760 args->f_id.flags = tcp->th_flags;
2762 break;
2764 case IPPROTO_UDP:
2766 struct udphdr *udp;
2768 PULLUP_TO(hlen + sizeof(struct udphdr));
2769 udp = L3HDR(struct udphdr, ip);
2770 dst_port = udp->uh_dport;
2771 src_port = udp->uh_sport;
2773 break;
2775 case IPPROTO_ICMP:
2776 PULLUP_TO(hlen + 4); /* type, code and checksum. */
2777 args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
2778 break;
2780 default:
2781 break;
2785 #undef PULLUP_TO
2787 args->f_id.src_ip = ntohl(src_ip.s_addr);
2788 args->f_id.dst_ip = ntohl(dst_ip.s_addr);
2789 args->f_id.src_port = src_port = ntohs(src_port);
2790 args->f_id.dst_port = dst_port = ntohs(dst_port);
2792 after_ip_checks:
2793 if (args->rule) {
2795 * Packet has already been tagged. Look for the next rule
2796 * to restart processing.
2798 * If fw_one_pass != 0 then just accept it.
2799 * XXX should not happen here, but optimized out in
2800 * the caller.
2802 if (fw_one_pass)
2803 return IP_FW_PASS;
2805 /* This rule is being/has been flushed */
2806 if (ipfw_flushing)
2807 return IP_FW_DENY;
2809 KASSERT(args->rule->cpuid == cpuid,
2810 ("rule used on cpu%d", cpuid));
2812 /* This rule was deleted */
2813 if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
2814 return IP_FW_DENY;
2816 f = args->rule->next_rule;
2817 if (f == NULL)
2818 f = lookup_next_rule(args->rule);
2819 } else {
2821 * Find the starting rule. It can be either the first
2822 * one, or the one after divert_rule if so requested.
2824 int skipto;
2826 mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
2827 if (mtag != NULL) {
2828 divinfo = m_tag_data(mtag);
2829 skipto = divinfo->skipto;
2830 } else {
2831 skipto = 0;
2834 f = ctx->ipfw_layer3_chain;
2835 if (args->eh == NULL && skipto != 0) {
2836 /* No skipto during rule flushing */
2837 if (ipfw_flushing)
2838 return IP_FW_DENY;
2840 if (skipto >= IPFW_DEFAULT_RULE)
2841 return IP_FW_DENY; /* invalid */
2843 while (f && f->rulenum <= skipto)
2844 f = f->next;
2845 if (f == NULL) /* drop packet */
2846 return IP_FW_DENY;
2847 } else if (ipfw_flushing) {
2848 /* Rules are being flushed; skip to default rule */
2849 f = ctx->ipfw_default_rule;
2852 if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
2853 m_tag_delete(m, mtag);
2856 * Now scan the rules, and parse microinstructions for each rule.
2858 for (; f; f = f->next) {
2859 int l, cmdlen;
2860 ipfw_insn *cmd;
2861 int skip_or; /* skip rest of OR block */
2863 again:
2864 if (ctx->ipfw_set_disable & (1 << f->set))
2865 continue;
2867 skip_or = 0;
2868 for (l = f->cmd_len, cmd = f->cmd; l > 0;
2869 l -= cmdlen, cmd += cmdlen) {
2870 int match;
2873 * check_body is a jump target used when we find a
2874 * CHECK_STATE, and need to jump to the body of
2875 * the target rule.
2878 check_body:
2879 cmdlen = F_LEN(cmd);
2881 * An OR block (insn_1 || .. || insn_n) has the
2882 * F_OR bit set in all but the last instruction.
2883 * The first match will set "skip_or", and cause
2884 * the following instructions to be skipped until
2885 * past the one with the F_OR bit clear.
2887 if (skip_or) { /* skip this instruction */
2888 if ((cmd->len & F_OR) == 0)
2889 skip_or = 0; /* next one is good */
2890 continue;
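/*
 * Example (sketch, ipfw(8) syntax): an OR block such as
 *	ipfw add deny ip from { 1.2.3.4 or 5.6.7.8 } to any
 * compiles to two O_IP_SRC instructions, the first carrying
 * F_OR; a match on either one sets skip_or so the remainder
 * of the block is skipped.
 */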
2892 match = 0; /* set to 1 if we succeed */
2894 switch (cmd->opcode) {
2896 * The first set of opcodes compares the packet's
2897 * fields with some pattern, setting 'match' if a
2898 * match is found. At the end of the loop there is
2899 * logic to deal with F_NOT and F_OR flags associated
2900 * with the opcode.
2902 case O_NOP:
2903 match = 1;
2904 break;
2906 case O_FORWARD_MAC:
2907 kprintf("ipfw: opcode %d unimplemented\n",
2908 cmd->opcode);
2909 break;
2911 case O_GID:
2912 case O_UID:
2914 * We only check offset == 0 && proto != 0,
2915 * as this ensures that we have an IPv4
2916 * packet with the ports info.
2918 if (offset != 0)
2919 break;
2921 match = ipfw_match_uid(&args->f_id, oif,
2922 cmd->opcode,
2923 (uid_t)((ipfw_insn_u32 *)cmd)->d[0]);
2924 break;
2926 case O_RECV:
2927 match = iface_match(m->m_pkthdr.rcvif,
2928 (ipfw_insn_if *)cmd);
2929 break;
2931 case O_XMIT:
2932 match = iface_match(oif, (ipfw_insn_if *)cmd);
2933 break;
2935 case O_VIA:
2936 match = iface_match(oif ? oif :
2937 m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
2938 break;
2940 case O_MACADDR2:
2941 if (args->eh != NULL) { /* have MAC header */
2942 uint32_t *want = (uint32_t *)
2943 ((ipfw_insn_mac *)cmd)->addr;
2944 uint32_t *mask = (uint32_t *)
2945 ((ipfw_insn_mac *)cmd)->mask;
2946 uint32_t *hdr = (uint32_t *)args->eh;
2948 match =
2949 (want[0] == (hdr[0] & mask[0]) &&
2950 want[1] == (hdr[1] & mask[1]) &&
2951 want[2] == (hdr[2] & mask[2]));
2953 break;
2955 case O_MAC_TYPE:
2956 if (args->eh != NULL) {
2957 uint16_t t =
2958 ntohs(args->eh->ether_type);
2959 uint16_t *p =
2960 ((ipfw_insn_u16 *)cmd)->ports;
2961 int i;
2963 /* Special vlan handling */
2964 if (m->m_flags & M_VLANTAG)
2965 t = ETHERTYPE_VLAN;
2967 for (i = cmdlen - 1; !match && i > 0;
2968 i--, p += 2) {
2969 match =
2970 (t >= p[0] && t <= p[1]);
2973 break;
2975 case O_FRAG:
2976 match = (hlen > 0 && offset != 0);
2977 break;
2979 case O_IN: /* "out" is "not in" */
2980 match = (oif == NULL);
2981 break;
2983 case O_LAYER2:
2984 match = (args->eh != NULL);
2985 break;
2987 case O_PROTO:
2989 * We do not allow an arg of 0 so the
2990 * check of "proto" alone suffices.
2992 match = (proto == cmd->arg1);
2993 break;
2995 case O_IP_SRC:
2996 match = (hlen > 0 &&
2997 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
2998 src_ip.s_addr);
2999 break;
3001 case O_IP_SRC_MASK:
3002 match = (hlen > 0 &&
3003 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
3004 (src_ip.s_addr &
3005 ((ipfw_insn_ip *)cmd)->mask.s_addr));
3006 break;
3008 case O_IP_SRC_ME:
3009 if (hlen > 0) {
3010 struct ifnet *tif;
3012 tif = INADDR_TO_IFP(&src_ip);
3013 match = (tif != NULL);
3015 break;
3017 case O_IP_DST_SET:
3018 case O_IP_SRC_SET:
3019 if (hlen > 0) {
3020 uint32_t *d = (uint32_t *)(cmd + 1);
3021 uint32_t addr =
3022 cmd->opcode == O_IP_DST_SET ?
3023 args->f_id.dst_ip :
3024 args->f_id.src_ip;
3026 if (addr < d[0])
3027 break;
3028 addr -= d[0]; /* subtract base */
3029 match =
3030 (addr < cmd->arg1) &&
3031 (d[1 + (addr >> 5)] &
3032 (1 << (addr & 0x1f)));
3034 break;
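/*
 * Example (sketch): with the set based at 10.0.0.0 (d[0]) and
 * arg1 = 256, address 10.0.0.42 yields addr = 42, which tests
 * bit (42 & 0x1f) = 10 of word d[1 + (42 >> 5)] = d[2].
 */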
3036 case O_IP_DST:
3037 match = (hlen > 0 &&
3038 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
3039 dst_ip.s_addr);
3040 break;
3042 case O_IP_DST_MASK:
3043 match = (hlen > 0) &&
3044 (((ipfw_insn_ip *)cmd)->addr.s_addr ==
3045 (dst_ip.s_addr &
3046 ((ipfw_insn_ip *)cmd)->mask.s_addr));
3047 break;
3049 case O_IP_DST_ME:
3050 if (hlen > 0) {
3051 struct ifnet *tif;
3053 tif = INADDR_TO_IFP(&dst_ip);
3054 match = (tif != NULL);
3056 break;
3058 case O_IP_SRCPORT:
3059 case O_IP_DSTPORT:
3061 * offset == 0 && proto != 0 is enough
3062 * to guarantee that we have an IPv4
3063 * packet with port info.
3065 if ((proto == IPPROTO_UDP || proto == IPPROTO_TCP)
3066 && offset == 0) {
3067 uint16_t x =
3068 (cmd->opcode == O_IP_SRCPORT) ?
3069 src_port : dst_port ;
3070 uint16_t *p =
3071 ((ipfw_insn_u16 *)cmd)->ports;
3072 int i;
3074 for (i = cmdlen - 1; !match && i > 0;
3075 i--, p += 2) {
3076 match =
3077 (x >= p[0] && x <= p[1]);
3080 break;
3082 case O_ICMPTYPE:
3083 match = (offset == 0 && proto==IPPROTO_ICMP &&
3084 icmptype_match(ip, (ipfw_insn_u32 *)cmd));
3085 break;
3087 case O_IPOPT:
3088 match = (hlen > 0 && ipopts_match(ip, cmd));
3089 break;
3091 case O_IPVER:
3092 match = (hlen > 0 && cmd->arg1 == ip->ip_v);
3093 break;
3095 case O_IPTTL:
3096 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl);
3097 break;
3099 case O_IPID:
3100 match = (hlen > 0 &&
3101 cmd->arg1 == ntohs(ip->ip_id));
3102 break;
3104 case O_IPLEN:
3105 match = (hlen > 0 && cmd->arg1 == ip_len);
3106 break;
3108 case O_IPPRECEDENCE:
3109 match = (hlen > 0 &&
3110 (cmd->arg1 == (ip->ip_tos & 0xe0)));
3111 break;
3113 case O_IPTOS:
3114 match = (hlen > 0 &&
3115 flags_match(cmd, ip->ip_tos));
3116 break;
3118 case O_TCPFLAGS:
3119 match = (proto == IPPROTO_TCP && offset == 0 &&
3120 flags_match(cmd,
3121 L3HDR(struct tcphdr,ip)->th_flags));
3122 break;
3124 case O_TCPOPTS:
3125 match = (proto == IPPROTO_TCP && offset == 0 &&
3126 tcpopts_match(ip, cmd));
3127 break;
3129 case O_TCPSEQ:
3130 match = (proto == IPPROTO_TCP && offset == 0 &&
3131 ((ipfw_insn_u32 *)cmd)->d[0] ==
3132 L3HDR(struct tcphdr,ip)->th_seq);
3133 break;
3135 case O_TCPACK:
3136 match = (proto == IPPROTO_TCP && offset == 0 &&
3137 ((ipfw_insn_u32 *)cmd)->d[0] ==
3138 L3HDR(struct tcphdr,ip)->th_ack);
3139 break;
3141 case O_TCPWIN:
3142 match = (proto == IPPROTO_TCP && offset == 0 &&
3143 cmd->arg1 ==
3144 L3HDR(struct tcphdr,ip)->th_win);
3145 break;
3147 case O_ESTAB:
3148 /* reject packets which have SYN only */
3149 /* XXX should i also check for TH_ACK ? */
3150 match = (proto == IPPROTO_TCP && offset == 0 &&
3151 (L3HDR(struct tcphdr,ip)->th_flags &
3152 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
3153 break;
3155 case O_LOG:
3156 if (fw_verbose) {
3157 ipfw_log(ctx, f, hlen, args->eh, m,
3158 oif);
3160 match = 1;
3161 break;
3163 case O_PROB:
3164 match = (krandom() <
3165 ((ipfw_insn_u32 *)cmd)->d[0]);
3166 break;
3169 * The second set of opcodes represents 'actions',
3170 * i.e. the terminal part of a rule once the packet
3171 * matches all previous patterns.
3172 * Typically there is only one action for each rule,
3173 * and the opcode is stored at the end of the rule
3174 * (but there are exceptions -- see below).
3176 * In general, here we set retval and terminate the
3177 * outer loop (would be a 'break 3' in some language,
3178 * but we need to do a 'goto done').
3180 * Exceptions:
3181 * O_COUNT and O_SKIPTO actions:
3182 * instead of terminating, we jump to the next rule
3183 * ('goto next_rule', equivalent to a 'break 2'),
3184 * or to the SKIPTO target ('goto again' after
3185 * having set f, cmd and l), respectively.
3187 * O_LIMIT and O_KEEP_STATE: these opcodes are
3188 * not real 'actions', and are stored right
3189 * before the 'action' part of the rule.
3190 * These opcodes try to install an entry in the
3191 * state tables; if successful, we continue with
3192 * the next opcode (match=1; break;), otherwise
3193 * the packet must be dropped ('goto done' after
3194 * setting retval). If static rules are changed
3195 * during the state installation, the packet will
3196 * be dropped and the rule's stats will not be updated
3197 * ('return IP_FW_DENY').
3199 * O_PROBE_STATE and O_CHECK_STATE: these opcodes
3200 * cause a lookup of the state table, and a jump
3201 * to the 'action' part of the parent rule
3202 * ('goto check_body') if an entry is found, or
3203 * (CHECK_STATE only) a jump to the next rule if
3204 * the entry is not found ('goto next_rule').
3205 * The result of the lookup is cached so that
3206 * further instances of these opcodes are
3207 * effectively NOPs. If static rules are changed
3208 * during the state lookup, the packet will
3209 * be dropped and the rule's stats will not be updated
3210 * ('return IP_FW_DENY').
3212 case O_LIMIT:
3213 case O_KEEP_STATE:
3214 if (ipfw_state_install(ctx, f,
3215 (ipfw_insn_limit *)cmd, args,
3216 (offset == 0 && proto == IPPROTO_TCP) ?
3217 L3HDR(struct tcphdr, ip) : NULL)) {
3218 retval = IP_FW_DENY;
3219 goto done; /* error/limit violation */
3221 match = 1;
3222 break;
3224 case O_PROBE_STATE:
3225 case O_CHECK_STATE:
3227 * States are checked at the first keep-state
3228 * check-state occurrence, with the result
3229 * being stored in dyn_dir. The compiler
3230 * introduces a PROBE_STATE instruction for
3231 * us when we have a KEEP_STATE/LIMIT (because
3232 * PROBE_STATE needs to be run first).
3234 if (dyn_dir == MATCH_UNKNOWN) {
3235 dyn_f = ipfw_state_lookup_rule(ctx,
3236 &args->f_id, &dyn_dir,
3237 (offset == 0 &&
3238 proto == IPPROTO_TCP) ?
3239 L3HDR(struct tcphdr, ip) : NULL,
3240 ip_len);
3241 if (dyn_f != NULL) {
3243 * Found a rule from a state;
3244 * jump to the 'action' part
3245 * of the rule.
3247 f = dyn_f;
3248 cmd = ACTION_PTR(f);
3249 l = f->cmd_len - f->act_ofs;
3250 goto check_body;
3254 * State not found. If CHECK_STATE, skip to the
3255 * next rule; if PROBE_STATE, just ignore it and
3256 * continue with the next opcode.
3258 if (cmd->opcode == O_CHECK_STATE)
3259 goto next_rule;
3260 match = 1;
3261 break;
3263 case O_ACCEPT:
3264 retval = IP_FW_PASS; /* accept */
3265 goto done;
3267 case O_PIPE:
3268 case O_QUEUE:
3269 args->rule = f; /* report matching rule */
3270 args->cookie = cmd->arg1;
3271 retval = IP_FW_DUMMYNET;
3272 goto done;
3274 case O_DIVERT:
3275 case O_TEE:
3276 if (args->eh) /* not on layer 2 */
3277 break;
3279 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT,
3280 sizeof(*divinfo), M_NOWAIT);
3281 if (mtag == NULL) {
3282 retval = IP_FW_DENY;
3283 goto done;
3285 divinfo = m_tag_data(mtag);
3287 divinfo->skipto = f->rulenum;
3288 divinfo->port = cmd->arg1;
3289 divinfo->tee = (cmd->opcode == O_TEE);
3290 m_tag_prepend(m, mtag);
3292 args->cookie = cmd->arg1;
3293 retval = (cmd->opcode == O_DIVERT) ?
3294 IP_FW_DIVERT : IP_FW_TEE;
3295 goto done;
3297 case O_COUNT:
3298 case O_SKIPTO:
3299 f->pcnt++; /* update stats */
3300 f->bcnt += ip_len;
3301 f->timestamp = time_second;
3302 if (cmd->opcode == O_COUNT)
3303 goto next_rule;
3304 /* handle skipto */
3305 if (f->next_rule == NULL)
3306 lookup_next_rule(f);
3307 f = f->next_rule;
3308 goto again;
3310 case O_REJECT:
3312 * Drop the packet and send a reject notice
3313 * if the packet is not ICMP (or is an ICMP
3314 * query), and it is not multicast/broadcast.
3316 if (hlen > 0 &&
3317 (proto != IPPROTO_ICMP ||
3318 is_icmp_query(ip)) &&
3319 !(m->m_flags & (M_BCAST|M_MCAST)) &&
3320 !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
3322 * Update statistics before the possible
3323 * blocking 'send_reject'
3325 f->pcnt++;
3326 f->bcnt += ip_len;
3327 f->timestamp = time_second;
3329 send_reject(args, cmd->arg1,
3330 offset, ip_len);
3331 m = args->m;
3334 * Return directly here; rule stats
3335 * have been updated above.
3337 return IP_FW_DENY;
3339 /* FALLTHROUGH */
3340 case O_DENY:
3341 retval = IP_FW_DENY;
3342 goto done;
3344 case O_FORWARD_IP:
3345 if (args->eh) /* not valid on layer2 pkts */
3346 break;
3347 if (!dyn_f || dyn_dir == MATCH_FORWARD) {
3348 struct sockaddr_in *sin;
3350 mtag = m_tag_get(PACKET_TAG_IPFORWARD,
3351 sizeof(*sin), M_NOWAIT);
3352 if (mtag == NULL) {
3353 retval = IP_FW_DENY;
3354 goto done;
3356 sin = m_tag_data(mtag);
3358 /* Structure copy */
3359 *sin = ((ipfw_insn_sa *)cmd)->sa;
3361 m_tag_prepend(m, mtag);
3362 m->m_pkthdr.fw_flags |=
3363 IPFORWARD_MBUF_TAGGED;
3364 m->m_pkthdr.fw_flags &=
3365 ~BRIDGE_MBUF_TAGGED;
3367 retval = IP_FW_PASS;
3368 goto done;
3370 default:
3371 panic("-- unknown opcode %d", cmd->opcode);
3372 } /* end of switch() on opcodes */
3374 if (cmd->len & F_NOT)
3375 match = !match;
3377 if (match) {
3378 if (cmd->len & F_OR)
3379 skip_or = 1;
3380 } else {
3381 if (!(cmd->len & F_OR)) /* not an OR block, */
3382 break; /* try next rule */
3385 } /* end of inner for, scan opcodes */
3387 next_rule:; /* try next rule */
3389 } /* end of outer for, scan rules */
3390 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
3391 return IP_FW_DENY;
3393 done:
3394 /* Update statistics */
3395 f->pcnt++;
3396 f->bcnt += ip_len;
3397 f->timestamp = time_second;
3398 return retval;
3400 pullup_failed:
3401 if (fw_verbose)
3402 kprintf("pullup failed\n");
3403 return IP_FW_DENY;
3406 static void
3407 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
3409 struct m_tag *mtag;
3410 struct dn_pkt *pkt;
3411 ipfw_insn *cmd;
3412 const struct ipfw_flow_id *id;
3413 struct dn_flow_id *fid;
3415 M_ASSERTPKTHDR(m);
3417 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), M_NOWAIT);
3418 if (mtag == NULL) {
3419 m_freem(m);
3420 return;
3422 m_tag_prepend(m, mtag);
3424 pkt = m_tag_data(mtag);
3425 bzero(pkt, sizeof(*pkt));
3427 cmd = fwa->rule->cmd + fwa->rule->act_ofs;
3428 if (cmd->opcode == O_LOG)
3429 cmd += F_LEN(cmd);
3430 KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
3431 ("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode));
3433 pkt->dn_m = m;
3434 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
3435 pkt->ifp = fwa->oif;
3436 pkt->pipe_nr = pipe_nr;
3438 pkt->cpuid = mycpuid;
3439 pkt->msgport = netisr_curport();
3441 id = &fwa->f_id;
3442 fid = &pkt->id;
3443 fid->fid_dst_ip = id->dst_ip;
3444 fid->fid_src_ip = id->src_ip;
3445 fid->fid_dst_port = id->dst_port;
3446 fid->fid_src_port = id->src_port;
3447 fid->fid_proto = id->proto;
3448 fid->fid_flags = id->flags;
3450 ipfw_ref_rule(fwa->rule);
3451 pkt->dn_priv = fwa->rule;
3452 pkt->dn_unref_priv = ipfw_unref_rule;
3454 if (cmd->opcode == O_PIPE)
3455 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
3457 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
3461 * When a rule is added/deleted, clear the next_rule pointers in all rules.
3462 * These will be reconstructed on the fly as packets are matched.
3464 static void
3465 ipfw_flush_rule_ptrs(struct ipfw_context *ctx)
3467 struct ip_fw *rule;
3469 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3470 rule->next_rule = NULL;
3473 static __inline void
3474 ipfw_inc_static_count(struct ip_fw *rule)
3476 /* Static rule's counts are updated only on CPU0 */
3477 KKASSERT(mycpuid == 0);
3479 static_count++;
3480 static_ioc_len += IOC_RULESIZE(rule);
3483 static __inline void
3484 ipfw_dec_static_count(struct ip_fw *rule)
3486 int l = IOC_RULESIZE(rule);
3488 /* Static rule's counts are updated only on CPU0 */
3489 KKASSERT(mycpuid == 0);
3491 KASSERT(static_count > 0, ("invalid static count %u", static_count));
3492 static_count--;
3494 KASSERT(static_ioc_len >= l,
3495 ("invalid static len %u", static_ioc_len));
3496 static_ioc_len -= l;
3499 static void
3500 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule)
3502 if (fwmsg->sibling != NULL) {
3503 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1);
3504 fwmsg->sibling->sibling = rule;
3506 fwmsg->sibling = rule;
3509 static struct ip_fw *
3510 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
3512 struct ip_fw *rule;
3514 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO);
3516 rule->act_ofs = ioc_rule->act_ofs;
3517 rule->cmd_len = ioc_rule->cmd_len;
3518 rule->rulenum = ioc_rule->rulenum;
3519 rule->set = ioc_rule->set;
3520 rule->usr_flags = ioc_rule->usr_flags;
3522 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */);
3524 rule->refcnt = 1;
3525 rule->cpuid = mycpuid;
3526 rule->rule_flags = rule_flags;
3528 return rule;
3531 static void
3532 ipfw_add_rule_dispatch(netmsg_t nmsg)
3534 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
3535 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3536 struct ip_fw *rule;
3538 rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->rule_flags);
3541 * Insert rule into the pre-determined position
3543 if (fwmsg->prev_rule != NULL) {
3544 struct ip_fw *prev, *next;
3546 prev = fwmsg->prev_rule;
3547 KKASSERT(prev->cpuid == mycpuid);
3549 next = fwmsg->next_rule;
3550 KKASSERT(next->cpuid == mycpuid);
3552 rule->next = next;
3553 prev->next = rule;
3556 * Move to the position on the next CPU
3557 * before the msg is forwarded.
3559 fwmsg->prev_rule = prev->sibling;
3560 fwmsg->next_rule = next->sibling;
3561 } else {
3562 KKASSERT(fwmsg->next_rule == NULL);
3563 rule->next = ctx->ipfw_layer3_chain;
3564 ctx->ipfw_layer3_chain = rule;
3567 /* Link rule CPU sibling */
3568 ipfw_link_sibling(fwmsg, rule);
3570 ipfw_flush_rule_ptrs(ctx);
3572 if (mycpuid == 0) {
3573 /* Statistics only need to be updated once */
3574 ipfw_inc_static_count(rule);
3576 /* Return the rule on CPU0 */
3577 nmsg->lmsg.u.ms_resultp = rule;
3580 if (rule->rule_flags & IPFW_RULE_F_GENTRACK)
3581 rule->track_ruleid = (uintptr_t)nmsg->lmsg.u.ms_resultp;
3583 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3587 * Add a new rule to the list. Copy the rule into a malloc'ed area,
3588 * possibly assign it a rule number, and insert it into the list.
3589 * Update the rule number in the input struct so the caller knows
3590 * it as well.
3592 static void
3593 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
3595 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3596 struct netmsg_ipfw fwmsg;
3597 struct netmsg_base *nmsg;
3598 struct ip_fw *f, *prev, *rule;
3600 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
3603 * If rulenum is 0, find the highest numbered rule before the
3604 * default rule and add the auto-increment step to it.
3606 if (ioc_rule->rulenum == 0) {
3607 int step = autoinc_step;
3609 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
3610 step <= IPFW_AUTOINC_STEP_MAX);
3613 * Locate the highest numbered rule before default
3615 for (f = ctx->ipfw_layer3_chain; f; f = f->next) {
3616 if (f->rulenum == IPFW_DEFAULT_RULE)
3617 break;
3618 ioc_rule->rulenum = f->rulenum;
3620 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
3621 ioc_rule->rulenum += step;
3623 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
3624 ioc_rule->rulenum != 0,
3625 ("invalid rule num %d", ioc_rule->rulenum));
3628 * Now find the right place for the new rule in the sorted list.
3630 for (prev = NULL, f = ctx->ipfw_layer3_chain; f;
3631 prev = f, f = f->next) {
3632 if (f->rulenum > ioc_rule->rulenum) {
3633 /* Found the location */
3634 break;
3637 KASSERT(f != NULL, ("no default rule?!"));
3640 * Duplicate the rule onto each CPU.
3641 * The rule duplicated on CPU0 will be returned.
3643 bzero(&fwmsg, sizeof(fwmsg));
3644 nmsg = &fwmsg.base;
3645 netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
3646 ipfw_add_rule_dispatch);
3647 fwmsg.ioc_rule = ioc_rule;
3648 fwmsg.prev_rule = prev;
3649 fwmsg.next_rule = prev == NULL ? NULL : f;
3650 fwmsg.rule_flags = rule_flags;
3652 netisr_domsg(nmsg, 0);
3653 KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL);
3655 rule = nmsg->lmsg.u.ms_resultp;
3656 KKASSERT(rule != NULL && rule->cpuid == mycpuid);
3658 DPRINTF("++ installed rule %d, static count now %d\n",
3659 rule->rulenum, static_count);
3663 * Free storage associated with a static rule (including derived
3664 * states/tracks).
3665 * The caller is in charge of clearing rule pointers to avoid
3666 * dangling pointers.
3667 * @return a pointer to the next entry.
3668 * Arguments are not checked, so they better be correct.
3670 static struct ip_fw *
3671 ipfw_delete_rule(struct ipfw_context *ctx,
3672 struct ip_fw *prev, struct ip_fw *rule)
3674 struct ip_fw *n;
3676 n = rule->next;
3677 if (prev == NULL)
3678 ctx->ipfw_layer3_chain = n;
3679 else
3680 prev->next = n;
3682 /* Mark the rule as invalid */
3683 rule->rule_flags |= IPFW_RULE_F_INVALID;
3684 rule->next_rule = NULL;
3685 rule->sibling = NULL;
3686 #ifdef foo
3687 /* Don't reset cpuid here; keep various assertion working */
3688 rule->cpuid = -1;
3689 #endif
3691 /* Statistics only need to be updated once */
3692 if (mycpuid == 0)
3693 ipfw_dec_static_count(rule);
3695 /* Try to free this rule */
3696 ipfw_free_rule(rule);
3698 /* Return the next rule */
3699 return n;
3702 static void
3703 ipfw_flush_dispatch(netmsg_t nmsg)
3705 int kill_default = nmsg->lmsg.u.ms_result;
3706 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3707 struct ip_fw *rule;
3710 * Flush states.
3712 ipfw_state_flush(ctx, NULL);
3713 KASSERT(ctx->ipfw_state_cnt == 0,
3714 ("%d pcpu states remain", ctx->ipfw_state_cnt));
3715 ctx->ipfw_state_loosecnt = 0;
3716 ctx->ipfw_state_lastexp = 0;
3719 * Flush tracks.
3721 ipfw_track_flush(ctx, NULL);
3722 ctx->ipfw_track_lastexp = 0;
3723 if (ctx->ipfw_trkcnt_spare != NULL) {
3724 kfree(ctx->ipfw_trkcnt_spare, M_IPFW);
3725 ctx->ipfw_trkcnt_spare = NULL;
3728 ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */
3730 while ((rule = ctx->ipfw_layer3_chain) != NULL &&
3731 (kill_default || rule->rulenum != IPFW_DEFAULT_RULE))
3732 ipfw_delete_rule(ctx, NULL, rule);
3734 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3738 * Deletes all rules from a chain (including the default rule
3739 * if the second argument is set).
3741 static void
3742 ipfw_flush(int kill_default)
3744 struct netmsg_base nmsg;
3745 #ifdef INVARIANTS
3746 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3747 int state_cnt;
3748 #endif
3750 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
3753 * If 'kill_default' is set, the caller has done the necessary
3754 * msgport syncing; there is no need to do it again.
3756 if (!kill_default) {
3758 * Let ipfw_chk() know the rules are going to
3759 * be flushed, so it could jump directly to
3760 * the default rule.
3762 ipfw_flushing = 1;
3763 /* XXX use priority sync */
3764 netmsg_service_sync();
3768 * Press the 'flush' button
3770 bzero(&nmsg, sizeof(nmsg));
3771 netmsg_init(&nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
3772 ipfw_flush_dispatch);
3773 nmsg.lmsg.u.ms_result = kill_default;
3774 netisr_domsg(&nmsg, 0);
3775 ipfw_gd.ipfw_state_loosecnt = 0;
3776 ipfw_gd.ipfw_state_globexp = 0;
3777 ipfw_gd.ipfw_track_globexp = 0;
3779 #ifdef INVARIANTS
3780 state_cnt = ipfw_state_cntcoll();
3781 KASSERT(state_cnt == 0, ("%d states remain", state_cnt));
3783 KASSERT(ipfw_gd.ipfw_trkcnt_cnt == 0,
3784 ("%d trkcnts remain", ipfw_gd.ipfw_trkcnt_cnt));
3786 if (kill_default) {
3787 KASSERT(static_count == 0,
3788 ("%u static rules remain", static_count));
3789 KASSERT(static_ioc_len == 0,
3790 ("%u bytes of static rules remain", static_ioc_len));
3791 } else {
3792 KASSERT(static_count == 1,
3793 ("%u static rules remain", static_count));
3794 KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
3795 ("%u bytes of static rules remain, should be %lu",
3796 static_ioc_len,
3797 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule)));
3799 #endif
3801 /* Flush is done */
3802 ipfw_flushing = 0;
3805 static void
3806 ipfw_alt_delete_rule_dispatch(netmsg_t nmsg)
3808 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3809 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3810 struct ip_fw *rule, *prev;
3812 rule = dmsg->start_rule;
3813 KKASSERT(rule->cpuid == mycpuid);
3814 dmsg->start_rule = rule->sibling;
3816 prev = dmsg->prev_rule;
3817 if (prev != NULL) {
3818 KKASSERT(prev->cpuid == mycpuid);
3821 * Move to the position on the next CPU
3822 * before the msg is forwarded.
3824 dmsg->prev_rule = prev->sibling;
3828 * flush pointers outside the loop, then delete all matching
3829 * rules. 'prev' remains the same throughout the cycle.
3831 ipfw_flush_rule_ptrs(ctx);
3832 while (rule && rule->rulenum == dmsg->rulenum) {
3833 if (rule->rule_flags & IPFW_RULE_F_GENSTATE) {
3834 /* Flush states generated by this rule. */
3835 ipfw_state_flush(ctx, rule);
3837 if (rule->rule_flags & IPFW_RULE_F_GENTRACK) {
3838 /* Flush tracks generated by this rule. */
3839 ipfw_track_flush(ctx, rule);
3841 rule = ipfw_delete_rule(ctx, prev, rule);
3844 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3847 static int
3848 ipfw_alt_delete_rule(uint16_t rulenum)
3850 struct ip_fw *prev, *rule;
3851 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3852 struct netmsg_del dmsg;
3855 * Locate first rule to delete
3857 for (prev = NULL, rule = ctx->ipfw_layer3_chain;
3858 rule && rule->rulenum < rulenum;
3859 prev = rule, rule = rule->next)
3860 ; /* EMPTY */
3861 if (rule->rulenum != rulenum)
3862 return EINVAL;
3865 * Get rid of the rule's duplicates on all CPUs
3867 bzero(&dmsg, sizeof(dmsg));
3868 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
3869 ipfw_alt_delete_rule_dispatch);
3870 dmsg.prev_rule = prev;
3871 dmsg.start_rule = rule;
3872 dmsg.rulenum = rulenum;
3874 netisr_domsg(&dmsg.base, 0);
3875 KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL);
3876 return 0;
3879 static void
3880 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
3882 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3883 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3884 struct ip_fw *prev, *rule;
3885 #ifdef INVARIANTS
3886 int del = 0;
3887 #endif
3889 ipfw_flush_rule_ptrs(ctx);
3891 prev = NULL;
3892 rule = ctx->ipfw_layer3_chain;
3893 while (rule != NULL) {
3894 if (rule->set == dmsg->from_set) {
3895 if (rule->rule_flags & IPFW_RULE_F_GENSTATE) {
3896 /* Flush states generated by this rule. */
3897 ipfw_state_flush(ctx, rule);
3899 if (rule->rule_flags & IPFW_RULE_F_GENTRACK) {
3900 /* Flush tracks generated by this rule. */
3901 ipfw_track_flush(ctx, rule);
3903 rule = ipfw_delete_rule(ctx, prev, rule);
3904 #ifdef INVARIANTS
3905 del = 1;
3906 #endif
3907 } else {
3908 prev = rule;
3909 rule = rule->next;
3912 KASSERT(del, ("no match set?!"));
3914 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3917 static int
3918 ipfw_alt_delete_ruleset(uint8_t set)
3920 struct netmsg_del dmsg;
3921 int del;
3922 struct ip_fw *rule;
3923 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3926 * Check whether the 'set' exists; if no rule belongs
3927 * to it, there is nothing to delete.
3930 del = 0;
3931 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3932 if (rule->set == set)
3933 del = 1;
3935 if (!del)
3936 return 0; /* XXX EINVAL? */
3939 * Delete this set
3941 bzero(&dmsg, sizeof(dmsg));
3942 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
3943 ipfw_alt_delete_ruleset_dispatch);
3944 dmsg.from_set = set;
3945 netisr_domsg(&dmsg.base, 0);
3947 return 0;
3950 static void
3951 ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
3953 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3954 struct ip_fw *rule;
3956 rule = dmsg->start_rule;
3957 KKASSERT(rule->cpuid == mycpuid);
3960 * Move to the position on the next CPU
3961 * before the msg is forwarded.
3963 dmsg->start_rule = rule->sibling;
3965 while (rule && rule->rulenum <= dmsg->rulenum) {
3966 if (rule->rulenum == dmsg->rulenum)
3967 rule->set = dmsg->to_set;
3968 rule = rule->next;
3970 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3973 static int
3974 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
3976 struct netmsg_del dmsg;
3977 struct netmsg_base *nmsg;
3978 struct ip_fw *rule;
3979 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3982 * Locate first rule to move
3984 for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum;
3985 rule = rule->next) {
3986 if (rule->rulenum == rulenum && rule->set != set)
3987 break;
3989 if (rule == NULL || rule->rulenum > rulenum)
3990 return 0; /* XXX error? */
3992 bzero(&dmsg, sizeof(dmsg));
3993 nmsg = &dmsg.base;
3994 netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
3995 ipfw_alt_move_rule_dispatch);
3996 dmsg.start_rule = rule;
3997 dmsg.rulenum = rulenum;
3998 dmsg.to_set = set;
4000 netisr_domsg(nmsg, 0);
4001 KKASSERT(dmsg.start_rule == NULL);
4002 return 0;
4005 static void
4006 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
4008 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
4009 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
4010 struct ip_fw *rule;
4012 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
4013 if (rule->set == dmsg->from_set)
4014 rule->set = dmsg->to_set;
4016 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
4019 static int
4020 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
4022 struct netmsg_del dmsg;
4023 struct netmsg_base *nmsg;
4025 bzero(&dmsg, sizeof(dmsg));
4026 nmsg = &dmsg.base;
4027 netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
4028 ipfw_alt_move_ruleset_dispatch);
4029 dmsg.from_set = from_set;
4030 dmsg.to_set = to_set;
4032 netisr_domsg(nmsg, 0);
4033 return 0;
4036 static void
4037 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
4039 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
4040 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
4041 struct ip_fw *rule;
4043 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
4044 if (rule->set == dmsg->from_set)
4045 rule->set = dmsg->to_set;
4046 else if (rule->set == dmsg->to_set)
4047 rule->set = dmsg->from_set;
4049 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
4052 static int
4053 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
4055 struct netmsg_del dmsg;
4056 struct netmsg_base *nmsg;
4058 bzero(&dmsg, sizeof(dmsg));
4059 nmsg = &dmsg.base;
4060 netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
4061 ipfw_alt_swap_ruleset_dispatch);
4062 dmsg.from_set = set1;
4063 dmsg.to_set = set2;
4065 netisr_domsg(nmsg, 0);
4066 return 0;
4070 * Remove all rules with given number, and also do set manipulation.
4072 * The argument is a uint32_t. The low 16 bits are the rule or set number;
4073 * the next 8 bits are the new set, and the top 8 bits are the command:
4075 * 0 delete rules with given number
4076 * 1 delete rules with given set number
4077 * 2 move rules with given number to new set
4078 * 3 move rules with given set number to new set
4079 * 4 swap sets with given numbers
4081 static int
4082 ipfw_ctl_alter(uint32_t arg)
4084 uint16_t rulenum;
4085 uint8_t cmd, new_set;
4086 int error = 0;
4088 rulenum = arg & 0xffff;
4089 cmd = (arg >> 24) & 0xff;
4090 new_set = (arg >> 16) & 0xff;
4092 if (cmd > 4)
4093 return EINVAL;
4094 if (new_set >= IPFW_DEFAULT_SET)
4095 return EINVAL;
4096 if (cmd == 0 || cmd == 2) {
4097 if (rulenum == IPFW_DEFAULT_RULE)
4098 return EINVAL;
4099 } else {
4100 if (rulenum >= IPFW_DEFAULT_SET)
4101 return EINVAL;
4104 switch (cmd) {
4105 case 0: /* delete rules with given number */
4106 error = ipfw_alt_delete_rule(rulenum);
4107 break;
4109 case 1: /* delete all rules with given set number */
4110 error = ipfw_alt_delete_ruleset(rulenum);
4111 break;
4113 case 2: /* move rules with given number to new set */
4114 error = ipfw_alt_move_rule(rulenum, new_set);
4115 break;
4117 case 3: /* move rules with given set number to new set */
4118 error = ipfw_alt_move_ruleset(rulenum, new_set);
4119 break;
4121 case 4: /* swap two sets */
4122 error = ipfw_alt_swap_ruleset(rulenum, new_set);
4123 break;
4125 return error;
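/*
 * Example (sketch): to swap sets 3 and 5 (command 4) a caller
 * would encode
 *	arg = (4 << 24) | (5 << 16) | 3;
 * matching the rulenum/new_set/cmd extraction above.
 */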
4129 * Clear counters for a specific rule.
4131 static void
4132 clear_counters(struct ip_fw *rule, int log_only)
4134 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
4136 if (log_only == 0) {
4137 rule->bcnt = rule->pcnt = 0;
4138 rule->timestamp = 0;
4140 if (l->o.opcode == O_LOG)
4141 l->log_left = l->max_log;
4144 static void
4145 ipfw_zero_entry_dispatch(netmsg_t nmsg)
4147 struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
4148 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
4149 struct ip_fw *rule;
4151 if (zmsg->rulenum == 0) {
4152 KKASSERT(zmsg->start_rule == NULL);
4154 ctx->ipfw_norule_counter = 0;
4155 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
4156 clear_counters(rule, zmsg->log_only);
4157 } else {
4158 struct ip_fw *start = zmsg->start_rule;
4160 KKASSERT(start->cpuid == mycpuid);
4161 KKASSERT(start->rulenum == zmsg->rulenum);
4164 * We can have multiple rules with the same number, so we
4165 * need to clear them all.
4167 for (rule = start; rule && rule->rulenum == zmsg->rulenum;
4168 rule = rule->next)
4169 clear_counters(rule, zmsg->log_only);
4172 * Move to the position on the next CPU
4173 * before the msg is forwarded.
4175 zmsg->start_rule = start->sibling;
4177 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
4181 * Reset some or all counters on firewall rules.
4182 * @arg rulenum is 0 to clear all entries, or contains a specific
4183 * rule number.
4184 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
4186 static int
4187 ipfw_ctl_zero_entry(int rulenum, int log_only)
4189 struct netmsg_zent zmsg;
4190 struct netmsg_base *nmsg;
4191 const char *msg;
4192 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
4194 bzero(&zmsg, sizeof(zmsg));
4195 nmsg = &zmsg.base;
4196 netmsg_init(nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
4197 ipfw_zero_entry_dispatch);
4198 zmsg.log_only = log_only;
4200 if (rulenum == 0) {
4201 msg = log_only ? "ipfw: All logging counts reset.\n"
4202 : "ipfw: Accounting cleared.\n";
4203 } else {
4204 struct ip_fw *rule;
4207 * Locate the first rule with 'rulenum'
4209 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
4210 if (rule->rulenum == rulenum)
4211 break;
4213 if (rule == NULL) /* we did not find any matching rules */
4214 return (EINVAL);
4215 zmsg.start_rule = rule;
4216 zmsg.rulenum = rulenum;
4218 msg = log_only ? "ipfw: Entry %d logging count reset.\n"
4219 : "ipfw: Entry %d cleared.\n";
4221 netisr_domsg(nmsg, 0);
4222 KKASSERT(zmsg.start_rule == NULL);
4224 if (fw_verbose)
4225 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
4226 return (0);
4230 * Check validity of the structure before insert.
4231 * Fortunately rules are simple, so this mostly needs to check rule sizes.
4233 static int
4234 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags)
4236 int l, cmdlen = 0;
4237 int have_action = 0;
4238 ipfw_insn *cmd;
4240 *rule_flags = 0;
4242 /* Check for valid size */
4243 if (size < sizeof(*rule)) {
4244 kprintf("ipfw: rule too short\n");
4245 return EINVAL;
4247 l = IOC_RULESIZE(rule);
4248 if (l != size) {
4249 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l);
4250 return EINVAL;
4253 /* Check rule number */
4254 if (rule->rulenum == IPFW_DEFAULT_RULE) {
4255 kprintf("ipfw: invalid rule number\n");
4256 return EINVAL;
4260 * Now go for the individual checks. Very simple ones, basically only
4261 * instruction sizes.
4263 for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
4264 l -= cmdlen, cmd += cmdlen) {
4265 cmdlen = F_LEN(cmd);
4266 if (cmdlen > l) {
4267 kprintf("ipfw: opcode %d size truncated\n",
4268 cmd->opcode);
4269 return EINVAL;
4272 DPRINTF("ipfw: opcode %d\n", cmd->opcode);
4274 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) {
4275 /* This rule will generate states. */
4276 *rule_flags |= IPFW_RULE_F_GENSTATE;
4277 if (cmd->opcode == O_LIMIT)
4278 *rule_flags |= IPFW_RULE_F_GENTRACK;
		switch (cmd->opcode) {
		case O_NOP:
		case O_PROBE_STATE:
		case O_KEEP_STATE:
		case O_PROTO:
		case O_IP_SRC_ME:
		case O_IP_DST_ME:
		case O_LAYER2:
		case O_IN:
		case O_FRAG:
		case O_IPOPT:
		case O_IPLEN:
		case O_IPID:
		case O_IPTOS:
		case O_IPPRECEDENCE:
		case O_IPTTL:
		case O_IPVER:
		case O_TCPWIN:
		case O_TCPFLAGS:
		case O_TCPOPTS:
		case O_ESTAB:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			break;

		case O_UID:
		case O_GID:
		case O_IP_SRC:
		case O_IP_DST:
		case O_TCPSEQ:
		case O_TCPACK:
		case O_PROB:
		case O_ICMPTYPE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
				goto bad_size;
			break;

		case O_LIMIT:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
				goto bad_size;
			break;

		case O_LOG:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
				goto bad_size;

			((ipfw_insn_log *)cmd)->log_left =
			    ((ipfw_insn_log *)cmd)->max_log;

			break;

		case O_IP_SRC_MASK:
		case O_IP_DST_MASK:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_ip))
				goto bad_size;
			if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) {
				kprintf("ipfw: opcode %d, useless rule\n",
				    cmd->opcode);
				return EINVAL;
			}
			break;

		case O_IP_SRC_SET:
		case O_IP_DST_SET:
			if (cmd->arg1 == 0 || cmd->arg1 > 256) {
				kprintf("ipfw: invalid set size %d\n",
				    cmd->arg1);
				return EINVAL;
			}
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
			    (cmd->arg1 + 31) / 32)
				goto bad_size;
			break;

		case O_MACADDR2:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
				goto bad_size;
			break;

		case O_MAC_TYPE:
		case O_IP_SRCPORT:
		case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
			if (cmdlen < 2 || cmdlen > 31)
				goto bad_size;
			break;

		case O_RECV:
		case O_XMIT:
		case O_VIA:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
				goto bad_size;
			break;

		case O_PIPE:
		case O_QUEUE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
				goto bad_size;
			goto check_action;

		case O_FORWARD_IP:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) {
				goto bad_size;
			} else {
				in_addr_t fwd_addr;

				fwd_addr = ((ipfw_insn_sa *)cmd)->
					   sa.sin_addr.s_addr;
				if (IN_MULTICAST(ntohl(fwd_addr))) {
					kprintf("ipfw: cannot forward to "
					    "multicast address\n");
					return EINVAL;
				}
			}
			goto check_action;

		case O_FORWARD_MAC: /* XXX not implemented yet */
		case O_CHECK_STATE:
		case O_COUNT:
		case O_ACCEPT:
		case O_DENY:
		case O_REJECT:
		case O_SKIPTO:
		case O_DIVERT:
		case O_TEE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
check_action:
			if (have_action) {
				kprintf("ipfw: opcode %d, multiple actions"
				    " not allowed\n", cmd->opcode);
				return EINVAL;
			}
			have_action = 1;
			if (l != cmdlen) {
				kprintf("ipfw: opcode %d, action must be"
				    " last opcode\n", cmd->opcode);
				return EINVAL;
			}
			break;

		default:
			kprintf("ipfw: opcode %d, unknown opcode\n",
			    cmd->opcode);
			return EINVAL;
		}
	}
	if (have_action == 0) {
		kprintf("ipfw: missing action\n");
		return EINVAL;
	}
	return 0;

bad_size:
	kprintf("ipfw: opcode %d size %d wrong\n",
	    cmd->opcode, cmdlen);
	return EINVAL;
}
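
/*
 * A hypothetical instruction list that passes the checks above may
 * help illustrate the layout being validated (the protocol match via
 * arg1 is an assumption for illustration): every micro-instruction
 * advances the cursor by F_LEN() 32-bit slots, and the action must
 * occupy the final slots of the cmd_len window.
 *
 *	ipfw_insn cmd[2];
 *
 *	cmd[0].opcode = O_PROTO;		// match instruction first
 *	cmd[0].len = F_INSN_SIZE(ipfw_insn);	// one 32-bit slot
 *	cmd[0].arg1 = IPPROTO_TCP;
 *
 *	cmd[1].opcode = O_ACCEPT;		// action must come last
 *	cmd[1].len = F_INSN_SIZE(ipfw_insn);	// act_ofs would be 1
 */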
static int
ipfw_ctl_add_rule(struct sockopt *sopt)
{
	struct ipfw_ioc_rule *ioc_rule;
	size_t size;
	uint32_t rule_flags;
	int error;

	size = sopt->sopt_valsize;
	if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
	    size < sizeof(*ioc_rule)) {
		return EINVAL;
	}
	if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
		sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
		    IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
	}
	ioc_rule = sopt->sopt_val;

	error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags);
	if (error)
		return error;

	ipfw_add_rule(ioc_rule, rule_flags);

	if (sopt->sopt_dir == SOPT_GET)
		sopt->sopt_valsize = IOC_RULESIZE(ioc_rule);
	return 0;
}
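
/*
 * The matching userland submission, sketched under the same
 * assumptions as the layout example after ipfw_check_ioc_rule();
 * the allocation size and the socket `s' are illustrative, not
 * prescriptive:
 *
 *	struct ipfw_ioc_rule *r;
 *
 *	r = calloc(1, sizeof(*r) + sizeof(ipfw_insn));
 *	r->rulenum = 100;		// 0 would let the kernel number it
 *	r->set = 0;
 *	r->act_ofs = 1;			// action lives at cmd[1]
 *	r->cmd_len = 2;			// two 32-bit slots in total
 *	// ... fill r->cmd[] as sketched above ...
 *	setsockopt(s, IPPROTO_IP, IP_FW_ADD, r, IOC_RULESIZE(r));
 */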
static void *
ipfw_copy_rule(const struct ipfw_context *ctx, const struct ip_fw *rule,
	       struct ipfw_ioc_rule *ioc_rule)
{
	const struct ip_fw *sibling;
#ifdef INVARIANTS
	int i;
#endif

	KKASSERT(rule->cpuid == IPFW_CFGCPUID);

	ioc_rule->act_ofs = rule->act_ofs;
	ioc_rule->cmd_len = rule->cmd_len;
	ioc_rule->rulenum = rule->rulenum;
	ioc_rule->set = rule->set;
	ioc_rule->usr_flags = rule->usr_flags;

	ioc_rule->set_disable = ctx->ipfw_set_disable;
	ioc_rule->static_count = static_count;
	ioc_rule->static_len = static_ioc_len;

	/*
	 * Visit (read-only) all of the rule's duplications to get
	 * the necessary statistics
	 */
#ifdef INVARIANTS
	i = 0;
#endif
	ioc_rule->pcnt = 0;
	ioc_rule->bcnt = 0;
	ioc_rule->timestamp = 0;
	for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
		ioc_rule->pcnt += sibling->pcnt;
		ioc_rule->bcnt += sibling->bcnt;
		if (sibling->timestamp > ioc_rule->timestamp)
			ioc_rule->timestamp = sibling->timestamp;
#ifdef INVARIANTS
		++i;
#endif
	}
	KASSERT(i == netisr_ncpus,
	    ("static rule is not duplicated on netisr_ncpus %d", netisr_ncpus));

	bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);

	return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
}
static boolean_t
ipfw_track_copy(const struct ipfw_trkcnt *trk, struct ipfw_ioc_state *ioc_state)
{
	struct ipfw_ioc_flowid *ioc_id;

	if (trk->tc_expire == 0) {
		/* Not a scanned one. */
		return (FALSE);
	}

	ioc_state->expire = TIME_LEQ(trk->tc_expire, time_uptime) ?
	    0 : trk->tc_expire - time_uptime;
	ioc_state->pcnt = 0;
	ioc_state->bcnt = 0;

	ioc_state->dyn_type = O_LIMIT_PARENT;
	ioc_state->count = trk->tc_count;

	ioc_state->rulenum = trk->tc_rulenum;

	ioc_id = &ioc_state->id;
	ioc_id->type = ETHERTYPE_IP;
	ioc_id->u.ip.proto = trk->tc_proto;
	ioc_id->u.ip.src_ip = trk->tc_saddr;
	ioc_id->u.ip.dst_ip = trk->tc_daddr;
	ioc_id->u.ip.src_port = trk->tc_sport;
	ioc_id->u.ip.dst_port = trk->tc_dport;

	return (TRUE);
}

static boolean_t
ipfw_state_copy(const struct ipfw_state *s, struct ipfw_ioc_state *ioc_state)
{
	struct ipfw_ioc_flowid *ioc_id;

	if (s->st_type == O_ANCHOR)
		return (FALSE);

	ioc_state->expire = TIME_LEQ(s->st_expire, time_uptime) ?
	    0 : s->st_expire - time_uptime;
	ioc_state->pcnt = s->st_pcnt;
	ioc_state->bcnt = s->st_bcnt;

	ioc_state->dyn_type = s->st_type;
	ioc_state->count = 0;

	ioc_state->rulenum = s->st_rule->rulenum;

	ioc_id = &ioc_state->id;
	ioc_id->type = ETHERTYPE_IP;
	ioc_id->u.ip.proto = s->st_proto;
	ipfw_key_4tuple(&s->st_key,
	    &ioc_id->u.ip.src_ip, &ioc_id->u.ip.src_port,
	    &ioc_id->u.ip.dst_ip, &ioc_id->u.ip.dst_port);

	return (TRUE);
}
static void
ipfw_state_copy_dispatch(netmsg_t nmsg)
{
	struct netmsg_cpstate *nm = (struct netmsg_cpstate *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	const struct ipfw_state *s;
	const struct ipfw_track *t;

	KASSERT(nm->state_cnt < nm->state_cntmax,
	    ("invalid state count %d, max %d",
	     nm->state_cnt, nm->state_cntmax));

	TAILQ_FOREACH(s, &ctx->ipfw_state_list, st_link) {
		if (ipfw_state_copy(s, nm->ioc_state)) {
			nm->ioc_state++;
			nm->state_cnt++;
			if (nm->state_cnt == nm->state_cntmax)
				goto done;
		}
	}

	/*
	 * Prepare tracks in the global track tree for userland.
	 */
	TAILQ_FOREACH(t, &ctx->ipfw_track_list, t_link) {
		struct ipfw_trkcnt *trk;

		if (t->t_count == NULL) /* anchor */
			continue;
		trk = t->t_trkcnt;

		/*
		 * Only one netisr can run this function at
		 * any time, and only this function accesses
		 * trkcnt's tc_expire, so this is safe w/o
		 * ipfw_gd.ipfw_trkcnt_token.
		 */
		if (trk->tc_expire > t->t_expire)
			continue;
		trk->tc_expire = t->t_expire;
	}

	/*
	 * Copy tracks in the global track tree to userland in
	 * the last netisr.
	 */
	if (mycpuid == netisr_ncpus - 1) {
		struct ipfw_trkcnt *trk;

		KASSERT(nm->state_cnt < nm->state_cntmax,
		    ("invalid state count %d, max %d",
		     nm->state_cnt, nm->state_cntmax));

		IPFW_TRKCNT_TOKGET;
		RB_FOREACH(trk, ipfw_trkcnt_tree, &ipfw_gd.ipfw_trkcnt_tree) {
			if (ipfw_track_copy(trk, nm->ioc_state)) {
				nm->ioc_state++;
				nm->state_cnt++;
				if (nm->state_cnt == nm->state_cntmax) {
					IPFW_TRKCNT_TOKREL;
					goto done;
				}
			}
		}
		IPFW_TRKCNT_TOKREL;
	}
done:
	if (nm->state_cnt == nm->state_cntmax) {
		/* No more space; done. */
		netisr_replymsg(&nm->base, 0);
	} else {
		netisr_forwardmsg(&nm->base, mycpuid + 1);
	}
}
static int
ipfw_ctl_get_rules(struct sockopt *sopt)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;
	void *bp;
	size_t size;
	int state_cnt;

	/*
	 * Pass up a copy of the current rules. Static rules
	 * come first (the last of which has number IPFW_DEFAULT_RULE),
	 * followed by a possibly empty list of states.
	 */

	size = static_ioc_len;	/* size of static rules */

	/*
	 * Size of the states.
	 * XXX take tracks as state for userland compat.
	 */
	state_cnt = ipfw_state_cntcoll() + ipfw_gd.ipfw_trkcnt_cnt;
	state_cnt = (state_cnt * 5) / 4; /* leave 25% headroom */
	size += state_cnt * sizeof(struct ipfw_ioc_state);

	if (sopt->sopt_valsize < size) {
		/* short length, no need to return incomplete rules */
		/* XXX: if superuser, no need to zero buffer */
		bzero(sopt->sopt_val, sopt->sopt_valsize);
		return 0;
	}
	bp = sopt->sopt_val;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
		bp = ipfw_copy_rule(ctx, rule, bp);

	if (state_cnt) {
		struct netmsg_cpstate nm;
#ifdef INVARIANTS
		size_t old_size = size;
#endif

		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, ipfw_state_copy_dispatch);
		nm.ioc_state = bp;
		nm.state_cntmax = state_cnt;
		nm.state_cnt = 0;
		netisr_domsg(&nm.base, 0);

		/*
		 * The number of states may have shrunk after the
		 * snapshot of the state count was taken, so nm.state_cnt
		 * is used to recalculate the actual size and give
		 * userland a correct state count.
		 */
		size = static_ioc_len +
		    (nm.state_cnt * sizeof(struct ipfw_ioc_state));
		KKASSERT(size <= old_size);
	}

	sopt->sopt_valsize = size;
	return 0;
}
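
/*
 * The consumer side, sketched with an oversized buffer since userland
 * cannot know the exact snapshot size beforehand (the 64KB figure is
 * an arbitrary assumption; a real consumer would retry with a bigger
 * buffer if the result looks truncated):
 *
 *	socklen_t len = 65536;
 *	void *buf = malloc(len);
 *
 *	if (getsockopt(s, IPPROTO_IP, IP_FW_GET, buf, &len) == 0) {
 *		// len is shrunk to static_ioc_len plus the states
 *		// actually copied; walk the rules with IOC_RULESIZE()
 *		// until rulenum == IPFW_DEFAULT_RULE, then read the
 *		// ipfw_ioc_state array that follows.
 *	}
 */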
static void
ipfw_set_disable_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	ctx->ipfw_set_disable = lmsg->u.ms_result32;

	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}

static void
ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
{
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg;
	uint32_t set_disable;

	/* IPFW_DEFAULT_SET is always enabled */
	enable |= (1 << IPFW_DEFAULT_SET);
	set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;

	bzero(&nmsg, sizeof(nmsg));
	netmsg_init(&nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_set_disable_dispatch);
	lmsg = &nmsg.lmsg;
	lmsg->u.ms_result32 = set_disable;

	netisr_domsg(&nmsg, 0);
}
/*
 * {set|get}sockopt parser.
 */
static int
ipfw_ctl(struct sockopt *sopt)
{
	int error, rulenum;
	uint32_t *masks;
	size_t size;

	error = 0;

	switch (sopt->sopt_name) {
	case IP_FW_GET:
		error = ipfw_ctl_get_rules(sopt);
		break;

	case IP_FW_FLUSH:
		ipfw_flush(0 /* keep default rule */);
		break;

	case IP_FW_ADD:
		error = ipfw_ctl_add_rule(sopt);
		break;

	case IP_FW_DEL:
		/*
		 * IP_FW_DEL is used for deleting single rules or sets,
		 * and (ab)used to atomically manipulate sets.
		 * Argument size is used to distinguish between the two:
		 *    sizeof(uint32_t)
		 *	delete single rule or set of rules,
		 *	or reassign rules (or sets) to a different set.
		 *    2 * sizeof(uint32_t)
		 *	atomic disable/enable sets.
		 *	first uint32_t contains sets to be disabled,
		 *	second uint32_t contains sets to be enabled.
		 */
		masks = sopt->sopt_val;
		size = sopt->sopt_valsize;
		if (size == sizeof(*masks)) {
			/*
			 * Delete or reassign static rule
			 */
			error = ipfw_ctl_alter(masks[0]);
		} else if (size == (2 * sizeof(*masks))) {
			/*
			 * Set enable/disable
			 */
			ipfw_ctl_set_disable(masks[0], masks[1]);
		} else {
			error = EINVAL;
		}
		break;

	case IP_FW_ZERO:
	case IP_FW_RESETLOG: /* argument is an int, the rule number */
		rulenum = 0;

		if (sopt->sopt_val != 0) {
			error = soopt_to_kbuf(sopt, &rulenum,
			    sizeof(int), sizeof(int));
			if (error)
				break;
		}
		error = ipfw_ctl_zero_entry(rulenum,
		    sopt->sopt_name == IP_FW_RESETLOG);
		break;

	default:
		kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name);
		error = EINVAL;
	}
	return error;
}
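
/*
 * The two IP_FW_DEL payload shapes described above, from the caller's
 * point of view (the one-word command encoding is interpreted by
 * ipfw_ctl_alter(); the concrete values are illustrative):
 *
 *	uint32_t one = 100;
 *	uint32_t two[2];
 *
 *	// One word: delete rule 100.
 *	setsockopt(s, IPPROTO_IP, IP_FW_DEL, &one, sizeof(one));
 *
 *	// Two words: atomically disable set 5 and enable set 6.
 *	two[0] = 1 << 5;
 *	two[1] = 1 << 6;
 *	setsockopt(s, IPPROTO_IP, IP_FW_DEL, two, sizeof(two));
 */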
static void
ipfw_keepalive_done(struct ipfw_context *ctx)
{
	KASSERT(ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE,
	    ("keepalive is not in progress"));
	ctx->ipfw_flags &= ~IPFW_FLAG_KEEPALIVE;
	callout_reset(&ctx->ipfw_keepalive_ch, dyn_keepalive_period * hz,
	    ipfw_keepalive, NULL);
}

static void
ipfw_keepalive_more(struct ipfw_context *ctx)
{
	struct netmsg_base *nm = &ctx->ipfw_keepalive_more;

	KASSERT(ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE,
	    ("keepalive is not in progress"));
	KASSERT(nm->lmsg.ms_flags & MSGF_DONE,
	    ("keepalive more did not finish"));
	netisr_sendmsg_oncpu(nm);
}

static void
ipfw_keepalive_loop(struct ipfw_context *ctx, struct ipfw_state *anchor)
{
	struct ipfw_state *s;
	int scanned = 0, expired = 0, kept = 0;

	KASSERT(ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE,
	    ("keepalive is not in progress"));

	while ((s = TAILQ_NEXT(anchor, st_link)) != NULL) {
		uint32_t ack_rev, ack_fwd;
		struct ipfw_flow_id id;

		if (scanned++ >= ipfw_state_scan_max) {
			ipfw_keepalive_more(ctx);
			return;
		}

		TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
		TAILQ_INSERT_AFTER(&ctx->ipfw_state_list, s, anchor, st_link);

		if (s->st_type == O_ANCHOR)
			continue;

		if (TIME_LEQ(s->st_expire, time_uptime)) {
			/* State expired. */
			ipfw_state_del(ctx, s);
			if (++expired >= ipfw_state_expire_max) {
				ipfw_keepalive_more(ctx);
				return;
			}
			continue;
		}

		/*
		 * Keep alive processing
		 */

		if (s->st_proto != IPPROTO_TCP)
			continue;
		if ((s->st_state & IPFW_STATE_TCPSTATES) != BOTH_SYN)
			continue;
		if (TIME_LEQ(time_uptime + dyn_keepalive_interval,
		    s->st_expire))
			continue;	/* too early */

		ipfw_key_4tuple(&s->st_key, &id.src_ip, &id.src_port,
		    &id.dst_ip, &id.dst_port);
		ack_rev = s->st_ack_rev;
		ack_fwd = s->st_ack_fwd;
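
		/*
		 * Send one probe to each endpoint of the tracked
		 * connection. A segment whose sequence number sits one
		 * below what the peer has already acknowledged forces
		 * the peer to answer with a pure ACK, and those ACKs
		 * refresh this state when they traverse the firewall.
		 */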
		send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
		send_pkt(&id, ack_fwd - 1, ack_rev, 0);

		if (++kept >= ipfw_keepalive_max) {
			ipfw_keepalive_more(ctx);
			return;
		}
	}
	TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
	ipfw_keepalive_done(ctx);
}
static void
ipfw_keepalive_more_dispatch(netmsg_t nm)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ipfw_state *anchor;

	ASSERT_NETISR_NCPUS(mycpuid);
	KASSERT(ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE,
	    ("keepalive is not in progress"));

	/* Reply ASAP */
	netisr_replymsg(&nm->base, 0);

	anchor = &ctx->ipfw_keepalive_anch;
	if (!dyn_keepalive || ctx->ipfw_state_cnt == 0) {
		TAILQ_REMOVE(&ctx->ipfw_state_list, anchor, st_link);
		ipfw_keepalive_done(ctx);
		return;
	}
	ipfw_keepalive_loop(ctx, anchor);
}

/*
 * This netisr handler starts the keepalive scan of this CPU's
 * states. It is dispatched every dyn_keepalive_period.
 */
static void
ipfw_keepalive_dispatch(netmsg_t nm)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ipfw_state *anchor;

	ASSERT_NETISR_NCPUS(mycpuid);
	KASSERT((ctx->ipfw_flags & IPFW_FLAG_KEEPALIVE) == 0,
	    ("keepalive is in progress"));
	ctx->ipfw_flags |= IPFW_FLAG_KEEPALIVE;

	/* Reply ASAP */
	crit_enter();
	netisr_replymsg(&nm->base, 0);
	crit_exit();

	if (!dyn_keepalive || ctx->ipfw_state_cnt == 0) {
		ipfw_keepalive_done(ctx);
		return;
	}

	anchor = &ctx->ipfw_keepalive_anch;
	TAILQ_INSERT_HEAD(&ctx->ipfw_state_list, anchor, st_link);
	ipfw_keepalive_loop(ctx, anchor);
}

/*
 * This procedure is only used to handle keepalives. It is invoked
 * every dyn_keepalive_period.
 */
static void
ipfw_keepalive(void *dummy __unused)
{
	struct netmsg_base *msg;

	KKASSERT(mycpuid < netisr_ncpus);
	msg = &ipfw_ctx[mycpuid]->ipfw_keepalive_nm;

	crit_enter();
	if (msg->lmsg.ms_flags & MSGF_DONE)
		netisr_sendmsg_oncpu(msg);
	crit_exit();
}
static int
ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
{
	struct ip_fw_args args;
	struct mbuf *m = *m0;
	struct m_tag *mtag;
	int tee = 0, error = 0, ret;

	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
		/* Extract info from dummynet tag */
		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
		KKASSERT(mtag != NULL);
		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
		KKASSERT(args.rule != NULL);

		m_tag_delete(m, mtag);
		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
	} else {
		args.rule = NULL;
	}

	args.eh = NULL;
	args.oif = NULL;
	args.m = m;
	ret = ipfw_chk(&args);
	m = args.m;

	if (m == NULL) {
		error = EACCES;
		goto back;
	}

	switch (ret) {
	case IP_FW_PASS:
		break;

	case IP_FW_DENY:
		m_freem(m);
		m = NULL;
		error = EACCES;
		break;

	case IP_FW_DUMMYNET:
		/* Send packet to the appropriate pipe */
		ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
		break;

	case IP_FW_TEE:
		tee = 1;
		/* FALL THROUGH */

	case IP_FW_DIVERT:
		/*
		 * Must clear the bridge tag when diverting.
		 */
		m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
		if (ip_divert_p != NULL) {
			m = ip_divert_p(m, tee, 1);
		} else {
			m_freem(m);
			m = NULL;
			/* not sure this is the right error msg */
			error = EACCES;
		}
		break;

	default:
		panic("unknown ipfw return value: %d", ret);
	}
back:
	*m0 = m;
	return error;
}
static int
ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
{
	struct ip_fw_args args;
	struct mbuf *m = *m0;
	struct m_tag *mtag;
	int tee = 0, error = 0, ret;

	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
		/* Extract info from dummynet tag */
		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
		KKASSERT(mtag != NULL);
		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
		KKASSERT(args.rule != NULL);

		m_tag_delete(m, mtag);
		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
	} else {
		args.rule = NULL;
	}

	args.eh = NULL;
	args.m = m;
	args.oif = ifp;
	ret = ipfw_chk(&args);
	m = args.m;

	if (m == NULL) {
		error = EACCES;
		goto back;
	}

	switch (ret) {
	case IP_FW_PASS:
		break;

	case IP_FW_DENY:
		m_freem(m);
		m = NULL;
		error = EACCES;
		break;

	case IP_FW_DUMMYNET:
		ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
		break;

	case IP_FW_TEE:
		tee = 1;
		/* FALL THROUGH */

	case IP_FW_DIVERT:
		if (ip_divert_p != NULL) {
			m = ip_divert_p(m, tee, 0);
		} else {
			m_freem(m);
			m = NULL;
			/* not sure this is the right error msg */
			error = EACCES;
		}
		break;

	default:
		panic("unknown ipfw return value: %d", ret);
	}
back:
	*m0 = m;
	return error;
}
static void
ipfw_hook(void)
{
	struct pfil_head *pfh;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh == NULL)
		return;

	pfil_add_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
	pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
}

static void
ipfw_dehook(void)
{
	struct pfil_head *pfh;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh == NULL)
		return;

	pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
	pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
}
static int
ipfw_sysctl_dyncnt(SYSCTL_HANDLER_ARGS)
{
	int dyn_cnt;

	dyn_cnt = ipfw_state_cntcoll();
	dyn_cnt += ipfw_gd.ipfw_trkcnt_cnt;

	return (sysctl_handle_int(oidp, &dyn_cnt, 0, req));
}

static int
ipfw_sysctl_statecnt(SYSCTL_HANDLER_ARGS)
{
	int state_cnt;

	state_cnt = ipfw_state_cntcoll();
	return (sysctl_handle_int(oidp, &state_cnt, 0, req));
}

static int
ipfw_sysctl_statemax(SYSCTL_HANDLER_ARGS)
{
	int state_max, error;

	state_max = ipfw_state_max;
	error = sysctl_handle_int(oidp, &state_max, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	if (state_max < 1)
		return (EINVAL);

	ipfw_state_max_set(state_max);
	return (0);
}

static int
ipfw_sysctl_dynmax(SYSCTL_HANDLER_ARGS)
{
	int dyn_max, error;

	dyn_max = ipfw_state_max + ipfw_track_max;

	error = sysctl_handle_int(oidp, &dyn_max, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	if (dyn_max < 2)
		return (EINVAL);

	ipfw_state_max_set(dyn_max / 2);
	ipfw_track_max = dyn_max / 2;
	return (0);
}
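
/*
 * A userland tuning sketch for the handlers above; the OID name is
 * an assumption based on the handler names. dyn_max exists for
 * compatibility and is split evenly: writing it sets the state and
 * track limits to half the value each, reading returns their sum.
 *
 *	int dyn_max = 16384;
 *
 *	sysctlbyname("net.inet.ip.fw.dyn_max", NULL, NULL,
 *	    &dyn_max, sizeof(dyn_max));
 */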
static void
ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	int enable = lmsg->u.ms_result;

	if (fw_enable == enable)
		goto reply;

	fw_enable = enable;
	if (fw_enable)
		ipfw_hook();
	else
		ipfw_dehook();
reply:
	lwkt_replymsg(lmsg, 0);
}

static int
ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
{
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg;
	int enable, error;

	enable = fw_enable;
	error = sysctl_handle_int(oidp, &enable, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_sysctl_enable_dispatch);
	lmsg = &nmsg.lmsg;
	lmsg->u.ms_result = enable;

	return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
}

static int
ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
	    IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
}

static int
ipfw_sysctl_scancnt(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req, 1, INT_MAX);
}

static int
ipfw_sysctl_stat(SYSCTL_HANDLER_ARGS)
{
	u_long stat = 0;
	int cpu, error;

	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		stat += *((u_long *)((uint8_t *)ipfw_ctx[cpu] + arg2));

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	/* Zero out this stat. */
	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		*((u_long *)((uint8_t *)ipfw_ctx[cpu] + arg2)) = 0;
	return (0);
}
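
/*
 * A registration sketch for the handler above; the parent node and
 * the ipfw_frags field name are assumptions for illustration. arg2
 * carries the byte offset of a u_long counter inside ipfw_context,
 * so one handler can serve every per-CPU statistic:
 *
 *	SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, frags,
 *	    CTLTYPE_ULONG | CTLFLAG_RW, NULL,
 *	    __offsetof(struct ipfw_context, ipfw_frags),
 *	    ipfw_sysctl_stat, "LU", "# of IP fragments matched");
 */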
static void
ipfw_ctx_init_dispatch(netmsg_t nmsg)
{
	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
	struct ipfw_context *ctx;
	struct ip_fw *def_rule;

	ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);

	RB_INIT(&ctx->ipfw_state_tree);
	TAILQ_INIT(&ctx->ipfw_state_list);

	RB_INIT(&ctx->ipfw_track_tree);
	TAILQ_INIT(&ctx->ipfw_track_list);

	callout_init_mp(&ctx->ipfw_stateto_ch);
	netmsg_init(&ctx->ipfw_stateexp_nm, NULL, &netisr_adone_rport,
	    MSGF_DROPABLE | MSGF_PRIORITY, ipfw_state_expire_dispatch);
	ctx->ipfw_stateexp_anch.st_type = O_ANCHOR;
	netmsg_init(&ctx->ipfw_stateexp_more, NULL, &netisr_adone_rport,
	    MSGF_DROPABLE, ipfw_state_expire_more_dispatch);

	callout_init_mp(&ctx->ipfw_trackto_ch);
	netmsg_init(&ctx->ipfw_trackexp_nm, NULL, &netisr_adone_rport,
	    MSGF_DROPABLE | MSGF_PRIORITY, ipfw_track_expire_dispatch);
	netmsg_init(&ctx->ipfw_trackexp_more, NULL, &netisr_adone_rport,
	    MSGF_DROPABLE, ipfw_track_expire_more_dispatch);

	callout_init_mp(&ctx->ipfw_keepalive_ch);
	netmsg_init(&ctx->ipfw_keepalive_nm, NULL, &netisr_adone_rport,
	    MSGF_DROPABLE | MSGF_PRIORITY, ipfw_keepalive_dispatch);
	ctx->ipfw_keepalive_anch.st_type = O_ANCHOR;
	netmsg_init(&ctx->ipfw_keepalive_more, NULL, &netisr_adone_rport,
	    MSGF_DROPABLE, ipfw_keepalive_more_dispatch);

	ipfw_ctx[mycpuid] = ctx;

	def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);

	def_rule->act_ofs = 0;
	def_rule->rulenum = IPFW_DEFAULT_RULE;
	def_rule->cmd_len = 1;
	def_rule->set = IPFW_DEFAULT_SET;

	def_rule->cmd[0].len = 1;
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
	def_rule->cmd[0].opcode = O_ACCEPT;
#else
	if (filters_default_to_accept)
		def_rule->cmd[0].opcode = O_ACCEPT;
	else
		def_rule->cmd[0].opcode = O_DENY;
#endif

	def_rule->refcnt = 1;
	def_rule->cpuid = mycpuid;

	/* Install the default rule */
	ctx->ipfw_default_rule = def_rule;
	ctx->ipfw_layer3_chain = def_rule;

	/* Link rule CPU sibling */
	ipfw_link_sibling(fwmsg, def_rule);

	/* Statistics only need to be updated once */
	if (mycpuid == 0)
		ipfw_inc_static_count(def_rule);

	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}
static void
ipfw_init_dispatch(netmsg_t nmsg)
{
	struct netmsg_ipfw fwmsg;
	int error = 0, cpu;

	if (IPFW_LOADED) {
		kprintf("IP firewall already loaded\n");
		error = EEXIST;
		goto reply;
	}

	/* Initialize global track tree. */
	RB_INIT(&ipfw_gd.ipfw_trkcnt_tree);
	IPFW_TRKCNT_TOKINIT;

	ipfw_state_max_set(ipfw_state_max);
	ipfw_state_headroom = 8 * netisr_ncpus;

	bzero(&fwmsg, sizeof(fwmsg));
	netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_ctx_init_dispatch);
	netisr_domsg(&fwmsg.base, 0);

	ip_fw_chk_ptr = ipfw_chk;
	ip_fw_ctl_ptr = ipfw_ctl;
	ip_fw_dn_io_ptr = ipfw_dummynet_io;

	kprintf("ipfw2 initialized, default to %s, logging ",
	    ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
	    O_ACCEPT ? "accept" : "deny");

#ifdef IPFIREWALL_VERBOSE
	fw_verbose = 1;
#endif
#ifdef IPFIREWALL_VERBOSE_LIMIT
	verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
#endif
	if (fw_verbose == 0) {
		kprintf("disabled\n");
	} else if (verbose_limit == 0) {
		kprintf("unlimited\n");
	} else {
		kprintf("limited to %d packets/entry by default\n",
		    verbose_limit);
	}

	ip_fw_loaded = 1;
	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		callout_reset_bycpu(&ipfw_ctx[cpu]->ipfw_stateto_ch, hz,
		    ipfw_state_expire_ipifunc, NULL, cpu);
		callout_reset_bycpu(&ipfw_ctx[cpu]->ipfw_trackto_ch, hz,
		    ipfw_track_expire_ipifunc, NULL, cpu);
		callout_reset_bycpu(&ipfw_ctx[cpu]->ipfw_keepalive_ch, hz,
		    ipfw_keepalive, NULL, cpu);
	}

	if (fw_enable)
		ipfw_hook();
reply:
	lwkt_replymsg(&nmsg->lmsg, error);
}

static int
ipfw_init(void)
{
	struct netmsg_base smsg;

	netmsg_init(&smsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_init_dispatch);
	return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
}
#ifdef KLD_MODULE

static void
ipfw_ctx_fini_dispatch(netmsg_t nmsg)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	callout_stop_sync(&ctx->ipfw_stateto_ch);
	callout_stop_sync(&ctx->ipfw_trackto_ch);
	callout_stop_sync(&ctx->ipfw_keepalive_ch);

	crit_enter();
	lwkt_dropmsg(&ctx->ipfw_stateexp_more.lmsg);
	lwkt_dropmsg(&ctx->ipfw_stateexp_nm.lmsg);
	lwkt_dropmsg(&ctx->ipfw_trackexp_more.lmsg);
	lwkt_dropmsg(&ctx->ipfw_trackexp_nm.lmsg);
	lwkt_dropmsg(&ctx->ipfw_keepalive_more.lmsg);
	lwkt_dropmsg(&ctx->ipfw_keepalive_nm.lmsg);
	crit_exit();

	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}

static void
ipfw_fini_dispatch(netmsg_t nmsg)
{
	struct netmsg_base nm;
	int error = 0, cpu;

	if (ipfw_gd.ipfw_refcnt != 0) {
		error = EBUSY;
		goto reply;
	}

	ip_fw_loaded = 0;
	ipfw_dehook();

	/* Synchronize any inflight state/track expire IPIs. */
	lwkt_synchronize_ipiqs("ipfwfini");

	netmsg_init(&nm, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_ctx_fini_dispatch);
	netisr_domsg(&nm, 0);

	ip_fw_chk_ptr = NULL;
	ip_fw_ctl_ptr = NULL;
	ip_fw_dn_io_ptr = NULL;
	ipfw_flush(1 /* kill default rule */);

	/* Free per-cpu context */
	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
		kfree(ipfw_ctx[cpu], M_IPFW);

	kprintf("IP firewall unloaded\n");
reply:
	lwkt_replymsg(&nmsg->lmsg, error);
}

static int
ipfw_fini(void)
{
	struct netmsg_base smsg;

	netmsg_init(&smsg, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    ipfw_fini_dispatch);
	return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
}

#endif /* KLD_MODULE */
static int
ipfw_modevent(module_t mod, int type, void *unused)
{
	int err = 0;

	switch (type) {
	case MOD_LOAD:
		err = ipfw_init();
		break;

	case MOD_UNLOAD:
#ifndef KLD_MODULE
		kprintf("ipfw statically compiled, cannot unload\n");
		err = EBUSY;
#else
		err = ipfw_fini();
#endif
		break;

	default:
		break;
	}
	return err;
}

static moduledata_t ipfwmod = {
	"ipfw",
	ipfw_modevent,
	0
};
DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY);
MODULE_VERSION(ipfw, 1);
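
/*
 * A load/unload sketch through the kld(2) interface; the refcnt check
 * in ipfw_fini_dispatch() is what turns an in-use unload into EBUSY.
 * The calls below are userland, shown for illustration only:
 *
 *	int fileid = kldload("ipfw");	// MOD_LOAD -> ipfw_init()
 *	// ... install rules, pass traffic ...
 *	kldunload(fileid);		// MOD_UNLOAD -> ipfw_fini(),
 *					// EBUSY while refcnt != 0
 */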