ipfw: Remove context generation; the race it tries to fix no longer exists
[dragonfly.git] / sys / net / ipfw / ip_fw2.c
/*
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
 */

/*
 * Implement IP packet firewall (new version)
 */
#include "opt_ipfw.h"
#include "opt_inet.h"
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ucred.h>
#include <sys/in_cksum.h>
#include <sys/lock.h>

#include <net/if.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/dummynet/ip_dummynet.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/ip_divert.h>
#include <netinet/if_ether.h>	/* XXX for ETHERTYPE_IP */

#include <net/ipfw/ip_fw2.h>
#ifdef IPFIREWALL_DEBUG
#define DPRINTF(fmt, ...) \
do { \
	if (fw_debug > 0) \
		kprintf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(fmt, ...)	((void)0)
#endif
/*
 * Description about per-CPU rule duplication:
 *
 * Module loading/unloading and all ioctl operations are serialized
 * by netisr0, so we don't have any ordering or locking problems.
 *
 * Following graph shows how operation on per-CPU rule list is
 * performed [2 CPU case]:
 *
 *   CPU0                 CPU1
 *
 *   netisr0 <------------------------------------+
 *     domsg                                      |
 *       :                                        |
 *       :(delete/add...)                         |
 *       :                                        |
 *       :         netmsg                         | netmsg
 *     forwardmsg---------->netisr1               |
 *                              :                 |
 *                              :(delete/add...)  |
 *                              :                 |
 *                              :                 |
 *                            replymsg------------+
 *
 *
 *
 * Rules which will not create states (dyn rules) [2 CPU case]
 *
 *    CPU0               CPU1
 *
 * layer3_chain       layer3_chain
 *     |                  |
 *     V                  V
 * +-------+ sibling  +-------+ sibling
 * | rule1 |--------->| rule1 |--------->NULL
 * +-------+          +-------+
 *     |                  |
 *     |next              |next
 *     V                  V
 * +-------+ sibling  +-------+ sibling
 * | rule2 |--------->| rule2 |--------->NULL
 * +-------+          +-------+
 *
 * ip_fw.sibling:
 * 1) Ease statistics calculation during IP_FW_GET.  We only need to
 *    iterate layer3_chain in netisr0; the current rule's duplication
 *    to the other CPUs could safely be read-only accessed through
 *    ip_fw.sibling.
 * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
 *    a) In netisr0 rule3 is determined to be inserted between rule1
 *       and rule2.  To make this decision we need to iterate the
 *       layer3_chain in netisr0.  The netmsg, which is used to insert
 *       the rule, will contain rule1 in netisr0 as prev_rule and rule2
 *       in netisr0 as next_rule.
 *    b) After the insertion in netisr0 is done, we will move on to
 *       netisr1.  But instead of relocating the rule3's position in
 *       netisr1 by iterating the layer3_chain in netisr1, we set the
 *       netmsg's prev_rule to rule1->sibling and next_rule to
 *       rule2->sibling before the netmsg is forwarded to netisr1 from
 *       netisr0.
 *
 *
 *
 * Rules which will create states (dyn rules) [2 CPU case]
 * (unnecessary parts are omitted; they are the same as in the previous figure)
 *
 *    CPU0                       CPU1
 *
 * +-------+                 +-------+
 * | rule1 |                 | rule1 |
 * +-------+                 +-------+
 *   ^   |                     |   ^
 *   |   |stub             stub|   |
 *   |   |                     |   |
 *   |   +----+           +----+   |
 *   |        |           |        |
 *   |        V           V        |
 *   |    +--------------------+   |
 *   |    |     rule_stub      |   |
 *   |    | (read-only shared) |   |
 *   |    |                    |   |
 *   |    | back pointer array |   |
 *   |    | (indexed by cpuid) |   |
 *   |    |                    |   |
 *   +----|---------[0]        |   |
 *        |         [1]--------|---+
 *        |                    |
 *        +--------------------+
 *          ^            ^
 *          |            |
 *  ........|............|............
 *  :       |            |           :
 *  :       |stub        |stub       :
 *  :       |            |           :
 *  :  +---------+  +---------+      :
 *  :  | state1a |  | state1b | .... :
 *  :  +---------+  +---------+      :
 *  :                                :
 *  :          states table          :
 *  :            (shared)            :
 *  :     (protected by dyn_lock)    :
 *  ..................................
 *
 * [state1a and state1b are states created by rule1]
 *
 * ip_fw_stub:
 * This structure is introduced so that shared (locked) state table could
 * work with per-CPU (duplicated) static rules.  It mainly bridges states
 * and static rules and serves as static rule's place holder (a read-only
 * shared part of duplicated rules) from states point of view.
 *
 * IPFW_RULE_F_STATE (only for rules which create states):
 * o  During rule installation, this flag is turned on after rule's
 *    duplications reach all CPUs, to avoid at least the following race:
 *    1) rule1 is duplicated on CPU0 and is not duplicated on CPU1 yet
 *    2) rule1 creates state1
 *    3) state1 is located on CPU1 by check-state
 *    But rule1 is not duplicated on CPU1 yet
 * o  During rule deletion, this flag is turned off before deleting states
 *    created by the rule and before deleting the rule itself, so no
 *    more states will be created by the to-be-deleted rule even when its
 *    duplication on certain CPUs are not eliminated yet.
 */
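/*
 * Illustrative sketch (not part of the original file): how point 1) of
 * the ip_fw.sibling notes above is typically exploited.  From netisr0
 * the layer3_chain is walked once, and each rule's per-CPU duplicates
 * are reached through the read-only sibling links to sum their
 * counters.  This is a hypothetical helper; it assumes ip_fw carries
 * pcnt/bcnt counter fields as used elsewhere in this file.
 */
#if 0
static void
ipfw_sum_counters(const struct ip_fw *rule0, uint64_t *pcnt, uint64_t *bcnt)
{
	const struct ip_fw *rule;

	*pcnt = *bcnt = 0;
	/* rule0 lives in netisr0; its duplicates hang off ->sibling */
	for (rule = rule0; rule != NULL; rule = rule->sibling) {
		*pcnt += rule->pcnt;
		*bcnt += rule->bcnt;
	}
}
#endif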
#define IPFW_AUTOINC_STEP_MIN	1
#define IPFW_AUTOINC_STEP_MAX	1000
#define IPFW_AUTOINC_STEP_DEF	100

#define IPFW_DEFAULT_RULE	65535	/* rulenum for the default rule */
#define IPFW_DEFAULT_SET	31	/* set number for the default rule */

struct netmsg_ipfw {
	struct netmsg_base	base;
	const struct ipfw_ioc_rule *ioc_rule;
	struct ip_fw		*next_rule;
	struct ip_fw		*prev_rule;
	struct ip_fw		*sibling;
	struct ip_fw_stub	*stub;
};
struct netmsg_del {
	struct netmsg_base	base;
	struct ip_fw		*start_rule;
	struct ip_fw		*prev_rule;
	uint16_t		rulenum;
	uint8_t			from_set;
	uint8_t			to_set;
};

struct netmsg_zent {
	struct netmsg_base	base;
	struct ip_fw		*start_rule;
	uint16_t		rulenum;
	uint16_t		log_only;
};
struct ipfw_context {
	struct ip_fw	*ipfw_layer3_chain;	/* list of rules for layer3 */
	struct ip_fw	*ipfw_default_rule;	/* default rule */
	uint64_t	ipfw_norule_counter;	/* counter for ipfw_log(NULL) */

	/*
	 * ipfw_set_disable contains one bit per set value (0..31).
	 * If the bit is set, all rules with the corresponding set
	 * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
	 * default rule and CANNOT be disabled.
	 */
	uint32_t	ipfw_set_disable;
};
static struct ipfw_context *ipfw_ctx[MAXCPU];

#ifdef KLD_MODULE
/*
 * Module can not be unloaded if there are references to
 * certain rules of ipfw(4), e.g. from dummynet(4).
 */
static int ipfw_refcnt;
#endif

MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
/*
 * Following two global variables are accessed and updated only
 * in netisr0.
 */
static uint32_t static_count;	/* # of static rules */
static uint32_t static_ioc_len;	/* bytes of static rules */

/*
 * If 1, then ipfw static rules are being flushed and
 * ipfw_chk() will skip to the default rule.
 */
static int ipfw_flushing;

static int fw_verbose;
static int verbose_limit;

static int fw_debug;
static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
static int	ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
    &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
    "Rule number autoincrement step");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, CTLFLAG_RW,
    &fw_one_pass, 0,
    "Only do a single pass through ipfw when using dummynet(4)");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
    &fw_debug, 0, "Enable printing of debug ip_fw statements");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
    &fw_verbose, 0, "Log matches to ipfw rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
    &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
/*
 * Description of dynamic rules.
 *
 * Dynamic rules are stored in lists accessed through a hash table
 * (ipfw_dyn_v) whose size is curr_dyn_buckets.  This value can
 * be modified through the sysctl variable dyn_buckets, which takes
 * effect when the table becomes empty.
 *
 * XXX currently there is only one list, ipfw_dyn.
 *
 * When a packet is received, its address fields are first masked
 * with the mask defined for the rule, then hashed, then matched
 * against the entries in the corresponding list.
 * Dynamic rules can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic rules is stored in dyn_count.
 * The max number of dynamic rules is dyn_max.  When we reach
 * the maximum number of rules we do not create anymore.  This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each dynamic rule holds a pointer to the parent ipfw rule so
 * we know what action to perform.  Dynamic rules are removed when
 * the parent rule is deleted.  XXX we should make them survive.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall.  XXX check the latter!!!
 *
 * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
 * Only a TCP state transition will change a dynamic rule's state and ack
 * sequences, and all packets of one TCP connection go through
 * one TCP thread, so it is safe to use a shared lockmgr lock during
 * dynamic rule lookup.  The keepalive callout uses an exclusive lockmgr
 * lock when it tries to find suitable dynamic rules to send keepalives,
 * so it will not see half-updated state and ack sequences.  Though the
 * expire field updating looks racy for other protocols, the resolution
 * (seconds) of the expire field makes this kind of race harmless.
 * XXX statistics' updating is _not_ MPsafe!!!
 * XXX once UDP output path is fixed, we could use a lockless dynamic rule
 * hash table
 */
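/*
 * Illustrative sketch (not part of the original file): the locking
 * discipline described in the NOTE above, shown as out-of-context
 * fragments.  Packet-path readers take dyn_lock shared, because only
 * the owning TCP thread mutates a given flow's state; the keepalive
 * and expiry paths take it exclusive because they may unlink entries.
 * This mirrors lookup_rule() and install_state() later in this file.
 */
#if 0
	/* packet path: shared suffices, per-flow writes are serialized */
	lockmgr(&dyn_lock, LK_SHARED);
	q = lookup_dyn_rule(&args->f_id, &dir, tcp);
	lockmgr(&dyn_lock, LK_RELEASE);

	/* keepalive/expire path: exclusive, may unlink entries */
	lockmgr(&dyn_lock, LK_EXCLUSIVE);
	remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
	lockmgr(&dyn_lock, LK_RELEASE);
#endif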
static ipfw_dyn_rule **ipfw_dyn_v = NULL;
static uint32_t dyn_buckets = 256;	/* must be power of 2 */
static uint32_t curr_dyn_buckets = 256;	/* must be power of 2 */
static uint32_t dyn_buckets_gen;	/* generation of dyn buckets array */
static struct lock dyn_lock;		/* dynamic rules' hash table lock */

static struct netmsg_base ipfw_timeout_netmsg; /* schedule ipfw timeout */
static struct callout ipfw_timeout_h;

/*
 * Timeouts for various events in handling dynamic rules.
 */
static uint32_t dyn_ack_lifetime = 300;
static uint32_t dyn_syn_lifetime = 20;
static uint32_t dyn_fin_lifetime = 1;
static uint32_t dyn_rst_lifetime = 1;
static uint32_t dyn_udp_lifetime = 10;
static uint32_t dyn_short_lifetime = 5;

/*
 * Keepalives are sent if dyn_keepalive is set.  They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
 */
static uint32_t dyn_keepalive_interval = 20;
static uint32_t dyn_keepalive_period = 5;
static uint32_t dyn_keepalive = 1;	/* do send keepalives */

static uint32_t dyn_count;		/* # of dynamic rules */
static uint32_t dyn_max = 4096;		/* max # of dynamic rules */
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
    &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
    &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
    &dyn_count, 0, "Number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
    &dyn_max, 0, "Max number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
    &static_count, 0, "Number of static rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
    &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
    &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
    "Lifetime of dyn. rules for fin");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
    "Lifetime of dyn. rules for rst");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
    &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
    &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
    &dyn_keepalive, 0, "Enable keepalives for dyn. rules");

static ip_fw_chk_t	ipfw_chk;
static void		ipfw_tick(void *);
static __inline int
ipfw_free_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d", mycpuid));
	KASSERT(rule->refcnt > 0, ("invalid refcnt %u", rule->refcnt));
	rule->refcnt--;
	if (rule->refcnt == 0) {
		kfree(rule, M_IPFW);
		return 1;
	}
	return 0;
}

static void
ipfw_unref_rule(void *priv)
{
	ipfw_free_rule(priv);
#ifdef KLD_MODULE
	atomic_subtract_int(&ipfw_refcnt, 1);
#endif
}
static __inline void
ipfw_ref_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d", mycpuid));
#ifdef KLD_MODULE
	atomic_add_int(&ipfw_refcnt, 1);
#endif
	rule->refcnt++;
}
/*
 * This macro maps an ip pointer into a layer3 header pointer of type T
 */
#define L3HDR(T, ip)	((T *)((uint32_t *)(ip) + (ip)->ip_hl))

static __inline int
icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
{
	int type = L3HDR(struct icmp, ip)->icmp_type;

	return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
}

#define TT	((1 << ICMP_ECHO) | \
		 (1 << ICMP_ROUTERSOLICIT) | \
		 (1 << ICMP_TSTAMP) | \
		 (1 << ICMP_IREQ) | \
		 (1 << ICMP_MASKREQ))

static int
is_icmp_query(struct ip *ip)
{
	int type = L3HDR(struct icmp, ip)->icmp_type;

	return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
}

#undef TT
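/*
 * Worked example (not part of the original file): TT marks the ICMP
 * "query" types with one bit per type, so membership is a single
 * shift-and-AND.  For an ICMP echo request (ICMP_ECHO == 8):
 *
 *	TT & (1 << 8) != 0	-> is_icmp_query() returns true
 *
 * whereas for a destination-unreachable (type 3) the bit is clear and
 * the function returns false.  icmptype_match() applies the same scheme
 * to a user-supplied 32-bit mask in cmd->d[0].
 */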
/*
 * The following checks use two arrays of 8 or 16 bits to store the
 * bits that we want set or clear, respectively.  They are in the
 * low and high half of cmd->arg1 or cmd->d[0].
 *
 * We scan options and store the bits we find set.  We succeed if
 *
 *	(want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
 *
 * The code is sometimes optimized not to store additional variables.
 */
static int
flags_match(ipfw_insn *cmd, uint8_t bits)
{
	u_char want_clear;

	bits = ~bits;

	if (((cmd->arg1 & 0xff) & bits) != 0)
		return 0;	/* some bits we want set were clear */

	want_clear = (cmd->arg1 >> 8) & 0xff;
	if ((want_clear & bits) != want_clear)
		return 0;	/* some bits we want clear were set */
	return 1;
}
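/*
 * Worked example (not part of the original file): a rule like
 * "tcpflags syn,!ack" encodes want_set = TH_SYN in the low byte of
 * arg1 and want_clear = TH_ACK in the high byte.  For a packet with
 * th_flags = TH_SYN:
 *
 *	bits = ~TH_SYN
 *	(TH_SYN & ~TH_SYN) == 0			-> wanted-set bits present
 *	(TH_ACK & ~TH_SYN) == TH_ACK		-> wanted-clear bits absent
 *
 * so the match succeeds; a SYN|ACK packet fails the second test
 * because TH_ACK & ~(TH_SYN|TH_ACK) == 0 != TH_ACK.
 */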
static int
ipopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	u_char *cp = (u_char *)(ip + 1);
	int x = (ip->ip_hl << 2) - sizeof(struct ip);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[IPOPT_OPTVAL];

		if (opt == IPOPT_EOL)
			break;

		if (opt == IPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[IPOPT_OLEN];
			if (optlen <= 0 || optlen > x)
				return 0;	/* invalid or truncated */
		}

		switch (opt) {
		case IPOPT_LSRR:
			bits |= IP_FW_IPOPT_LSRR;
			break;

		case IPOPT_SSRR:
			bits |= IP_FW_IPOPT_SSRR;
			break;

		case IPOPT_RR:
			bits |= IP_FW_IPOPT_RR;
			break;

		case IPOPT_TS:
			bits |= IP_FW_IPOPT_TS;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}
static int
tcpopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	struct tcphdr *tcp = L3HDR(struct tcphdr, ip);
	u_char *cp = (u_char *)(tcp + 1);
	int x = (tcp->th_off << 2) - sizeof(struct tcphdr);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[0];

		if (opt == TCPOPT_EOL)
			break;

		if (opt == TCPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[1];
			if (optlen <= 0)
				break;
		}

		switch (opt) {
		case TCPOPT_MAXSEG:
			bits |= IP_FW_TCPOPT_MSS;
			break;

		case TCPOPT_WINDOW:
			bits |= IP_FW_TCPOPT_WINDOW;
			break;

		case TCPOPT_SACK_PERMITTED:
		case TCPOPT_SACK:
			bits |= IP_FW_TCPOPT_SACK;
			break;

		case TCPOPT_TIMESTAMP:
			bits |= IP_FW_TCPOPT_TS;
			break;

		case TCPOPT_CC:
		case TCPOPT_CCNEW:
		case TCPOPT_CCECHO:
			bits |= IP_FW_TCPOPT_CC;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}
static int
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
{
	if (ifp == NULL)	/* no iface with this packet, match fails */
		return 0;

	/* Check by name or by IP address */
	if (cmd->name[0] != '\0') { /* match by name */
		/* Check name */
		if (cmd->p.glob) {
			if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
				return(1);
		} else {
			if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
				return(1);
		}
	} else {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ia = ifac->ifa;

			if (ia->ifa_addr == NULL)
				continue;
			if (ia->ifa_addr->sa_family != AF_INET)
				continue;
			if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
			    (ia->ifa_addr))->sin_addr.s_addr)
				return(1);	/* match */
		}
	}
	return(0);	/* no match, fail ... */
}
#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
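/*
 * Usage note (not part of the original file): SNPARGS expands to the
 * "buffer, size" argument pair for ksnprintf(), offset by the bytes
 * already written, so successive calls append safely and degrade to a
 * zero-size write once the buffer is full.  A minimal sketch of the
 * pattern used throughout ipfw_log() below; src and sport are
 * placeholders, not real variables in this file:
 */
#if 0
	char proto[48];
	int len;

	len = ksnprintf(SNPARGS(proto, 0), "TCP %s", src);	/* start */
	ksnprintf(SNPARGS(proto, len), ":%d", sport);		/* append */
#endif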
/*
 * We enter here when we have a rule with O_LOG.
 * XXX this function alone takes about 2Kbytes of code!
 */
static void
ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
	 struct mbuf *m, struct ifnet *oif)
{
	char *action;
	int limit_reached = 0;
	char action2[40], proto[48], fragment[28], abuf[INET_ADDRSTRLEN];

	fragment[0] = '\0';
	proto[0] = '\0';

	if (f == NULL) {	/* bogus pkt */
		struct ipfw_context *ctx = ipfw_ctx[mycpuid];

		if (verbose_limit != 0 &&
		    ctx->ipfw_norule_counter >= verbose_limit)
			return;
		ctx->ipfw_norule_counter++;
		if (ctx->ipfw_norule_counter == verbose_limit)
			limit_reached = verbose_limit;
		action = "Refuse";
	} else {	/* O_LOG is the first action, find the real one */
		ipfw_insn *cmd = ACTION_PTR(f);
		ipfw_insn_log *l = (ipfw_insn_log *)cmd;

		if (l->max_log != 0 && l->log_left == 0)
			return;
		l->log_left--;
		if (l->log_left == 0)
			limit_reached = l->max_log;
		cmd += F_LEN(cmd);	/* point to first action */
		if (cmd->opcode == O_PROB)
			cmd += F_LEN(cmd);

		action = action2;
		switch (cmd->opcode) {
		case O_DENY:
			action = "Deny";
			break;

		case O_REJECT:
			if (cmd->arg1 == ICMP_REJECT_RST) {
				action = "Reset";
			} else if (cmd->arg1 == ICMP_UNREACH_HOST) {
				action = "Reject";
			} else {
				ksnprintf(SNPARGS(action2, 0), "Unreach %d",
					  cmd->arg1);
			}
			break;

		case O_ACCEPT:
			action = "Accept";
			break;

		case O_COUNT:
			action = "Count";
			break;

		case O_DIVERT:
			ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
			break;

		case O_TEE:
			ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
			break;

		case O_SKIPTO:
			ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
			break;

		case O_PIPE:
			ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
			break;

		case O_QUEUE:
			ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
			break;

		case O_FORWARD_IP:
		    {
			ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
			int len;

			len = ksnprintf(SNPARGS(action2, 0),
					"Forward to %s",
					kinet_ntoa(sa->sa.sin_addr, abuf));
			if (sa->sa.sin_port) {
				ksnprintf(SNPARGS(action2, len), ":%d",
					  sa->sa.sin_port);
			}
		    }
			break;

		default:
			action = "UNKNOWN";
			break;
		}
	}

	if (hlen == 0) {	/* non-ip */
		ksnprintf(SNPARGS(proto, 0), "MAC");
	} else {
		struct ip *ip = mtod(m, struct ip *);
		/* these three are all aliases to the same thing */
		struct icmp *const icmp = L3HDR(struct icmp, ip);
		struct tcphdr *const tcp = (struct tcphdr *)icmp;
		struct udphdr *const udp = (struct udphdr *)icmp;

		int ip_off, offset, ip_len;
		int len;

		if (eh != NULL) { /* layer 2 packets are as on the wire */
			ip_off = ntohs(ip->ip_off);
			ip_len = ntohs(ip->ip_len);
		} else {
			ip_off = ip->ip_off;
			ip_len = ip->ip_len;
		}
		offset = ip_off & IP_OFFMASK;
		switch (ip->ip_p) {
		case IPPROTO_TCP:
			len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
					kinet_ntoa(ip->ip_src, abuf));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(tcp->th_sport),
					  kinet_ntoa(ip->ip_dst, abuf),
					  ntohs(tcp->th_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  kinet_ntoa(ip->ip_dst, abuf));
			}
			break;

		case IPPROTO_UDP:
			len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
					kinet_ntoa(ip->ip_src, abuf));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(udp->uh_sport),
					  kinet_ntoa(ip->ip_dst, abuf),
					  ntohs(udp->uh_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  kinet_ntoa(ip->ip_dst, abuf));
			}
			break;

		case IPPROTO_ICMP:
			if (offset == 0) {
				len = ksnprintf(SNPARGS(proto, 0),
						"ICMP:%u.%u ",
						icmp->icmp_type,
						icmp->icmp_code);
			} else {
				len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
			}
			len += ksnprintf(SNPARGS(proto, len), "%s",
					 kinet_ntoa(ip->ip_src, abuf));
			ksnprintf(SNPARGS(proto, len), " %s",
				  kinet_ntoa(ip->ip_dst, abuf));
			break;

		default:
			len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
					kinet_ntoa(ip->ip_src, abuf));
			ksnprintf(SNPARGS(proto, len), " %s",
				  kinet_ntoa(ip->ip_dst, abuf));
			break;
		}

		if (ip_off & (IP_MF | IP_OFFMASK)) {
			ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
				  ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
				  offset << 3, (ip_off & IP_MF) ? "+" : "");
		}
	}

	if (oif || m->m_pkthdr.rcvif) {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s %s via %s%s\n",
		    f ? f->rulenum : -1,
		    action, proto, oif ? "out" : "in",
		    oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
		    fragment);
	} else {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s [no if info]%s\n",
		    f ? f->rulenum : -1,
		    action, proto, fragment);
	}

	if (limit_reached) {
		log(LOG_SECURITY | LOG_NOTICE,
		    "ipfw: limit %d reached on entry %d\n",
		    limit_reached, f ? f->rulenum : -1);
	}
}

#undef SNPARGS
/*
 * IMPORTANT: the hash function for dynamic rules must be commutative
 * in source and destination (ip, port), because rules are bidirectional
 * and we want to find both in the same bucket.
 */
static __inline int
hash_packet(struct ipfw_flow_id *id)
{
	uint32_t i;

	i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
	i &= (curr_dyn_buckets - 1);
	return i;
}
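/*
 * Illustrative note (not part of the original file): XOR is commutative,
 * so swapping (src_ip, src_port) with (dst_ip, dst_port) leaves the hash
 * unchanged, which is exactly the property required above:
 *
 *	hash(A,a -> B,b) == A ^ B ^ a ^ b == hash(B,b -> A,a)
 *
 * The final mask also relies on curr_dyn_buckets being a power of 2,
 * so "i & (curr_dyn_buckets - 1)" equals "i % curr_dyn_buckets";
 * realloc_dynamic_table() below maintains that invariant.
 */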
/*
 * Unlink a dynamic rule from a chain.  prev is a pointer to
 * the previous one, q is a pointer to the rule to delete,
 * head is a pointer to the head of the queue.
 * Modifies q and potentially also head.
 */
#define UNLINK_DYN_RULE(prev, head, q)					\
do {									\
	ipfw_dyn_rule *old_q = q;					\
									\
	/* remove a refcount to the parent */				\
	if (q->dyn_type == O_LIMIT)					\
		q->parent->count--;					\
	DPRINTF("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",	\
		q->id.src_ip, q->id.src_port,				\
		q->id.dst_ip, q->id.dst_port, dyn_count - 1);		\
	if (prev != NULL)						\
		prev->next = q = q->next;				\
	else								\
		head = q = q->next;					\
	KASSERT(dyn_count > 0, ("invalid dyn count %u", dyn_count));	\
	dyn_count--;							\
	kfree(old_q, M_IPFW);						\
} while (0)
#define TIME_LEQ(a, b)	((int)((a) - (b)) <= 0)
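/*
 * Illustrative note (not part of the original file): TIME_LEQ compares
 * through a signed difference so it stays correct if the time counter
 * ever wraps.  E.g. with 32-bit times, a = 0xfffffffe and b = 0x00000001
 * gives (int)(a - b) == -3, so TIME_LEQ(a, b) is still true even though
 * a > b as an unsigned value.
 */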
/*
 * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
 *
 * If keep_me == NULL, rules are deleted even if not expired,
 * otherwise only expired rules are removed.
 *
 * The value of the second parameter is also used to identify
 * a rule we absolutely do not want to remove (e.g. because we are
 * holding a reference to it -- this is the case with O_LIMIT_PARENT
 * rules).  The pointer is only used for comparison, so any non-null
 * value will do.
 */
static void
remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
{
	static time_t last_remove = 0; /* XXX */

#define FORCE	(keep_me == NULL)

	ipfw_dyn_rule *prev, *q;
	int i, pass = 0, max_pass = 0, unlinked = 0;

	if (ipfw_dyn_v == NULL || dyn_count == 0)
		return;
	/* do not expire more than once per second, it is useless */
	if (!FORCE && last_remove == time_uptime)
		return;
	last_remove = time_uptime;

	/*
	 * because O_LIMIT refers to parent rules, during the first pass only
	 * remove child and mark any pending LIMIT_PARENT, and remove
	 * them in a second pass.
	 */
next_pass:
	for (i = 0; i < curr_dyn_buckets; i++) {
		for (prev = NULL, q = ipfw_dyn_v[i]; q;) {
			/*
			 * Logic can become complex here, so we split tests.
			 */
			if (q == keep_me)
				goto next;
			if (rule != NULL && rule->stub != q->stub)
				goto next; /* not the one we are looking for */
			if (q->dyn_type == O_LIMIT_PARENT) {
				/*
				 * handle parent in the second pass,
				 * record we need one.
				 */
				max_pass = 1;
				if (pass == 0)
					goto next;
				if (FORCE && q->count != 0) {
					/* XXX should not happen! */
					kprintf("OUCH! cannot remove rule, "
						"count %d\n", q->count);
				}
			} else {
				if (!FORCE &&
				    !TIME_LEQ(q->expire, time_second))
					goto next;
			}
			unlinked = 1;
			UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
			continue;
next:
			prev = q;
			q = q->next;
		}
	}
	if (pass++ < max_pass)
		goto next_pass;

	if (unlinked)
		++dyn_buckets_gen;

#undef FORCE
}
/*
 * Lookup a dynamic rule.
 */
static ipfw_dyn_rule *
lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
		struct tcphdr *tcp)
{
	/*
	 * stateful ipfw extensions.
	 * Lookup into dynamic session queue
	 */
#define MATCH_REVERSE	0
#define MATCH_FORWARD	1
#define MATCH_NONE	2
#define MATCH_UNKNOWN	3
	int i, dir = MATCH_NONE;
	ipfw_dyn_rule *q = NULL;

	if (ipfw_dyn_v == NULL)
		goto done;	/* not found */

	i = hash_packet(pkt);
	for (q = ipfw_dyn_v[i]; q != NULL;) {
		if (q->dyn_type == O_LIMIT_PARENT)
			goto next;

		if (TIME_LEQ(q->expire, time_second)) {
			/*
			 * Entry expired; skip.
			 * Let ipfw_tick() take care of it
			 */
			goto next;
		}

		if (pkt->proto == q->id.proto) {
			if (pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				dir = MATCH_FORWARD;
				break;
			}
			if (pkt->src_ip == q->id.dst_ip &&
			    pkt->dst_ip == q->id.src_ip &&
			    pkt->src_port == q->id.dst_port &&
			    pkt->dst_port == q->id.src_port) {
				dir = MATCH_REVERSE;
				break;
			}
		}
next:
		q = q->next;
	}
	if (q == NULL)
		goto done;	/* q = NULL, not found */

	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
		u_char flags = pkt->flags & (TH_FIN | TH_SYN | TH_RST);

#define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define BOTH_FIN	(TH_FIN | (TH_FIN << 8))

		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
		switch (q->state) {
		case TH_SYN:	/* opening */
			q->expire = time_second + dyn_syn_lifetime;
			break;

		case BOTH_SYN:			/* move to established */
		case BOTH_SYN | TH_FIN:		/* one side tries to close */
		case BOTH_SYN | (TH_FIN << 8):
			if (tcp) {
				uint32_t ack = ntohl(tcp->th_ack);

#define _SEQ_GE(a, b)	((int)(a) - (int)(b) >= 0)

				if (dir == MATCH_FORWARD) {
					if (q->ack_fwd == 0 ||
					    _SEQ_GE(ack, q->ack_fwd))
						q->ack_fwd = ack;
					else /* ignore out-of-sequence */
						break;
				} else {
					if (q->ack_rev == 0 ||
					    _SEQ_GE(ack, q->ack_rev))
						q->ack_rev = ack;
					else /* ignore out-of-sequence */
						break;
				}
#undef _SEQ_GE
			}
			q->expire = time_second + dyn_ack_lifetime;
			break;

		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
			KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_fin_lifetime;
			break;

		default:
#if 0
			/*
			 * reset or some invalid combination, but can also
			 * occur if we use keep-state the wrong way.
			 */
			if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
				kprintf("invalid state: 0x%x\n", q->state);
#endif
			KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_rst_lifetime;
			break;
		}
	} else if (pkt->proto == IPPROTO_UDP) {
		q->expire = time_second + dyn_udp_lifetime;
	} else {
		/* other protocols */
		q->expire = time_second + dyn_short_lifetime;
	}
done:
	if (match_direction)
		*match_direction = dir;
	return q;
}
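/*
 * Illustrative note (not part of the original file): q->state packs the
 * flags seen in the forward direction into its low byte and those seen
 * in the reverse direction into the high byte (flags << 8).  So for a
 * normal connection (the ACK bit is masked off above):
 *
 *	client SYN (forward)  -> state == TH_SYN             ("opening")
 *	server SYN (reverse)  -> state == BOTH_SYN           (established)
 *	one FIN    (either)   -> state == BOTH_SYN|TH_FIN or
 *	                                  BOTH_SYN|(TH_FIN << 8)
 *	second FIN            -> state == BOTH_SYN|BOTH_FIN  (closed)
 *
 * and each transition selects the matching dyn_*_lifetime for q->expire.
 */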
static struct ip_fw *
lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
	    uint16_t len)
{
	struct ip_fw *rule = NULL;
	ipfw_dyn_rule *q;

	lockmgr(&dyn_lock, LK_SHARED);
	q = lookup_dyn_rule(pkt, match_direction, tcp);
	if (q == NULL) {
		rule = NULL;
	} else {
		rule = q->stub->rule[mycpuid];
		KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);

		/* XXX */
		q->pcnt++;
		q->bcnt += len;
	}
	lockmgr(&dyn_lock, LK_RELEASE);
	return rule;
}
static void
realloc_dynamic_table(void)
{
	ipfw_dyn_rule **old_dyn_v;
	uint32_t old_curr_dyn_buckets;

	KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
		("invalid dyn_buckets %d", dyn_buckets));

	/* Save the current buckets array for later error recovery */
	old_dyn_v = ipfw_dyn_v;
	old_curr_dyn_buckets = curr_dyn_buckets;

	curr_dyn_buckets = dyn_buckets;
	for (;;) {
		ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
				     M_IPFW, M_NOWAIT | M_ZERO);
		if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
			break;

		curr_dyn_buckets /= 2;
		if (curr_dyn_buckets <= old_curr_dyn_buckets &&
		    old_dyn_v != NULL) {
			/*
			 * Don't try allocating smaller buckets array, reuse
			 * the old one, which already contains enough buckets
			 */
			break;
		}
	}

	if (ipfw_dyn_v != NULL) {
		if (old_dyn_v != NULL)
			kfree(old_dyn_v, M_IPFW);
	} else {
		/* Allocation failed, restore old buckets array */
		ipfw_dyn_v = old_dyn_v;
		curr_dyn_buckets = old_curr_dyn_buckets;
	}

	if (ipfw_dyn_v != NULL)
		++dyn_buckets_gen;
}
/*
 * Install state of type 'type' for a dynamic session.
 * The hash table contains two types of rules:
 * - regular rules (O_KEEP_STATE)
 * - rules for sessions with limited number of sess per user
 *   (O_LIMIT).  When they are created, the parent is
 *   increased by 1, and decreased on delete.  In this case,
 *   the third parameter is the parent rule and not the chain.
 * - "parent" rules for the above (O_LIMIT_PARENT).
 */
static ipfw_dyn_rule *
add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
{
	ipfw_dyn_rule *r;
	int i;

	if (ipfw_dyn_v == NULL ||
	    (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
		realloc_dynamic_table();
		if (ipfw_dyn_v == NULL)
			return NULL; /* failed ! */
	}
	i = hash_packet(id);

	r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
	if (r == NULL)
		return NULL;

	/* increase refcount on parent, and set pointer */
	if (dyn_type == O_LIMIT) {
		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;

		if (parent->dyn_type != O_LIMIT_PARENT)
			panic("invalid parent");
		parent->count++;
		r->parent = parent;
		rule = parent->stub->rule[mycpuid];
		KKASSERT(rule->stub == parent->stub);
	}
	KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);

	r->id = *id;
	r->expire = time_second + dyn_syn_lifetime;
	r->stub = rule->stub;
	r->dyn_type = dyn_type;
	r->pcnt = r->bcnt = 0;
	r->count = 0;

	r->bucket = i;
	r->next = ipfw_dyn_v[i];
	ipfw_dyn_v[i] = r;
	dyn_count++;
	dyn_buckets_gen++;
	DPRINTF("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
		dyn_type,
		r->id.src_ip, r->id.src_port,
		r->id.dst_ip, r->id.dst_port, dyn_count);
	return r;
}
/*
 * Lookup dynamic parent rule using pkt and rule as search keys.
 * If the lookup fails, then install one.
 */
static ipfw_dyn_rule *
lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
{
	ipfw_dyn_rule *q;
	int i;

	if (ipfw_dyn_v) {
		i = hash_packet(pkt);
		for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
			if (q->dyn_type == O_LIMIT_PARENT &&
			    rule->stub == q->stub &&
			    pkt->proto == q->id.proto &&
			    pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				q->expire = time_second + dyn_short_lifetime;
				DPRINTF("lookup_dyn_parent found 0x%p\n", q);
				return q;
			}
		}
	}
	return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
}
/*
 * Install dynamic state for rule type cmd->o.opcode
 *
 * Returns 1 (failure) if state is not installed because of errors or because
 * session limitations are enforced.
 */
static int
install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
		     struct ip_fw_args *args)
{
	static int last_log; /* XXX */

	ipfw_dyn_rule *q;

	DPRINTF("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
		cmd->o.opcode,
		args->f_id.src_ip, args->f_id.src_port,
		args->f_id.dst_ip, args->f_id.dst_port);

	q = lookup_dyn_rule(&args->f_id, NULL, NULL);
	if (q != NULL) { /* should never occur */
		if (last_log != time_second) {
			last_log = time_second;
			kprintf(" install_state: entry already present, done\n");
		}
		return 0;
	}

	if (dyn_count >= dyn_max) {
		/*
		 * Run out of slots, try to remove any expired rule.
		 */
		remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
		if (dyn_count >= dyn_max) {
			if (last_log != time_second) {
				last_log = time_second;
				kprintf("install_state: "
					"Too many dynamic rules\n");
			}
			return 1; /* cannot install, notify caller */
		}
	}

	switch (cmd->o.opcode) {
	case O_KEEP_STATE: /* bidir rule */
		if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL)
			return 1;
		break;

	case O_LIMIT: /* limit number of sessions */
	    {
		uint16_t limit_mask = cmd->limit_mask;
		struct ipfw_flow_id id;
		ipfw_dyn_rule *parent;

		DPRINTF("installing dyn-limit rule %d\n",
			cmd->conn_limit);

		id.dst_ip = id.src_ip = 0;
		id.dst_port = id.src_port = 0;
		id.proto = args->f_id.proto;

		if (limit_mask & DYN_SRC_ADDR)
			id.src_ip = args->f_id.src_ip;
		if (limit_mask & DYN_DST_ADDR)
			id.dst_ip = args->f_id.dst_ip;
		if (limit_mask & DYN_SRC_PORT)
			id.src_port = args->f_id.src_port;
		if (limit_mask & DYN_DST_PORT)
			id.dst_port = args->f_id.dst_port;

		parent = lookup_dyn_parent(&id, rule);
		if (parent == NULL) {
			kprintf("add parent failed\n");
			return 1;
		}

		if (parent->count >= cmd->conn_limit) {
			/*
			 * See if we can remove some expired rule.
			 */
			remove_dyn_rule_locked(rule, parent);
			if (parent->count >= cmd->conn_limit) {
				if (fw_verbose &&
				    last_log != time_second) {
					last_log = time_second;
					log(LOG_SECURITY | LOG_DEBUG,
					    "drop session, "
					    "too many entries\n");
				}
				return 1;
			}
		}
		if (add_dyn_rule(&args->f_id, O_LIMIT,
				 (struct ip_fw *)parent) == NULL)
			return 1;
	    }
		break;

	default:
		kprintf("unknown dynamic rule type %u\n", cmd->o.opcode);
		return 1;
	}
	lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
	return 0;
}

static int
install_state(struct ip_fw *rule, ipfw_insn_limit *cmd, struct ip_fw_args *args)
{
	int ret;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);
	ret = install_state_locked(rule, cmd, args);
	lockmgr(&dyn_lock, LK_RELEASE);

	return ret;
}
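/*
 * Worked example (not part of the original file): for a hypothetical
 * rule like "ipfw add allow tcp from any to me setup limit src-addr 4",
 * limit_mask has only DYN_SRC_ADDR set, so the search key built above
 * keeps just the source address:
 *
 *	id = { src_ip = args->f_id.src_ip, dst_ip = 0,
 *	       src_port = 0, dst_port = 0, proto = TCP }
 *
 * All sessions from one source therefore hash to the same
 * O_LIMIT_PARENT, whose ->count enforces the per-source cap of
 * cmd->conn_limit (4 in this example).
 */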
/*
 * Transmit a TCP packet, containing either a RST or a keepalive.
 * When flags & TH_RST, we are sending a RST packet, because a
 * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and flags & TH_SYN determines
 * the direction (see the NOTE in the keepalive branch below).
 */
static void
send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	struct route sro;	/* fake route */

	MGETHDR(m, M_NOWAIT, MT_HEADER);
	if (m == NULL)
		return;
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
	m->m_data += max_linkhdr;

	ip = mtod(m, struct ip *);
	bzero(ip, m->m_len);
	tcp = (struct tcphdr *)(ip + 1); /* no IP options */
	ip->ip_p = IPPROTO_TCP;
	tcp->th_off = 5;

	/*
	 * Assume we are sending a RST (or a keepalive in the reverse
	 * direction), swap src and destination addresses and ports.
	 */
	ip->ip_src.s_addr = htonl(id->dst_ip);
	ip->ip_dst.s_addr = htonl(id->src_ip);
	tcp->th_sport = htons(id->dst_port);
	tcp->th_dport = htons(id->src_port);
	if (flags & TH_RST) {	/* we are sending a RST */
		if (flags & TH_ACK) {
			tcp->th_seq = htonl(ack);
			tcp->th_ack = htonl(0);
			tcp->th_flags = TH_RST;
		} else {
			if (flags & TH_SYN)
				seq++;
			tcp->th_seq = htonl(0);
			tcp->th_ack = htonl(seq);
			tcp->th_flags = TH_RST | TH_ACK;
		}
	} else {
		/*
		 * We are sending a keepalive.  flags & TH_SYN determines
		 * the direction, forward if set, reverse if clear.
		 * NOTE: seq and ack are always assumed to be correct
		 * as set by the caller.  This may be confusing...
		 */
		if (flags & TH_SYN) {
			/*
			 * we have to rewrite the correct addresses!
			 */
			ip->ip_dst.s_addr = htonl(id->dst_ip);
			ip->ip_src.s_addr = htonl(id->src_ip);
			tcp->th_dport = htons(id->dst_port);
			tcp->th_sport = htons(id->src_port);
		}
		tcp->th_seq = htonl(seq);
		tcp->th_ack = htonl(ack);
		tcp->th_flags = TH_ACK;
	}

	/*
	 * set ip_len to the payload size so we can compute
	 * the tcp checksum on the pseudoheader
	 * XXX check this, could save a couple of words ?
	 */
	ip->ip_len = htons(sizeof(struct tcphdr));
	tcp->th_sum = in_cksum(m, m->m_pkthdr.len);

	/*
	 * now fill fields left out earlier
	 */
	ip->ip_ttl = ip_defttl;
	ip->ip_len = m->m_pkthdr.len;

	bzero(&sro, sizeof(sro));
	ip_rtaddr(ip->ip_dst, &sro);

	m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
	ip_output(m, NULL, &sro, 0, NULL, NULL);
	if (sro.ro_rt)
		RTFREE(sro.ro_rt);
}
/*
 * Send a reject message, consuming the mbuf passed as an argument.
 */
static void
send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
{
	if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
		/* We need the IP header in host order for icmp_error(). */
		if (args->eh != NULL) {
			struct ip *ip = mtod(args->m, struct ip *);

			ip->ip_len = ntohs(ip->ip_len);
			ip->ip_off = ntohs(ip->ip_off);
		}
		icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
	} else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
		struct tcphdr *const tcp =
		    L3HDR(struct tcphdr, mtod(args->m, struct ip *));

		if ((tcp->th_flags & TH_RST) == 0) {
			send_pkt(&args->f_id, ntohl(tcp->th_seq),
				 ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
		}
		m_freem(args->m);
	} else {
		m_freem(args->m);
	}
	args->m = NULL;
}
/*
 * Given an ip_fw *, lookup_next_rule will return a pointer
 * to the next rule, which can be either the jump
 * target (for skipto instructions) or the next one in the list (in
 * all other cases including a missing jump target).
 * The result is also written in the "next_rule" field of the rule.
 * Backward jumps are not allowed, so start looking from the next
 * rule...
 *
 * This never returns NULL -- in case we do not have an exact match,
 * the next rule is returned.  When the ruleset is changed,
 * pointers are flushed so we are always correct.
 */
static struct ip_fw *
lookup_next_rule(struct ip_fw *me)
{
	struct ip_fw *rule = NULL;
	ipfw_insn *cmd;

	/* look for action, in case it is a skipto */
	cmd = ACTION_PTR(me);
	if (cmd->opcode == O_LOG)
		cmd += F_LEN(cmd);
	if (cmd->opcode == O_SKIPTO) {
		for (rule = me->next; rule; rule = rule->next) {
			if (rule->rulenum >= cmd->arg1)
				break;
		}
	}
	if (rule == NULL)	/* failure or not a skipto */
		rule = me->next;
	me->next_rule = rule;
	return rule;
}
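/*
 * Illustrative note (not part of the original file): the cached
 * next_rule pointer turns repeated re-entries (e.g. after a divert)
 * into O(1) hops.  The first packet resuming after a "skipto 1000"
 * rule pays for the linear scan above; later packets take the cached
 * pointer in ipfw_chk() ("f = args->rule->next_rule").  Per the comment
 * above, the cached pointers are flushed whenever the ruleset changes,
 * so a stale jump target is never followed.
 */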
static int
ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
	       enum ipfw_opcodes opcode, uid_t uid)
{
	struct in_addr src_ip, dst_ip;
	struct inpcbinfo *pi;
	boolean_t wildcard;
	struct inpcb *pcb;

	if (fid->proto == IPPROTO_TCP) {
		wildcard = FALSE;
		pi = &tcbinfo[mycpuid];
	} else if (fid->proto == IPPROTO_UDP) {
		wildcard = TRUE;
		pi = &udbinfo[mycpuid];
	} else {
		return 0;
	}

	/*
	 * Values in 'fid' are in host byte order
	 */
	dst_ip.s_addr = htonl(fid->dst_ip);
	src_ip.s_addr = htonl(fid->src_ip);
	if (oif) {
		pcb = in_pcblookup_hash(pi,
			dst_ip, htons(fid->dst_port),
			src_ip, htons(fid->src_port),
			wildcard, oif);
	} else {
		pcb = in_pcblookup_hash(pi,
			src_ip, htons(fid->src_port),
			dst_ip, htons(fid->dst_port),
			wildcard, NULL);
	}
	if (pcb == NULL || pcb->inp_socket == NULL)
		return 0;

	if (opcode == O_UID) {
#define socheckuid(a, b)	((a)->so_cred->cr_uid != (b))
		return !socheckuid(pcb->inp_socket, uid);
#undef socheckuid
	} else {
		return groupmember(uid, pcb->inp_socket->so_cred);
	}
}
/*
 * The main check routine for the firewall.
 *
 * All arguments are in args so we can modify them and return them
 * back to the caller.
 *
 * Parameters:
 *
 *	args->m	(in/out) The packet; we set to NULL when/if we nuke it.
 *		Starts with the IP header.
 *	args->eh (in)	Mac header if present, or NULL for layer3 packet.
 *	args->oif	Outgoing interface, or NULL if packet is incoming.
 *		The incoming interface is in the mbuf. (in)
 *
 *	args->rule	Pointer to the last matching rule (in/out)
 *	args->f_id	Addresses grabbed from the packet (out)
 *
 * Return value:
 *
 *	If the packet was denied/rejected and has been dropped, *m is equal
 *	to NULL upon return.
 *
 *	IP_FW_DENY	the packet must be dropped.
 *	IP_FW_PASS	The packet is to be accepted and routed normally.
 *	IP_FW_DIVERT	Divert the packet to port (args->cookie)
 *	IP_FW_TEE	Tee the packet to port (args->cookie)
 *	IP_FW_DUMMYNET	Send the packet to pipe/queue (args->cookie)
 */
static int
ipfw_chk(struct ip_fw_args *args)
{
	/*
	 * Local variables hold state during the processing of a packet.
	 *
	 * IMPORTANT NOTE: to speed up the processing of rules, there
	 * are some assumptions on the values of the variables, which
	 * are documented here.  Should you change them, please check
	 * the implementation of the various instructions to make sure
	 * that they still work.
	 *
	 * args->eh	The MAC header.  It is non-null for a layer2
	 *	packet, it is NULL for a layer-3 packet.
	 *
	 * m | args->m	Pointer to the mbuf, as received from the caller.
	 *	It may change if ipfw_chk() does an m_pullup, or if it
	 *	consumes the packet because it calls send_reject().
	 *	XXX This has to change, so that ipfw_chk() never modifies
	 *	or consumes the buffer.
	 *	ip is simply an alias of the value of m, and it is kept
	 *	in sync with it (the packet is supposed to start with
	 *	the ip header).
	 */
	struct mbuf *m = args->m;
	struct ip *ip = mtod(m, struct ip *);

	/*
	 * oif | args->oif	If NULL, ipfw_chk has been called on the
	 *	inbound path (ether_input, ip_input).
	 *	If non-NULL, ipfw_chk has been called on the outbound path
	 *	(ether_output, ip_output).
	 */
	struct ifnet *oif = args->oif;

	struct ip_fw *f = NULL;		/* matching rule */
	int retval = IP_FW_PASS;
	struct m_tag *mtag;
	struct divert_info *divinfo;

	/*
	 * hlen	The length of the IPv4 header.
	 *	hlen > 0 means we have an IPv4 packet.
	 */
	u_int hlen = 0;		/* hlen > 0 means we have an IP pkt */

	/*
	 * offset	The offset of a fragment.  offset != 0 means that
	 *	we have a fragment at this offset of an IPv4 packet.
	 *	offset == 0 means that (if this is an IPv4 packet)
	 *	this is the first or only fragment.
	 */
	u_short offset = 0;

	/*
	 * Local copies of addresses.  They are only valid if we have
	 * an IP packet.
	 *
	 * proto	The protocol.  Set to 0 for non-ip packets,
	 *	or to the protocol read from the packet otherwise.
	 *	proto != 0 means that we have an IPv4 packet.
	 *
	 * src_port, dst_port	port numbers, in HOST format.  Only
	 *	valid for TCP and UDP packets.
	 *
	 * src_ip, dst_ip	ip addresses, in NETWORK format.
	 *	Only valid for IPv4 packets.
	 */
	uint8_t proto;
	uint16_t src_port = 0, dst_port = 0;	/* NOTE: host format */
	struct in_addr src_ip, dst_ip;		/* NOTE: network format */
	uint16_t ip_len = 0;

	/*
	 * dyn_dir = MATCH_UNKNOWN when rules unchecked,
	 *	MATCH_NONE when checked and not matched (dyn_f = NULL),
	 *	MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
	 */
	int dyn_dir = MATCH_UNKNOWN;
	struct ip_fw *dyn_f = NULL;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
		return IP_FW_PASS;	/* accept */

	if (args->eh == NULL ||		/* layer 3 packet */
	    (m->m_pkthdr.len >= sizeof(struct ip) &&
	     ntohs(args->eh->ether_type) == ETHERTYPE_IP))
		hlen = ip->ip_hl << 2;

	/*
	 * Collect parameters into local variables for faster matching.
	 */
	if (hlen == 0) {	/* do not grab addresses for non-ip pkts */
		proto = args->f_id.proto = 0;	/* mark f_id invalid */
		goto after_ip_checks;
	}

	proto = args->f_id.proto = ip->ip_p;
	src_ip = ip->ip_src;
	dst_ip = ip->ip_dst;
	if (args->eh != NULL) { /* layer 2 packets are as on the wire */
		offset = ntohs(ip->ip_off) & IP_OFFMASK;
		ip_len = ntohs(ip->ip_len);
	} else {
		offset = ip->ip_off & IP_OFFMASK;
		ip_len = ip->ip_len;
	}

#define PULLUP_TO(len)					\
do {							\
	if (m->m_len < (len)) {				\
		args->m = m = m_pullup(m, (len));	\
		if (m == NULL)				\
			goto pullup_failed;		\
		ip = mtod(m, struct ip *);		\
	}						\
} while (0)

	if (offset == 0) {
		switch (proto) {
		case IPPROTO_TCP:
		    {
			struct tcphdr *tcp;

			PULLUP_TO(hlen + sizeof(struct tcphdr));
			tcp = L3HDR(struct tcphdr, ip);
			dst_port = tcp->th_dport;
			src_port = tcp->th_sport;
			args->f_id.flags = tcp->th_flags;
		    }
			break;

		case IPPROTO_UDP:
		    {
			struct udphdr *udp;

			PULLUP_TO(hlen + sizeof(struct udphdr));
			udp = L3HDR(struct udphdr, ip);
			dst_port = udp->uh_dport;
			src_port = udp->uh_sport;
		    }
			break;

		case IPPROTO_ICMP:
			PULLUP_TO(hlen + 4);	/* type, code and checksum. */
			args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
			break;

		default:
			break;
		}
	}

#undef PULLUP_TO

	args->f_id.src_ip = ntohl(src_ip.s_addr);
	args->f_id.dst_ip = ntohl(dst_ip.s_addr);
	args->f_id.src_port = src_port = ntohs(src_port);
	args->f_id.dst_port = dst_port = ntohs(dst_port);
after_ip_checks:
	if (args->rule) {
		/*
		 * Packet has already been tagged.  Look for the next rule
		 * to restart processing.
		 *
		 * If fw_one_pass != 0 then just accept it.
		 * XXX should not happen here, but optimized out in
		 * the caller.
		 */
		if (fw_one_pass)
			return IP_FW_PASS;

		/* This rule is being/has been flushed */
		if (ipfw_flushing)
			return IP_FW_DENY;

		KASSERT(args->rule->cpuid == mycpuid,
			("rule used on cpu%d", mycpuid));

		/* This rule was deleted */
		if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
			return IP_FW_DENY;

		f = args->rule->next_rule;
		if (f == NULL)
			f = lookup_next_rule(args->rule);
	} else {
		/*
		 * Find the starting rule.  It can be either the first
		 * one, or the one after divert_rule if asked so.
		 */
		int skipto;

		mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
		if (mtag != NULL) {
			divinfo = m_tag_data(mtag);
			skipto = divinfo->skipto;
		} else {
			skipto = 0;
		}

		f = ctx->ipfw_layer3_chain;
		if (args->eh == NULL && skipto != 0) {
			/* No skipto during rule flushing */
			if (ipfw_flushing)
				return IP_FW_DENY;

			if (skipto >= IPFW_DEFAULT_RULE)
				return IP_FW_DENY; /* invalid */

			while (f && f->rulenum <= skipto)
				f = f->next;
			if (f == NULL)	/* drop packet */
				return IP_FW_DENY;
		} else if (ipfw_flushing) {
			/* Rules are being flushed; skip to default rule */
			f = ctx->ipfw_default_rule;
		}
	}
	if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
		m_tag_delete(m, mtag);
	/*
	 * Now scan the rules, and parse microinstructions for each rule.
	 */
	for (; f; f = f->next) {
		int l, cmdlen;
		ipfw_insn *cmd;
		int skip_or; /* skip rest of OR block */

again:
		if (ctx->ipfw_set_disable & (1 << f->set))
			continue;

		skip_or = 0;
		for (l = f->cmd_len, cmd = f->cmd; l > 0;
		     l -= cmdlen, cmd += cmdlen) {
			int match;

			/*
			 * check_body is a jump target used when we find a
			 * CHECK_STATE, and need to jump to the body of
			 * the target rule.
			 */
check_body:
			cmdlen = F_LEN(cmd);
			/*
			 * An OR block (insn_1 || .. || insn_n) has the
			 * F_OR bit set in all but the last instruction.
			 * The first match will set "skip_or", and cause
			 * the following instructions to be skipped until
			 * past the one with the F_OR bit clear.
			 */
			if (skip_or) { /* skip this instruction */
				if ((cmd->len & F_OR) == 0)
					skip_or = 0; /* next one is good */
				continue;
			}
			match = 0; /* set to 1 if we succeed */

			switch (cmd->opcode) {
1866 * The first set of opcodes compares the packet's
1867 * fields with some pattern, setting 'match' if a
1868 * match is found. At the end of the loop there is
1869 * logic to deal with F_NOT and F_OR flags associated
1870 * with the opcode.
1872 case O_NOP:
1873 match = 1;
1874 break;
1876 case O_FORWARD_MAC:
1877 kprintf("ipfw: opcode %d unimplemented\n",
1878 cmd->opcode);
1879 break;
1881 case O_GID:
1882 case O_UID:
1884 * We only check offset == 0 && proto != 0,
1885 * as this ensures that we have an IPv4
1886 * packet with the ports info.
1888 if (offset!=0)
1889 break;
1891 match = ipfw_match_uid(&args->f_id, oif,
1892 cmd->opcode,
1893 (uid_t)((ipfw_insn_u32 *)cmd)->d[0]);
1894 break;
1896 case O_RECV:
1897 match = iface_match(m->m_pkthdr.rcvif,
1898 (ipfw_insn_if *)cmd);
1899 break;
1901 case O_XMIT:
1902 match = iface_match(oif, (ipfw_insn_if *)cmd);
1903 break;
1905 case O_VIA:
1906 match = iface_match(oif ? oif :
1907 m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
1908 break;
1910 case O_MACADDR2:
1911 if (args->eh != NULL) { /* have MAC header */
1912 uint32_t *want = (uint32_t *)
1913 ((ipfw_insn_mac *)cmd)->addr;
1914 uint32_t *mask = (uint32_t *)
1915 ((ipfw_insn_mac *)cmd)->mask;
1916 uint32_t *hdr = (uint32_t *)args->eh;
1918 match =
1919 (want[0] == (hdr[0] & mask[0]) &&
1920 want[1] == (hdr[1] & mask[1]) &&
1921 want[2] == (hdr[2] & mask[2]));
1923 break;
1925 case O_MAC_TYPE:
1926 if (args->eh != NULL) {
1927 uint16_t t =
1928 ntohs(args->eh->ether_type);
1929 uint16_t *p =
1930 ((ipfw_insn_u16 *)cmd)->ports;
1931 int i;
1933 /* Special vlan handling */
1934 if (m->m_flags & M_VLANTAG)
1935 t = ETHERTYPE_VLAN;
1937 for (i = cmdlen - 1; !match && i > 0;
1938 i--, p += 2) {
1939 match =
1940 (t >= p[0] && t <= p[1]);
1943 break;
1945 case O_FRAG:
1946 match = (hlen > 0 && offset != 0);
1947 break;
1949 case O_IN: /* "out" is "not in" */
1950 match = (oif == NULL);
1951 break;
1953 case O_LAYER2:
1954 match = (args->eh != NULL);
1955 break;
1957 case O_PROTO:
1959 * We do not allow an arg of 0 so the
1960 * check of "proto" only suffices.
1962 match = (proto == cmd->arg1);
1963 break;
1965 case O_IP_SRC:
1966 match = (hlen > 0 &&
1967 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
1968 src_ip.s_addr);
1969 break;
1971 case O_IP_SRC_MASK:
1972 match = (hlen > 0 &&
1973 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
1974 (src_ip.s_addr &
1975 ((ipfw_insn_ip *)cmd)->mask.s_addr));
1976 break;
1978 case O_IP_SRC_ME:
1979 if (hlen > 0) {
1980 struct ifnet *tif;
1982 tif = INADDR_TO_IFP(&src_ip);
1983 match = (tif != NULL);
1985 break;
1987 case O_IP_DST_SET:
1988 case O_IP_SRC_SET:
1989 if (hlen > 0) {
1990 uint32_t *d = (uint32_t *)(cmd + 1);
1991 uint32_t addr =
1992 cmd->opcode == O_IP_DST_SET ?
1993 args->f_id.dst_ip :
1994 args->f_id.src_ip;
1996 if (addr < d[0])
1997 break;
1998 addr -= d[0]; /* subtract base */
1999 match =
2000 (addr < cmd->arg1) &&
2001 (d[1 + (addr >> 5)] &
2002 (1 << (addr & 0x1f)));
2004 break;
2006 case O_IP_DST:
2007 match = (hlen > 0 &&
2008 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
2009 dst_ip.s_addr);
2010 break;
2012 case O_IP_DST_MASK:
2013 match = (hlen > 0) &&
2014 (((ipfw_insn_ip *)cmd)->addr.s_addr ==
2015 (dst_ip.s_addr &
2016 ((ipfw_insn_ip *)cmd)->mask.s_addr));
2017 break;
2019 case O_IP_DST_ME:
2020 if (hlen > 0) {
2021 struct ifnet *tif;
2023 tif = INADDR_TO_IFP(&dst_ip);
2024 match = (tif != NULL);
2026 break;
2028 case O_IP_SRCPORT:
2029 case O_IP_DSTPORT:
2031 * offset == 0 && proto != 0 is enough
2032 * to guarantee that we have an IPv4
2033 * packet with port info.
2035 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP)
2036 && offset == 0) {
2037 uint16_t x =
2038 (cmd->opcode == O_IP_SRCPORT) ?
2039 src_port : dst_port ;
2040 uint16_t *p =
2041 ((ipfw_insn_u16 *)cmd)->ports;
2042 int i;
2044 for (i = cmdlen - 1; !match && i > 0;
2045 i--, p += 2) {
2046 match =
2047 (x >= p[0] && x <= p[1]);
2050 break;
2052 case O_ICMPTYPE:
2053 match = (offset == 0 && proto==IPPROTO_ICMP &&
2054 icmptype_match(ip, (ipfw_insn_u32 *)cmd));
2055 break;
2057 case O_IPOPT:
2058 match = (hlen > 0 && ipopts_match(ip, cmd));
2059 break;
2061 case O_IPVER:
2062 match = (hlen > 0 && cmd->arg1 == ip->ip_v);
2063 break;
2065 case O_IPTTL:
2066 match = (hlen > 0 && cmd->arg1 == ip->ip_ttl);
2067 break;
2069 case O_IPID:
2070 match = (hlen > 0 &&
2071 cmd->arg1 == ntohs(ip->ip_id));
2072 break;
2074 case O_IPLEN:
2075 match = (hlen > 0 && cmd->arg1 == ip_len);
2076 break;
2078 case O_IPPRECEDENCE:
2079 match = (hlen > 0 &&
2080 (cmd->arg1 == (ip->ip_tos & 0xe0)));
2081 break;
2083 case O_IPTOS:
2084 match = (hlen > 0 &&
2085 flags_match(cmd, ip->ip_tos));
2086 break;
2088 case O_TCPFLAGS:
2089 match = (proto == IPPROTO_TCP && offset == 0 &&
2090 flags_match(cmd,
2091 L3HDR(struct tcphdr,ip)->th_flags));
2092 break;
2094 case O_TCPOPTS:
2095 match = (proto == IPPROTO_TCP && offset == 0 &&
2096 tcpopts_match(ip, cmd));
2097 break;
2099 case O_TCPSEQ:
2100 match = (proto == IPPROTO_TCP && offset == 0 &&
2101 ((ipfw_insn_u32 *)cmd)->d[0] ==
2102 L3HDR(struct tcphdr,ip)->th_seq);
2103 break;
2105 case O_TCPACK:
2106 match = (proto == IPPROTO_TCP && offset == 0 &&
2107 ((ipfw_insn_u32 *)cmd)->d[0] ==
2108 L3HDR(struct tcphdr,ip)->th_ack);
2109 break;
2111 case O_TCPWIN:
2112 match = (proto == IPPROTO_TCP && offset == 0 &&
2113 cmd->arg1 ==
2114 L3HDR(struct tcphdr,ip)->th_win);
2115 break;
2117 case O_ESTAB:
2118 /* reject packets which have SYN only */
2119 /* XXX should i also check for TH_ACK ? */
2120 match = (proto == IPPROTO_TCP && offset == 0 &&
2121 (L3HDR(struct tcphdr,ip)->th_flags &
2122 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
2123 break;
2125 case O_LOG:
2126 if (fw_verbose)
2127 ipfw_log(f, hlen, args->eh, m, oif);
2128 match = 1;
2129 break;
2131 case O_PROB:
2132 match = (krandom() <
2133 ((ipfw_insn_u32 *)cmd)->d[0]);
2134 break;
2136 /*
2137 * The second set of opcodes represents 'actions',
2138 * i.e. the terminal part of a rule once the packet
2139 * matches all previous patterns.
2140 * Typically there is only one action for each rule,
2141 * and the opcode is stored at the end of the rule
2142 * (but there are exceptions -- see below).
2144 * In general, here we set retval and terminate the
2145 * outer loop (would be a 'break 3' in some language,
2146 * but we need to do a 'goto done').
2148 * Exceptions:
2149 * O_COUNT and O_SKIPTO actions:
2150 * instead of terminating, we jump to the next rule
2151 * ('goto next_rule', equivalent to a 'break 2'),
2152 * or to the SKIPTO target ('goto again' after
2153 * having set f, cmd and l), respectively.
2155 * O_LIMIT and O_KEEP_STATE: these opcodes are
2156 * not real 'actions', and are stored right
2157 * before the 'action' part of the rule.
2158 * These opcodes try to install an entry in the
2159 * state tables; if successful, we continue with
2160 * the next opcode (match=1; break;), otherwise
2161 * the packet must be dropped ('goto done' after
2162 * setting retval). If static rules are changed
2163 * during the state installation, the packet will
2164 * be dropped and the rule's stats will not be updated
2165 * ('return IP_FW_DENY').
2167 * O_PROBE_STATE and O_CHECK_STATE: these opcodes
2168 * cause a lookup of the state table, and a jump
2169 * to the 'action' part of the parent rule
2170 * ('goto check_body') if an entry is found, or
2171 * (CHECK_STATE only) a jump to the next rule if
2172 * the entry is not found ('goto next_rule').
2173 * The result of the lookup is cached, so that
2174 * further instances of these opcodes become
2175 * effectively NOPs. If static rules are changed
2176 * during the state lookup, the packet will
2177 * be dropped and the rule's stats will not be updated
2178 * ('return IP_FW_DENY').
2179 */
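/*
 * For illustration (hypothetical rule, not from this file): a rule
 * like "allow tcp from any to me keep-state" would typically compile
 * to an opcode stream along the lines of
 *
 *	O_PROBE_STATE			(inserted by the compiler)
 *	O_PROTO    arg1 = IPPROTO_TCP
 *	O_IP_DST_ME
 *	O_KEEP_STATE			(stored right before the action)
 *	O_ACCEPT			(the action, last opcode)
 *
 * so the state table is consulted first, and a new state is only
 * installed once all match opcodes have succeeded.
 */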
2180 case O_LIMIT:
2181 case O_KEEP_STATE:
2182 if (!(f->rule_flags & IPFW_RULE_F_STATE)) {
2183 kprintf("%s rule (%d) is not ready "
2184 "on cpu%d\n",
2185 cmd->opcode == O_LIMIT ?
2186 "limit" : "keep state",
2187 f->rulenum, f->cpuid);
2188 goto next_rule;
2189 }
2190 if (install_state(f,
2191 (ipfw_insn_limit *)cmd, args)) {
2192 retval = IP_FW_DENY;
2193 goto done; /* error/limit violation */
2194 }
2195 match = 1;
2196 break;
2198 case O_PROBE_STATE:
2199 case O_CHECK_STATE:
2200 /*
2201 * dynamic rules are checked at the first
2202 * keep-state or check-state occurrence,
2203 * with the result being stored in dyn_dir.
2204 * The compiler introduces a PROBE_STATE
2205 * instruction for us when we have a
2206 * KEEP_STATE (because PROBE_STATE needs
2207 * to be run first).
2208 */
2209 if (dyn_dir == MATCH_UNKNOWN) {
2210 dyn_f = lookup_rule(&args->f_id,
2211 &dyn_dir,
2212 proto == IPPROTO_TCP ?
2213 L3HDR(struct tcphdr, ip) : NULL,
2214 ip_len);
2215 if (dyn_f != NULL) {
2216 /*
2217 * Found a rule from a dynamic
2218 * entry; jump to the 'action'
2219 * part of the rule.
2220 */
2221 f = dyn_f;
2222 cmd = ACTION_PTR(f);
2223 l = f->cmd_len - f->act_ofs;
2224 goto check_body;
2225 }
2226 }
2227 /*
2228 * Dynamic entry not found. If CHECK_STATE,
2229 * skip to the next rule; if PROBE_STATE, just
2230 * ignore it and continue with the next opcode.
2231 */
2232 if (cmd->opcode == O_CHECK_STATE)
2233 goto next_rule;
2234 else if (!(f->rule_flags & IPFW_RULE_F_STATE))
2235 goto next_rule; /* not ready yet */
2236 match = 1;
2237 break;
2239 case O_ACCEPT:
2240 retval = IP_FW_PASS; /* accept */
2241 goto done;
2243 case O_PIPE:
2244 case O_QUEUE:
2245 args->rule = f; /* report matching rule */
2246 args->cookie = cmd->arg1;
2247 retval = IP_FW_DUMMYNET;
2248 goto done;
2250 case O_DIVERT:
2251 case O_TEE:
2252 if (args->eh) /* not on layer 2 */
2253 break;
2255 mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT,
2256 sizeof(*divinfo), M_NOWAIT);
2257 if (mtag == NULL) {
2258 retval = IP_FW_DENY;
2259 goto done;
2260 }
2261 divinfo = m_tag_data(mtag);
2263 divinfo->skipto = f->rulenum;
2264 divinfo->port = cmd->arg1;
2265 divinfo->tee = (cmd->opcode == O_TEE);
2266 m_tag_prepend(m, mtag);
2268 args->cookie = cmd->arg1;
2269 retval = (cmd->opcode == O_DIVERT) ?
2270 IP_FW_DIVERT : IP_FW_TEE;
2271 goto done;
2273 case O_COUNT:
2274 case O_SKIPTO:
2275 f->pcnt++; /* update stats */
2276 f->bcnt += ip_len;
2277 f->timestamp = time_second;
2278 if (cmd->opcode == O_COUNT)
2279 goto next_rule;
2280 /* handle skipto */
2281 if (f->next_rule == NULL)
2282 lookup_next_rule(f);
2283 f = f->next_rule;
2284 goto again;
2286 case O_REJECT:
2287 /*
2288 * Drop the packet and send a reject notice
2289 * if the packet is not ICMP (or is an ICMP
2290 * query), and it is not multicast/broadcast.
2291 */
2292 if (hlen > 0 &&
2293 (proto != IPPROTO_ICMP ||
2294 is_icmp_query(ip)) &&
2295 !(m->m_flags & (M_BCAST|M_MCAST)) &&
2296 !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
2297 /*
2298 * Update statistics before the possible
2299 * blocking 'send_reject'
2300 */
2301 f->pcnt++;
2302 f->bcnt += ip_len;
2303 f->timestamp = time_second;
2305 send_reject(args, cmd->arg1,
2306 offset, ip_len);
2307 m = args->m;
2309 /*
2310 * Return directly here, rule stats
2311 * have been updated above.
2312 */
2313 return IP_FW_DENY;
2314 }
2315 /* FALLTHROUGH */
2316 case O_DENY:
2317 retval = IP_FW_DENY;
2318 goto done;
2320 case O_FORWARD_IP:
2321 if (args->eh) /* not valid on layer2 pkts */
2322 break;
2323 if (!dyn_f || dyn_dir == MATCH_FORWARD) {
2324 struct sockaddr_in *sin;
2326 mtag = m_tag_get(PACKET_TAG_IPFORWARD,
2327 sizeof(*sin), M_NOWAIT);
2328 if (mtag == NULL) {
2329 retval = IP_FW_DENY;
2330 goto done;
2331 }
2332 sin = m_tag_data(mtag);
2334 /* Structure copy */
2335 *sin = ((ipfw_insn_sa *)cmd)->sa;
2337 m_tag_prepend(m, mtag);
2338 m->m_pkthdr.fw_flags |=
2339 IPFORWARD_MBUF_TAGGED;
2340 m->m_pkthdr.fw_flags &=
2341 ~BRIDGE_MBUF_TAGGED;
2342 }
2343 retval = IP_FW_PASS;
2344 goto done;
2346 default:
2347 panic("-- unknown opcode %d", cmd->opcode);
2348 } /* end of switch() on opcodes */
2350 if (cmd->len & F_NOT)
2351 match = !match;
2353 if (match) {
2354 if (cmd->len & F_OR)
2355 skip_or = 1;
2356 } else {
2357 if (!(cmd->len & F_OR)) /* not an OR block, */
2358 break; /* try next rule */
2359 }
2361 } /* end of inner for, scan opcodes */
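/*
 * Example of the OR-block logic above (hypothetical rule, for
 * illustration only): "allow ip from { 1.2.3.4 or 5.6.7.8 } to any"
 * yields two O_IP_SRC opcodes with F_OR set on the first. A match on
 * an F_OR opcode sets skip_or, so the rest of the OR block is skipped
 * as already satisfied; a mismatch on the last opcode of the block,
 * which lacks F_OR, breaks out and moves on to the next rule.
 */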
2363 next_rule:; /* try next rule */
2365 } /* end of outer for, scan rules */
2366 kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
2367 return IP_FW_DENY;
2369 done:
2370 /* Update statistics */
2371 f->pcnt++;
2372 f->bcnt += ip_len;
2373 f->timestamp = time_second;
2374 return retval;
2376 pullup_failed:
2377 if (fw_verbose)
2378 kprintf("pullup failed\n");
2379 return IP_FW_DENY;
2380 }
2382 static void
2383 ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
2385 struct m_tag *mtag;
2386 struct dn_pkt *pkt;
2387 ipfw_insn *cmd;
2388 const struct ipfw_flow_id *id;
2389 struct dn_flow_id *fid;
2391 M_ASSERTPKTHDR(m);
2393 mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), M_NOWAIT);
2394 if (mtag == NULL) {
2395 m_freem(m);
2396 return;
2397 }
2398 m_tag_prepend(m, mtag);
2400 pkt = m_tag_data(mtag);
2401 bzero(pkt, sizeof(*pkt));
2403 cmd = fwa->rule->cmd + fwa->rule->act_ofs;
2404 if (cmd->opcode == O_LOG)
2405 cmd += F_LEN(cmd);
2406 KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
2407 ("Rule is not PIPE or QUEUE, opcode %d", cmd->opcode));
2409 pkt->dn_m = m;
2410 pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
2411 pkt->ifp = fwa->oif;
2412 pkt->pipe_nr = pipe_nr;
2414 pkt->cpuid = mycpuid;
2415 pkt->msgport = netisr_curport();
2417 id = &fwa->f_id;
2418 fid = &pkt->id;
2419 fid->fid_dst_ip = id->dst_ip;
2420 fid->fid_src_ip = id->src_ip;
2421 fid->fid_dst_port = id->dst_port;
2422 fid->fid_src_port = id->src_port;
2423 fid->fid_proto = id->proto;
2424 fid->fid_flags = id->flags;
2426 ipfw_ref_rule(fwa->rule);
2427 pkt->dn_priv = fwa->rule;
2428 pkt->dn_unref_priv = ipfw_unref_rule;
2430 if (cmd->opcode == O_PIPE)
2431 pkt->dn_flags |= DN_FLAGS_IS_PIPE;
2433 m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
2434 }
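/*
 * Lifecycle sketch of the hand-off above (a summary, not a new
 * mechanism): the matching rule travels into dummynet as dn_priv
 * with a reference held; dummynet drops that reference through
 * dn_unref_priv when it is done with the packet, and on reinjection
 * the DUMMYNET_MBUF_TAGGED checks in ipfw_check_in()/ipfw_check_out()
 * below recover args.rule from the tag so the scan can continue from
 * the matching rule instead of starting over.
 */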
2436 /*
2437 * When a rule is added/deleted, clear the next_rule pointers in all rules.
2438 * These will be reconstructed on the fly as packets are matched.
2439 */
2440 static void
2441 ipfw_flush_rule_ptrs(struct ipfw_context *ctx)
2443 struct ip_fw *rule;
2445 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
2446 rule->next_rule = NULL;
2449 static __inline void
2450 ipfw_inc_static_count(struct ip_fw *rule)
2452 /* Static rule's counts are updated only on CPU0 */
2453 KKASSERT(mycpuid == 0);
2455 static_count++;
2456 static_ioc_len += IOC_RULESIZE(rule);
2459 static __inline void
2460 ipfw_dec_static_count(struct ip_fw *rule)
2462 int l = IOC_RULESIZE(rule);
2464 /* Static rule's counts are updated only on CPU0 */
2465 KKASSERT(mycpuid == 0);
2467 KASSERT(static_count > 0, ("invalid static count %u", static_count));
2468 static_count--;
2470 KASSERT(static_ioc_len >= l,
2471 ("invalid static len %u", static_ioc_len));
2472 static_ioc_len -= l;
2475 static void
2476 ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule)
2478 if (fwmsg->sibling != NULL) {
2479 KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1);
2480 fwmsg->sibling->sibling = rule;
2481 }
2482 fwmsg->sibling = rule;
2483 }
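/*
 * Resulting linkage, sketched for a hypothetical 3-CPU system: as
 * the message hops CPU0 -> CPU1 -> CPU2, each per-CPU duplicate is
 * chained to the previous one:
 *
 *	rule[cpu0]->sibling = rule[cpu1]
 *	rule[cpu1]->sibling = rule[cpu2]
 *	rule[cpu2]->sibling = NULL
 *
 * ipfw_copy_rule() below walks exactly this chain to sum the per-CPU
 * packet/byte counters.
 */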
2485 static struct ip_fw *
2486 ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub)
2488 struct ip_fw *rule;
2490 rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO);
2492 rule->act_ofs = ioc_rule->act_ofs;
2493 rule->cmd_len = ioc_rule->cmd_len;
2494 rule->rulenum = ioc_rule->rulenum;
2495 rule->set = ioc_rule->set;
2496 rule->usr_flags = ioc_rule->usr_flags;
2498 bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */);
2500 rule->refcnt = 1;
2501 rule->cpuid = mycpuid;
2503 rule->stub = stub;
2504 if (stub != NULL)
2505 stub->rule[mycpuid] = rule;
2507 return rule;
2510 static void
2511 ipfw_add_rule_dispatch(netmsg_t nmsg)
2513 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
2514 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2515 struct ip_fw *rule;
2517 rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub);
2519 /*
2520 * Insert rule into the pre-determined position
2521 */
2522 if (fwmsg->prev_rule != NULL) {
2523 struct ip_fw *prev, *next;
2525 prev = fwmsg->prev_rule;
2526 KKASSERT(prev->cpuid == mycpuid);
2528 next = fwmsg->next_rule;
2529 KKASSERT(next->cpuid == mycpuid);
2531 rule->next = next;
2532 prev->next = rule;
2534 /*
2535 * Move to the position on the next CPU
2536 * before the msg is forwarded.
2537 */
2538 fwmsg->prev_rule = prev->sibling;
2539 fwmsg->next_rule = next->sibling;
2540 } else {
2541 KKASSERT(fwmsg->next_rule == NULL);
2542 rule->next = ctx->ipfw_layer3_chain;
2543 ctx->ipfw_layer3_chain = rule;
2544 }
2546 /* Link rule CPU sibling */
2547 ipfw_link_sibling(fwmsg, rule);
2549 ipfw_flush_rule_ptrs(ctx);
2551 if (mycpuid == 0) {
2552 /* Statistics only need to be updated once */
2553 ipfw_inc_static_count(rule);
2555 /* Return the rule on CPU0 */
2556 nmsg->lmsg.u.ms_resultp = rule;
2557 }
2559 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
2560 }
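/*
 * Minimal sketch of the replicate-by-forwarding pattern used by
 * ipfw_add_rule_dispatch() above; illustration only, the "example_"
 * names are hypothetical.
 */
#if 0
static void
example_dispatch(netmsg_t nmsg)
{
	/* ... per-CPU work, performed on mycpuid ... */

	/* Hand the same message to the next CPU's netisr. */
	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}

static void
example_replicate(void)
{
	struct netmsg_base nmsg;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, example_dispatch);
	/* Runs on CPU0 first; the reply comes back after the last CPU. */
	netisr_domsg(&nmsg, 0);
}
#endif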
2562 static void
2563 ipfw_enable_state_dispatch(netmsg_t nmsg)
2565 struct lwkt_msg *lmsg = &nmsg->lmsg;
2566 struct ip_fw *rule = lmsg->u.ms_resultp;
2568 KKASSERT(rule->cpuid == mycpuid);
2569 KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule);
2570 KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE));
2571 rule->rule_flags |= IPFW_RULE_F_STATE;
2572 lmsg->u.ms_resultp = rule->sibling;
2574 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
2575 }
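/*
 * The dispatch above is the second pass of a two-pass protocol
 * (sketch): pass one (ipfw_add_rule_dispatch) links the rule on
 * every CPU; only then does this pass raise IPFW_RULE_F_STATE on
 * each duplicate, so no CPU can try to install states against a
 * rule whose siblings are not fully set up yet.
 */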
2577 /*
2578 * Add a new rule to the list. Copy the rule into a malloc'ed area,
2579 * then possibly create a rule number and add the rule to the list.
2580 * Update the rule_number in the input struct so the caller knows
2581 * it as well.
2582 */
2583 static void
2584 ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
2586 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2587 struct netmsg_ipfw fwmsg;
2588 struct netmsg_base *nmsg;
2589 struct ip_fw *f, *prev, *rule;
2590 struct ip_fw_stub *stub;
2592 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
2594 /*
2595 * If rulenum is 0, find the highest numbered rule before the
2596 * default rule, and add the autoinc step to that number.
2597 */
2598 if (ioc_rule->rulenum == 0) {
2599 int step = autoinc_step;
2601 KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
2602 step <= IPFW_AUTOINC_STEP_MAX);
2604 /*
2605 * Locate the highest numbered rule before default
2606 */
2607 for (f = ctx->ipfw_layer3_chain; f; f = f->next) {
2608 if (f->rulenum == IPFW_DEFAULT_RULE)
2609 break;
2610 ioc_rule->rulenum = f->rulenum;
2611 }
2612 if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
2613 ioc_rule->rulenum += step;
2614 }
2615 KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
2616 ioc_rule->rulenum != 0,
2617 ("invalid rule num %d", ioc_rule->rulenum));
2619 /*
2620 * Now find the right place for the new rule in the sorted list.
2621 */
2622 for (prev = NULL, f = ctx->ipfw_layer3_chain; f;
2623 prev = f, f = f->next) {
2624 if (f->rulenum > ioc_rule->rulenum) {
2625 /* Found the location */
2626 break;
2627 }
2628 }
2629 KASSERT(f != NULL, ("no default rule?!"));
2631 if (rule_flags & IPFW_RULE_F_STATE) {
2632 int size;
2634 /*
2635 * If the new rule will create states, then allocate
2636 * a rule stub, which will be referenced by states
2637 * (dyn rules)
2638 */
2639 size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *));
2640 stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO);
2641 } else {
2642 stub = NULL;
2643 }
2645 /*
2646 * Duplicate the rule onto each CPU.
2647 * The rule duplicated on CPU0 will be returned.
2648 */
2649 bzero(&fwmsg, sizeof(fwmsg));
2650 nmsg = &fwmsg.base;
2651 netmsg_init(nmsg, NULL, &curthread->td_msgport,
2652 0, ipfw_add_rule_dispatch);
2653 fwmsg.ioc_rule = ioc_rule;
2654 fwmsg.prev_rule = prev;
2655 fwmsg.next_rule = prev == NULL ? NULL : f;
2656 fwmsg.stub = stub;
2658 netisr_domsg(nmsg, 0);
2659 KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL);
2661 rule = nmsg->lmsg.u.ms_resultp;
2662 KKASSERT(rule != NULL && rule->cpuid == mycpuid);
2664 if (rule_flags & IPFW_RULE_F_STATE) {
2665 /*
2666 * Turn on the state flag _after_ everything on all
2667 * CPUs has been set up.
2668 */
2669 bzero(nmsg, sizeof(*nmsg));
2670 netmsg_init(nmsg, NULL, &curthread->td_msgport,
2671 0, ipfw_enable_state_dispatch);
2672 nmsg->lmsg.u.ms_resultp = rule;
2674 netisr_domsg(nmsg, 0);
2675 KKASSERT(nmsg->lmsg.u.ms_resultp == NULL);
2676 }
2678 DPRINTF("++ installed rule %d, static count now %d\n",
2679 rule->rulenum, static_count);
2680 }
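/*
 * Worked example of the auto-numbering above (hypothetical chain):
 * with rules 100 and 200 ahead of the default rule and
 * autoinc_step = 100, adding a rule with rulenum 0 scans up to 200
 * and installs the new rule as 300. If the highest number is too
 * close to IPFW_DEFAULT_RULE for another step, the new rule simply
 * shares that highest number.
 */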
2682 /*
2683 * Free storage associated with a static rule (including derived
2684 * dynamic rules).
2685 * The caller is in charge of clearing rule pointers to avoid
2686 * dangling pointers.
2687 * @return a pointer to the next entry.
2688 * Arguments are not checked, so they better be correct.
2689 */
2690 static struct ip_fw *
2691 ipfw_delete_rule(struct ipfw_context *ctx,
2692 struct ip_fw *prev, struct ip_fw *rule)
2694 struct ip_fw *n;
2695 struct ip_fw_stub *stub;
2697 /* STATE flag should have been cleared before we reach here */
2698 KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0);
2700 stub = rule->stub;
2701 n = rule->next;
2702 if (prev == NULL)
2703 ctx->ipfw_layer3_chain = n;
2704 else
2705 prev->next = n;
2707 /* Mark the rule as invalid */
2708 rule->rule_flags |= IPFW_RULE_F_INVALID;
2709 rule->next_rule = NULL;
2710 rule->sibling = NULL;
2711 rule->stub = NULL;
2712 #ifdef foo
2713 /* Don't reset cpuid here; keep various assertion working */
2714 rule->cpuid = -1;
2715 #endif
2717 /* Statistics only need to be updated once */
2718 if (mycpuid == 0)
2719 ipfw_dec_static_count(rule);
2721 /* Free 'stub' on the last CPU */
2722 if (stub != NULL && mycpuid == ncpus - 1)
2723 kfree(stub, M_IPFW);
2725 /* Try to free this rule */
2726 ipfw_free_rule(rule);
2728 /* Return the next rule */
2729 return n;
2732 static void
2733 ipfw_flush_dispatch(netmsg_t nmsg)
2735 struct lwkt_msg *lmsg = &nmsg->lmsg;
2736 int kill_default = lmsg->u.ms_result;
2737 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2738 struct ip_fw *rule;
2740 ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */
2742 while ((rule = ctx->ipfw_layer3_chain) != NULL &&
2743 (kill_default || rule->rulenum != IPFW_DEFAULT_RULE))
2744 ipfw_delete_rule(ctx, NULL, rule);
2746 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
2749 static void
2750 ipfw_disable_rule_state_dispatch(netmsg_t nmsg)
2752 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2753 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2754 struct ip_fw *rule;
2756 rule = dmsg->start_rule;
2757 if (rule != NULL) {
2758 KKASSERT(rule->cpuid == mycpuid);
2760 /*
2761 * Move to the position on the next CPU
2762 * before the msg is forwarded.
2763 */
2764 dmsg->start_rule = rule->sibling;
2765 } else {
2766 KKASSERT(dmsg->rulenum == 0);
2767 rule = ctx->ipfw_layer3_chain;
2768 }
2770 while (rule != NULL) {
2771 if (dmsg->rulenum && rule->rulenum != dmsg->rulenum)
2772 break;
2773 rule->rule_flags &= ~IPFW_RULE_F_STATE;
2774 rule = rule->next;
2775 }
2777 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
2780 /*
2781 * Deletes all rules from a chain (including the default rule
2782 * if 'kill_default' is set).
2783 */
2784 static void
2785 ipfw_flush(int kill_default)
2787 struct netmsg_del dmsg;
2788 struct netmsg_base nmsg;
2789 struct lwkt_msg *lmsg;
2790 struct ip_fw *rule;
2791 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2793 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
2795 /*
2796 * If 'kill_default' is set, the caller has already done the
2797 * necessary msgport syncing; no need to do it again.
2798 */
2799 if (!kill_default) {
2800 /*
2801 * Let ipfw_chk() know the rules are going to
2802 * be flushed, so it can jump directly to
2803 * the default rule.
2804 */
2805 ipfw_flushing = 1;
2806 netmsg_service_sync();
2807 }
2809 /*
2810 * Clear STATE flag on rules, so no more states (dyn rules)
2811 * will be created.
2812 */
2813 bzero(&dmsg, sizeof(dmsg));
2814 netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
2815 0, ipfw_disable_rule_state_dispatch);
2816 netisr_domsg(&dmsg.base, 0);
2818 /*
2819 * This actually nukes all states (dyn rules)
2820 */
2821 lockmgr(&dyn_lock, LK_EXCLUSIVE);
2822 for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) {
2823 /*
2824 * Can't check IPFW_RULE_F_STATE here,
2825 * since it has been cleared previously.
2826 * Check 'stub' instead.
2827 */
2828 if (rule->stub != NULL) {
2829 /* Force removal */
2830 remove_dyn_rule_locked(rule, NULL);
2831 }
2832 }
2833 lockmgr(&dyn_lock, LK_RELEASE);
2835 /*
2836 * Press the 'flush' button
2837 */
2838 bzero(&nmsg, sizeof(nmsg));
2839 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
2840 0, ipfw_flush_dispatch);
2841 lmsg = &nmsg.lmsg;
2842 lmsg->u.ms_result = kill_default;
2843 netisr_domsg(&nmsg, 0);
2845 KASSERT(dyn_count == 0, ("%u dyn rule remains", dyn_count));
2847 if (kill_default) {
2848 if (ipfw_dyn_v != NULL) {
2849 /*
2850 * Free the dynamic rules (states) hash table
2851 */
2852 kfree(ipfw_dyn_v, M_IPFW);
2853 ipfw_dyn_v = NULL;
2854 }
2856 KASSERT(static_count == 0,
2857 ("%u static rules remain", static_count));
2858 KASSERT(static_ioc_len == 0,
2859 ("%u bytes of static rules remain", static_ioc_len));
2860 } else {
2861 KASSERT(static_count == 1,
2862 ("%u static rules remain", static_count));
2863 KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
2864 ("%u bytes of static rules remain, should be %lu",
2865 static_ioc_len,
2866 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule)));
2867 }
2869 /* Flush is done */
2870 ipfw_flushing = 0;
2873 static void
2874 ipfw_alt_delete_rule_dispatch(netmsg_t nmsg)
2876 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2877 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2878 struct ip_fw *rule, *prev;
2880 rule = dmsg->start_rule;
2881 KKASSERT(rule->cpuid == mycpuid);
2882 dmsg->start_rule = rule->sibling;
2884 prev = dmsg->prev_rule;
2885 if (prev != NULL) {
2886 KKASSERT(prev->cpuid == mycpuid);
2888 /*
2889 * Move to the position on the next CPU
2890 * before the msg is forwarded.
2891 */
2892 dmsg->prev_rule = prev->sibling;
2893 }
2895 /*
2896 * flush pointers outside the loop, then delete all matching
2897 * rules. 'prev' remains the same throughout the cycle.
2898 */
2899 ipfw_flush_rule_ptrs(ctx);
2900 while (rule && rule->rulenum == dmsg->rulenum)
2901 rule = ipfw_delete_rule(ctx, prev, rule);
2903 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
2906 static int
2907 ipfw_alt_delete_rule(uint16_t rulenum)
2909 struct ip_fw *prev, *rule, *f;
2910 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2911 struct netmsg_del dmsg;
2912 struct netmsg_base *nmsg;
2913 int state;
2915 /*
2916 * Locate first rule to delete
2917 */
2918 for (prev = NULL, rule = ctx->ipfw_layer3_chain;
2919 rule && rule->rulenum < rulenum;
2920 prev = rule, rule = rule->next)
2921 ; /* EMPTY */
2922 if (rule->rulenum != rulenum)
2923 return EINVAL;
2925 /*
2926 * Check whether any rules with the given number will
2927 * create states.
2928 */
2929 state = 0;
2930 for (f = rule; f && f->rulenum == rulenum; f = f->next) {
2931 if (f->rule_flags & IPFW_RULE_F_STATE) {
2932 state = 1;
2933 break;
2934 }
2935 }
2937 if (state) {
2938 /*
2939 * Clear the STATE flag, so no more states will be
2940 * created based on the rules numbered 'rulenum'.
2941 */
2942 bzero(&dmsg, sizeof(dmsg));
2943 nmsg = &dmsg.base;
2944 netmsg_init(nmsg, NULL, &curthread->td_msgport,
2945 0, ipfw_disable_rule_state_dispatch);
2946 dmsg.start_rule = rule;
2947 dmsg.rulenum = rulenum;
2949 netisr_domsg(nmsg, 0);
2950 KKASSERT(dmsg.start_rule == NULL);
2952 /*
2953 * Nuke all related states
2954 */
2955 lockmgr(&dyn_lock, LK_EXCLUSIVE);
2956 for (f = rule; f && f->rulenum == rulenum; f = f->next) {
2957 /*
2958 * Can't check IPFW_RULE_F_STATE here,
2959 * since it has been cleared previously.
2960 * Check 'stub' instead.
2961 */
2962 if (f->stub != NULL) {
2963 /* Force removal */
2964 remove_dyn_rule_locked(f, NULL);
2965 }
2966 }
2967 lockmgr(&dyn_lock, LK_RELEASE);
2968 }
2970 /*
2971 * Get rid of the rule duplications on all CPUs
2972 */
2973 bzero(&dmsg, sizeof(dmsg));
2974 nmsg = &dmsg.base;
2975 netmsg_init(nmsg, NULL, &curthread->td_msgport,
2976 0, ipfw_alt_delete_rule_dispatch);
2977 dmsg.prev_rule = prev;
2978 dmsg.start_rule = rule;
2979 dmsg.rulenum = rulenum;
2981 netisr_domsg(nmsg, 0);
2982 KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL);
2983 return 0;
2986 static void
2987 ipfw_alt_delete_ruleset_dispatch(netmsg_t nmsg)
2989 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
2990 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
2991 struct ip_fw *prev, *rule;
2992 #ifdef INVARIANTS
2993 int del = 0;
2994 #endif
2996 ipfw_flush_rule_ptrs(ctx);
2998 prev = NULL;
2999 rule = ctx->ipfw_layer3_chain;
3000 while (rule != NULL) {
3001 if (rule->set == dmsg->from_set) {
3002 rule = ipfw_delete_rule(ctx, prev, rule);
3003 #ifdef INVARIANTS
3004 del = 1;
3005 #endif
3006 } else {
3007 prev = rule;
3008 rule = rule->next;
3009 }
3010 }
3011 KASSERT(del, ("no match set?!"));
3013 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3016 static void
3017 ipfw_disable_ruleset_state_dispatch(netmsg_t nmsg)
3019 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3020 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3021 struct ip_fw *rule;
3022 #ifdef INVARIANTS
3023 int cleared = 0;
3024 #endif
3026 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3027 if (rule->set == dmsg->from_set) {
3028 #ifdef INVARIANTS
3029 cleared = 1;
3030 #endif
3031 rule->rule_flags &= ~IPFW_RULE_F_STATE;
3032 }
3033 }
3034 KASSERT(cleared, ("no match set?!"));
3036 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3039 static int
3040 ipfw_alt_delete_ruleset(uint8_t set)
3042 struct netmsg_del dmsg;
3043 struct netmsg_base *nmsg;
3044 int state, del;
3045 struct ip_fw *rule;
3046 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3048 /*
3049 * Check whether the 'set' exists. If it exists,
3050 * then check whether any rules within the set will
3051 * try to create states.
3052 */
3053 state = 0;
3054 del = 0;
3055 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3056 if (rule->set == set) {
3057 del = 1;
3058 if (rule->rule_flags & IPFW_RULE_F_STATE) {
3059 state = 1;
3060 break;
3061 }
3062 }
3063 }
3064 if (!del)
3065 return 0; /* XXX EINVAL? */
3067 if (state) {
3068 /*
3069 * Clear the STATE flag, so no more states will be
3070 * created based on the rules in this set.
3071 */
3072 bzero(&dmsg, sizeof(dmsg));
3073 nmsg = &dmsg.base;
3074 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3075 0, ipfw_disable_ruleset_state_dispatch);
3076 dmsg.from_set = set;
3078 netisr_domsg(nmsg, 0);
3080 /*
3081 * Nuke all related states
3082 */
3083 lockmgr(&dyn_lock, LK_EXCLUSIVE);
3084 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3085 if (rule->set != set)
3086 continue;
3088 /*
3089 * Can't check IPFW_RULE_F_STATE here,
3090 * since it has been cleared previously.
3091 * Check 'stub' instead.
3092 */
3093 if (rule->stub != NULL) {
3094 /* Force removal */
3095 remove_dyn_rule_locked(rule, NULL);
3096 }
3097 }
3098 lockmgr(&dyn_lock, LK_RELEASE);
3099 }
3101 /*
3102 * Delete this set
3103 */
3104 bzero(&dmsg, sizeof(dmsg));
3105 nmsg = &dmsg.base;
3106 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3107 0, ipfw_alt_delete_ruleset_dispatch);
3108 dmsg.from_set = set;
3110 netisr_domsg(nmsg, 0);
3111 return 0;
3114 static void
3115 ipfw_alt_move_rule_dispatch(netmsg_t nmsg)
3117 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3118 struct ip_fw *rule;
3120 rule = dmsg->start_rule;
3121 KKASSERT(rule->cpuid == mycpuid);
3123 /*
3124 * Move to the position on the next CPU
3125 * before the msg is forwarded.
3126 */
3127 dmsg->start_rule = rule->sibling;
3129 while (rule && rule->rulenum <= dmsg->rulenum) {
3130 if (rule->rulenum == dmsg->rulenum)
3131 rule->set = dmsg->to_set;
3132 rule = rule->next;
3133 }
3134 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3137 static int
3138 ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
3140 struct netmsg_del dmsg;
3141 struct netmsg_base *nmsg;
3142 struct ip_fw *rule;
3143 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3145 /*
3146 * Locate first rule to move
3147 */
3148 for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum;
3149 rule = rule->next) {
3150 if (rule->rulenum == rulenum && rule->set != set)
3151 break;
3152 }
3153 if (rule == NULL || rule->rulenum > rulenum)
3154 return 0; /* XXX error? */
3156 bzero(&dmsg, sizeof(dmsg));
3157 nmsg = &dmsg.base;
3158 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3159 0, ipfw_alt_move_rule_dispatch);
3160 dmsg.start_rule = rule;
3161 dmsg.rulenum = rulenum;
3162 dmsg.to_set = set;
3164 netisr_domsg(nmsg, 0);
3165 KKASSERT(dmsg.start_rule == NULL);
3166 return 0;
3169 static void
3170 ipfw_alt_move_ruleset_dispatch(netmsg_t nmsg)
3172 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3173 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3174 struct ip_fw *rule;
3176 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3177 if (rule->set == dmsg->from_set)
3178 rule->set = dmsg->to_set;
3179 }
3180 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3183 static int
3184 ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
3186 struct netmsg_del dmsg;
3187 struct netmsg_base *nmsg;
3189 bzero(&dmsg, sizeof(dmsg));
3190 nmsg = &dmsg.base;
3191 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3192 0, ipfw_alt_move_ruleset_dispatch);
3193 dmsg.from_set = from_set;
3194 dmsg.to_set = to_set;
3196 netisr_domsg(nmsg, 0);
3197 return 0;
3200 static void
3201 ipfw_alt_swap_ruleset_dispatch(netmsg_t nmsg)
3203 struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
3204 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3205 struct ip_fw *rule;
3207 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3208 if (rule->set == dmsg->from_set)
3209 rule->set = dmsg->to_set;
3210 else if (rule->set == dmsg->to_set)
3211 rule->set = dmsg->from_set;
3212 }
3213 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3216 static int
3217 ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
3219 struct netmsg_del dmsg;
3220 struct netmsg_base *nmsg;
3222 bzero(&dmsg, sizeof(dmsg));
3223 nmsg = &dmsg.base;
3224 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3225 0, ipfw_alt_swap_ruleset_dispatch);
3226 dmsg.from_set = set1;
3227 dmsg.to_set = set2;
3229 netisr_domsg(nmsg, 0);
3230 return 0;
3233 /*
3234 * Remove all rules with given number, and also do set manipulation.
3235 *
3236 * The argument is a uint32_t. The low 16 bits are the rule or set number,
3237 * the next 8 bits are the new set, the top 8 bits are the command:
3239 * 0 delete rules with given number
3240 * 1 delete rules with given set number
3241 * 2 move rules with given number to new set
3242 * 3 move rules with given set number to new set
3243 * 4 swap sets with given numbers
3244 */
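/*
 * Example encodings (illustration only):
 *	delete rule 100:	arg = 100
 *	move rule 100 to set 3:	arg = (2 << 24) | (3 << 16) | 100
 *	swap sets 1 and 2:	arg = (4 << 24) | (2 << 16) | 1
 */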
3245 static int
3246 ipfw_ctl_alter(uint32_t arg)
3248 uint16_t rulenum;
3249 uint8_t cmd, new_set;
3250 int error = 0;
3252 rulenum = arg & 0xffff;
3253 cmd = (arg >> 24) & 0xff;
3254 new_set = (arg >> 16) & 0xff;
3256 if (cmd > 4)
3257 return EINVAL;
3258 if (new_set >= IPFW_DEFAULT_SET)
3259 return EINVAL;
3260 if (cmd == 0 || cmd == 2) {
3261 if (rulenum == IPFW_DEFAULT_RULE)
3262 return EINVAL;
3263 } else {
3264 if (rulenum >= IPFW_DEFAULT_SET)
3265 return EINVAL;
3266 }
3268 switch (cmd) {
3269 case 0: /* delete rules with given number */
3270 error = ipfw_alt_delete_rule(rulenum);
3271 break;
3273 case 1: /* delete all rules with given set number */
3274 error = ipfw_alt_delete_ruleset(rulenum);
3275 break;
3277 case 2: /* move rules with given number to new set */
3278 error = ipfw_alt_move_rule(rulenum, new_set);
3279 break;
3281 case 3: /* move rules with given set number to new set */
3282 error = ipfw_alt_move_ruleset(rulenum, new_set);
3283 break;
3285 case 4: /* swap two sets */
3286 error = ipfw_alt_swap_ruleset(rulenum, new_set);
3287 break;
3288 }
3289 return error;
3292 /*
3293 * Clear counters for a specific rule.
3294 */
3295 static void
3296 clear_counters(struct ip_fw *rule, int log_only)
3298 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
3300 if (log_only == 0) {
3301 rule->bcnt = rule->pcnt = 0;
3302 rule->timestamp = 0;
3303 }
3304 if (l->o.opcode == O_LOG)
3305 l->log_left = l->max_log;
3308 static void
3309 ipfw_zero_entry_dispatch(netmsg_t nmsg)
3311 struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
3312 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3313 struct ip_fw *rule;
3315 if (zmsg->rulenum == 0) {
3316 KKASSERT(zmsg->start_rule == NULL);
3318 ctx->ipfw_norule_counter = 0;
3319 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3320 clear_counters(rule, zmsg->log_only);
3321 } else {
3322 struct ip_fw *start = zmsg->start_rule;
3324 KKASSERT(start->cpuid == mycpuid);
3325 KKASSERT(start->rulenum == zmsg->rulenum);
3327 /*
3328 * We can have multiple rules with the same number, so we
3329 * need to clear them all.
3330 */
3331 for (rule = start; rule && rule->rulenum == zmsg->rulenum;
3332 rule = rule->next)
3333 clear_counters(rule, zmsg->log_only);
3335 /*
3336 * Move to the position on the next CPU
3337 * before the msg is forwarded.
3338 */
3339 zmsg->start_rule = start->sibling;
3340 }
3341 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3344 /*
3345 * Reset some or all counters on firewall rules.
3346 * @arg rulenum is 0 to clear all entries, or contains a specific
3347 * rule number.
3348 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
3349 */
3350 static int
3351 ipfw_ctl_zero_entry(int rulenum, int log_only)
3353 struct netmsg_zent zmsg;
3354 struct netmsg_base *nmsg;
3355 const char *msg;
3356 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3358 bzero(&zmsg, sizeof(zmsg));
3359 nmsg = &zmsg.base;
3360 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3361 0, ipfw_zero_entry_dispatch);
3362 zmsg.log_only = log_only;
3364 if (rulenum == 0) {
3365 msg = log_only ? "ipfw: All logging counts reset.\n"
3366 : "ipfw: Accounting cleared.\n";
3367 } else {
3368 struct ip_fw *rule;
3370 /*
3371 * Locate the first rule with 'rulenum'
3372 */
3373 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
3374 if (rule->rulenum == rulenum)
3375 break;
3376 }
3377 if (rule == NULL) /* we did not find any matching rules */
3378 return (EINVAL);
3379 zmsg.start_rule = rule;
3380 zmsg.rulenum = rulenum;
3382 msg = log_only ? "ipfw: Entry %d logging count reset.\n"
3383 : "ipfw: Entry %d cleared.\n";
3384 }
3385 netisr_domsg(nmsg, 0);
3386 KKASSERT(zmsg.start_rule == NULL);
3388 if (fw_verbose)
3389 log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
3390 return (0);
3393 /*
3394 * Check the validity of the structure before insertion.
3395 * Fortunately rules are simple, so this mostly needs to check rule sizes.
3396 */
3397 static int
3398 ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags)
3400 int l, cmdlen = 0;
3401 int have_action = 0;
3402 ipfw_insn *cmd;
3404 *rule_flags = 0;
3406 /* Check for valid size */
3407 if (size < sizeof(*rule)) {
3408 kprintf("ipfw: rule too short\n");
3409 return EINVAL;
3410 }
3411 l = IOC_RULESIZE(rule);
3412 if (l != size) {
3413 kprintf("ipfw: size mismatch (have %d want %d)\n", size, l);
3414 return EINVAL;
3415 }
3417 /* Check rule number */
3418 if (rule->rulenum == IPFW_DEFAULT_RULE) {
3419 kprintf("ipfw: invalid rule number\n");
3420 return EINVAL;
3421 }
3423 /*
3424 * Now go for the individual checks. Very simple ones, basically only
3425 * instruction sizes.
3426 */
3427 for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
3428 l -= cmdlen, cmd += cmdlen) {
3429 cmdlen = F_LEN(cmd);
3430 if (cmdlen > l) {
3431 kprintf("ipfw: opcode %d size truncated\n",
3432 cmd->opcode);
3433 return EINVAL;
3434 }
3436 DPRINTF("ipfw: opcode %d\n", cmd->opcode);
3438 if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) {
3439 /* This rule will create states */
3440 *rule_flags |= IPFW_RULE_F_STATE;
3441 }
3443 switch (cmd->opcode) {
3444 case O_NOP:
3445 case O_PROBE_STATE:
3446 case O_KEEP_STATE:
3447 case O_PROTO:
3448 case O_IP_SRC_ME:
3449 case O_IP_DST_ME:
3450 case O_LAYER2:
3451 case O_IN:
3452 case O_FRAG:
3453 case O_IPOPT:
3454 case O_IPLEN:
3455 case O_IPID:
3456 case O_IPTOS:
3457 case O_IPPRECEDENCE:
3458 case O_IPTTL:
3459 case O_IPVER:
3460 case O_TCPWIN:
3461 case O_TCPFLAGS:
3462 case O_TCPOPTS:
3463 case O_ESTAB:
3464 if (cmdlen != F_INSN_SIZE(ipfw_insn))
3465 goto bad_size;
3466 break;
3468 case O_UID:
3469 case O_GID:
3470 case O_IP_SRC:
3471 case O_IP_DST:
3472 case O_TCPSEQ:
3473 case O_TCPACK:
3474 case O_PROB:
3475 case O_ICMPTYPE:
3476 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
3477 goto bad_size;
3478 break;
3480 case O_LIMIT:
3481 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
3482 goto bad_size;
3483 break;
3485 case O_LOG:
3486 if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
3487 goto bad_size;
3489 ((ipfw_insn_log *)cmd)->log_left =
3490 ((ipfw_insn_log *)cmd)->max_log;
3492 break;
3494 case O_IP_SRC_MASK:
3495 case O_IP_DST_MASK:
3496 if (cmdlen != F_INSN_SIZE(ipfw_insn_ip))
3497 goto bad_size;
3498 if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) {
3499 kprintf("ipfw: opcode %d, useless rule\n",
3500 cmd->opcode);
3501 return EINVAL;
3502 }
3503 break;
3505 case O_IP_SRC_SET:
3506 case O_IP_DST_SET:
3507 if (cmd->arg1 == 0 || cmd->arg1 > 256) {
3508 kprintf("ipfw: invalid set size %d\n",
3509 cmd->arg1);
3510 return EINVAL;
3511 }
3512 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
3513 (cmd->arg1+31)/32 )
3514 goto bad_size;
3515 break;
3517 case O_MACADDR2:
3518 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
3519 goto bad_size;
3520 break;
3522 case O_MAC_TYPE:
3523 case O_IP_SRCPORT:
3524 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
3525 if (cmdlen < 2 || cmdlen > 31)
3526 goto bad_size;
3527 break;
3529 case O_RECV:
3530 case O_XMIT:
3531 case O_VIA:
3532 if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
3533 goto bad_size;
3534 break;
3536 case O_PIPE:
3537 case O_QUEUE:
3538 if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
3539 goto bad_size;
3540 goto check_action;
3542 case O_FORWARD_IP:
3543 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) {
3544 goto bad_size;
3545 } else {
3546 in_addr_t fwd_addr;
3548 fwd_addr = ((ipfw_insn_sa *)cmd)->
3549 sa.sin_addr.s_addr;
3550 if (IN_MULTICAST(ntohl(fwd_addr))) {
3551 kprintf("ipfw: try forwarding to "
3552 "multicast address\n");
3553 return EINVAL;
3554 }
3555 }
3556 goto check_action;
3558 case O_FORWARD_MAC: /* XXX not implemented yet */
3559 case O_CHECK_STATE:
3560 case O_COUNT:
3561 case O_ACCEPT:
3562 case O_DENY:
3563 case O_REJECT:
3564 case O_SKIPTO:
3565 case O_DIVERT:
3566 case O_TEE:
3567 if (cmdlen != F_INSN_SIZE(ipfw_insn))
3568 goto bad_size;
3569 check_action:
3570 if (have_action) {
3571 kprintf("ipfw: opcode %d, multiple actions"
3572 " not allowed\n",
3573 cmd->opcode);
3574 return EINVAL;
3575 }
3576 have_action = 1;
3577 if (l != cmdlen) {
3578 kprintf("ipfw: opcode %d, action must be"
3579 " last opcode\n",
3580 cmd->opcode);
3581 return EINVAL;
3582 }
3583 break;
3584 default:
3585 kprintf("ipfw: opcode %d, unknown opcode\n",
3586 cmd->opcode);
3587 return EINVAL;
3588 }
3589 }
3590 if (have_action == 0) {
3591 kprintf("ipfw: missing action\n");
3592 return EINVAL;
3593 }
3594 return 0;
3596 bad_size:
3597 kprintf("ipfw: opcode %d size %d wrong\n",
3598 cmd->opcode, cmdlen);
3599 return EINVAL;
3600 }
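/*
 * A minimal buffer that passes the checks above would carry a single
 * O_ACCEPT action, much like the default rule built in
 * ipfw_ctx_init_dispatch() below. Sketch only; the helper name is
 * hypothetical and error handling is elided.
 */
#if 0
static void
example_fill_accept_rule(struct ipfw_ioc_rule *r)
{
	r->act_ofs = 0;				/* action is the first opcode */
	r->cmd_len = F_INSN_SIZE(ipfw_insn);	/* one instruction in total */
	r->rulenum = 0;				/* 0: auto-number on insertion */
	r->set = 0;
	r->cmd[0].len = F_INSN_SIZE(ipfw_insn);
	r->cmd[0].opcode = O_ACCEPT;		/* the action, last opcode */
}
#endif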
3602 static int
3603 ipfw_ctl_add_rule(struct sockopt *sopt)
3605 struct ipfw_ioc_rule *ioc_rule;
3606 size_t size;
3607 uint32_t rule_flags;
3608 int error;
3610 size = sopt->sopt_valsize;
3611 if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
3612 size < sizeof(*ioc_rule)) {
3613 return EINVAL;
3614 }
3615 if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
3616 sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
3617 IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
3618 }
3619 ioc_rule = sopt->sopt_val;
3621 error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags);
3622 if (error)
3623 return error;
3625 ipfw_add_rule(ioc_rule, rule_flags);
3627 if (sopt->sopt_dir == SOPT_GET)
3628 sopt->sopt_valsize = IOC_RULESIZE(ioc_rule);
3629 return 0;
3632 static void *
3633 ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
3635 const struct ip_fw *sibling;
3636 #ifdef INVARIANTS
3637 int i;
3638 #endif
3640 KKASSERT(rule->cpuid == IPFW_CFGCPUID);
3642 ioc_rule->act_ofs = rule->act_ofs;
3643 ioc_rule->cmd_len = rule->cmd_len;
3644 ioc_rule->rulenum = rule->rulenum;
3645 ioc_rule->set = rule->set;
3646 ioc_rule->usr_flags = rule->usr_flags;
3648 ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
3649 ioc_rule->static_count = static_count;
3650 ioc_rule->static_len = static_ioc_len;
3652 /*
3653 * Visit (read-only) all of the rule's duplications to get
3654 * the necessary statistics
3655 */
3656 #ifdef INVARIANTS
3657 i = 0;
3658 #endif
3659 ioc_rule->pcnt = 0;
3660 ioc_rule->bcnt = 0;
3661 ioc_rule->timestamp = 0;
3662 for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
3663 ioc_rule->pcnt += sibling->pcnt;
3664 ioc_rule->bcnt += sibling->bcnt;
3665 if (sibling->timestamp > ioc_rule->timestamp)
3666 ioc_rule->timestamp = sibling->timestamp;
3667 #ifdef INVARIANTS
3668 ++i;
3669 #endif
3670 }
3671 KASSERT(i == ncpus, ("static rule is not duplicated on every cpu"));
3673 bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);
3675 return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
3678 static void
3679 ipfw_copy_state(const ipfw_dyn_rule *dyn_rule,
3680 struct ipfw_ioc_state *ioc_state)
3682 const struct ipfw_flow_id *id;
3683 struct ipfw_ioc_flowid *ioc_id;
3685 ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ?
3686 0 : dyn_rule->expire - time_second;
3687 ioc_state->pcnt = dyn_rule->pcnt;
3688 ioc_state->bcnt = dyn_rule->bcnt;
3690 ioc_state->dyn_type = dyn_rule->dyn_type;
3691 ioc_state->count = dyn_rule->count;
3693 ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum;
3695 id = &dyn_rule->id;
3696 ioc_id = &ioc_state->id;
3698 ioc_id->type = ETHERTYPE_IP;
3699 ioc_id->u.ip.dst_ip = id->dst_ip;
3700 ioc_id->u.ip.src_ip = id->src_ip;
3701 ioc_id->u.ip.dst_port = id->dst_port;
3702 ioc_id->u.ip.src_port = id->src_port;
3703 ioc_id->u.ip.proto = id->proto;
3706 static int
3707 ipfw_ctl_get_rules(struct sockopt *sopt)
3709 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3710 struct ip_fw *rule;
3711 void *bp;
3712 size_t size;
3713 uint32_t dcount = 0;
3715 /*
3716 * Pass up a copy of the current rules. Static rules
3717 * come first (the last of which has number IPFW_DEFAULT_RULE),
3718 * followed by a possibly empty list of dynamic rules.
3719 */
3721 size = static_ioc_len; /* size of static rules */
3722 if (ipfw_dyn_v) { /* add size of dyn.rules */
3723 dcount = dyn_count;
3724 size += dcount * sizeof(struct ipfw_ioc_state);
3725 }
3727 if (sopt->sopt_valsize < size) {
3728 /* short length, no need to return incomplete rules */
3729 /* XXX: if superuser, no need to zero buffer */
3730 bzero(sopt->sopt_val, sopt->sopt_valsize);
3731 return 0;
3732 }
3733 bp = sopt->sopt_val;
3735 for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
3736 bp = ipfw_copy_rule(rule, bp);
3738 if (ipfw_dyn_v && dcount != 0) {
3739 struct ipfw_ioc_state *ioc_state = bp;
3740 uint32_t dcount2 = 0;
3741 #ifdef INVARIANTS
3742 size_t old_size = size;
3743 #endif
3744 int i;
3746 lockmgr(&dyn_lock, LK_SHARED);
3748 /* Check 'ipfw_dyn_v' again with lock held */
3749 if (ipfw_dyn_v == NULL)
3750 goto skip;
3752 for (i = 0; i < curr_dyn_buckets; i++) {
3753 ipfw_dyn_rule *p;
3755 /*
3756 * The # of dynamic rules may have grown after the
3757 * snapshot of 'dyn_count' was taken, so we will have
3758 * to check 'dcount' (snapshot of dyn_count) here to
3759 * make sure that we don't overflow the pre-allocated
3760 * buffer.
3761 */
3762 for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0;
3763 p = p->next, ioc_state++, dcount--, dcount2++)
3764 ipfw_copy_state(p, ioc_state);
3765 }
3766 skip:
3767 lockmgr(&dyn_lock, LK_RELEASE);
3769 /*
3770 * The # of dynamic rules may have shrunk after the
3771 * snapshot of 'dyn_count' was taken. To give the user a
3772 * correct dynamic rule count, we use the 'dcount2'
3773 * calculated above (with the shared lockmgr lock held).
3774 */
3775 size = static_ioc_len +
3776 (dcount2 * sizeof(struct ipfw_ioc_state));
3777 KKASSERT(size <= old_size);
3778 }
3780 sopt->sopt_valsize = size;
3781 return 0;
3784 static void
3785 ipfw_set_disable_dispatch(netmsg_t nmsg)
3787 struct lwkt_msg *lmsg = &nmsg->lmsg;
3788 struct ipfw_context *ctx = ipfw_ctx[mycpuid];
3790 ctx->ipfw_set_disable = lmsg->u.ms_result32;
3792 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
3795 static void
3796 ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
3798 struct netmsg_base nmsg;
3799 struct lwkt_msg *lmsg;
3800 uint32_t set_disable;
3802 /* IPFW_DEFAULT_SET is always enabled */
3803 enable |= (1 << IPFW_DEFAULT_SET);
3804 set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;
3806 bzero(&nmsg, sizeof(nmsg));
3807 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
3808 0, ipfw_set_disable_dispatch);
3809 lmsg = &nmsg.lmsg;
3810 lmsg->u.ms_result32 = set_disable;
3812 netisr_domsg(&nmsg, 0);
3815 /*
3816 * {set|get}sockopt parser.
3817 */
3818 static int
3819 ipfw_ctl(struct sockopt *sopt)
3821 int error, rulenum;
3822 uint32_t *masks;
3823 size_t size;
3825 error = 0;
3827 switch (sopt->sopt_name) {
3828 case IP_FW_GET:
3829 error = ipfw_ctl_get_rules(sopt);
3830 break;
3832 case IP_FW_FLUSH:
3833 ipfw_flush(0 /* keep default rule */);
3834 break;
3836 case IP_FW_ADD:
3837 error = ipfw_ctl_add_rule(sopt);
3838 break;
3840 case IP_FW_DEL:
3841 /*
3842 * IP_FW_DEL is used for deleting single rules or sets,
3843 * and (ab)used to atomically manipulate sets.
3844 * Argument size is used to distinguish between the two:
3845 * sizeof(uint32_t)
3846 * delete single rule or set of rules,
3847 * or reassign rules (or sets) to a different set.
3848 * 2 * sizeof(uint32_t)
3849 * atomic disable/enable sets.
3850 * first uint32_t contains sets to be disabled,
3851 * second uint32_t contains sets to be enabled.
3852 */
3853 masks = sopt->sopt_val;
3854 size = sopt->sopt_valsize;
3855 if (size == sizeof(*masks)) {
3856 /*
3857 * Delete or reassign static rule
3858 */
3859 error = ipfw_ctl_alter(masks[0]);
3860 } else if (size == (2 * sizeof(*masks))) {
3861 /*
3862 * Set enable/disable
3863 */
3864 ipfw_ctl_set_disable(masks[0], masks[1]);
3865 } else {
3866 error = EINVAL;
3867 }
3868 break;
3870 case IP_FW_ZERO:
3871 case IP_FW_RESETLOG: /* argument is an int, the rule number */
3872 rulenum = 0;
3874 if (sopt->sopt_val != 0) {
3875 error = soopt_to_kbuf(sopt, &rulenum,
3876 sizeof(int), sizeof(int));
3877 if (error)
3878 break;
3879 }
3880 error = ipfw_ctl_zero_entry(rulenum,
3881 sopt->sopt_name == IP_FW_RESETLOG);
3882 break;
3884 default:
3885 kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name);
3886 error = EINVAL;
3887 }
3888 return error;
3889 }
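/*
 * Sketch of how a userland controller would drive this parser via a
 * raw socket (illustration only, error handling elided):
 */
#if 0
	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	uint32_t arg = (2 << 24) | (3 << 16) | 100;

	/* Move rule 100 to set 3; see ipfw_ctl_alter() above. */
	setsockopt(s, IPPROTO_IP, IP_FW_DEL, &arg, sizeof(arg));
#endif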
3891 /*
3892 * This procedure is only used to handle keepalives. It is invoked
3893 * every dyn_keepalive_period seconds.
3894 */
3895 static void
3896 ipfw_tick_dispatch(netmsg_t nmsg)
3898 time_t keep_alive;
3899 uint32_t gen;
3900 int i;
3902 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
3903 KKASSERT(IPFW_LOADED);
3905 /* Reply ASAP */
3906 crit_enter();
3907 lwkt_replymsg(&nmsg->lmsg, 0);
3908 crit_exit();
3910 if (ipfw_dyn_v == NULL || dyn_count == 0)
3911 goto done;
3913 keep_alive = time_second;
3915 lockmgr(&dyn_lock, LK_EXCLUSIVE);
3916 again:
3917 if (ipfw_dyn_v == NULL || dyn_count == 0) {
3918 lockmgr(&dyn_lock, LK_RELEASE);
3919 goto done;
3920 }
3921 gen = dyn_buckets_gen;
3923 for (i = 0; i < curr_dyn_buckets; i++) {
3924 ipfw_dyn_rule *q, *prev;
3926 for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
3927 uint32_t ack_rev, ack_fwd;
3928 struct ipfw_flow_id id;
3930 if (q->dyn_type == O_LIMIT_PARENT)
3931 goto next;
3933 if (TIME_LEQ(q->expire, time_second)) {
3934 /* State expired */
3935 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
3936 continue;
3937 }
3939 /*
3940 * Keep alive processing
3941 */
3943 if (!dyn_keepalive)
3944 goto next;
3945 if (q->id.proto != IPPROTO_TCP)
3946 goto next;
3947 if ((q->state & BOTH_SYN) != BOTH_SYN)
3948 goto next;
3949 if (TIME_LEQ(time_second + dyn_keepalive_interval,
3950 q->expire))
3951 goto next; /* too early */
3952 if (q->keep_alive == keep_alive)
3953 goto next; /* already done */
3955 /*
3956 * Save the necessary information, so that it can
3957 * survive possible blocking in send_pkt()
3958 */
3959 id = q->id;
3960 ack_rev = q->ack_rev;
3961 ack_fwd = q->ack_fwd;
3963 /* Sending has been started */
3964 q->keep_alive = keep_alive;
3966 /* Release the lock to avoid a possible deadlock */
3967 lockmgr(&dyn_lock, LK_RELEASE);
3968 send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
3969 send_pkt(&id, ack_fwd - 1, ack_rev, 0);
3970 lockmgr(&dyn_lock, LK_EXCLUSIVE);
3972 if (gen != dyn_buckets_gen) {
3973 /*
3974 * The dyn bucket array was changed during
3975 * the two sends above; reiterate.
3976 */
3977 goto again;
3978 }
3979 next:
3980 prev = q;
3981 q = q->next;
3982 }
3983 }
3984 lockmgr(&dyn_lock, LK_RELEASE);
3985 done:
3986 callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz,
3987 ipfw_tick, NULL);
3988 }
3990 /*
3991 * This procedure is only used to handle keepalives. It is invoked
3992 * every dyn_keepalive_period seconds.
3993 */
3994 static void
3995 ipfw_tick(void *dummy __unused)
3997 struct lwkt_msg *lmsg = &ipfw_timeout_netmsg.lmsg;
3999 KKASSERT(mycpuid == IPFW_CFGCPUID);
4001 crit_enter();
4003 KKASSERT(lmsg->ms_flags & MSGF_DONE);
4004 if (IPFW_LOADED) {
4005 lwkt_sendmsg_oncpu(IPFW_CFGPORT, lmsg);
4006 /* ipfw_timeout_netmsg's handler resets this callout */
4007 }
4009 crit_exit();
4012 static int
4013 ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
4015 struct ip_fw_args args;
4016 struct mbuf *m = *m0;
4017 struct m_tag *mtag;
4018 int tee = 0, error = 0, ret;
4020 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
4021 /* Extract info from dummynet tag */
4022 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
4023 KKASSERT(mtag != NULL);
4024 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
4025 KKASSERT(args.rule != NULL);
4027 m_tag_delete(m, mtag);
4028 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
4029 } else {
4030 args.rule = NULL;
4031 }
4033 args.eh = NULL;
4034 args.oif = NULL;
4035 args.m = m;
4036 ret = ipfw_chk(&args);
4037 m = args.m;
4039 if (m == NULL) {
4040 error = EACCES;
4041 goto back;
4042 }
4044 switch (ret) {
4045 case IP_FW_PASS:
4046 break;
4048 case IP_FW_DENY:
4049 m_freem(m);
4050 m = NULL;
4051 error = EACCES;
4052 break;
4054 case IP_FW_DUMMYNET:
4055 /* Send packet to the appropriate pipe */
4056 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
4057 break;
4059 case IP_FW_TEE:
4060 tee = 1;
4061 /* FALL THROUGH */
4063 case IP_FW_DIVERT:
4064 /*
4065 * Must clear bridge tag when changing
4066 */
4067 m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
4068 if (ip_divert_p != NULL) {
4069 m = ip_divert_p(m, tee, 1);
4070 } else {
4071 m_freem(m);
4072 m = NULL;
4073 /* not sure this is the right error msg */
4074 error = EACCES;
4075 }
4076 break;
4078 default:
4079 panic("unknown ipfw return value: %d", ret);
4080 }
4081 back:
4082 *m0 = m;
4083 return error;
4086 static int
4087 ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
4089 struct ip_fw_args args;
4090 struct mbuf *m = *m0;
4091 struct m_tag *mtag;
4092 int tee = 0, error = 0, ret;
4094 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
4095 /* Extract info from dummynet tag */
4096 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
4097 KKASSERT(mtag != NULL);
4098 args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
4099 KKASSERT(args.rule != NULL);
4101 m_tag_delete(m, mtag);
4102 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
4103 } else {
4104 args.rule = NULL;
4105 }
4107 args.eh = NULL;
4108 args.m = m;
4109 args.oif = ifp;
4110 ret = ipfw_chk(&args);
4111 m = args.m;
4113 if (m == NULL) {
4114 error = EACCES;
4115 goto back;
4116 }
4118 switch (ret) {
4119 case IP_FW_PASS:
4120 break;
4122 case IP_FW_DENY:
4123 m_freem(m);
4124 m = NULL;
4125 error = EACCES;
4126 break;
4128 case IP_FW_DUMMYNET:
4129 ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
4130 break;
4132 case IP_FW_TEE:
4133 tee = 1;
4134 /* FALL THROUGH */
4136 case IP_FW_DIVERT:
4137 if (ip_divert_p != NULL) {
4138 m = ip_divert_p(m, tee, 0);
4139 } else {
4140 m_freem(m);
4141 m = NULL;
4142 /* not sure this is the right error msg */
4143 error = EACCES;
4144 }
4145 break;
4147 default:
4148 panic("unknown ipfw return value: %d", ret);
4149 }
4150 back:
4151 *m0 = m;
4152 return error;
4155 static void
4156 ipfw_hook(void)
4158 struct pfil_head *pfh;
4160 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
4162 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
4163 if (pfh == NULL)
4164 return;
4166 pfil_add_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
4167 pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
4170 static void
4171 ipfw_dehook(void)
4173 struct pfil_head *pfh;
4175 IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
4177 pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
4178 if (pfh == NULL)
4179 return;
4181 pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
4182 pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
4185 static void
4186 ipfw_sysctl_enable_dispatch(netmsg_t nmsg)
4188 struct lwkt_msg *lmsg = &nmsg->lmsg;
4189 int enable = lmsg->u.ms_result;
4191 if (fw_enable == enable)
4192 goto reply;
4194 fw_enable = enable;
4195 if (fw_enable)
4196 ipfw_hook();
4197 else
4198 ipfw_dehook();
4199 reply:
4200 lwkt_replymsg(lmsg, 0);
4203 static int
4204 ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
4206 struct netmsg_base nmsg;
4207 struct lwkt_msg *lmsg;
4208 int enable, error;
4210 enable = fw_enable;
4211 error = sysctl_handle_int(oidp, &enable, 0, req);
4212 if (error || req->newptr == NULL)
4213 return error;
4215 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
4216 0, ipfw_sysctl_enable_dispatch);
4217 lmsg = &nmsg.lmsg;
4218 lmsg->u.ms_result = enable;
4220 return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
4223 static int
4224 ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
4226 return sysctl_int_range(oidp, arg1, arg2, req,
4227 IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
4230 static int
4231 ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
4233 int error, value;
4235 lockmgr(&dyn_lock, LK_EXCLUSIVE);
4237 value = dyn_buckets;
4238 error = sysctl_handle_int(oidp, &value, 0, req);
4239 if (error || !req->newptr)
4240 goto back;
4242 /*
4243 * Make sure we have a power of 2 and
4244 * do not allow more than 64k entries.
4245 */
4246 error = EINVAL;
4247 if (value <= 1 || value > 65536)
4248 goto back;
4249 if ((value & (value - 1)) != 0)
4250 goto back;
4252 error = 0;
4253 dyn_buckets = value;
4254 back:
4255 lockmgr(&dyn_lock, LK_RELEASE);
4256 return error;
4257 }
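/*
 * Example of the validation above: 4096 is accepted (4096 & 4095 == 0
 * and it is within the 64k cap), while 4095 and 6000 are rejected
 * because more than one bit is set, i.e. they are not powers of 2.
 */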
4259 static int
4260 ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
4262 return sysctl_int_range(oidp, arg1, arg2, req,
4263 1, dyn_keepalive_period - 1);
4266 static int
4267 ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
4269 return sysctl_int_range(oidp, arg1, arg2, req,
4270 1, dyn_keepalive_period - 1);
4273 static void
4274 ipfw_ctx_init_dispatch(netmsg_t nmsg)
4276 struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
4277 struct ipfw_context *ctx;
4278 struct ip_fw *def_rule;
4280 ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
4281 ipfw_ctx[mycpuid] = ctx;
4283 def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);
4285 def_rule->act_ofs = 0;
4286 def_rule->rulenum = IPFW_DEFAULT_RULE;
4287 def_rule->cmd_len = 1;
4288 def_rule->set = IPFW_DEFAULT_SET;
4290 def_rule->cmd[0].len = 1;
4291 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
4292 def_rule->cmd[0].opcode = O_ACCEPT;
4293 #else
4294 if (filters_default_to_accept)
4295 def_rule->cmd[0].opcode = O_ACCEPT;
4296 else
4297 def_rule->cmd[0].opcode = O_DENY;
4298 #endif
4300 def_rule->refcnt = 1;
4301 def_rule->cpuid = mycpuid;
4303 /* Install the default rule */
4304 ctx->ipfw_default_rule = def_rule;
4305 ctx->ipfw_layer3_chain = def_rule;
4307 /* Link rule CPU sibling */
4308 ipfw_link_sibling(fwmsg, def_rule);
4310 /* Statistics only need to be updated once */
4311 if (mycpuid == 0)
4312 ipfw_inc_static_count(def_rule);
4314 netisr_forwardmsg(&nmsg->base, mycpuid + 1);
4317 static void
4318 ipfw_init_dispatch(netmsg_t nmsg)
4320 struct netmsg_ipfw fwmsg;
4321 int error = 0;
4323 if (IPFW_LOADED) {
4324 kprintf("IP firewall already loaded\n");
4325 error = EEXIST;
4326 goto reply;
4327 }
4329 bzero(&fwmsg, sizeof(fwmsg));
4330 netmsg_init(&fwmsg.base, NULL, &curthread->td_msgport,
4331 0, ipfw_ctx_init_dispatch);
4332 netisr_domsg(&fwmsg.base, 0);
4334 ip_fw_chk_ptr = ipfw_chk;
4335 ip_fw_ctl_ptr = ipfw_ctl;
4336 ip_fw_dn_io_ptr = ipfw_dummynet_io;
4338 kprintf("ipfw2 initialized, default to %s, logging ",
4339 ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
4340 O_ACCEPT ? "accept" : "deny");
4342 #ifdef IPFIREWALL_VERBOSE
4343 fw_verbose = 1;
4344 #endif
4345 #ifdef IPFIREWALL_VERBOSE_LIMIT
4346 verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
4347 #endif
4348 if (fw_verbose == 0) {
4349 kprintf("disabled\n");
4350 } else if (verbose_limit == 0) {
4351 kprintf("unlimited\n");
4352 } else {
4353 kprintf("limited to %d packets/entry by default\n",
4354 verbose_limit);
4355 }
4357 callout_init_mp(&ipfw_timeout_h);
4358 netmsg_init(&ipfw_timeout_netmsg, NULL, &netisr_adone_rport,
4359 MSGF_DROPABLE | MSGF_PRIORITY,
4360 ipfw_tick_dispatch);
4361 lockinit(&dyn_lock, "ipfw_dyn", 0, 0);
4363 ip_fw_loaded = 1;
4364 callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL);
4366 if (fw_enable)
4367 ipfw_hook();
4368 reply:
4369 lwkt_replymsg(&nmsg->lmsg, error);
4372 static int
4373 ipfw_init(void)
4375 struct netmsg_base smsg;
4377 netmsg_init(&smsg, NULL, &curthread->td_msgport,
4378 0, ipfw_init_dispatch);
4379 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
4382 #ifdef KLD_MODULE
4384 static void
4385 ipfw_fini_dispatch(netmsg_t nmsg)
4387 int error = 0, cpu;
4389 if (ipfw_refcnt != 0) {
4390 error = EBUSY;
4391 goto reply;
4392 }
4394 ip_fw_loaded = 0;
4396 ipfw_dehook();
4397 callout_stop(&ipfw_timeout_h);
4399 netmsg_service_sync();
4401 crit_enter();
4402 lwkt_dropmsg(&ipfw_timeout_netmsg.lmsg);
4403 crit_exit();
4405 ip_fw_chk_ptr = NULL;
4406 ip_fw_ctl_ptr = NULL;
4407 ip_fw_dn_io_ptr = NULL;
4408 ipfw_flush(1 /* kill default rule */);
4410 /* Free the per-cpu contexts */
4411 for (cpu = 0; cpu < ncpus; ++cpu)
4412 kfree(ipfw_ctx[cpu], M_IPFW);
4414 kprintf("IP firewall unloaded\n");
4415 reply:
4416 lwkt_replymsg(&nmsg->lmsg, error);
4419 static int
4420 ipfw_fini(void)
4422 struct netmsg_base smsg;
4424 netmsg_init(&smsg, NULL, &curthread->td_msgport,
4425 0, ipfw_fini_dispatch);
4426 return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0);
4429 #endif /* KLD_MODULE */
4431 static int
4432 ipfw_modevent(module_t mod, int type, void *unused)
4434 int err = 0;
4436 switch (type) {
4437 case MOD_LOAD:
4438 err = ipfw_init();
4439 break;
4441 case MOD_UNLOAD:
4442 #ifndef KLD_MODULE
4443 kprintf("ipfw statically compiled, cannot unload\n");
4444 err = EBUSY;
4445 #else
4446 err = ipfw_fini();
4447 #endif
4448 break;
4449 default:
4450 break;
4451 }
4452 return err;
4455 static moduledata_t ipfwmod = {
4456 "ipfw",
4457 ipfw_modevent,
4459 };
4460 DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY);
4461 MODULE_VERSION(ipfw, 1);