/*
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
 * $DragonFly: src/sys/net/ipfw/ip_fw2.c,v 1.100 2008/11/22 11:03:35 sephe Exp $
 */
/*
 * Implement IP packet firewall (new version)
 */

#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ucred.h>
#include <sys/in_cksum.h>

#include <net/route.h>
#include <net/dummynet/ip_dummynet.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/ip_divert.h>
#include <netinet/if_ether.h>	/* XXX for ETHERTYPE_IP */

#include <net/ipfw/ip_fw2.h>
#ifdef IPFIREWALL_DEBUG
#define DPRINTF(fmt, ...) \
do { \
	if (fw_debug > 0) \
		kprintf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(fmt, ...)	((void)0)
#endif
/*
 * Description of the per-CPU rule duplication:
 *
 * Module loading/unloading and all ioctl operations are serialized
 * by netisr0, so we don't have any ordering or locking problems.
 *
 * The following graph shows how an operation on the per-CPU rule
 * lists is performed [2 CPU case]:
 *
 *    CPU0                                       CPU1
 *
 *    netisr0 <------------------------------------+
 *      |                                          |
 *      V                                          |
 *    ifnet0                                       |
 *      :(delete/add...)                           |
 *      :              netmsg                      | netmsg
 *    forwardmsg---------->ifnet1                  |
 *                           :(delete/add...)      |
 *                           :                     |
 *                         replymsg----------------+
 *
 *
 * Rules which will not create states (dyn rules) [2 CPU case]
 *
 *    CPU0                              CPU1
 *
 * layer3_chain                     layer3_chain
 *     |                                |
 *     V                                V
 * +-------+       sibling          +-------+       sibling
 * | rule1 |--------------------->| rule1 |--------------------->NULL
 * +-------+                        +-------+
 *     |                                |
 *     |next                            |next
 *     V                                V
 * +-------+       sibling          +-------+       sibling
 * | rule2 |--------------------->| rule2 |--------------------->NULL
 * +-------+                        +-------+
 *
 * The sibling linkage serves two purposes:
 * 1) Ease statistics calculation during IP_FW_GET.  We only need to
 *    iterate layer3_chain on CPU0; the current rule's duplication on
 *    the other CPUs could safely be read-only accessed by following
 *    the sibling pointer.
 * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
 *    a) In netisr0 (on CPU0) rule3 is determined to be inserted between
 *       rule1 and rule2.  To make this decision we need to iterate the
 *       layer3_chain on CPU0.  The netmsg, which is used to insert the
 *       rule, will contain rule1 on CPU0 as prev_rule and rule2 on CPU0
 *       as next_rule.
 *    b) After the insertion on CPU0 is done, we will move on to CPU1.
 *       But instead of relocating rule3's position on CPU1 by
 *       iterating the layer3_chain on CPU1, we set the netmsg's prev_rule
 *       to rule1->sibling and next_rule to rule2->sibling before the
 *       netmsg is forwarded to CPU1 from CPU0.
 *
 *
 * Rules which will create states (dyn rules) [2 CPU case]
 * (unnecessary parts are omitted; they are the same as in the previous figure)
 *
 *    CPU0                          CPU1
 *
 * +-------+                    +-------+
 * | rule1 |                    | rule1 |
 * +-------+                    +-------+
 *     ^                            ^
 *     |   +--------------------+   |
 *     |   |        stub        |   |
 *     |   | (read-only shared) |   |
 *     |   |                    |   |
 *     |   | back pointer array |   |
 *     |   | (indexed by cpuid) |   |
 *     |   |                    |   |
 *     +---|---------[0]        |   |
 *         |         [1]--------|---+
 *         +--------------------+
 *                   ^
 *                   | stub
 *   ................|..............
 *   :               |             :
 *   :  +---------+  |  +---------+:
 *   :  | state1a |--+--| state1b | ....
 *   :  +---------+     +---------+:
 *   :                             :
 *   :   (protected by dyn_lock)   :
 *   ...............................
 *
 * [state1a and state1b are states created by rule1]
 *
 * ip_fw_stub:
 * This structure is introduced so that the shared (locked) state table
 * can work with the per-CPU (duplicated) static rules.  It mainly
 * bridges states and static rules and serves as the static rule's
 * place holder (a read-only shared part of the duplicated rules) from
 * the states' point of view.
 *
 * IPFW_RULE_F_STATE (only for rules which create states):
 * o  During rule installation, this flag is turned on after the rule's
 *    duplications reach all CPUs, to avoid at least the following race:
 *    1) rule1 is duplicated on CPU0 and is not duplicated on CPU1 yet
 *    2) rule1 creates state1
 *    3) state1 is located on CPU1 by check-state
 *    But rule1 is not duplicated on CPU1 yet
 * o  During rule deletion, this flag is turned off before deleting the
 *    states created by the rule and before deleting the rule itself, so
 *    no more states will be created by the to-be-deleted rule even when
 *    its duplication on certain CPUs is not eliminated yet.
 */
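
/*
 * Illustrative sketch (not part of the original file): a minimal
 * userland model of the sibling-linked per-CPU duplication described
 * above.  The struct and helper names here are simplified stand-ins,
 * not the kernel's real types.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define NCPU    2

struct xrule {
    int          rulenum;
    struct xrule *next;     /* per-CPU list linkage */
    struct xrule *sibling;  /* same logical rule on the next CPU */
};

static struct xrule *chain[NCPU];   /* models per-CPU layer3_chain */

/* Duplicate one logical rule onto every CPU, linking the siblings. */
static void
dup_rule(int rulenum)
{
    struct xrule *prev = NULL;
    int cpu;

    for (cpu = 0; cpu < NCPU; ++cpu) {
        struct xrule *r = calloc(1, sizeof(*r));

        r->rulenum = rulenum;
        r->next = chain[cpu];
        chain[cpu] = r;
        if (prev != NULL)
            prev->sibling = r;  /* cpuN -> cpuN+1 */
        prev = r;
    }
}

int
main(void)
{
    struct xrule *r;

    dup_rule(100);
    /* Statistics-style walk: iterate CPU0, hop to duplicates via sibling. */
    for (r = chain[0]; r != NULL; r = r->next) {
        struct xrule *dup;

        for (dup = r; dup != NULL; dup = dup->sibling)
            printf("rule %d duplicate at %p\n", r->rulenum, (void *)dup);
    }
    return 0;
}
#endif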
#define IPFW_AUTOINC_STEP_MIN	1
#define IPFW_AUTOINC_STEP_MAX	1000
#define IPFW_AUTOINC_STEP_DEF	100

#define IPFW_DEFAULT_RULE	65535	/* rulenum for the default rule */
#define IPFW_DEFAULT_SET	31	/* set number for the default rule */
struct netmsg_ipfw {
	struct netmsg		nmsg;
	const struct ipfw_ioc_rule *ioc_rule;
	struct ip_fw		*next_rule;
	struct ip_fw		*prev_rule;
	struct ip_fw		*sibling;
	struct ip_fw_stub	*stub;
};

struct netmsg_del {
	struct netmsg		nmsg;
	struct ip_fw		*start_rule;
	struct ip_fw		*prev_rule;
};

struct netmsg_zent {
	struct netmsg		nmsg;
	struct ip_fw		*start_rule;
};
struct ipfw_context {
	struct ip_fw	*ipfw_layer3_chain;	/* list of rules for layer3 */
	struct ip_fw	*ipfw_default_rule;	/* default rule */
	uint64_t	ipfw_norule_counter;	/* counter for ipfw_log(NULL) */

	/*
	 * ipfw_set_disable contains one bit per set value (0..31).
	 * If the bit is set, all rules with the corresponding set
	 * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
	 * default rule and CANNOT be disabled.
	 */
	uint32_t	ipfw_set_disable;
	uint32_t	ipfw_gen;		/* generation of rule list */
};

static struct ipfw_context	*ipfw_ctx[MAXCPU];
/*
 * Module cannot be unloaded if there are references to
 * certain rules of ipfw(4), e.g. dummynet(4)
 */
static int ipfw_refcnt;

MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
/*
 * The following two global variables are accessed and
 * updated only on CPU0.
 */
static uint32_t static_count;	/* # of static rules */
static uint32_t static_ioc_len;	/* bytes of static rules */

/*
 * If 1, then ipfw static rules are being flushed,
 * ipfw_chk() will skip to the default rule.
 */
static int ipfw_flushing;

static int fw_verbose;
static int verbose_limit;

static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
static int	ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
    &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
    "Rule number autoincrement step");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, CTLFLAG_RW,
    &fw_one_pass, 0,
    "Only do a single pass through ipfw when using dummynet(4)");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
    &fw_debug, 0, "Enable printing of debug ip_fw statements");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
    &fw_verbose, 0, "Log matches to ipfw rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
    &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
/*
 * Description of dynamic rules.
 *
 * Dynamic rules are stored in lists accessed through a hash table
 * (ipfw_dyn_v) whose size is curr_dyn_buckets.  This value can
 * be modified through the sysctl variable dyn_buckets which is
 * updated when the table becomes empty.
 *
 * XXX currently there is only one list, ipfw_dyn.
 *
 * When a packet is received, its address fields are first masked
 * with the mask defined for the rule, then hashed, then matched
 * against the entries in the corresponding list.
 * Dynamic rules can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic rules is stored in dyn_count.
 * The max number of dynamic rules is dyn_max.  When we reach
 * the maximum number of rules we do not create any more.  This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each dynamic rule holds a pointer to the parent ipfw rule so
 * we know what action to perform.  Dynamic rules are removed when
 * the parent rule is deleted.  XXX we should make them survive.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall.  XXX check the latter!!!
 *
 * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
 * Only a TCP state transition will change a dynamic rule's state and
 * ack sequences, and all packets of one TCP connection only go through
 * one TCP thread, so it is safe to use a shared lockmgr lock during
 * dynamic rule lookup.  The keepalive callout uses an exclusive lockmgr
 * lock when it tries to find suitable dynamic rules to send keepalives,
 * so it will not see half-updated state and ack sequences.  Though the
 * updating of the expire field looks racy for other protocols, the
 * resolution (second) of the expire field makes this kind of race
 * harmless.
 * XXX statistics updating is _not_ MPsafe!!!
 * XXX once the UDP output path is fixed, we could use lockless dynamic
 * rule lookup.
 */
static ipfw_dyn_rule **ipfw_dyn_v = NULL;
static uint32_t dyn_buckets = 256;	/* must be power of 2 */
static uint32_t curr_dyn_buckets = 256;	/* must be power of 2 */
static uint32_t dyn_buckets_gen;	/* generation of dyn buckets array */
static struct lock dyn_lock;		/* dynamic rules' hash table lock */

static struct netmsg ipfw_timeout_netmsg; /* schedule ipfw timeout */
static struct callout ipfw_timeout_h;

/*
 * Timeouts for various events in handling dynamic rules.
 */
static uint32_t dyn_ack_lifetime = 300;
static uint32_t dyn_syn_lifetime = 20;
static uint32_t dyn_fin_lifetime = 1;
static uint32_t dyn_rst_lifetime = 1;
static uint32_t dyn_udp_lifetime = 10;
static uint32_t dyn_short_lifetime = 5;

/*
 * Keepalives are sent if dyn_keepalive is set.  They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of the lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
 */
static uint32_t dyn_keepalive_interval = 20;
static uint32_t dyn_keepalive_period = 5;
static uint32_t dyn_keepalive = 1;	/* do send keepalives */

static uint32_t dyn_count;		/* # of dynamic rules */
static uint32_t dyn_max = 4096;		/* max # of dynamic rules */
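
/*
 * Worked example (illustrative, not part of the original file): with the
 * defaults above, a keepalive probe is only attempted once a rule is
 * within its last dyn_keepalive_interval seconds of life, one probe per
 * dyn_keepalive_period seconds.  A stand-alone restatement of that
 * window test:
 */
#if 0
#include <stdio.h>

int
main(void)
{
    unsigned expire = 1000;                 /* when the rule would die */
    unsigned interval = 20, period = 5;     /* dyn_keepalive_* defaults */
    unsigned now;

    for (now = 975; now <= 1000; now += period) {
        /* send a probe only in the last 'interval' seconds */
        if (expire - now <= interval)
            printf("t=%u: send keepalive pair\n", now);
    }
    return 0;
}
#endif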
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
    &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
    &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
    &dyn_count, 0, "Number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
    &dyn_max, 0, "Max number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
    &static_count, 0, "Number of static rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
    &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
    &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
    "Lifetime of dyn. rules for fin");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
    "Lifetime of dyn. rules for rst");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
    &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
    &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
    &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
static ip_fw_chk_t	ipfw_chk;
static void		ipfw_tick(void *);
static __inline void
ipfw_free_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d\n", mycpuid));
	KASSERT(rule->refcnt > 0, ("invalid refcnt %u\n", rule->refcnt));
	rule->refcnt--;
	if (rule->refcnt == 0)
		kfree(rule, M_IPFW);
}

static void
ipfw_unref_rule(void *priv)
{
	ipfw_free_rule(priv);
	atomic_subtract_int(&ipfw_refcnt, 1);
}

static __inline void
ipfw_ref_rule(struct ip_fw *rule)
{
	KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d\n", mycpuid));
	atomic_add_int(&ipfw_refcnt, 1);
	rule->refcnt++;
}
/*
 * This macro maps an ip pointer into a layer3 header pointer of type T
 */
#define L3HDR(T, ip)	((T *)((uint32_t *)(ip) + (ip)->ip_hl))

static __inline int
icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
{
	int type = L3HDR(struct icmp,ip)->icmp_type;

	return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
}

#define TT	((1 << ICMP_ECHO) | \
		 (1 << ICMP_ROUTERSOLICIT) | \
		 (1 << ICMP_TSTAMP) | \
		 (1 << ICMP_IREQ) | \
		 (1 << ICMP_MASKREQ))

static int
is_icmp_query(struct ip *ip)
{
	int type = L3HDR(struct icmp, ip)->icmp_type;

	return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
}

#undef TT
/*
 * The following checks use two arrays of 8 or 16 bits to store the
 * bits that we want set or clear, respectively.  They are in the
 * low and high half of cmd->arg1 or cmd->d[0].
 *
 * We scan options and store the bits we find set.  We succeed if
 *
 *	(want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
 *
 * The code is sometimes optimized not to store additional variables.
 */
static int
flags_match(ipfw_insn *cmd, uint8_t bits)
{
	u_char want_clear;

	bits = ~bits;

	if (((cmd->arg1 & 0xff) & bits) != 0)
		return 0; /* some bits we want set were clear */

	want_clear = (cmd->arg1 >> 8) & 0xff;
	if ((want_clear & bits) != want_clear)
		return 0; /* some bits we want clear were set */
	return 1;
}
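
/*
 * Example (illustrative, not part of the original file): for a match
 * like "tcpflags syn,!ack", the want-set bits live in the low byte of
 * cmd->arg1 and the want-clear bits in the high byte.  A stand-alone
 * restatement of the check flags_match() performs, with local TH_*
 * stand-ins:
 */
#if 0
#include <stdio.h>

#define XTH_SYN 0x02
#define XTH_ACK 0x10

static int
match_flags(unsigned arg1, unsigned char th_flags)
{
    unsigned char want_set = arg1 & 0xff;
    unsigned char want_clear = (arg1 >> 8) & 0xff;

    /* all want-set bits present, no want-clear bit present */
    return (want_set & ~th_flags) == 0 && (want_clear & th_flags) == 0;
}

int
main(void)
{
    unsigned arg1 = (XTH_ACK << 8) | XTH_SYN;   /* "tcpflags syn,!ack" */

    printf("%d\n", match_flags(arg1, XTH_SYN));             /* 1 */
    printf("%d\n", match_flags(arg1, XTH_SYN | XTH_ACK));   /* 0 */
    return 0;
}
#endif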
static int
ipopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	u_char *cp = (u_char *)(ip + 1);
	int x = (ip->ip_hl << 2) - sizeof(struct ip);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[IPOPT_OPTVAL];

		if (opt == IPOPT_EOL)
			break;

		if (opt == IPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[IPOPT_OLEN];
			if (optlen <= 0 || optlen > x)
				return 0; /* invalid or truncated */
		}

		switch (opt) {
		case IPOPT_LSRR:
			bits |= IP_FW_IPOPT_LSRR;
			break;

		case IPOPT_SSRR:
			bits |= IP_FW_IPOPT_SSRR;
			break;

		case IPOPT_RR:
			bits |= IP_FW_IPOPT_RR;
			break;

		case IPOPT_TS:
			bits |= IP_FW_IPOPT_TS;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}
static int
tcpopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	struct tcphdr *tcp = L3HDR(struct tcphdr,ip);
	u_char *cp = (u_char *)(tcp + 1);
	int x = (tcp->th_off << 2) - sizeof(struct tcphdr);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[0];

		if (opt == TCPOPT_EOL)
			break;

		if (opt == TCPOPT_NOP) {
			optlen = 1;
		} else {
			optlen = cp[1];
			if (optlen <= 0)
				break;
		}

		switch (opt) {
		case TCPOPT_MAXSEG:
			bits |= IP_FW_TCPOPT_MSS;
			break;

		case TCPOPT_WINDOW:
			bits |= IP_FW_TCPOPT_WINDOW;
			break;

		case TCPOPT_SACK_PERMITTED:
		case TCPOPT_SACK:
			bits |= IP_FW_TCPOPT_SACK;
			break;

		case TCPOPT_TIMESTAMP:
			bits |= IP_FW_TCPOPT_TS;
			break;

		case TCPOPT_CC:
			bits |= IP_FW_TCPOPT_CC;
			break;

		default:
			break;
		}
	}
	return (flags_match(cmd, bits));
}
static int
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
{
	if (ifp == NULL)	/* no iface with this packet, match fails */
		return 0;

	/* Check by name or by IP address */
	if (cmd->name[0] != '\0') { /* match by name */
		if (cmd->p.glob) {
			if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
				return 1;
		} else {
			if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
				return 1;
		}
	} else {
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ia = ifac->ifa;

			if (ia->ifa_addr == NULL)
				continue;
			if (ia->ifa_addr->sa_family != AF_INET)
				continue;
			if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
			    (ia->ifa_addr))->sin_addr.s_addr)
				return 1;	/* match */
		}
	}
	return 0;	/* no match, fail ... */
}
#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
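
/*
 * Example (illustrative, not part of the original file): SNPARGS expands
 * to the (buffer, size) pair for an offset within a fixed buffer, which
 * lets successive ksnprintf() calls append safely:
 */
#if 0
static void
snpargs_example(void)
{
    char proto[48];
    int len;

    len = ksnprintf(SNPARGS(proto, 0), "TCP %s", "10.0.0.1");
    ksnprintf(SNPARGS(proto, len), ":%d", 80);
    /* proto now holds "TCP 10.0.0.1:80"; writes past the end are clipped */
}
#endif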
/*
 * We enter here when we have a rule with O_LOG.
 * XXX this function alone takes about 2Kbytes of code!
 */
static void
ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
	 struct mbuf *m, struct ifnet *oif)
{
	char *action;
	int limit_reached = 0;
	char action2[40], proto[48], fragment[28];

	fragment[0] = '\0';
	proto[0] = '\0';

	if (f == NULL) {	/* bogus pkt */
		struct ipfw_context *ctx = ipfw_ctx[mycpuid];

		if (verbose_limit != 0 &&
		    ctx->ipfw_norule_counter >= verbose_limit)
			return;
		ctx->ipfw_norule_counter++;
		if (ctx->ipfw_norule_counter == verbose_limit)
			limit_reached = verbose_limit;
		action = "Refuse";
	} else {	/* O_LOG is the first action, find the real one */
		ipfw_insn *cmd = ACTION_PTR(f);
		ipfw_insn_log *l = (ipfw_insn_log *)cmd;

		if (l->max_log != 0 && l->log_left == 0)
			return;
		l->log_left--;
		if (l->log_left == 0)
			limit_reached = l->max_log;
		cmd += F_LEN(cmd);	/* point to first action */
		if (cmd->opcode == O_PROB)
			cmd += F_LEN(cmd);

		action = action2;
		switch (cmd->opcode) {
		case O_DENY:
			action = "Deny";
			break;

		case O_REJECT:
			if (cmd->arg1 == ICMP_REJECT_RST) {
				action = "Reset";
			} else if (cmd->arg1 == ICMP_UNREACH_HOST) {
				action = "Reject";
			} else {
				ksnprintf(SNPARGS(action2, 0), "Unreach %d",
					  cmd->arg1);
			}
			break;

		case O_ACCEPT:
			action = "Accept";
			break;

		case O_COUNT:
			action = "Count";
			break;

		case O_DIVERT:
			ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
			break;

		case O_TEE:
			ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
			break;

		case O_SKIPTO:
			ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
			break;

		case O_PIPE:
			ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
			break;

		case O_QUEUE:
			ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
			break;

		case O_FORWARD_IP:
			{
			ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
			int len;

			len = ksnprintf(SNPARGS(action2, 0), "Forward to %s",
					inet_ntoa(sa->sa.sin_addr));
			if (sa->sa.sin_port) {
				ksnprintf(SNPARGS(action2, len), ":%d",
					  sa->sa.sin_port);
			}
			}
			break;

		default:
			action = "UNKNOWN";
			break;
		}
	}

	if (hlen == 0) {	/* non-ip */
		ksnprintf(SNPARGS(proto, 0), "MAC");
	} else {
		struct ip *ip = mtod(m, struct ip *);
		/* these three are all aliases to the same thing */
		struct icmp *const icmp = L3HDR(struct icmp, ip);
		struct tcphdr *const tcp = (struct tcphdr *)icmp;
		struct udphdr *const udp = (struct udphdr *)icmp;

		int ip_off, offset, ip_len;
		int len;

		if (eh != NULL) {	/* layer 2 packets are as on the wire */
			ip_off = ntohs(ip->ip_off);
			ip_len = ntohs(ip->ip_len);
		} else {
			ip_off = ip->ip_off;
			ip_len = ip->ip_len;
		}
		offset = ip_off & IP_OFFMASK;

		switch (ip->ip_p) {
		case IPPROTO_TCP:
			len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
					inet_ntoa(ip->ip_src));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(tcp->th_sport),
					  inet_ntoa(ip->ip_dst),
					  ntohs(tcp->th_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  inet_ntoa(ip->ip_dst));
			}
			break;

		case IPPROTO_UDP:
			len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
					inet_ntoa(ip->ip_src));
			if (offset == 0) {
				ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
					  ntohs(udp->uh_sport),
					  inet_ntoa(ip->ip_dst),
					  ntohs(udp->uh_dport));
			} else {
				ksnprintf(SNPARGS(proto, len), " %s",
					  inet_ntoa(ip->ip_dst));
			}
			break;

		case IPPROTO_ICMP:
			if (offset == 0) {
				len = ksnprintf(SNPARGS(proto, 0),
						"ICMP:%u.%u ",
						icmp->icmp_type,
						icmp->icmp_code);
			} else {
				len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
			}
			len += ksnprintf(SNPARGS(proto, len), "%s",
					 inet_ntoa(ip->ip_src));
			ksnprintf(SNPARGS(proto, len), " %s",
				  inet_ntoa(ip->ip_dst));
			break;

		default:
			len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
					inet_ntoa(ip->ip_src));
			ksnprintf(SNPARGS(proto, len), " %s",
				  inet_ntoa(ip->ip_dst));
			break;
		}

		if (ip_off & (IP_MF | IP_OFFMASK)) {
			ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
				  ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
				  offset << 3, (ip_off & IP_MF) ? "+" : "");
		}
	}

	if (oif || m->m_pkthdr.rcvif) {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s %s via %s%s\n",
		    f ? f->rulenum : -1,
		    action, proto, oif ? "out" : "in",
		    oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
		    fragment);
	} else {
		log(LOG_SECURITY | LOG_INFO,
		    "ipfw: %d %s %s [no if info]%s\n",
		    f ? f->rulenum : -1,
		    action, proto, fragment);
	}

	if (limit_reached) {
		log(LOG_SECURITY | LOG_NOTICE,
		    "ipfw: limit %d reached on entry %d\n",
		    limit_reached, f ? f->rulenum : -1);
	}
}
/*
 * IMPORTANT: the hash function for dynamic rules must be commutative
 * in source and destination (ip,port), because rules are bidirectional
 * and we want to find both in the same bucket.
 */
static __inline int
hash_packet(struct ipfw_flow_id *id)
{
	uint32_t i;

	i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
	i &= (curr_dyn_buckets - 1);
	return i;
}
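
/*
 * Sanity sketch (illustrative, not part of the original file): because the
 * hash XORs source and destination symmetrically, a flow and its reverse
 * land in the same bucket, which is what lets lookup_dyn_rule() below find
 * a state in either direction:
 */
#if 0
#include <assert.h>

static unsigned
hash4(unsigned sip, unsigned dip, unsigned sp, unsigned dp, unsigned nbuckets)
{
    return (dip ^ sip ^ dp ^ sp) & (nbuckets - 1);
}

int
main(void)
{
    /* forward and reverse flows hash identically */
    assert(hash4(0x0a000001, 0x0a000002, 12345, 80, 256) ==
           hash4(0x0a000002, 0x0a000001, 80, 12345, 256));
    return 0;
}
#endif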
/*
 * Unlink a dynamic rule from a chain.  prev is a pointer to
 * the previous one, q is a pointer to the rule to delete,
 * head is a pointer to the head of the queue.
 * Modifies q and potentially also head.
 */
#define UNLINK_DYN_RULE(prev, head, q)					\
do {									\
	ipfw_dyn_rule *old_q = q;					\
									\
	/* remove a refcount to the parent */				\
	if (q->dyn_type == O_LIMIT)					\
		q->parent->count--;					\
	DPRINTF("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",	\
		q->id.src_ip, q->id.src_port,				\
		q->id.dst_ip, q->id.dst_port, dyn_count - 1);		\
	if (prev != NULL)						\
		prev->next = q = q->next;				\
	else								\
		head = q = q->next;					\
	KASSERT(dyn_count > 0, ("invalid dyn count %u\n", dyn_count));	\
	dyn_count--;							\
	kfree(old_q, M_IPFW);						\
} while (0)
#define TIME_LEQ(a, b)	((int)((a) - (b)) <= 0)
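
/*
 * Example (illustrative, not part of the original file): TIME_LEQ compares
 * via a signed difference, so it stays correct when the time counter wraps:
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define XTIME_LEQ(a, b) ((int)((a) - (b)) <= 0)

int
main(void)
{
    uint32_t before_wrap = 0xfffffff0u;
    uint32_t after_wrap = 0x00000010u;  /* later, despite the smaller value */

    assert(XTIME_LEQ(before_wrap, after_wrap));
    assert(!XTIME_LEQ(after_wrap, before_wrap));
    return 0;
}
#endif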
/*
 * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
 *
 * If keep_me == NULL, rules are deleted even if not expired,
 * otherwise only expired rules are removed.
 *
 * The value of the second parameter is also used to identify
 * a rule we absolutely do not want to remove (e.g. because we are
 * holding a reference to it -- this is the case with O_LIMIT_PARENT
 * rules).  The pointer is only used for comparison, so any non-null
 * value will do.
 */
static void
remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
{
	static uint32_t last_remove = 0; /* XXX */

#define FORCE	(keep_me == NULL)

	ipfw_dyn_rule *prev, *q;
	int i, pass = 0, max_pass = 0, unlinked = 0;

	if (ipfw_dyn_v == NULL || dyn_count == 0)
		return;
	/* do not expire more than once per second, it is useless */
	if (!FORCE && last_remove == time_second)
		return;
	last_remove = time_second;

	/*
	 * Because O_LIMIT refers to parent rules, during the first pass only
	 * remove child and mark any pending LIMIT_PARENT, and remove
	 * them in a second pass.
	 */
next_pass:
	for (i = 0; i < curr_dyn_buckets; i++) {
		for (prev = NULL, q = ipfw_dyn_v[i]; q;) {
			/*
			 * Logic can become complex here, so we split tests.
			 */
			if (q == keep_me)
				goto next;
			if (rule != NULL && rule->stub != q->stub)
				goto next; /* not the one we are looking for */
			if (q->dyn_type == O_LIMIT_PARENT) {
				/*
				 * handle parent in the second pass,
				 * record we need one.
				 */
				max_pass = 1;
				if (pass == 0)
					goto next;
				if (FORCE && q->count != 0) {
					/* XXX should not happen! */
					kprintf("OUCH! cannot remove rule, "
						"count %d\n", q->count);
				}
			} else {
				if (!FORCE &&
				    !TIME_LEQ(q->expire, time_second))
					goto next;
			}
			unlinked = 1;
			UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
			continue;
next:
			prev = q;
			q = q->next;
		}
	}
	if (pass++ < max_pass)
		goto next_pass;

	if (unlinked)
		++dyn_buckets_gen;

#undef FORCE
}
/*
 * Lookup a dynamic rule.
 */
static ipfw_dyn_rule *
lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
		struct tcphdr *tcp)
{
	/*
	 * stateful ipfw extensions.
	 * Lookup into dynamic session queue
	 */
#define MATCH_REVERSE	0
#define MATCH_FORWARD	1
#define MATCH_NONE	2
#define MATCH_UNKNOWN	3
	int i, dir = MATCH_NONE;
	ipfw_dyn_rule *prev, *q = NULL;

	if (ipfw_dyn_v == NULL)
		goto done;	/* not found */

	i = hash_packet(pkt);
	for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
		if (q->dyn_type == O_LIMIT_PARENT)
			goto next;

		if (TIME_LEQ(q->expire, time_second)) {
			/*
			 * Entry expired; skip.
			 * Let ipfw_tick() take care of it
			 */
			goto next;
		}

		if (pkt->proto == q->id.proto) {
			if (pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				dir = MATCH_FORWARD;
				break;
			}
			if (pkt->src_ip == q->id.dst_ip &&
			    pkt->dst_ip == q->id.src_ip &&
			    pkt->src_port == q->id.dst_port &&
			    pkt->dst_port == q->id.src_port) {
				dir = MATCH_REVERSE;
				break;
			}
		}
next:
		prev = q;
		q = q->next;
	}
	if (q == NULL)
		goto done;	/* q = NULL, not found */

	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
		u_char flags = pkt->flags & (TH_FIN | TH_SYN | TH_RST);

#define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define BOTH_FIN	(TH_FIN | (TH_FIN << 8))

		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
		switch (q->state) {
		case TH_SYN:				/* opening */
			q->expire = time_second + dyn_syn_lifetime;
			break;

		case BOTH_SYN:			/* move to established */
		case BOTH_SYN | TH_FIN:		/* one side tries to close */
		case BOTH_SYN | (TH_FIN << 8):
			if (tcp) {
				uint32_t ack = ntohl(tcp->th_ack);

#define _SEQ_GE(a, b)	((int)(a) - (int)(b) >= 0)

				if (dir == MATCH_FORWARD) {
					if (q->ack_fwd == 0 ||
					    _SEQ_GE(ack, q->ack_fwd))
						q->ack_fwd = ack;
					else /* ignore out-of-sequence */
						break;
				} else {
					if (q->ack_rev == 0 ||
					    _SEQ_GE(ack, q->ack_rev))
						q->ack_rev = ack;
					else /* ignore out-of-sequence */
						break;
				}
#undef _SEQ_GE
			}
			q->expire = time_second + dyn_ack_lifetime;
			break;

		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
			KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_fin_lifetime;
			break;

		default:
			/*
			 * reset or some invalid combination, but can also
			 * occur if we use keep-state the wrong way.
			 */
			if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
				kprintf("invalid state: 0x%x\n", q->state);
			KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
			q->expire = time_second + dyn_rst_lifetime;
			break;
		}
	} else if (pkt->proto == IPPROTO_UDP) {
		q->expire = time_second + dyn_udp_lifetime;
	} else {
		/* other protocols */
		q->expire = time_second + dyn_short_lifetime;
	}
done:
	if (match_direction)
		*match_direction = dir;
	return q;
}
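
/*
 * Example (illustrative, not part of the original file): the TCP tracking
 * above ORs forward-direction flags into the low byte of q->state and
 * reverse-direction flags into the high byte, so BOTH_SYN means "SYN seen
 * in both directions", i.e. the connection is established.  The TH_* and
 * BOTH_* values below are local stand-ins:
 */
#if 0
#include <assert.h>

#define XTH_FIN     0x01
#define XTH_SYN     0x02

#define XBOTH_SYN   (XTH_SYN | (XTH_SYN << 8))
#define XBOTH_FIN   (XTH_FIN | (XTH_FIN << 8))

int
main(void)
{
    unsigned state = 0;

    state |= XTH_SYN;           /* forward SYN */
    state |= XTH_SYN << 8;      /* reverse SYN (the SYN|ACK) */
    assert(state == XBOTH_SYN); /* established */

    state |= XTH_FIN | (XTH_FIN << 8);
    assert(state == (XBOTH_SYN | XBOTH_FIN));   /* both sides closed */
    return 0;
}
#endif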
static struct ip_fw *
lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
	    uint16_t len, int *deny)
{
	struct ip_fw *rule = NULL;
	ipfw_dyn_rule *q;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;

	*deny = 0;
	gen = ctx->ipfw_gen;

	lockmgr(&dyn_lock, LK_SHARED);

	if (ctx->ipfw_gen != gen) {
		/*
		 * Static rules had been changed while we were waiting
		 * for the dynamic hash table lock; deny this packet,
		 * since it is _not_ known whether it is safe to keep
		 * iterating the static rules.
		 */
		*deny = 1;
		goto back;
	}

	q = lookup_dyn_rule(pkt, match_direction, tcp);
	if (q != NULL) {
		rule = q->stub->rule[mycpuid];
		KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);

		/* XXX */
		q->pcnt++;
		q->bcnt += len;
	}
back:
	lockmgr(&dyn_lock, LK_RELEASE);
	return rule;
}
static void
realloc_dynamic_table(void)
{
	ipfw_dyn_rule **old_dyn_v;
	uint32_t old_curr_dyn_buckets;

	KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
		("invalid dyn_buckets %d\n", dyn_buckets));

	/* Save the current buckets array for later error recovery */
	old_dyn_v = ipfw_dyn_v;
	old_curr_dyn_buckets = curr_dyn_buckets;

	curr_dyn_buckets = dyn_buckets;
	for (;;) {
		ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
				     M_IPFW, M_NOWAIT | M_ZERO);
		if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
			break;

		curr_dyn_buckets /= 2;
		if (curr_dyn_buckets <= old_curr_dyn_buckets &&
		    old_dyn_v != NULL) {
			/*
			 * Don't try allocating smaller buckets array, reuse
			 * the old one, which already contains enough buckets
			 */
			break;
		}
	}

	if (ipfw_dyn_v != NULL) {
		if (old_dyn_v != NULL)
			kfree(old_dyn_v, M_IPFW);
	} else {
		/* Allocation failed, restore old buckets array */
		ipfw_dyn_v = old_dyn_v;
		curr_dyn_buckets = old_curr_dyn_buckets;
	}

	if (ipfw_dyn_v != NULL)
		++dyn_buckets_gen;
}
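
/*
 * Example (illustrative, not part of the original file): the KASSERT above
 * relies on the classic power-of-two test, and hash_packet() relies on the
 * same property to reduce "hash % nbuckets" to a cheap mask:
 */
#if 0
#include <assert.h>

static int
is_pow2(unsigned n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

int
main(void)
{
    assert(is_pow2(256));
    assert(!is_pow2(384));
    /* masking is equivalent to modulo only for power-of-two sizes */
    assert((12345u & (256 - 1)) == 12345u % 256);
    return 0;
}
#endif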
/*
 * Install state of type 'type' for a dynamic session.
 * The hash table contains two types of rules:
 * - regular rules (O_KEEP_STATE)
 * - rules for sessions with limited number of sess per user
 *   (O_LIMIT).  When they are created, the parent is
 *   increased by 1, and decreased on delete.  In this case,
 *   the third parameter is the parent rule and not the chain.
 * - "parent" rules for the above (O_LIMIT_PARENT).
 */
static ipfw_dyn_rule *
add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
{
	ipfw_dyn_rule *r;
	int i;

	if (ipfw_dyn_v == NULL ||
	    (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
		realloc_dynamic_table();
		if (ipfw_dyn_v == NULL)
			return NULL; /* failed ! */
	}
	i = hash_packet(id);

	r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
	if (r == NULL) {
		kprintf("sorry cannot allocate state\n");
		return NULL;
	}

	/* increase refcount on parent, and set pointer */
	if (dyn_type == O_LIMIT) {
		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;

		if (parent->dyn_type != O_LIMIT_PARENT)
			panic("invalid parent");
		parent->count++;
		r->parent = parent;
		rule = parent->stub->rule[mycpuid];
		KKASSERT(rule->stub == parent->stub);
	}
	KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);

	r->id = *id;
	r->expire = time_second + dyn_syn_lifetime;
	r->stub = rule->stub;
	r->dyn_type = dyn_type;
	r->pcnt = r->bcnt = 0;
	r->count = 0;

	r->next = ipfw_dyn_v[i];
	ipfw_dyn_v[i] = r;
	dyn_count++;
	dyn_buckets_gen++;
	DPRINTF("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
		dyn_type,
		r->id.src_ip, r->id.src_port,
		r->id.dst_ip, r->id.dst_port, dyn_count);
	return r;
}
/*
 * Lookup a dynamic parent rule using pkt and rule as search keys.
 * If the lookup fails, then install one.
 */
static ipfw_dyn_rule *
lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
{
	ipfw_dyn_rule *q;
	int i;

	if (ipfw_dyn_v != NULL) {
		i = hash_packet(pkt);
		for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
			if (q->dyn_type == O_LIMIT_PARENT &&
			    rule->stub == q->stub &&
			    pkt->proto == q->id.proto &&
			    pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				q->expire = time_second + dyn_short_lifetime;
				DPRINTF("lookup_dyn_parent found 0x%p\n", q);
				return q;
			}
		}
	}
	return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
}
/*
 * Install dynamic state for rule type cmd->o.opcode
 *
 * Returns 1 (failure) if state is not installed because of errors or because
 * session limitations are enforced.
 */
static int
install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
		     struct ip_fw_args *args)
{
	static int last_log; /* XXX */
	ipfw_dyn_rule *q;

	DPRINTF("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
		cmd->o.opcode,
		args->f_id.src_ip, args->f_id.src_port,
		args->f_id.dst_ip, args->f_id.dst_port);

	q = lookup_dyn_rule(&args->f_id, NULL, NULL);
	if (q != NULL) { /* should never occur */
		if (last_log != time_second) {
			last_log = time_second;
			kprintf(" install_state: entry already present, done\n");
		}
		return 0;
	}

	if (dyn_count >= dyn_max) {
		/*
		 * Run out of slots, try to remove any expired rule.
		 */
		remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
		if (dyn_count >= dyn_max) {
			if (last_log != time_second) {
				last_log = time_second;
				kprintf("install_state: "
					"Too many dynamic rules\n");
			}
			return 1; /* cannot install, notify caller */
		}
	}

	switch (cmd->o.opcode) {
	case O_KEEP_STATE: /* bidir rule */
		if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL)
			return 1;
		break;

	case O_LIMIT: /* limit number of sessions */
		{
		uint16_t limit_mask = cmd->limit_mask;
		struct ipfw_flow_id id;
		ipfw_dyn_rule *parent;

		DPRINTF("installing dyn-limit rule %d\n", cmd->conn_limit);

		id.dst_ip = id.src_ip = 0;
		id.dst_port = id.src_port = 0;
		id.proto = args->f_id.proto;

		if (limit_mask & DYN_SRC_ADDR)
			id.src_ip = args->f_id.src_ip;
		if (limit_mask & DYN_DST_ADDR)
			id.dst_ip = args->f_id.dst_ip;
		if (limit_mask & DYN_SRC_PORT)
			id.src_port = args->f_id.src_port;
		if (limit_mask & DYN_DST_PORT)
			id.dst_port = args->f_id.dst_port;

		parent = lookup_dyn_parent(&id, rule);
		if (parent == NULL) {
			kprintf("add parent failed\n");
			return 1;
		}

		if (parent->count >= cmd->conn_limit) {
			/*
			 * See if we can remove some expired rule.
			 */
			remove_dyn_rule_locked(rule, parent);
			if (parent->count >= cmd->conn_limit) {
				if (fw_verbose &&
				    last_log != time_second) {
					last_log = time_second;
					log(LOG_SECURITY | LOG_DEBUG,
					    "drop session, "
					    "too many entries\n");
				}
				return 1;
			}
		}
		if (add_dyn_rule(&args->f_id, O_LIMIT,
				 (struct ip_fw *)parent) == NULL)
			return 1;
		}
		break;

	default:
		kprintf("unknown dynamic rule type %u\n", cmd->o.opcode);
		return 1;
	}
	lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
	return 0;
}
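
/*
 * Example (illustrative, not part of the original file): for O_LIMIT, only
 * the fields selected by limit_mask survive into the parent's flow id, so
 * e.g. a src-addr limit groups all sessions from one source address under
 * a single O_LIMIT_PARENT.  The struct and XDYN_* values below are local
 * stand-ins, not the real kernel definitions:
 */
#if 0
struct xflow {
    unsigned src_ip, dst_ip;
    unsigned short src_port, dst_port;
};

#define XDYN_SRC_ADDR   0x1
#define XDYN_DST_ADDR   0x2

static struct xflow
parent_key(const struct xflow *pkt, unsigned mask)
{
    struct xflow id = { 0, 0, 0, 0 };

    if (mask & XDYN_SRC_ADDR)
        id.src_ip = pkt->src_ip;
    if (mask & XDYN_DST_ADDR)
        id.dst_ip = pkt->dst_ip;
    /* ports analogous, omitted for brevity */
    return id;
}
#endif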
static int
install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
	      struct ip_fw_args *args, int *deny)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;
	int ret = 0;

	*deny = 0;
	gen = ctx->ipfw_gen;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);
	if (ctx->ipfw_gen != gen) {
		/* See the comment in lookup_rule() */
		*deny = 1;
	} else {
		ret = install_state_locked(rule, cmd, args);
	}
	lockmgr(&dyn_lock, LK_RELEASE);
	return ret;
}
/*
 * Transmit a TCP packet, containing either a RST or a keepalive.
 * When flags & TH_RST, we are sending a RST packet, because a
 * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and flags & TH_SYN determines
 * the direction of the keepalive.
 */
static void
send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	struct route sro;	/* fake route */

	MGETHDR(m, MB_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return;
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
	m->m_data += max_linkhdr;

	ip = mtod(m, struct ip *);
	bzero(ip, m->m_len);
	tcp = (struct tcphdr *)(ip + 1); /* no IP options */
	ip->ip_p = IPPROTO_TCP;
	tcp->th_off = 5;

	/*
	 * Assume we are sending a RST (or a keepalive in the reverse
	 * direction), swap src and destination addresses and ports.
	 */
	ip->ip_src.s_addr = htonl(id->dst_ip);
	ip->ip_dst.s_addr = htonl(id->src_ip);
	tcp->th_sport = htons(id->dst_port);
	tcp->th_dport = htons(id->src_port);
	if (flags & TH_RST) {	/* we are sending a RST */
		if (flags & TH_ACK) {
			tcp->th_seq = htonl(ack);
			tcp->th_ack = htonl(0);
			tcp->th_flags = TH_RST;
		} else {
			tcp->th_seq = htonl(0);
			tcp->th_ack = htonl(seq);
			tcp->th_flags = TH_RST | TH_ACK;
		}
	} else {
		/*
		 * We are sending a keepalive.  flags & TH_SYN determines
		 * the direction, forward if set, reverse if clear.
		 * NOTE: seq and ack are always assumed to be correct
		 * as set by the caller.  This may be confusing...
		 */
		if (flags & TH_SYN) {
			/*
			 * we have to rewrite the correct addresses!
			 */
			ip->ip_dst.s_addr = htonl(id->dst_ip);
			ip->ip_src.s_addr = htonl(id->src_ip);
			tcp->th_dport = htons(id->dst_port);
			tcp->th_sport = htons(id->src_port);
		}
		tcp->th_seq = htonl(seq);
		tcp->th_ack = htonl(ack);
		tcp->th_flags = TH_ACK;
	}

	/*
	 * set ip_len to the payload size so we can compute
	 * the tcp checksum on the pseudoheader
	 * XXX check this, could save a couple of words ?
	 */
	ip->ip_len = htons(sizeof(struct tcphdr));
	tcp->th_sum = in_cksum(m, m->m_pkthdr.len);

	/*
	 * now fill fields left out earlier
	 */
	ip->ip_ttl = ip_defttl;
	ip->ip_len = m->m_pkthdr.len;

	bzero(&sro, sizeof(sro));
	ip_rtaddr(ip->ip_dst, &sro);

	m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
	ip_output(m, NULL, &sro, 0, NULL, NULL);
	if (sro.ro_rt)
		RTFREE(sro.ro_rt);
}
/*
 * Send a reject message, consuming the mbuf passed as an argument.
 */
static void
send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
{
	if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
		/* We need the IP header in host order for icmp_error(). */
		if (args->eh != NULL) {
			struct ip *ip = mtod(args->m, struct ip *);

			ip->ip_len = ntohs(ip->ip_len);
			ip->ip_off = ntohs(ip->ip_off);
		}
		icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
	} else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
		struct tcphdr *const tcp =
		    L3HDR(struct tcphdr, mtod(args->m, struct ip *));

		if ((tcp->th_flags & TH_RST) == 0) {
			send_pkt(&args->f_id, ntohl(tcp->th_seq),
				 ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
		}
		m_freem(args->m);
	} else {
		m_freem(args->m);
	}
	args->m = NULL;
}
/*
 * Given an ip_fw *, lookup_next_rule will return a pointer
 * to the next rule, which can be either the jump
 * target (for skipto instructions) or the next one in the list (in
 * all other cases including a missing jump target).
 * The result is also written in the "next_rule" field of the rule.
 * Backward jumps are not allowed, so start looking from the next
 * rule.
 *
 * This never returns NULL -- in case we do not have an exact match,
 * the next rule is returned.  When the ruleset is changed,
 * pointers are flushed so we are always correct.
 */
static struct ip_fw *
lookup_next_rule(struct ip_fw *me)
{
	struct ip_fw *rule = NULL;
	ipfw_insn *cmd;

	/* look for action, in case it is a skipto */
	cmd = ACTION_PTR(me);
	if (cmd->opcode == O_LOG)
		cmd += F_LEN(cmd);
	if (cmd->opcode == O_SKIPTO) {
		for (rule = me->next; rule; rule = rule->next) {
			if (rule->rulenum >= cmd->arg1)
				break;
		}
	}
	if (rule == NULL)	/* failure or not a skipto */
		rule = me->next;
	me->next_rule = rule;
	return rule;
}
static int
_ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
		enum ipfw_opcodes opcode, uid_t uid)
{
	struct in_addr src_ip, dst_ip;
	struct inpcbinfo *pi;
	int wildcard;
	struct inpcb *pcb;

	if (fid->proto == IPPROTO_TCP) {
		wildcard = 0;
		pi = &tcbinfo[mycpuid];
	} else if (fid->proto == IPPROTO_UDP) {
		wildcard = 1;
		pi = &udbinfo;
	} else {
		return 0;
	}

	/*
	 * Values in 'fid' are in host byte order
	 */
	dst_ip.s_addr = htonl(fid->dst_ip);
	src_ip.s_addr = htonl(fid->src_ip);
	if (oif) {
		pcb = in_pcblookup_hash(pi,
			dst_ip, htons(fid->dst_port),
			src_ip, htons(fid->src_port),
			wildcard, oif);
	} else {
		pcb = in_pcblookup_hash(pi,
			src_ip, htons(fid->src_port),
			dst_ip, htons(fid->dst_port),
			wildcard, NULL);
	}
	if (pcb == NULL || pcb->inp_socket == NULL)
		return 0;

	if (opcode == O_UID) {
#define socheckuid(a,b)	((a)->so_cred->cr_uid != (b))
		return !socheckuid(pcb->inp_socket, uid);
#undef socheckuid
	} else {
		return groupmember(uid, pcb->inp_socket->so_cred);
	}
}

static int
ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
	       enum ipfw_opcodes opcode, uid_t uid, int *deny)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	uint32_t gen;
	int match = 0;

	*deny = 0;
	gen = ctx->ipfw_gen;

	get_mplock();

	if (gen != ctx->ipfw_gen) {
		/* See the comment in lookup_rule() */
		*deny = 1;
	} else {
		match = _ipfw_match_uid(fid, oif, opcode, uid);
	}

	rel_mplock();
	return match;
}
/*
 * The main check routine for the firewall.
 *
 * All arguments are in args so we can modify them and return them
 * back to the caller.
 *
 * Parameters:
 *
 *	args->m	(in/out) The packet; we set to NULL when/if we nuke it.
 *		Starts with the IP header.
 *	args->eh (in)	Mac header if present, or NULL for layer3 packet.
 *	args->oif	Outgoing interface, or NULL if packet is incoming.
 *		The incoming interface is in the mbuf. (in)
 *
 *	args->rule	Pointer to the last matching rule (in/out)
 *	args->f_id	Addresses grabbed from the packet (out)
 *
 * Return value:
 *
 *	If the packet was denied/rejected and has been dropped, *m is equal
 *	to NULL upon return.
 *
 *	IP_FW_DENY	the packet must be dropped.
 *	IP_FW_PASS	The packet is to be accepted and routed normally.
 *	IP_FW_DIVERT	Divert the packet to port (args->cookie)
 *	IP_FW_TEE	Tee the packet to port (args->cookie)
 *	IP_FW_DUMMYNET	Send the packet to pipe/queue (args->cookie)
 */
static int
ipfw_chk(struct ip_fw_args *args)
{
	/*
	 * Local variables hold state during the processing of a packet.
	 *
	 * IMPORTANT NOTE: to speed up the processing of rules, there
	 * are some assumptions on the values of the variables, which
	 * are documented here.  Should you change them, please check
	 * the implementation of the various instructions to make sure
	 * that they still work.
	 *
	 * args->eh	The MAC header.  It is non-null for a layer2
	 *	packet, it is NULL for a layer-3 packet.
	 *
	 * m | args->m	Pointer to the mbuf, as received from the caller.
	 *	It may change if ipfw_chk() does an m_pullup, or if it
	 *	consumes the packet because it calls send_reject().
	 *	XXX This has to change, so that ipfw_chk() never modifies
	 *	or consumes the buffer.
	 *	ip is simply an alias of the value of m, and it is kept
	 *	in sync with it (the packet is supposed to start with
	 *	the ip header).
	 */
	struct mbuf *m = args->m;
	struct ip *ip = mtod(m, struct ip *);

	/*
	 * oif | args->oif	If NULL, ipfw_chk has been called on the
	 *	inbound path (ether_input, ip_input).
	 *	If non-NULL, ipfw_chk has been called on the outbound path
	 *	(ether_output, ip_output).
	 */
	struct ifnet *oif = args->oif;

	struct ip_fw *f = NULL;		/* matching rule */
	int retval = IP_FW_PASS;
	struct m_tag *mtag;
	struct divert_info *divinfo;

	/*
	 * hlen	The length of the IPv4 header.
	 *	hlen > 0 means we have an IPv4 packet.
	 */
	u_int hlen = 0;		/* hlen >0 means we have an IP pkt */

	/*
	 * offset	The offset of a fragment.  offset != 0 means that
	 *	we have a fragment at this offset of an IPv4 packet.
	 *	offset == 0 means that (if this is an IPv4 packet)
	 *	this is the first or only fragment.
	 */
	u_short offset = 0;

	/*
	 * Local copies of addresses.  They are only valid if we have
	 * an IP packet.
	 *
	 * proto	The protocol.  Set to 0 for non-ip packets,
	 *	or to the protocol read from the packet otherwise.
	 *	proto != 0 means that we have an IPv4 packet.
	 *
	 * src_port, dst_port	port numbers, in HOST format.  Only
	 *	valid for TCP and UDP packets.
	 *
	 * src_ip, dst_ip	ip addresses, in NETWORK format.
	 *	Only valid for IPv4 packets.
	 */
	uint8_t proto;
	uint16_t src_port = 0, dst_port = 0;	/* NOTE: host format */
	struct in_addr src_ip, dst_ip;		/* NOTE: network format */
	uint16_t ip_len = 0;

	/*
	 * dyn_dir = MATCH_UNKNOWN when rules unchecked,
	 *	MATCH_NONE when checked and not matched (dyn_f = NULL),
	 *	MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
	 */
	int dyn_dir = MATCH_UNKNOWN;
	struct ip_fw *dyn_f = NULL;
	int deny = 0;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
		return IP_FW_PASS;	/* accept */

	if (args->eh == NULL ||		/* layer 3 packet */
	    (m->m_pkthdr.len >= sizeof(struct ip) &&
	     ntohs(args->eh->ether_type) == ETHERTYPE_IP))
		hlen = ip->ip_hl << 2;

	/*
	 * Collect parameters into local variables for faster matching.
	 */
	if (hlen == 0) {	/* do not grab addresses for non-ip pkts */
		proto = args->f_id.proto = 0;	/* mark f_id invalid */
		goto after_ip_checks;
	}

	proto = args->f_id.proto = ip->ip_p;
	src_ip = ip->ip_src;
	dst_ip = ip->ip_dst;
	if (args->eh != NULL) {	/* layer 2 packets are as on the wire */
		offset = ntohs(ip->ip_off) & IP_OFFMASK;
		ip_len = ntohs(ip->ip_len);
	} else {
		offset = ip->ip_off & IP_OFFMASK;
		ip_len = ip->ip_len;
	}

#define PULLUP_TO(len)				\
do {						\
	if (m->m_len < (len)) {			\
		args->m = m = m_pullup(m, (len));\
		if (m == NULL)			\
			goto pullup_failed;	\
		ip = mtod(m, struct ip *);	\
	}					\
} while (0)
	if (offset == 0) {
		switch (proto) {
		case IPPROTO_TCP:
			{
			struct tcphdr *tcp;

			PULLUP_TO(hlen + sizeof(struct tcphdr));
			tcp = L3HDR(struct tcphdr, ip);
			dst_port = tcp->th_dport;
			src_port = tcp->th_sport;
			args->f_id.flags = tcp->th_flags;
			}
			break;

		case IPPROTO_UDP:
			{
			struct udphdr *udp;

			PULLUP_TO(hlen + sizeof(struct udphdr));
			udp = L3HDR(struct udphdr, ip);
			dst_port = udp->uh_dport;
			src_port = udp->uh_sport;
			}
			break;

		case IPPROTO_ICMP:
			PULLUP_TO(hlen + 4);	/* type, code and checksum. */
			args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
			break;

		default:
			break;
		}
	}

#undef PULLUP_TO

	args->f_id.src_ip = ntohl(src_ip.s_addr);
	args->f_id.dst_ip = ntohl(dst_ip.s_addr);
	args->f_id.src_port = src_port = ntohs(src_port);
	args->f_id.dst_port = dst_port = ntohs(dst_port);

after_ip_checks:
	if (args->rule) {
		/*
		 * Packet has already been tagged.  Look for the next rule
		 * to restart processing.
		 *
		 * If fw_one_pass != 0 then just accept it.
		 * XXX should not happen here, but optimized out in
		 * the caller.
		 */
		if (fw_one_pass)
			return IP_FW_PASS;

		/* This rule is being/has been flushed */
		if (ipfw_flushing)
			return IP_FW_DENY;

		KASSERT(args->rule->cpuid == mycpuid,
			("rule used on cpu%d\n", mycpuid));

		/* This rule was deleted */
		if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
			return IP_FW_DENY;

		f = args->rule->next_rule;
		if (f == NULL)
			f = lookup_next_rule(args->rule);
	} else {
		/*
		 * Find the starting rule.  It can be either the first
		 * one, or the one after divert_rule if asked so.
		 */
		int skipto = 0;

		mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
		if (mtag != NULL) {
			divinfo = m_tag_data(mtag);
			skipto = divinfo->skipto;
		}

		f = ctx->ipfw_layer3_chain;
		if (args->eh == NULL && skipto != 0) {
			/* No skipto during rule flushing */
			if (ipfw_flushing)
				return IP_FW_DENY;

			if (skipto >= IPFW_DEFAULT_RULE)
				return IP_FW_DENY; /* invalid */

			while (f && f->rulenum <= skipto)
				f = f->next;
			if (f == NULL)	/* drop packet */
				return IP_FW_DENY;
		} else if (ipfw_flushing) {
			/* Rules are being flushed; skip to default rule */
			f = ctx->ipfw_default_rule;
		}
	}
	if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
		m_tag_delete(m, mtag);
	/*
	 * Now scan the rules, and parse microinstructions for each rule.
	 */
	for (; f; f = f->next) {
		int l, cmdlen;
		ipfw_insn *cmd;
		int skip_or;	/* skip rest of OR block */

again:
		if (ctx->ipfw_set_disable & (1 << f->set))
			continue;

		skip_or = 0;
		for (l = f->cmd_len, cmd = f->cmd; l > 0;
		     l -= cmdlen, cmd += cmdlen) {
			int match;

			/*
			 * check_body is a jump target used when we find a
			 * CHECK_STATE, and need to jump to the body of
			 * the target rule.
			 */
check_body:
			cmdlen = F_LEN(cmd);
			/*
			 * An OR block (insn_1 || .. || insn_n) has the
			 * F_OR bit set in all but the last instruction.
			 * The first match will set "skip_or", and cause
			 * the following instructions to be skipped until
			 * past the one with the F_OR bit clear.
			 */
			if (skip_or) {		/* skip this instruction */
				if ((cmd->len & F_OR) == 0)
					skip_or = 0;	/* next one is good */
				continue;
			}
			match = 0;	/* set to 1 if we succeed */

			switch (cmd->opcode) {
			/*
			 * The first set of opcodes compares the packet's
			 * fields with some pattern, setting 'match' if a
			 * match is found.  At the end of the loop there is
			 * logic to deal with F_NOT and F_OR flags associated
			 * with the opcode.
			 */
			case O_NOP:
				match = 1;
				break;

			case O_FORWARD_MAC:
				kprintf("ipfw: opcode %d unimplemented\n",
					cmd->opcode);
				break;

			case O_GID:
			case O_UID:
				/*
				 * We only check offset == 0 && proto != 0,
				 * as this ensures that we have an IPv4
				 * packet with the ports info.
				 */
				if (offset != 0)
					break;

				match = ipfw_match_uid(&args->f_id, oif,
					cmd->opcode,
					(uid_t)((ipfw_insn_u32 *)cmd)->d[0],
					&deny);
				if (deny)
					return IP_FW_DENY;
				break;

			case O_RECV:
				match = iface_match(m->m_pkthdr.rcvif,
						    (ipfw_insn_if *)cmd);
				break;

			case O_XMIT:
				match = iface_match(oif, (ipfw_insn_if *)cmd);
				break;

			case O_VIA:
				match = iface_match(oif ? oif :
				    m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
				break;

			case O_MACADDR2:
				if (args->eh != NULL) {	/* have MAC header */
					uint32_t *want = (uint32_t *)
						((ipfw_insn_mac *)cmd)->addr;
					uint32_t *mask = (uint32_t *)
						((ipfw_insn_mac *)cmd)->mask;
					uint32_t *hdr = (uint32_t *)args->eh;

					match =
					(want[0] == (hdr[0] & mask[0]) &&
					 want[1] == (hdr[1] & mask[1]) &&
					 want[2] == (hdr[2] & mask[2]));
				}
				break;

			case O_MAC_TYPE:
				if (args->eh != NULL) {
					uint16_t t =
					    ntohs(args->eh->ether_type);
					uint16_t *p =
					    ((ipfw_insn_u16 *)cmd)->ports;
					int i;

					/* Special vlan handling */
					if (m->m_flags & M_VLANTAG)
						t = ETHERTYPE_VLAN;

					for (i = cmdlen - 1; !match && i > 0;
					     i--, p += 2) {
						match =
						(t == p[0] ||
						 (t >= p[0] && t <= p[1]));
					}
				}
				break;

			case O_FRAG:
				match = (hlen > 0 && offset != 0);
				break;

			case O_IN:	/* "out" is "not in" */
				match = (oif == NULL);
				break;

			case O_LAYER2:
				match = (args->eh != NULL);
				break;

			case O_PROTO:
				/*
				 * We do not allow an arg of 0 so the
				 * check of "proto" only suffices.
				 */
				match = (proto == cmd->arg1);
				break;

			case O_IP_SRC:
				match = (hlen > 0 &&
				    ((ipfw_insn_ip *)cmd)->addr.s_addr ==
				    src_ip.s_addr);
				break;

			case O_IP_SRC_MASK:
				match = (hlen > 0 &&
				    ((ipfw_insn_ip *)cmd)->addr.s_addr ==
				    (src_ip.s_addr &
				     ((ipfw_insn_ip *)cmd)->mask.s_addr));
				break;

			case O_IP_SRC_ME:
				if (hlen > 0) {
					struct ifnet *tif;

					tif = INADDR_TO_IFP(&src_ip);
					match = (tif != NULL);
				}
				break;

			case O_IP_DST_SET:
			case O_IP_SRC_SET:
				if (hlen > 0) {
					uint32_t *d = (uint32_t *)(cmd + 1);
					uint32_t addr =
					    cmd->opcode == O_IP_DST_SET ?
						args->f_id.dst_ip :
						args->f_id.src_ip;

					if (addr < d[0])
						break;
					addr -= d[0]; /* subtract base */
					match =
					(addr < cmd->arg1) &&
					 (d[1 + (addr >> 5)] &
					  (1 << (addr & 0x1f)));
				}
				break;

			case O_IP_DST:
				match = (hlen > 0 &&
				    ((ipfw_insn_ip *)cmd)->addr.s_addr ==
				    dst_ip.s_addr);
				break;

			case O_IP_DST_MASK:
				match = (hlen > 0) &&
				    (((ipfw_insn_ip *)cmd)->addr.s_addr ==
				     (dst_ip.s_addr &
				      ((ipfw_insn_ip *)cmd)->mask.s_addr));
				break;

			case O_IP_DST_ME:
				if (hlen > 0) {
					struct ifnet *tif;

					tif = INADDR_TO_IFP(&dst_ip);
					match = (tif != NULL);
				}
				break;

			case O_IP_SRCPORT:
			case O_IP_DSTPORT:
				/*
				 * offset == 0 && proto != 0 is enough
				 * to guarantee that we have an IPv4
				 * packet with port info.
				 */
				if ((proto == IPPROTO_UDP ||
				     proto == IPPROTO_TCP) && offset == 0) {
					uint16_t x =
					    (cmd->opcode == O_IP_SRCPORT) ?
						src_port : dst_port;
					uint16_t *p =
					    ((ipfw_insn_u16 *)cmd)->ports;
					int i;

					for (i = cmdlen - 1; !match && i > 0;
					     i--, p += 2) {
						match =
						(x == p[0] ||
						 (x >= p[0] && x <= p[1]));
					}
				}
				break;

			case O_ICMPTYPE:
				match = (offset == 0 &&
				    proto == IPPROTO_ICMP &&
				    icmptype_match(ip, (ipfw_insn_u32 *)cmd));
				break;

			case O_IPOPT:
				match = (hlen > 0 && ipopts_match(ip, cmd));
				break;

			case O_IPVER:
				match = (hlen > 0 && cmd->arg1 == ip->ip_v);
				break;

			case O_IPTTL:
				match = (hlen > 0 && cmd->arg1 == ip->ip_ttl);
				break;

			case O_IPID:
				match = (hlen > 0 &&
					 cmd->arg1 == ntohs(ip->ip_id));
				break;

			case O_IPLEN:
				match = (hlen > 0 && cmd->arg1 == ip_len);
				break;

			case O_IPPRECEDENCE:
				match = (hlen > 0 &&
					 (cmd->arg1 == (ip->ip_tos & 0xe0)));
				break;

			case O_IPTOS:
				match = (hlen > 0 &&
					 flags_match(cmd, ip->ip_tos));
				break;

			case O_TCPFLAGS:
				match = (proto == IPPROTO_TCP && offset == 0 &&
					 flags_match(cmd,
					 L3HDR(struct tcphdr,ip)->th_flags));
				break;

			case O_TCPOPTS:
				match = (proto == IPPROTO_TCP && offset == 0 &&
					 tcpopts_match(ip, cmd));
				break;

			case O_TCPSEQ:
				match = (proto == IPPROTO_TCP && offset == 0 &&
					 ((ipfw_insn_u32 *)cmd)->d[0] ==
					 L3HDR(struct tcphdr,ip)->th_seq);
				break;

			case O_TCPACK:
				match = (proto == IPPROTO_TCP && offset == 0 &&
					 ((ipfw_insn_u32 *)cmd)->d[0] ==
					 L3HDR(struct tcphdr,ip)->th_ack);
				break;

			case O_TCPWIN:
				match = (proto == IPPROTO_TCP && offset == 0 &&
					 cmd->arg1 ==
					 L3HDR(struct tcphdr,ip)->th_win);
				break;

			case O_ESTAB:
				/* reject packets which have SYN only */
				/* XXX should i also check for TH_ACK ? */
				match = (proto == IPPROTO_TCP && offset == 0 &&
					 (L3HDR(struct tcphdr,ip)->th_flags &
					  (TH_RST | TH_ACK | TH_SYN)) !=
					 TH_SYN);
				break;

			case O_LOG:
				if (fw_verbose)
					ipfw_log(f, hlen, args->eh, m, oif);
				match = 1;
				break;

			case O_PROB:
				match = (krandom() <
					 ((ipfw_insn_u32 *)cmd)->d[0]);
				break;
			/*
			 * The second set of opcodes represents 'actions',
			 * i.e. the terminal part of a rule once the packet
			 * matches all previous patterns.
			 * Typically there is only one action for each rule,
			 * and the opcode is stored at the end of the rule
			 * (but there are exceptions -- see below).
			 *
			 * In general, here we set retval and terminate the
			 * outer loop (would be a 'break 3' in some language,
			 * but we need to do a 'goto done').
			 *
			 * Exceptions:
			 * O_COUNT and O_SKIPTO actions:
			 *   instead of terminating, we jump to the next rule
			 *   ('goto next_rule', equivalent to a 'break 2'),
			 *   or to the SKIPTO target ('goto again' after
			 *   having set f, cmd and l), respectively.
			 *
			 * O_LIMIT and O_KEEP_STATE: these opcodes are
			 *   not real 'actions', and are stored right
			 *   before the 'action' part of the rule.
			 *   These opcodes try to install an entry in the
			 *   state tables; if successful, we continue with
			 *   the next opcode (match=1; break;), otherwise
			 *   the packet must be dropped ('goto done' after
			 *   setting retval).  If static rules are changed
			 *   during the state installation, the packet will
			 *   be dropped and the rule's stats will not be
			 *   updated ('return IP_FW_DENY').
			 *
			 * O_PROBE_STATE and O_CHECK_STATE: these opcodes
			 *   cause a lookup of the state table, and a jump
			 *   to the 'action' part of the parent rule
			 *   ('goto check_body') if an entry is found, or
			 *   (CHECK_STATE only) a jump to the next rule if
			 *   the entry is not found ('goto next_rule').
			 *   The result of the lookup is cached to make
			 *   further instances of these opcodes effectively
			 *   NOPs.  If static rules are changed during the
			 *   state lookup, the packet will be dropped and
			 *   the rule's stats will not be updated
			 *   ('return IP_FW_DENY').
			 */
			case O_LIMIT:
			case O_KEEP_STATE:
				if (!(f->rule_flags & IPFW_RULE_F_STATE)) {
					kprintf("%s rule (%d) is not ready "
						"on cpu%d\n",
						cmd->opcode == O_LIMIT ?
						"limit" : "keep state",
						f->rulenum, f->cpuid);
					goto next_rule;
				}
				if (install_state(f,
				    (ipfw_insn_limit *)cmd, args, &deny)) {
					if (deny)
						return IP_FW_DENY;

					retval = IP_FW_DENY;
					goto done; /* error/limit violation */
				}
				if (deny)
					return IP_FW_DENY;
				match = 1;
				break;

			case O_PROBE_STATE:
			case O_CHECK_STATE:
				/*
				 * dynamic rules are checked at the first
				 * keep-state or check-state occurrence,
				 * with the result being stored in dyn_dir.
				 * The compiler introduces a PROBE_STATE
				 * instruction for us when we have a
				 * KEEP_STATE (because PROBE_STATE needs
				 * to be run first).
				 */
				if (dyn_dir == MATCH_UNKNOWN) {
					dyn_f = lookup_rule(&args->f_id,
						&dyn_dir,
						proto == IPPROTO_TCP ?
						L3HDR(struct tcphdr, ip) : NULL,
						ip_len, &deny);
					if (deny)
						return IP_FW_DENY;
					if (dyn_f != NULL) {
						/*
						 * Found a rule from a dynamic
						 * entry; jump to the 'action'
						 * part of the rule.
						 */
						f = dyn_f;
						cmd = ACTION_PTR(f);
						l = f->cmd_len - f->act_ofs;
						goto check_body;
					}
				}
				/*
				 * Dynamic entry not found.  If CHECK_STATE,
				 * skip to next rule, if PROBE_STATE just
				 * ignore and continue with next opcode.
				 */
				if (cmd->opcode == O_CHECK_STATE)
					goto next_rule;
				else if (!(f->rule_flags & IPFW_RULE_F_STATE))
					goto next_rule; /* not ready yet */
				match = 1;
				break;

			case O_ACCEPT:
				retval = IP_FW_PASS;	/* accept */
				goto done;

			case O_PIPE:
			case O_QUEUE:
				args->rule = f; /* report matching rule */
				args->cookie = cmd->arg1;
				retval = IP_FW_DUMMYNET;
				goto done;

			case O_DIVERT:
			case O_TEE:
				if (args->eh) /* not on layer 2 */
					break;

				mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT,
						 sizeof(*divinfo),
						 MB_DONTWAIT);
				if (mtag == NULL) {
					retval = IP_FW_DENY;
					goto done;
				}
				divinfo = m_tag_data(mtag);

				divinfo->skipto = f->rulenum;
				divinfo->port = cmd->arg1;
				divinfo->tee = (cmd->opcode == O_TEE);
				m_tag_prepend(m, mtag);

				args->cookie = cmd->arg1;
				retval = (cmd->opcode == O_DIVERT) ?
					 IP_FW_DIVERT : IP_FW_TEE;
				goto done;

			case O_COUNT:
			case O_SKIPTO:
				f->pcnt++;	/* update stats */
				f->bcnt += ip_len;
				f->timestamp = time_second;
				if (cmd->opcode == O_COUNT)
					goto next_rule;

				/* handle skipto */
				if (f->next_rule == NULL)
					lookup_next_rule(f);
				f = f->next_rule;
				goto again;

			case O_REJECT:
				/*
				 * Drop the packet and send a reject notice
				 * if the packet is not ICMP (or is an ICMP
				 * query), and it is not multicast/broadcast.
				 */
				if (hlen > 0 &&
				    (proto != IPPROTO_ICMP ||
				     is_icmp_query(ip)) &&
				    !(m->m_flags & (M_BCAST | M_MCAST)) &&
				    !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
					/*
					 * Update statistics before the
					 * possible blocking 'send_reject'
					 */
					f->pcnt++;
					f->bcnt += ip_len;
					f->timestamp = time_second;

					send_reject(args, cmd->arg1,
						    offset, ip_len);
					m = args->m;

					/*
					 * Return directly here, rule stats
					 * have been updated above.
					 */
					return IP_FW_DENY;
				}
				/* FALLTHROUGH */

			case O_DENY:
				retval = IP_FW_DENY;
				goto done;

			case O_FORWARD_IP:
				if (args->eh) /* not valid on layer2 pkts */
					break;
				if (!dyn_f || dyn_dir == MATCH_FORWARD) {
					struct sockaddr_in *sin;

					mtag = m_tag_get(PACKET_TAG_IPFORWARD,
							 sizeof(*sin),
							 MB_DONTWAIT);
					if (mtag == NULL) {
						retval = IP_FW_DENY;
						goto done;
					}
					sin = m_tag_data(mtag);

					/* Structure copy */
					*sin = ((ipfw_insn_sa *)cmd)->sa;

					m_tag_prepend(m, mtag);
					m->m_pkthdr.fw_flags |=
						IPFORWARD_MBUF_TAGGED;
				}
				retval = IP_FW_PASS;
				goto done;

			default:
				panic("-- unknown opcode %d\n", cmd->opcode);
			} /* end of switch() on opcodes */

			if (cmd->len & F_NOT)
				match = !match;

			if (match) {
				if (cmd->len & F_OR)
					skip_or = 1;
			} else {
				if (!(cmd->len & F_OR)) /* not an OR block, */
					break;		/* try next rule */
			}
		} /* end of inner for, scan opcodes */

next_rule:;	/* try next rule */

	} /* end of outer for, scan rules */
	kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
	return IP_FW_DENY;

done:
	/* Update statistics */
	f->pcnt++;
	f->bcnt += ip_len;
	f->timestamp = time_second;
	return retval;

pullup_failed:
	if (fw_verbose)
		kprintf("pullup failed\n");
	return IP_FW_DENY;
}
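
/*
 * Example (illustrative, not part of the original file): the tail of the
 * micro-instruction loop above combines each opcode's raw result with the
 * F_NOT and F_OR modifiers.  A stand-alone restatement, using local F_*
 * stand-in values:
 */
#if 0
#include <stdio.h>

#define XF_NOT  0x80
#define XF_OR   0x40

/* Evaluate one instruction's final verdict and OR-block skip decision. */
static void
combine(unsigned len, int raw_match, int *match, int *skip_or)
{
    *match = (len & XF_NOT) ? !raw_match : raw_match;
    if (*match) {
        if (len & XF_OR)
            *skip_or = 1;   /* rest of the OR block short-circuits */
    } else {
        if (!(len & XF_OR)) /* not in an OR block: this rule fails */
            printf("try next rule\n");
    }
}
#endif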
static void
ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt *pkt;
	struct m_tag *mtag;
	const struct ipfw_flow_id *id;
	struct dn_flow_id *fid;
	ipfw_insn *cmd;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT);
	if (mtag == NULL) {
		m_freem(m);
		return;
	}
	m_tag_prepend(m, mtag);

	pkt = m_tag_data(mtag);
	bzero(pkt, sizeof(*pkt));

	cmd = fwa->rule->cmd + fwa->rule->act_ofs;
	if (cmd->opcode == O_LOG)
		cmd += F_LEN(cmd);
	KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
		("Rule is not PIPE or QUEUE, opcode %d\n", cmd->opcode));

	pkt->dn_m = m;
	pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
	pkt->ifp = fwa->oif;
	pkt->pipe_nr = pipe_nr;

	pkt->cpuid = mycpuid;
	pkt->msgport = curnetport;

	id = &fwa->f_id;
	fid = &pkt->id;
	fid->fid_dst_ip = id->dst_ip;
	fid->fid_src_ip = id->src_ip;
	fid->fid_dst_port = id->dst_port;
	fid->fid_src_port = id->src_port;
	fid->fid_proto = id->proto;
	fid->fid_flags = id->flags;

	ipfw_ref_rule(fwa->rule);
	pkt->dn_priv = fwa->rule;
	pkt->dn_unref_priv = ipfw_unref_rule;

	if (cmd->opcode == O_PIPE)
		pkt->dn_flags |= DN_FLAGS_IS_PIPE;

	m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
}
/*
 * When a rule is added/deleted, clear the next_rule pointers in all rules.
 * These will be reconstructed on the fly as packets are matched.
 * Must be called at splimp().
 */
static void
ipfw_flush_rule_ptrs(struct ipfw_context *ctx)
{
	struct ip_fw *rule;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
		rule->next_rule = NULL;
}
static __inline void
ipfw_inc_static_count(struct ip_fw *rule)
{
	/* Static rule's counts are updated only on CPU0 */
	KKASSERT(mycpuid == 0);

	static_count++;
	static_ioc_len += IOC_RULESIZE(rule);
}
static __inline void
ipfw_dec_static_count(struct ip_fw *rule)
{
	int l = IOC_RULESIZE(rule);

	/* Static rule's counts are updated only on CPU0 */
	KKASSERT(mycpuid == 0);

	KASSERT(static_count > 0, ("invalid static count %u\n", static_count));
	static_count--;

	KASSERT(static_ioc_len >= l,
		("invalid static len %u\n", static_ioc_len));
	static_ioc_len -= l;
}
static void
ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule)
{
	if (fwmsg->sibling != NULL) {
		KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1);
		fwmsg->sibling->sibling = rule;
	}
	fwmsg->sibling = rule;
}
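
/*
 * Allocate and initialize the per-CPU copy of an ioc rule.  The
 * returned rule is bound to the current CPU; if the rule may create
 * states, it is also remembered in the shared 'stub'.
 */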
static struct ip_fw *
ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub)
{
	struct ip_fw *rule;

	rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO);

	rule->act_ofs = ioc_rule->act_ofs;
	rule->cmd_len = ioc_rule->cmd_len;
	rule->rulenum = ioc_rule->rulenum;
	rule->set = ioc_rule->set;
	rule->usr_flags = ioc_rule->usr_flags;

	bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */);

	rule->refcnt = 1;
	rule->cpuid = mycpuid;

	rule->stub = stub;
	if (stub != NULL)
		stub->rule[mycpuid] = rule;

	return rule;
}
static void
ipfw_add_rule_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub);

	/*
	 * Bump generation after ipfw_create_rule(),
	 * since this function is blocking
	 */
	ctx->ipfw_gen++;

	/*
	 * Insert rule into the pre-determined position
	 */
	if (fwmsg->prev_rule != NULL) {
		struct ip_fw *prev, *next;

		prev = fwmsg->prev_rule;
		KKASSERT(prev->cpuid == mycpuid);

		next = fwmsg->next_rule;
		KKASSERT(next->cpuid == mycpuid);

		rule->next = next;
		prev->next = rule;

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		fwmsg->prev_rule = prev->sibling;
		fwmsg->next_rule = next->sibling;
	} else {
		KKASSERT(fwmsg->next_rule == NULL);
		rule->next = ctx->ipfw_layer3_chain;
		ctx->ipfw_layer3_chain = rule;
	}

	/* Link rule CPU sibling */
	ipfw_link_sibling(fwmsg, rule);

	/* Flush next_rule pointers */
	ipfw_flush_rule_ptrs(ctx);

	if (mycpuid == 0) {
		/* Statistics only need to be updated once */
		ipfw_inc_static_count(rule);

		/* Return the rule on CPU0 */
		nmsg->nm_lmsg.u.ms_resultp = rule;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
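
/*
 * Runs on each CPU in turn: set the STATE flag on the local
 * duplication of the freshly added rule, then forward the message to
 * the next CPU.  The flag is turned on only after all CPUs have set
 * up their duplications, so no state can ever reference a
 * half-installed rule.
 */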
static void
ipfw_enable_state_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct ip_fw *rule = lmsg->u.ms_resultp;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	ctx->ipfw_gen++;

	KKASSERT(rule->cpuid == mycpuid);
	KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule);
	KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE));
	rule->rule_flags |= IPFW_RULE_F_STATE;
	lmsg->u.ms_resultp = rule->sibling;

	ifnet_forwardmsg(lmsg, mycpuid + 1);
}
/*
 * Add a new rule to the list.  Copy the rule into a malloc'ed area,
 * then possibly create a rule number and add the rule to the list.
 * Update the rule_number in the input struct so the caller knows
 * it as well.
 */
static void
ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct netmsg_ipfw fwmsg;
	struct netmsg *nmsg;
	struct ip_fw *f, *prev, *rule;
	struct ip_fw_stub *stub;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	/*
	 * If rulenum is 0, find highest numbered rule before the
	 * default rule, and add rule number incremental step.
	 */
	if (ioc_rule->rulenum == 0) {
		int step = autoinc_step;

		KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
			 step <= IPFW_AUTOINC_STEP_MAX);

		/*
		 * Locate the highest numbered rule before default
		 */
		for (f = ctx->ipfw_layer3_chain; f; f = f->next) {
			if (f->rulenum == IPFW_DEFAULT_RULE)
				break;
			ioc_rule->rulenum = f->rulenum;
		}
		if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
			ioc_rule->rulenum += step;
	}
	KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
		ioc_rule->rulenum != 0,
		("invalid rule num %d\n", ioc_rule->rulenum));

	/*
	 * Now find the right place for the new rule in the sorted list.
	 */
	for (prev = NULL, f = ctx->ipfw_layer3_chain; f;
	     prev = f, f = f->next) {
		if (f->rulenum > ioc_rule->rulenum) {
			/* Found the location */
			break;
		}
	}
	KASSERT(f != NULL, ("no default rule?!\n"));

	if (rule_flags & IPFW_RULE_F_STATE) {
		int size;

		/*
		 * If the new rule will create states, then allocate
		 * a rule stub, which will be referenced by states
		 * (dyn rules)
		 */
		size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *));
		stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO);
	} else {
		stub = NULL;
	}

	/*
	 * Duplicate the rule onto each CPU.
	 * The rule duplicated on CPU0 will be returned.
	 */
	bzero(&fwmsg, sizeof(fwmsg));
	nmsg = &fwmsg.nmsg;
	netmsg_init(nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_add_rule_dispatch);
	fwmsg.ioc_rule = ioc_rule;
	fwmsg.prev_rule = prev;
	fwmsg.next_rule = prev == NULL ? NULL : f;
	fwmsg.stub = stub;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL);

	rule = nmsg->nm_lmsg.u.ms_resultp;
	KKASSERT(rule != NULL && rule->cpuid == mycpuid);

	if (rule_flags & IPFW_RULE_F_STATE) {
		/*
		 * Turn on state flag, _after_ everything on all
		 * CPUs have been setup.
		 */
		bzero(nmsg, sizeof(*nmsg));
		netmsg_init(nmsg, NULL, &curthread->td_msgport,
			    0, ipfw_enable_state_dispatch);
		nmsg->nm_lmsg.u.ms_resultp = rule;

		ifnet_domsg(&nmsg->nm_lmsg, 0);
		KKASSERT(nmsg->nm_lmsg.u.ms_resultp == NULL);
	}

	DPRINTF("++ installed rule %d, static count now %d\n",
		rule->rulenum, static_count);
}
/*
 * Free storage associated with a static rule (including derived
 * dynamic rules).
 * The caller is in charge of clearing rule pointers to avoid
 * dangling pointers.
 * @return a pointer to the next entry.
 * Arguments are not checked, so they better be correct.
 * Must be called at splimp().
 */
static struct ip_fw *
ipfw_delete_rule(struct ipfw_context *ctx,
		 struct ip_fw *prev, struct ip_fw *rule)
{
	struct ip_fw *n;
	struct ip_fw_stub *stub;

	ctx->ipfw_gen++;

	/* STATE flag should have been cleared before we reach here */
	KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0);

	stub = rule->stub;

	n = rule->next;
	if (prev == NULL)
		ctx->ipfw_layer3_chain = n;
	else
		prev->next = n;

	/* Mark the rule as invalid */
	rule->rule_flags |= IPFW_RULE_F_INVALID;
	rule->next_rule = NULL;
	rule->sibling = NULL;
	rule->stub = NULL;
	/* Don't reset cpuid here; keep various assertion working */

	/* Statistics only need to be updated once */
	if (mycpuid == 0)
		ipfw_dec_static_count(rule);

	/* Free 'stub' on the last CPU */
	if (stub != NULL && mycpuid == ncpus - 1)
		kfree(stub, M_IPFW);

	/* Try to free this rule */
	ipfw_free_rule(rule);

	/* Return the next rule */
	return n;
}
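
/*
 * Runs on each CPU in turn: delete every rule on the local chain
 * (including the default rule, if 'kill_default' is set), then
 * forward the message to the next CPU.
 */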
static void
ipfw_flush_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	int kill_default = lmsg->u.ms_result;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */

	while ((rule = ctx->ipfw_layer3_chain) != NULL &&
	       (kill_default || rule->rulenum != IPFW_DEFAULT_RULE))
		ipfw_delete_rule(ctx, NULL, rule);

	ifnet_forwardmsg(lmsg, mycpuid + 1);
}
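
/*
 * Runs on each CPU in turn: clear the STATE flag on the local
 * duplications of the targeted rules, so no new states will be
 * created from them, then forward the message to the next CPU.
 */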
static void
ipfw_disable_rule_state_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	ctx->ipfw_gen++;

	rule = dmsg->start_rule;
	if (rule != NULL) {
		KKASSERT(rule->cpuid == mycpuid);

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		dmsg->start_rule = rule->sibling;
	} else {
		KKASSERT(dmsg->rulenum == 0);
		rule = ctx->ipfw_layer3_chain;
	}

	while (rule != NULL) {
		if (dmsg->rulenum && rule->rulenum != dmsg->rulenum)
			break;
		rule->rule_flags &= ~IPFW_RULE_F_STATE;
		rule = rule->next;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
/*
 * Deletes all rules from a chain (including the default rule
 * if the second argument is set).
 * Must be called at splimp().
 */
static void
ipfw_flush(int kill_default)
{
	struct netmsg_del dmsg;
	struct netmsg nmsg;
	struct lwkt_msg *lmsg;
	struct ip_fw *rule;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	/*
	 * If 'kill_default' then caller has done the necessary
	 * msgport syncing; unnecessary to do it again.
	 */
	if (!kill_default) {
		/*
		 * Let ipfw_chk() know the rules are going to
		 * be flushed, so it could jump directly to
		 * the default rule.
		 */
		ipfw_flushing = 1;
		netmsg_service_sync();
	}

	/*
	 * Clear STATE flag on rules, so no more states (dyn rules)
	 * will be created.
	 */
	bzero(&dmsg, sizeof(dmsg));
	netmsg_init(&dmsg.nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_disable_rule_state_dispatch);
	ifnet_domsg(&dmsg.nmsg.nm_lmsg, 0);

	/*
	 * This actually nukes all states (dyn rules)
	 */
	lockmgr(&dyn_lock, LK_EXCLUSIVE);
	for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) {
		/*
		 * Can't check IPFW_RULE_F_STATE here,
		 * since it has been cleared previously.
		 * Check 'stub' instead.
		 */
		if (rule->stub != NULL) {
			/* Force removal */
			remove_dyn_rule_locked(rule, NULL);
		}
	}
	lockmgr(&dyn_lock, LK_RELEASE);

	/*
	 * Press the 'flush' button
	 */
	bzero(&nmsg, sizeof(nmsg));
	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_flush_dispatch);
	lmsg = &nmsg.nm_lmsg;
	lmsg->u.ms_result = kill_default;
	ifnet_domsg(lmsg, 0);

	KASSERT(dyn_count == 0, ("%u dyn rule remains\n", dyn_count));

	if (kill_default) {
		if (ipfw_dyn_v != NULL) {
			/*
			 * Free dynamic rules(state) hash table
			 */
			kfree(ipfw_dyn_v, M_IPFW);
			ipfw_dyn_v = NULL;
		}

		KASSERT(static_count == 0,
			("%u static rules remains\n", static_count));
		KASSERT(static_ioc_len == 0,
			("%u bytes of static rules remains\n",
			 static_ioc_len));
	} else {
		KASSERT(static_count == 1,
			("%u static rules remains\n", static_count));
		KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
			("%u bytes of static rules remains, should be %lu\n",
			 static_ioc_len,
			 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule)));
	}

	/* Flush is done */
	ipfw_flushing = 0;
}
static void
ipfw_alt_delete_rule_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule, *prev;

	rule = dmsg->start_rule;
	KKASSERT(rule->cpuid == mycpuid);
	dmsg->start_rule = rule->sibling;

	prev = dmsg->prev_rule;
	if (prev != NULL) {
		KKASSERT(prev->cpuid == mycpuid);

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		dmsg->prev_rule = prev->sibling;
	}

	/*
	 * flush pointers outside the loop, then delete all matching
	 * rules.  'prev' remains the same throughout the cycle.
	 */
	ipfw_flush_rule_ptrs(ctx);
	while (rule && rule->rulenum == dmsg->rulenum)
		rule = ipfw_delete_rule(ctx, prev, rule);

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
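
/*
 * Delete all rules numbered 'rulenum'.  If any of them may create
 * states, state creation is disabled and the related states are
 * nuked first; the rule duplications are then removed CPU by CPU.
 */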
static int
ipfw_alt_delete_rule(uint16_t rulenum)
{
	struct ip_fw *prev, *rule, *f;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct netmsg_del dmsg;
	struct netmsg *nmsg;
	int state;

	/*
	 * Locate first rule to delete
	 */
	for (prev = NULL, rule = ctx->ipfw_layer3_chain;
	     rule && rule->rulenum < rulenum;
	     prev = rule, rule = rule->next)
		; /* EMPTY */
	if (rule->rulenum != rulenum)
		return EINVAL;

	/*
	 * Check whether any rules with the given number will
	 * create states.
	 */
	state = 0;
	for (f = rule; f && f->rulenum == rulenum; f = f->next) {
		if (f->rule_flags & IPFW_RULE_F_STATE) {
			state = 1;
			break;
		}
	}

	if (state) {
		/*
		 * Clear the STATE flag, so no more states will be
		 * created based on the rules numbered 'rulenum'.
		 */
		bzero(&dmsg, sizeof(dmsg));
		nmsg = &dmsg.nmsg;
		netmsg_init(nmsg, NULL, &curthread->td_msgport,
			    0, ipfw_disable_rule_state_dispatch);
		dmsg.start_rule = rule;
		dmsg.rulenum = rulenum;

		ifnet_domsg(&nmsg->nm_lmsg, 0);
		KKASSERT(dmsg.start_rule == NULL);

		/*
		 * Nuke all related states
		 */
		lockmgr(&dyn_lock, LK_EXCLUSIVE);
		for (f = rule; f && f->rulenum == rulenum; f = f->next) {
			/*
			 * Can't check IPFW_RULE_F_STATE here,
			 * since it has been cleared previously.
			 * Check 'stub' instead.
			 */
			if (f->stub != NULL) {
				/* Force removal */
				remove_dyn_rule_locked(f, NULL);
			}
		}
		lockmgr(&dyn_lock, LK_RELEASE);
	}

	/*
	 * Get rid of the rule duplications on all CPUs
	 */
	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_alt_delete_rule_dispatch);
	dmsg.prev_rule = prev;
	dmsg.start_rule = rule;
	dmsg.rulenum = rulenum;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL);
	return 0;
}
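
/*
 * Runs on each CPU in turn: delete every local rule belonging to the
 * given set, then forward the message to the next CPU.
 */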
static void
ipfw_alt_delete_ruleset_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *prev, *rule;
	int del = 0;

	ipfw_flush_rule_ptrs(ctx);

	prev = NULL;
	rule = ctx->ipfw_layer3_chain;
	while (rule != NULL) {
		if (rule->set == dmsg->from_set) {
			rule = ipfw_delete_rule(ctx, prev, rule);
			del = 1;
		} else {
			prev = rule;
			rule = rule->next;
		}
	}
	KASSERT(del, ("no match set?!\n"));

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
static void
ipfw_disable_ruleset_state_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;
	int cleared = 0;

	ctx->ipfw_gen++;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
		if (rule->set == dmsg->from_set) {
			cleared = 1;
			rule->rule_flags &= ~IPFW_RULE_F_STATE;
		}
	}
	KASSERT(cleared, ("no match set?!\n"));

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
static int
ipfw_alt_delete_ruleset(uint8_t set)
{
	struct netmsg_del dmsg;
	struct netmsg *nmsg;
	int state, del;
	struct ip_fw *rule;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	/*
	 * Check whether the 'set' exists.  If it exists,
	 * then check whether any rules within the set will
	 * try to create states.
	 */
	state = 0;
	del = 0;
	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
		if (rule->set == set) {
			del = 1;
			if (rule->rule_flags & IPFW_RULE_F_STATE) {
				state = 1;
				break;
			}
		}
	}
	if (!del)
		return 0; /* XXX EINVAL? */

	if (state) {
		/*
		 * Clear the STATE flag, so no more states will be
		 * created based on the rules in this set.
		 */
		bzero(&dmsg, sizeof(dmsg));
		nmsg = &dmsg.nmsg;
		netmsg_init(nmsg, NULL, &curthread->td_msgport,
			    0, ipfw_disable_ruleset_state_dispatch);
		dmsg.from_set = set;

		ifnet_domsg(&nmsg->nm_lmsg, 0);

		/*
		 * Nuke all related states
		 */
		lockmgr(&dyn_lock, LK_EXCLUSIVE);
		for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
			if (rule->set != set)
				continue;

			/*
			 * Can't check IPFW_RULE_F_STATE here,
			 * since it has been cleared previously.
			 * Check 'stub' instead.
			 */
			if (rule->stub != NULL) {
				/* Force removal */
				remove_dyn_rule_locked(rule, NULL);
			}
		}
		lockmgr(&dyn_lock, LK_RELEASE);
	}

	/*
	 * Delete this set
	 */
	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_alt_delete_ruleset_dispatch);
	dmsg.from_set = set;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	return 0;
}
static void
ipfw_alt_move_rule_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ip_fw *rule;

	rule = dmsg->start_rule;
	KKASSERT(rule->cpuid == mycpuid);

	/*
	 * Move to the position on the next CPU
	 * before the msg is forwarded.
	 */
	dmsg->start_rule = rule->sibling;

	while (rule && rule->rulenum <= dmsg->rulenum) {
		if (rule->rulenum == dmsg->rulenum)
			rule->set = dmsg->to_set;
		rule = rule->next;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
static int
ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
{
	struct netmsg_del dmsg;
	struct netmsg *nmsg;
	struct ip_fw *rule;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	/*
	 * Locate first rule to move
	 */
	for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum;
	     rule = rule->next) {
		if (rule->rulenum == rulenum && rule->set != set)
			break;
	}
	if (rule == NULL || rule->rulenum > rulenum)
		return 0; /* XXX error? */

	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_alt_move_rule_dispatch);
	dmsg.start_rule = rule;
	dmsg.rulenum = rulenum;
	dmsg.to_set = set;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	KKASSERT(dmsg.start_rule == NULL);
	return 0;
}
static void
ipfw_alt_move_ruleset_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
		if (rule->set == dmsg->from_set)
			rule->set = dmsg->to_set;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
static int
ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
{
	struct netmsg_del dmsg;
	struct netmsg *nmsg;

	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_alt_move_ruleset_dispatch);
	dmsg.from_set = from_set;
	dmsg.to_set = to_set;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	return 0;
}
static void
ipfw_alt_swap_ruleset_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
		if (rule->set == dmsg->from_set)
			rule->set = dmsg->to_set;
		else if (rule->set == dmsg->to_set)
			rule->set = dmsg->from_set;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
static int
ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
{
	struct netmsg_del dmsg;
	struct netmsg *nmsg;

	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_alt_swap_ruleset_dispatch);
	dmsg.from_set = set1;
	dmsg.to_set = set2;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	return 0;
}
/*
 * Remove all rules with given number, and also do set manipulation.
 *
 * The argument is a uint32_t.  The low 16 bits are the rule or set
 * number, the next 8 bits are the new set, the top 8 bits are the command:
 *
 *	0	delete rules with given number
 *	1	delete rules with given set number
 *	2	move rules with given number to new set
 *	3	move rules with given set number to new set
 *	4	swap sets with given numbers
 */
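/*
 * For example (illustrative values only), arg = (2 << 24) | (5 << 16) | 100
 * encodes command 2 with rule number 100 and new set 5, i.e. "move
 * rules numbered 100 into set 5".
 */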
static int
ipfw_ctl_alter(uint32_t arg)
{
	uint16_t rulenum;
	uint8_t cmd, new_set;
	int error = 0;

	rulenum = arg & 0xffff;
	cmd = (arg >> 24) & 0xff;
	new_set = (arg >> 16) & 0xff;

	if (cmd > 4)
		return EINVAL;
	if (new_set >= IPFW_DEFAULT_SET)
		return EINVAL;
	if (cmd == 0 || cmd == 2) {
		if (rulenum == IPFW_DEFAULT_RULE)
			return EINVAL;
	} else {
		if (rulenum >= IPFW_DEFAULT_SET)
			return EINVAL;
	}

	switch (cmd) {
	case 0:	/* delete rules with given number */
		error = ipfw_alt_delete_rule(rulenum);
		break;

	case 1:	/* delete all rules with given set number */
		error = ipfw_alt_delete_ruleset(rulenum);
		break;

	case 2:	/* move rules with given number to new set */
		error = ipfw_alt_move_rule(rulenum, new_set);
		break;

	case 3:	/* move rules with given set number to new set */
		error = ipfw_alt_move_ruleset(rulenum, new_set);
		break;

	case 4:	/* swap two sets */
		error = ipfw_alt_swap_ruleset(rulenum, new_set);
		break;
	}
	return error;
}
/*
 * Clear counters for a specific rule.
 */
static void
clear_counters(struct ip_fw *rule, int log_only)
{
	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);

	if (log_only == 0) {
		rule->bcnt = rule->pcnt = 0;
		rule->timestamp = 0;
	}
	if (l->o.opcode == O_LOG)
		l->log_left = l->max_log;
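}

/*
 * Runs on each CPU in turn: clear the counters (or only the logging
 * counters) of all local rules, or of every local rule numbered
 * 'rulenum', then forward the message to the next CPU.
 */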
static void
ipfw_zero_entry_dispatch(struct netmsg *nmsg)
{
	struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	if (zmsg->rulenum == 0) {
		KKASSERT(zmsg->start_rule == NULL);

		ctx->ipfw_norule_counter = 0;
		for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
			clear_counters(rule, zmsg->log_only);
	} else {
		struct ip_fw *start = zmsg->start_rule;

		KKASSERT(start->cpuid == mycpuid);
		KKASSERT(start->rulenum == zmsg->rulenum);

		/*
		 * We can have multiple rules with the same number, so we
		 * need to clear them all.
		 */
		for (rule = start; rule && rule->rulenum == zmsg->rulenum;
		     rule = rule->next)
			clear_counters(rule, zmsg->log_only);

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		zmsg->start_rule = start->sibling;
	}
	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
/*
 * Reset some or all counters on firewall rules.
 * @arg rulenum is 0 to clear all entries, or contains a specific
 *	rule number.
 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
 */
static int
ipfw_ctl_zero_entry(int rulenum, int log_only)
{
	struct netmsg_zent zmsg;
	struct netmsg *nmsg;
	const char *msg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	bzero(&zmsg, sizeof(zmsg));
	nmsg = &zmsg.nmsg;
	netmsg_init(nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_zero_entry_dispatch);
	zmsg.log_only = log_only;

	if (rulenum == 0) {
		msg = log_only ? "ipfw: All logging counts reset.\n"
			       : "ipfw: Accounting cleared.\n";
	} else {
		struct ip_fw *rule;

		/*
		 * Locate the first rule with 'rulenum'
		 */
		for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
			if (rule->rulenum == rulenum)
				break;
		}
		if (rule == NULL) /* we did not find any matching rules */
			return EINVAL;
		zmsg.start_rule = rule;
		zmsg.rulenum = rulenum;

		msg = log_only ? "ipfw: Entry %d logging count reset.\n"
			       : "ipfw: Entry %d cleared.\n";
	}
	ifnet_domsg(&nmsg->nm_lmsg, 0);
	KKASSERT(zmsg.start_rule == NULL);

	if (fw_verbose)
		log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
	return 0;
}
/*
 * Check validity of the structure before insert.
 * Fortunately rules are simple, so this mostly need to check rule sizes.
 */
static int
ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags)
{
	int l, cmdlen = 0;
	int have_action = 0;
	ipfw_insn *cmd;

	*rule_flags = 0;

	/* Check for valid size */
	if (size < sizeof(*rule)) {
		kprintf("ipfw: rule too short\n");
		return EINVAL;
	}
	l = IOC_RULESIZE(rule);
	if (l != size) {
		kprintf("ipfw: size mismatch (have %d want %d)\n", size, l);
		return EINVAL;
	}

	/* Check rule number */
	if (rule->rulenum == IPFW_DEFAULT_RULE) {
		kprintf("ipfw: invalid rule number\n");
		return EINVAL;
	}

	/*
	 * Now go for the individual checks. Very simple ones, basically only
	 * instruction sizes.
	 */
	for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
	     l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);
		if (cmdlen > l) {
			kprintf("ipfw: opcode %d size truncated\n",
				cmd->opcode);
			return EINVAL;
		}

		DPRINTF("ipfw: opcode %d\n", cmd->opcode);

		if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) {
			/* This rule will create states */
			*rule_flags |= IPFW_RULE_F_STATE;
		}

		switch (cmd->opcode) {
		case O_NOP:
		case O_PROBE_STATE:
		case O_KEEP_STATE:
		case O_PROTO:
		case O_IP_SRC_ME:
		case O_IP_DST_ME:
		case O_LAYER2:
		case O_IN:
		case O_FRAG:
		case O_IPOPT:
		case O_IPLEN:
		case O_IPID:
		case O_IPTOS:
		case O_IPPRECEDENCE:
		case O_IPTTL:
		case O_IPVER:
		case O_TCPWIN:
		case O_TCPFLAGS:
		case O_TCPOPTS:
		case O_ESTAB:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			break;

		case O_UID:
		case O_GID:
		case O_IP_SRC:
		case O_IP_DST:
		case O_TCPSEQ:
		case O_TCPACK:
		case O_PROB:
		case O_ICMPTYPE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
				goto bad_size;
			break;

		case O_LIMIT:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
				goto bad_size;
			break;

		case O_LOG:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
				goto bad_size;

			((ipfw_insn_log *)cmd)->log_left =
			    ((ipfw_insn_log *)cmd)->max_log;
			break;

		case O_IP_SRC_MASK:
		case O_IP_DST_MASK:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_ip))
				goto bad_size;
			if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) {
				kprintf("ipfw: opcode %d, useless rule\n",
					cmd->opcode);
				return EINVAL;
			}
			break;

		case O_IP_SRC_SET:
		case O_IP_DST_SET:
			if (cmd->arg1 == 0 || cmd->arg1 > 256) {
				kprintf("ipfw: invalid set size %d\n",
					cmd->arg1);
				return EINVAL;
			}
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
			    (cmd->arg1 + 31) / 32)
				goto bad_size;
			break;

		case O_MACADDR2:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
				goto bad_size;
			break;

		case O_MAC_TYPE:
		case O_IP_SRCPORT:
		case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
			if (cmdlen < 2 || cmdlen > 31)
				goto bad_size;
			break;

		case O_RECV:
		case O_XMIT:
		case O_VIA:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
				goto bad_size;
			break;

		case O_PIPE:
		case O_QUEUE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
				goto bad_size;
			goto check_action;

		case O_FORWARD_IP:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) {
				goto bad_size;
			} else {
				in_addr_t fwd_addr;

				fwd_addr = ((ipfw_insn_sa *)cmd)->
					   sa.sin_addr.s_addr;
				if (IN_MULTICAST(ntohl(fwd_addr))) {
					kprintf("ipfw: try forwarding to "
						"multicast address\n");
					return EINVAL;
				}
			}
			goto check_action;

		case O_FORWARD_MAC: /* XXX not implemented yet */
		case O_CHECK_STATE:
		case O_COUNT:
		case O_ACCEPT:
		case O_DENY:
		case O_REJECT:
		case O_SKIPTO:
		case O_DIVERT:
		case O_TEE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
check_action:
			if (have_action) {
				kprintf("ipfw: opcode %d, multiple actions"
					" not allowed\n", cmd->opcode);
				return EINVAL;
			}
			have_action = 1;
			if (l != cmdlen) {
				kprintf("ipfw: opcode %d, action must be"
					" last opcode\n", cmd->opcode);
				return EINVAL;
			}
			break;

		default:
			kprintf("ipfw: opcode %d, unknown opcode\n",
				cmd->opcode);
			return EINVAL;
		}
	}
	if (have_action == 0) {
		kprintf("ipfw: missing action\n");
		return EINVAL;
	}
	return 0;

bad_size:
	kprintf("ipfw: opcode %d size %d wrong\n",
		cmd->opcode, cmdlen);
	return EINVAL;
}
static int
ipfw_ctl_add_rule(struct sockopt *sopt)
{
	struct ipfw_ioc_rule *ioc_rule;
	size_t size;
	uint32_t rule_flags;
	int error;

	size = sopt->sopt_valsize;
	if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
	    size < sizeof(*ioc_rule)) {
		return EINVAL;
	}
	if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
		sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
					  IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
	}
	ioc_rule = sopt->sopt_val;

	error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags);
	if (error)
		return error;

	ipfw_add_rule(ioc_rule, rule_flags);

	if (sopt->sopt_dir == SOPT_GET)
		sopt->sopt_valsize = IOC_RULESIZE(ioc_rule);
	return 0;
}
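
/*
 * Copy a static rule into the ioctl format, summing the packet and
 * byte counters of all of its per-CPU duplications.  Returns a
 * pointer just past the copied rule, so the caller can pack rules
 * back to back into the output buffer.
 */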
static void *
ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
{
	const struct ip_fw *sibling;
	int i;

	KKASSERT(rule->cpuid == IPFW_CFGCPUID);

	ioc_rule->act_ofs = rule->act_ofs;
	ioc_rule->cmd_len = rule->cmd_len;
	ioc_rule->rulenum = rule->rulenum;
	ioc_rule->set = rule->set;
	ioc_rule->usr_flags = rule->usr_flags;

	ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
	ioc_rule->static_count = static_count;
	ioc_rule->static_len = static_ioc_len;

	/*
	 * Visit (read-only) all of the rule's duplications to get
	 * the necessary statistics
	 */
	i = 0;
	ioc_rule->pcnt = 0;
	ioc_rule->bcnt = 0;
	ioc_rule->timestamp = 0;
	for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
		ioc_rule->pcnt += sibling->pcnt;
		ioc_rule->bcnt += sibling->bcnt;
		if (sibling->timestamp > ioc_rule->timestamp)
			ioc_rule->timestamp = sibling->timestamp;
		i++;
	}
	KASSERT(i == ncpus, ("static rule is not duplicated on every cpu\n"));

	bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);

	return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
}
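
/*
 * Copy a state (dyn rule) into the ioctl format.
 */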
static void
ipfw_copy_state(const ipfw_dyn_rule *dyn_rule,
		struct ipfw_ioc_state *ioc_state)
{
	const struct ipfw_flow_id *id;
	struct ipfw_ioc_flowid *ioc_id;

	ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ?
			    0 : dyn_rule->expire - time_second;
	ioc_state->pcnt = dyn_rule->pcnt;
	ioc_state->bcnt = dyn_rule->bcnt;

	ioc_state->dyn_type = dyn_rule->dyn_type;
	ioc_state->count = dyn_rule->count;

	ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum;

	id = &dyn_rule->id;
	ioc_id = &ioc_state->id;

	ioc_id->type = ETHERTYPE_IP;
	ioc_id->u.ip.dst_ip = id->dst_ip;
	ioc_id->u.ip.src_ip = id->src_ip;
	ioc_id->u.ip.dst_port = id->dst_port;
	ioc_id->u.ip.src_port = id->src_port;
	ioc_id->u.ip.proto = id->proto;
}
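
/*
 * The output buffer is laid out as all static rules first (in ioc
 * format), followed by the states (dyn rules) collected under the
 * shared dyn_lock.
 */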
static int
ipfw_ctl_get_rules(struct sockopt *sopt)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;
	void *bp;
	size_t size;
	uint32_t dcount = 0;

	/*
	 * pass up a copy of the current rules. Static rules
	 * come first (the last of which has number IPFW_DEFAULT_RULE),
	 * followed by a possibly empty list of dynamic rules.
	 */

	size = static_ioc_len;	/* size of static rules */
	if (ipfw_dyn_v) {	/* add size of dyn.rules */
		dcount = dyn_count;
		size += dcount * sizeof(struct ipfw_ioc_state);
	}

	if (sopt->sopt_valsize < size) {
		/* short length, no need to return incomplete rules */
		/* XXX: if superuser, no need to zero buffer */
		bzero(sopt->sopt_val, sopt->sopt_valsize);
		return 0;
	}
	bp = sopt->sopt_val;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
		bp = ipfw_copy_rule(rule, bp);

	if (ipfw_dyn_v && dcount != 0) {
		struct ipfw_ioc_state *ioc_state = bp;
		uint32_t dcount2 = 0;
		size_t old_size = size;
		int i;

		lockmgr(&dyn_lock, LK_SHARED);

		/* Check 'ipfw_dyn_v' again with lock held */
		if (ipfw_dyn_v == NULL)
			goto skip;

		for (i = 0; i < curr_dyn_buckets; i++) {
			ipfw_dyn_rule *p;

			/*
			 * The # of dynamic rules may have grown after the
			 * snapshot of 'dyn_count' was taken, so we will have
			 * to check 'dcount' (snapshot of dyn_count) here to
			 * make sure that we don't overflow the pre-allocated
			 * buffer.
			 */
			for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0;
			     p = p->next, ioc_state++, dcount--, dcount2++)
				ipfw_copy_state(p, ioc_state);
		}
skip:
		lockmgr(&dyn_lock, LK_RELEASE);

		/*
		 * The # of dynamic rules may have shrunk after the
		 * snapshot of 'dyn_count' was taken.  To give the user a
		 * correct dynamic rule count, we use the 'dcount2'
		 * calculated above (with the shared lockmgr lock held).
		 */
		size = static_ioc_len +
		       (dcount2 * sizeof(struct ipfw_ioc_state));
		KKASSERT(size <= old_size);
	}

	sopt->sopt_valsize = size;
	return 0;
}
static void
ipfw_set_disable_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	ctx->ipfw_gen++;
	ctx->ipfw_set_disable = lmsg->u.ms_result32;

	ifnet_forwardmsg(lmsg, mycpuid + 1);
}
static void
ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
{
	struct netmsg nmsg;
	struct lwkt_msg *lmsg;
	uint32_t set_disable;

	/* IPFW_DEFAULT_SET is always enabled */
	enable |= (1 << IPFW_DEFAULT_SET);
	set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;

	bzero(&nmsg, sizeof(nmsg));
	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_set_disable_dispatch);
	lmsg = &nmsg.nm_lmsg;
	lmsg->u.ms_result32 = set_disable;

	ifnet_domsg(lmsg, 0);
}
/*
 * {set|get}sockopt parser.
 */
static int
ipfw_ctl(struct sockopt *sopt)
{
	int error, rulenum;
	uint32_t *masks;
	size_t size;

	error = 0;

	switch (sopt->sopt_name) {
	case IP_FW_GET:
		error = ipfw_ctl_get_rules(sopt);
		break;

	case IP_FW_FLUSH:
		ipfw_flush(0 /* keep default rule */);
		break;

	case IP_FW_ADD:
		error = ipfw_ctl_add_rule(sopt);
		break;

	case IP_FW_DEL:
		/*
		 * IP_FW_DEL is used for deleting single rules or sets,
		 * and (ab)used to atomically manipulate sets.
		 * Argument size is used to distinguish between the two:
		 *    sizeof(uint32_t)
		 *	delete single rule or set of rules,
		 *	or reassign rules (or sets) to a different set.
		 *    2 * sizeof(uint32_t)
		 *	atomic disable/enable sets.
		 *	first uint32_t contains sets to be disabled,
		 *	second uint32_t contains sets to be enabled.
		 */
		masks = sopt->sopt_val;
		size = sopt->sopt_valsize;
		if (size == sizeof(*masks)) {
			/*
			 * Delete or reassign static rule
			 */
			error = ipfw_ctl_alter(masks[0]);
		} else if (size == (2 * sizeof(*masks))) {
			/*
			 * Set enable/disable
			 */
			ipfw_ctl_set_disable(masks[0], masks[1]);
		} else {
			error = EINVAL;
		}
		break;

	case IP_FW_ZERO:
	case IP_FW_RESETLOG: /* argument is an int, the rule number */
		rulenum = 0;
		if (sopt->sopt_val != 0) {
			error = soopt_to_kbuf(sopt, &rulenum,
					      sizeof(int), sizeof(int));
			if (error)
				break;
		}
		error = ipfw_ctl_zero_entry(rulenum,
			sopt->sopt_name == IP_FW_RESETLOG);
		break;

	default:
		kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name);
		error = EINVAL;
	}
	return error;
}
/*
 * This procedure is only used to handle keepalives. It is invoked
 * every dyn_keepalive_period
 */
static void
ipfw_tick_dispatch(struct netmsg *nmsg)
{
	time_t keep_alive;
	uint32_t gen;
	int i;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
	KKASSERT(IPFW_LOADED);

	/* Reply ASAP */
	lwkt_replymsg(&nmsg->nm_lmsg, 0);

	if (ipfw_dyn_v == NULL || dyn_count == 0)
		goto done;

	keep_alive = time_second;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);
again:
	if (ipfw_dyn_v == NULL || dyn_count == 0) {
		lockmgr(&dyn_lock, LK_RELEASE);
		goto done;
	}
	gen = dyn_buckets_gen;

	for (i = 0; i < curr_dyn_buckets; i++) {
		ipfw_dyn_rule *q, *prev;

		for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
			uint32_t ack_rev, ack_fwd;
			struct ipfw_flow_id id;

			if (q->dyn_type == O_LIMIT_PARENT)
				goto next;

			if (TIME_LEQ(q->expire, time_second)) {
				/* State expired */
				UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
				continue;
			}

			/*
			 * Keep alive processing
			 */

			if (!dyn_keepalive)
				goto next;
			if (q->id.proto != IPPROTO_TCP)
				goto next;
			if ((q->state & BOTH_SYN) != BOTH_SYN)
				goto next;
			if (TIME_LEQ(time_second + dyn_keepalive_interval,
			    q->expire))
				goto next;	/* too early */
			if (q->keep_alive == keep_alive)
				goto next;	/* already done */

			/*
			 * Save necessary information, so that they could
			 * survive after possible blocking in send_pkt()
			 */
			id = q->id;
			ack_rev = q->ack_rev;
			ack_fwd = q->ack_fwd;

			/* Sending has been started */
			q->keep_alive = keep_alive;

			/* Release lock to avoid possible dead lock */
			lockmgr(&dyn_lock, LK_RELEASE);
			send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
			send_pkt(&id, ack_fwd - 1, ack_rev, 0);
			lockmgr(&dyn_lock, LK_EXCLUSIVE);

			if (gen != dyn_buckets_gen) {
				/*
				 * Dyn bucket array has been changed during
				 * the above two sending; reiterate.
				 */
				goto again;
			}
next:
			prev = q;
			q = q->next;
		}
	}
	lockmgr(&dyn_lock, LK_RELEASE);

done:
	callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz,
		      ipfw_tick, NULL);
}
/*
 * This procedure is only used to handle keepalives. It is invoked
 * every dyn_keepalive_period
 */
static void
ipfw_tick(void *dummy __unused)
{
	struct lwkt_msg *lmsg = &ipfw_timeout_netmsg.nm_lmsg;

	KKASSERT(mycpuid == IPFW_CFGCPUID);

	crit_enter();
	KKASSERT(lmsg->ms_flags & MSGF_DONE);
	lwkt_sendmsg(IPFW_CFGPORT, lmsg);
	crit_exit();
	/* ipfw_timeout_netmsg's handler resets this callout */
}
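
/*
 * pfil(9) input hook: run the packet through ipfw_chk() and act on
 * the verdict (pass, deny, dummynet, divert/tee).  Packets
 * re-injected by dummynet carry a PACKET_TAG_DUMMYNET tag with the
 * rule to resume matching from.
 */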
static int
ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
{
	struct ip_fw_args args;
	struct mbuf *m = *m0;
	struct m_tag *mtag;
	int tee = 0, error = 0, ret;

	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
		/* Extract info from dummynet tag */
		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
		KKASSERT(mtag != NULL);
		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
		KKASSERT(args.rule != NULL);

		m_tag_delete(m, mtag);
		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
	} else {
		args.rule = NULL;
	}

	args.eh = NULL;
	args.oif = NULL;
	args.m = m;
	ret = ipfw_chk(&args);
	m = args.m;

	if (m == NULL) {
		error = EACCES;
		goto back;
	}

	switch (ret) {
	case IP_FW_PASS:
		break;

	case IP_FW_DENY:
		m_freem(m);
		m = NULL;
		error = EACCES;
		break;

	case IP_FW_DUMMYNET:
		/* Send packet to the appropriate pipe */
		ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
		m = NULL;
		break;

	case IP_FW_TEE:
		tee = 1;
		/* FALL THROUGH */

	case IP_FW_DIVERT:
		if (ip_divert_p != NULL) {
			m = ip_divert_p(m, tee, 1);
		} else {
			m_freem(m);
			m = NULL;
			/* not sure this is the right error msg */
			error = EACCES;
		}
		break;

	default:
		panic("unknown ipfw return value: %d\n", ret);
	}
back:
	*m0 = m;
	return error;
}
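
/*
 * pfil(9) output hook: same verdict handling as ipfw_check_in(), but
 * for packets leaving the system.
 */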
static int
ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
{
	struct ip_fw_args args;
	struct mbuf *m = *m0;
	struct m_tag *mtag;
	int tee = 0, error = 0, ret;

	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
		/* Extract info from dummynet tag */
		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
		KKASSERT(mtag != NULL);
		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
		KKASSERT(args.rule != NULL);

		m_tag_delete(m, mtag);
		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
	} else {
		args.rule = NULL;
	}

	args.eh = NULL;
	args.oif = ifp;
	args.m = m;
	ret = ipfw_chk(&args);
	m = args.m;

	if (m == NULL) {
		error = EACCES;
		goto back;
	}

	switch (ret) {
	case IP_FW_PASS:
		break;

	case IP_FW_DENY:
		m_freem(m);
		m = NULL;
		error = EACCES;
		break;

	case IP_FW_DUMMYNET:
		ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
		m = NULL;
		break;

	case IP_FW_TEE:
		tee = 1;
		/* FALL THROUGH */

	case IP_FW_DIVERT:
		if (ip_divert_p != NULL) {
			m = ip_divert_p(m, tee, 0);
		} else {
			m_freem(m);
			m = NULL;
			/* not sure this is the right error msg */
			error = EACCES;
		}
		break;

	default:
		panic("unknown ipfw return value: %d\n", ret);
	}
back:
	*m0 = m;
	return error;
}
static void
ipfw_hook(void)
{
	struct pfil_head *pfh;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh == NULL)
		return;

	pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
	pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
}
static void
ipfw_dehook(void)
{
	struct pfil_head *pfh;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh == NULL)
		return;

	pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
	pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
}
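
/*
 * net.inet.ip.fw.enable handler: the pfil hooks are (de)registered
 * on the config port, so hooking is serialized with the rest of the
 * ipfw configuration.
 */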
static void
ipfw_sysctl_enable_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	int enable = lmsg->u.ms_result;

	if (fw_enable == enable)
		goto reply;

	fw_enable = enable;
	if (fw_enable)
		ipfw_hook();
	else
		ipfw_dehook();
reply:
	lwkt_replymsg(lmsg, 0);
}
static int
ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
{
	struct netmsg nmsg;
	struct lwkt_msg *lmsg;
	int enable, error;

	enable = fw_enable;
	error = sysctl_handle_int(oidp, &enable, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_sysctl_enable_dispatch);
	lmsg = &nmsg.nm_lmsg;
	lmsg->u.ms_result = enable;

	return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
}
static int
ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
}
static int
ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);

	value = dyn_buckets;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		goto back;

	/*
	 * Make sure we have a power of 2 and
	 * do not allow more than 64k entries.
	 */
	error = EINVAL;
	if (value <= 1 || value > 65536)
		goto back;
	if ((value & (value - 1)) != 0)
		goto back;

	error = 0;
	dyn_buckets = value;
back:
	lockmgr(&dyn_lock, LK_RELEASE);
	return error;
}
static int
ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				1, dyn_keepalive_period - 1);
}

static int
ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				1, dyn_keepalive_period - 1);
}
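
/*
 * Runs on each CPU in turn: allocate the per-CPU context and install
 * the local duplication of the default rule, then forward the
 * message to the next CPU.
 */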
static void
ipfw_ctx_init_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
	struct ipfw_context *ctx;
	struct ip_fw *def_rule;

	ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
	ipfw_ctx[mycpuid] = ctx;

	def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);

	def_rule->act_ofs = 0;
	def_rule->rulenum = IPFW_DEFAULT_RULE;
	def_rule->cmd_len = 1;
	def_rule->set = IPFW_DEFAULT_SET;

	def_rule->cmd[0].len = 1;
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
	def_rule->cmd[0].opcode = O_ACCEPT;
#else
	def_rule->cmd[0].opcode = O_DENY;
#endif

	def_rule->refcnt = 1;
	def_rule->cpuid = mycpuid;

	/* Install the default rule */
	ctx->ipfw_default_rule = def_rule;
	ctx->ipfw_layer3_chain = def_rule;

	/* Link rule CPU sibling */
	ipfw_link_sibling(fwmsg, def_rule);

	/* Statistics only need to be updated once */
	if (mycpuid == 0)
		ipfw_inc_static_count(def_rule);

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
static void
ipfw_init_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ipfw fwmsg;
	int error = 0;

	if (IPFW_LOADED) {
		kprintf("IP firewall already loaded\n");
		error = EEXIST;
		goto reply;
	}

	bzero(&fwmsg, sizeof(fwmsg));
	netmsg_init(&fwmsg.nmsg, NULL, &curthread->td_msgport,
		    0, ipfw_ctx_init_dispatch);
	ifnet_domsg(&fwmsg.nmsg.nm_lmsg, 0);

	ip_fw_chk_ptr = ipfw_chk;
	ip_fw_ctl_ptr = ipfw_ctl;
	ip_fw_dn_io_ptr = ipfw_dummynet_io;

	kprintf("ipfw2 initialized, default to %s, logging ",
		ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
		O_ACCEPT ? "accept" : "deny");

#ifdef IPFIREWALL_VERBOSE
	fw_verbose = 1;
#endif
#ifdef IPFIREWALL_VERBOSE_LIMIT
	verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
#endif
	if (fw_verbose == 0) {
		kprintf("disabled\n");
	} else if (verbose_limit == 0) {
		kprintf("unlimited\n");
	} else {
		kprintf("limited to %d packets/entry by default\n",
			verbose_limit);
	}

	callout_init_mp(&ipfw_timeout_h);
	netmsg_init(&ipfw_timeout_netmsg, NULL, &netisr_adone_rport,
		    MSGF_MPSAFE | MSGF_DROPABLE | MSGF_PRIORITY,
		    ipfw_tick_dispatch);
	lockinit(&dyn_lock, "ipfw_dyn", 0, 0);

	callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL);

	if (fw_enable)
		ipfw_hook();
reply:
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

static int
ipfw_init(void)
{
	struct netmsg smsg;

	netmsg_init(&smsg, NULL, &curthread->td_msgport,
		    0, ipfw_init_dispatch);
	return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0);
}
#ifdef KLD_MODULE

static void
ipfw_fini_dispatch(struct netmsg *nmsg)
{
	int error = 0, cpu;

	if (ipfw_refcnt != 0) {
		error = EBUSY;
		goto reply;
	}

	callout_stop(&ipfw_timeout_h);

	netmsg_service_sync();

	crit_enter();
	if ((ipfw_timeout_netmsg.nm_lmsg.ms_flags & MSGF_DONE) == 0) {
		/*
		 * Callout message is pending; drop it
		 */
		lwkt_dropmsg(&ipfw_timeout_netmsg.nm_lmsg);
	}
	crit_exit();

	ipfw_dehook();

	ip_fw_chk_ptr = NULL;
	ip_fw_ctl_ptr = NULL;
	ip_fw_dn_io_ptr = NULL;
	ipfw_flush(1 /* kill default rule */);

	/* Free per-cpu context */
	for (cpu = 0; cpu < ncpus; ++cpu)
		kfree(ipfw_ctx[cpu], M_IPFW);

	kprintf("IP firewall unloaded\n");
reply:
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

static int
ipfw_fini(void)
{
	struct netmsg smsg;

	netmsg_init(&smsg, NULL, &curthread->td_msgport,
		    0, ipfw_fini_dispatch);
	return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0);
}
4518 #endif /* KLD_MODULE */
static int
ipfw_modevent(module_t mod, int type, void *unused)
{
	int err = 0;

	switch (type) {
	case MOD_LOAD:
		err = ipfw_init();
		break;

	case MOD_UNLOAD:
#ifdef KLD_MODULE
		err = ipfw_fini();
#else
		kprintf("ipfw statically compiled, cannot unload\n");
		err = EBUSY;
#endif
		break;

	default:
		break;
	}
	return err;
}

static moduledata_t ipfwmod = {
	"ipfw",
	ipfw_modevent,
	0
};
DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY);
MODULE_VERSION(ipfw, 1);