/*
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.12 2003/04/08 10:42:32 maxim Exp $
 * $DragonFly: src/sys/net/ipfw/ip_fw2.c,v 1.100 2008/11/22 11:03:35 sephe Exp $
 */

/*
 * Implement IP packet firewall (new version)
 */
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/thread2.h>
#include <sys/ucred.h>
#include <sys/in_cksum.h>

#include <net/route.h>
#include <net/netmsg2.h>

#include <net/dummynet/ip_dummynet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/ip_divert.h>
#include <netinet/if_ether.h>	/* XXX for ETHERTYPE_IP */

#include <net/ipfw/ip_fw2.h>
#ifdef IPFIREWALL_DEBUG
#define DPRINTF(fmt, ...) \
do { \
        if (fw_debug > 0) \
                kprintf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(fmt, ...)	((void)0)
#endif
/*
 * Description of per-CPU rule duplication:
 *
 * Module loading/unloading and all ioctl operations are serialized
 * by netisr0, so we don't have any ordering or locking problems.
 *
 * The following graph shows how an operation on the per-CPU rule list
 * is performed [2 CPU case]:
 *
 *    netisr0 <------------------------------------+
 *     |                                           |
 *     | forwardmsg---------->ifnet1               |
 *     |                        |                  |
 *     |                        replymsg-----------+
 *
 * Rules which will not create states [2 CPU case]
 *
 *    CPU0                CPU1
 *    layer3_chain        layer3_chain
 *     |                   |
 *    +-------+ sibling   +-------+ sibling
 *    | rule1 |--------->| rule1 |--------->NULL
 *    +-------+           +-------+
 *     |                   |
 *    +-------+ sibling   +-------+ sibling
 *    | rule2 |--------->| rule2 |--------->NULL
 *    +-------+           +-------+
 *
 * The per-CPU duplication serves two purposes:
 * 1) Ease statistics calculation during IP_FW_GET.  We only need to
 *    iterate layer3_chain on CPU0; the current rule's duplication on
 *    the other CPUs can safely be read-only accessed through its
 *    sibling pointer.
 * 2) Accelerate rule insertion and deletion, e.g. rule insertion:
 *    a) In netisr0 (on CPU0) rule3 is determined to be inserted between
 *       rule1 and rule2.  To make this decision we need to iterate the
 *       layer3_chain on CPU0.  The netmsg, which is used to insert the
 *       rule, will contain rule1 on CPU0 as prev_rule and rule2 on CPU0
 *       as next_rule.
 *    b) After the insertion on CPU0 is done, we will move on to CPU1.
 *       But instead of relocating rule3's position on CPU1 by
 *       iterating the layer3_chain on CPU1, we set the netmsg's prev_rule
 *       to rule1->sibling and next_rule to rule2->sibling before the
 *       netmsg is forwarded to CPU1 from CPU0.
 *
 * Rules which will create states (dyn rules) [2 CPU case]
 * (unnecessary parts are omitted; they are the same as in the previous
 *  figure)
 *
 *    CPU0                           CPU1
 *    +-------+                      +-------+
 *    | rule1 |                      | rule1 |
 *    +-------+                      +-------+
 *     |                              |
 *     |  +--------------------+      |
 *     |  | ip_fw_stub         |      |
 *     |  | (read-only shared) |      |
 *     |  |                    |      |
 *     |  | back pointer array |      |
 *     |  | (indexed by cpuid) |      |
 *     +----|---------[0]      |      |
 *        | [1]--------|--------------+
 *        +--------------------+
 *                 |
 *        ........|............|............
 *        :                                :
 *        : +---------+      +---------+   :
 *        : | state1a |      | state1b | ....
 *        : +---------+      +---------+   :
 *        :                                :
 *        :       (protected by dyn_lock)  :
 *        ..................................
 *
 * [state1a and state1b are states created by rule1]
 *
 * struct ip_fw_stub:
 * This structure is introduced so that the shared (locked) state table
 * can work with per-CPU (duplicated) static rules.  It mainly bridges
 * states and static rules, and serves as the static rule's placeholder
 * (a read-only shared part of duplicated rules) from the states' point
 * of view.
 *
 * IPFW_RULE_F_STATE (only for rules which create states):
 * o  During rule installation, this flag is turned on after the rule's
 *    duplications reach all CPUs, to avoid at least the following race:
 *    1) rule1 is duplicated on CPU0 and is not duplicated on CPU1 yet
 *    2) rule1 creates state1
 *    3) state1 is located on CPU1 by check-state
 *    But rule1 is not duplicated on CPU1 yet
 * o  During rule deletion, this flag is turned off before deleting the
 *    states created by the rule and before deleting the rule itself, so
 *    no more states will be created by the to-be-deleted rule even when
 *    its duplications on certain CPUs have not been eliminated yet.
 */
#define IPFW_AUTOINC_STEP_MIN	1
#define IPFW_AUTOINC_STEP_MAX	1000
#define IPFW_AUTOINC_STEP_DEF	100

#define IPFW_DEFAULT_RULE	65535	/* rulenum for the default rule */
#define IPFW_DEFAULT_SET	31	/* set number for the default rule */
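/*
 * For example, with the default autoinc_step of 100, rules added
 * without an explicit number are assigned 100, 200, 300, ..., always
 * staying below IPFW_DEFAULT_RULE (65535), which is reserved for the
 * default rule.
 */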
struct netmsg_ipfw {
        struct netmsg	nmsg;
        const struct ipfw_ioc_rule *ioc_rule;
        struct ip_fw	*next_rule;
        struct ip_fw	*prev_rule;
        struct ip_fw	*sibling;
        struct ip_fw_stub *stub;
};

struct netmsg_del {
        struct netmsg	nmsg;
        struct ip_fw	*start_rule;
        struct ip_fw	*prev_rule;
};

struct netmsg_zent {
        struct netmsg	nmsg;
        struct ip_fw	*start_rule;
};
struct ipfw_context {
        struct ip_fw	*ipfw_layer3_chain;	/* list of rules for layer3 */
        struct ip_fw	*ipfw_default_rule;	/* default rule */
        uint64_t	ipfw_norule_counter;	/* counter for ipfw_log(NULL) */

        /*
         * ipfw_set_disable contains one bit per set value (0..31).
         * If the bit is set, all rules with the corresponding set
         * are disabled.  Set IPFW_DEFAULT_SET is reserved for the
         * default rule and CANNOT be disabled.
         */
        uint32_t	ipfw_set_disable;
        uint32_t	ipfw_gen;		/* generation of rule list */
};

static struct ipfw_context *ipfw_ctx[MAXCPU];
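/*
 * Example: "ipfw set disable 1" sets bit 1 in ipfw_set_disable, and
 * ipfw_chk() then skips every rule whose set number is 1 without
 * evaluating any of its instructions.
 */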
/*
 * The module cannot be unloaded if there are references to
 * certain rules of ipfw(4), e.g. from dummynet(4).
 */
static int ipfw_refcnt;

MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
/*
 * The following two global variables are accessed and
 * updated only on CPU0.
 */
static uint32_t static_count;	/* # of static rules */
static uint32_t static_ioc_len;	/* bytes of static rules */

/*
 * If 1, then ipfw static rules are being flushed,
 * ipfw_chk() will skip to the default rule.
 */
static int ipfw_flushing;

static int fw_verbose;
static int verbose_limit;

static int autoinc_step = IPFW_AUTOINC_STEP_DEF;
static int	ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS);
static int	ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS);
SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    &fw_enable, 0, ipfw_sysctl_enable, "I", "Enable ipfw");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLTYPE_INT | CTLFLAG_RW,
    &autoinc_step, 0, ipfw_sysctl_autoinc_step, "I",
    "Rule number autoincrement step");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, CTLFLAG_RW,
    &fw_one_pass, 0,
    "Only do a single pass through ipfw when using dummynet(4)");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW,
    &fw_debug, 0, "Enable printing of debug ip_fw statements");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW,
    &fw_verbose, 0, "Log matches to ipfw rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
    &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
/*
 * Description of dynamic rules.
 *
 * Dynamic rules are stored in lists accessed through a hash table
 * (ipfw_dyn_v) whose size is curr_dyn_buckets.  This value can
 * be modified through the sysctl variable dyn_buckets, which takes
 * effect when the table becomes empty.
 *
 * XXX currently there is only one list, ipfw_dyn.
 *
 * When a packet is received, its address fields are first masked
 * with the mask defined for the rule, then hashed, then matched
 * against the entries in the corresponding list.
 * Dynamic rules can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic rules is stored in dyn_count.
 * The max number of dynamic rules is dyn_max.  When we reach
 * the maximum number of rules we do not create any more.  This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each dynamic rule holds a pointer to the parent ipfw rule so
 * we know what action to perform.  Dynamic rules are removed when
 * the parent rule is deleted.  XXX we should make them survive.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall.  XXX check the latter!!!
 *
 * NOTE about the SHARED LOCKMGR LOCK during dynamic rule lookup:
 * Only a TCP state transition will change a dynamic rule's state and
 * ack sequences, and all packets of one TCP connection go through a
 * single TCP thread, so it is safe to use a shared lockmgr lock during
 * dynamic rule lookup.  The keepalive callout uses an exclusive lockmgr
 * lock when it tries to find suitable dynamic rules to send keepalives,
 * so it will not see half-updated state and ack sequences.  Though the
 * expire field updating looks racy for other protocols, the one-second
 * resolution of the expire field makes this kind of race harmless.
 * XXX statistics' updating is _not_ MPsafe!!!
 * XXX once the UDP output path is fixed, we could use lockless dynamic
 * rule lookup.
 */
static ipfw_dyn_rule **ipfw_dyn_v = NULL;
static uint32_t dyn_buckets = 256;	/* must be power of 2 */
static uint32_t curr_dyn_buckets = 256;	/* must be power of 2 */
static uint32_t dyn_buckets_gen;	/* generation of dyn buckets array */
static struct lock dyn_lock;		/* dynamic rules' hash table lock */

static struct netmsg ipfw_timeout_netmsg; /* schedule ipfw timeout */
static struct callout ipfw_timeout_h;

/*
 * Timeouts for various events in handling dynamic rules.
 */
static uint32_t dyn_ack_lifetime = 300;
static uint32_t dyn_syn_lifetime = 20;
static uint32_t dyn_fin_lifetime = 1;
static uint32_t dyn_rst_lifetime = 1;
static uint32_t dyn_udp_lifetime = 10;
static uint32_t dyn_short_lifetime = 5;

/*
 * Keepalives are sent if dyn_keepalive is set.  They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
 */
static uint32_t dyn_keepalive_interval = 20;
static uint32_t dyn_keepalive_period = 5;
static uint32_t dyn_keepalive = 1;	/* do send keepalives */

static uint32_t dyn_count;		/* # of dynamic rules */
static uint32_t dyn_max = 4096;		/* max # of dynamic rules */
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLTYPE_INT | CTLFLAG_RW,
    &dyn_buckets, 0, ipfw_sysctl_dyn_buckets, "I", "Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
    &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
    &dyn_count, 0, "Number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
    &dyn_max, 0, "Max number of dyn. rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
    &static_count, 0, "Number of static rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
    &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
    &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_fin_lifetime, 0, ipfw_sysctl_dyn_fin, "I",
    "Lifetime of dyn. rules for fin");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLTYPE_INT | CTLFLAG_RW, &dyn_rst_lifetime, 0, ipfw_sysctl_dyn_rst, "I",
    "Lifetime of dyn. rules for rst");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
    &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
    &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
    &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
static ip_fw_chk_t	ipfw_chk;
static void		ipfw_tick(void *);
static __inline void
ipfw_free_rule(struct ip_fw *rule)
{
        KASSERT(rule->cpuid == mycpuid, ("rule freed on cpu%d\n", mycpuid));
        KASSERT(rule->refcnt > 0, ("invalid refcnt %u\n", rule->refcnt));
        rule->refcnt--;
        if (rule->refcnt == 0) {
                kfree(rule, M_IPFW);
        }
}

static void
ipfw_unref_rule(void *priv)
{
        ipfw_free_rule(priv);

        atomic_subtract_int(&ipfw_refcnt, 1);
}

static __inline void
ipfw_ref_rule(struct ip_fw *rule)
{
        KASSERT(rule->cpuid == mycpuid, ("rule used on cpu%d\n", mycpuid));

        atomic_add_int(&ipfw_refcnt, 1);
        rule->refcnt++;
}
/*
 * This macro maps an ip pointer into a layer3 header pointer of type T
 */
#define L3HDR(T, ip) ((T *)((uint32_t *)(ip) + (ip)->ip_hl))

static __inline int
icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
{
        int type = L3HDR(struct icmp, ip)->icmp_type;

        return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)));
}

#define TT	((1 << ICMP_ECHO) | \
                 (1 << ICMP_ROUTERSOLICIT) | \
                 (1 << ICMP_TSTAMP) | \
                 (1 << ICMP_IREQ) | \
                 (1 << ICMP_MASKREQ))

static int
is_icmp_query(struct ip *ip)
{
        int type = L3HDR(struct icmp, ip)->icmp_type;

        return (type <= ICMP_MAXTYPE && (TT & (1 << type)));
}

#undef TT
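/*
 * For example, an ICMP echo request (type ICMP_ECHO == 8) counts as a
 * query because bit (1 << ICMP_ECHO) is set in TT, while an echo reply
 * (type 0) is not in the mask and therefore is not considered a query.
 */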
/*
 * The following checks use two arrays of 8 or 16 bits to store the
 * bits that we want set or clear, respectively.  They are in the
 * low and high half of cmd->arg1 or cmd->d[0].
 *
 * We scan options and store the bits we find set.  We succeed if
 *
 *	(want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
 *
 * The code is sometimes optimized not to store additional variables.
 */
static int
flags_match(ipfw_insn *cmd, uint8_t bits)
{
        u_char want_clear;

        bits = ~bits;

        if (((cmd->arg1 & 0xff) & bits) != 0)
                return 0; /* some bits we want set were clear */

        want_clear = (cmd->arg1 >> 8) & 0xff;
        if ((want_clear & bits) != want_clear)
                return 0; /* some bits we want clear were set */

        return 1;
}
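/*
 * Worked example: for "tcpflags syn,!ack" the compiler stores
 * arg1 = (TH_ACK << 8) | TH_SYN = 0x1002.  For a pure SYN segment,
 * bits == ~TH_SYN == 0xfd after the inversion above; then
 * (0x02 & 0xfd) == 0 (every wanted-set bit really was set) and
 * (0x10 & 0xfd) == 0x10 == want_clear (TH_ACK really is clear),
 * so the function returns 1.
 */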
static int
ipopts_match(struct ip *ip, ipfw_insn *cmd)
{
        int optlen, bits = 0;
        u_char *cp = (u_char *)(ip + 1);
        int x = (ip->ip_hl << 2) - sizeof(struct ip);

        for (; x > 0; x -= optlen, cp += optlen) {
                int opt = cp[IPOPT_OPTVAL];

                if (opt == IPOPT_EOL)
                        break;

                if (opt == IPOPT_NOP) {
                        optlen = 1;
                } else {
                        optlen = cp[IPOPT_OLEN];
                        if (optlen <= 0 || optlen > x)
                                return 0; /* invalid or truncated */
                }

                switch (opt) {
                case IPOPT_LSRR:
                        bits |= IP_FW_IPOPT_LSRR;
                        break;

                case IPOPT_SSRR:
                        bits |= IP_FW_IPOPT_SSRR;
                        break;

                case IPOPT_RR:
                        bits |= IP_FW_IPOPT_RR;
                        break;

                case IPOPT_TS:
                        bits |= IP_FW_IPOPT_TS;
                        break;

                default:
                        break;
                }
        }
        return (flags_match(cmd, bits));
}
static int
tcpopts_match(struct ip *ip, ipfw_insn *cmd)
{
        int optlen, bits = 0;
        struct tcphdr *tcp = L3HDR(struct tcphdr, ip);
        u_char *cp = (u_char *)(tcp + 1);
        int x = (tcp->th_off << 2) - sizeof(struct tcphdr);

        for (; x > 0; x -= optlen, cp += optlen) {
                int opt = cp[0];

                if (opt == TCPOPT_EOL)
                        break;

                if (opt == TCPOPT_NOP) {
                        optlen = 1;
                } else {
                        optlen = cp[1];
                        if (optlen <= 0)
                                break;
                }

                switch (opt) {
                case TCPOPT_MAXSEG:
                        bits |= IP_FW_TCPOPT_MSS;
                        break;

                case TCPOPT_WINDOW:
                        bits |= IP_FW_TCPOPT_WINDOW;
                        break;

                case TCPOPT_SACK_PERMITTED:
                case TCPOPT_SACK:
                        bits |= IP_FW_TCPOPT_SACK;
                        break;

                case TCPOPT_TIMESTAMP:
                        bits |= IP_FW_TCPOPT_TS;
                        break;

                case TCPOPT_CC:
                case TCPOPT_CCNEW:
                case TCPOPT_CCECHO:
                        bits |= IP_FW_TCPOPT_CC;
                        break;

                default:
                        break;
                }
        }
        return (flags_match(cmd, bits));
}
static int
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
{
        if (ifp == NULL)	/* no iface with this packet, match fails */
                return 0;

        /* Check by name or by IP address */
        if (cmd->name[0] != '\0') { /* match by name */
                if (cmd->p.glob) {
                        if (kfnmatch(cmd->name, ifp->if_xname, 0) == 0)
                                return 1; /* match */
                } else {
                        if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
                                return 1; /* match */
                }
        } else {
                struct ifaddr_container *ifac;

                TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
                        struct ifaddr *ia = ifac->ifa;

                        if (ia->ifa_addr == NULL)
                                continue;
                        if (ia->ifa_addr->sa_family != AF_INET)
                                continue;
                        if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
                            (ia->ifa_addr))->sin_addr.s_addr)
                                return(1);	/* match */
                }
        }
        return(0);	/* no match, fail ... */
}
#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
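/*
 * SNPARGS expands to the (buffer, size) argument pair expected by
 * ksnprintf(), offset by "len" bytes, which allows incremental
 * formatting into a fixed buffer, e.g.:
 *
 *	len = ksnprintf(SNPARGS(proto, 0), "TCP %s", ...);
 *	ksnprintf(SNPARGS(proto, len), ":%d", ...);
 *
 * Once len reaches sizeof(buf), the remaining size degrades to 0 and
 * further calls become no-ops instead of overflowing the buffer.
 */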
/*
 * We enter here when we have a rule with O_LOG.
 * XXX this function alone takes about 2Kbytes of code!
 */
static void
ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
         struct mbuf *m, struct ifnet *oif)
{
        char *action;
        int limit_reached = 0;
        char action2[40], proto[48], fragment[28];

        fragment[0] = '\0';
        proto[0] = '\0';

        if (f == NULL) {	/* bogus pkt */
                struct ipfw_context *ctx = ipfw_ctx[mycpuid];

                if (verbose_limit != 0 &&
                    ctx->ipfw_norule_counter >= verbose_limit)
                        return;
                ctx->ipfw_norule_counter++;
                if (ctx->ipfw_norule_counter == verbose_limit)
                        limit_reached = verbose_limit;
                action = "Refuse";
        } else {	/* O_LOG is the first action, find the real one */
                ipfw_insn *cmd = ACTION_PTR(f);
                ipfw_insn_log *l = (ipfw_insn_log *)cmd;

                if (l->max_log != 0 && l->log_left == 0)
                        return;
                l->log_left--;
                if (l->log_left == 0)
                        limit_reached = l->max_log;
                cmd += F_LEN(cmd);	/* point to first action */
                if (cmd->opcode == O_PROB)
                        cmd += F_LEN(cmd);

                action = action2;
                switch (cmd->opcode) {
                case O_DENY:
                        action = "Deny";
                        break;

                case O_REJECT:
                        if (cmd->arg1 == ICMP_REJECT_RST) {
                                action = "Reset";
                        } else if (cmd->arg1 == ICMP_UNREACH_HOST) {
                                action = "Reject";
                        } else {
                                ksnprintf(SNPARGS(action2, 0), "Unreach %d",
                                          cmd->arg1);
                        }
                        break;

                case O_ACCEPT:
                        action = "Accept";
                        break;

                case O_COUNT:
                        action = "Count";
                        break;

                case O_DIVERT:
                        ksnprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1);
                        break;

                case O_TEE:
                        ksnprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1);
                        break;

                case O_SKIPTO:
                        ksnprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1);
                        break;

                case O_PIPE:
                        ksnprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1);
                        break;

                case O_QUEUE:
                        ksnprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1);
                        break;

                case O_FORWARD_IP:
                        {
                        ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
                        int len;

                        len = ksnprintf(SNPARGS(action2, 0), "Forward to %s",
                                        inet_ntoa(sa->sa.sin_addr));
                        if (sa->sa.sin_port) {
                                ksnprintf(SNPARGS(action2, len), ":%d",
                                          sa->sa.sin_port);
                        }
                        }
                        break;

                default:
                        action = "UNKNOWN";
                        break;
                }
        }

        if (hlen == 0) {	/* non-ip */
                ksnprintf(SNPARGS(proto, 0), "MAC");
        } else {
                struct ip *ip = mtod(m, struct ip *);
                /* these three are all aliases to the same thing */
                struct icmp *const icmp = L3HDR(struct icmp, ip);
                struct tcphdr *const tcp = (struct tcphdr *)icmp;
                struct udphdr *const udp = (struct udphdr *)icmp;

                int ip_off, offset, ip_len;
                int len;

                if (eh != NULL) { /* layer 2 packets are as on the wire */
                        ip_off = ntohs(ip->ip_off);
                        ip_len = ntohs(ip->ip_len);
                } else {
                        ip_off = ip->ip_off;
                        ip_len = ip->ip_len;
                }
                offset = ip_off & IP_OFFMASK;

                switch (ip->ip_p) {
                case IPPROTO_TCP:
                        len = ksnprintf(SNPARGS(proto, 0), "TCP %s",
                                        inet_ntoa(ip->ip_src));
                        if (offset == 0) {
                                ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
                                          ntohs(tcp->th_sport),
                                          inet_ntoa(ip->ip_dst),
                                          ntohs(tcp->th_dport));
                        } else {
                                ksnprintf(SNPARGS(proto, len), " %s",
                                          inet_ntoa(ip->ip_dst));
                        }
                        break;

                case IPPROTO_UDP:
                        len = ksnprintf(SNPARGS(proto, 0), "UDP %s",
                                        inet_ntoa(ip->ip_src));
                        if (offset == 0) {
                                ksnprintf(SNPARGS(proto, len), ":%d %s:%d",
                                          ntohs(udp->uh_sport),
                                          inet_ntoa(ip->ip_dst),
                                          ntohs(udp->uh_dport));
                        } else {
                                ksnprintf(SNPARGS(proto, len), " %s",
                                          inet_ntoa(ip->ip_dst));
                        }
                        break;

                case IPPROTO_ICMP:
                        if (offset == 0) {
                                len = ksnprintf(SNPARGS(proto, 0),
                                                "ICMP:%u.%u ",
                                                icmp->icmp_type,
                                                icmp->icmp_code);
                        } else {
                                len = ksnprintf(SNPARGS(proto, 0), "ICMP ");
                        }
                        len += ksnprintf(SNPARGS(proto, len), "%s",
                                         inet_ntoa(ip->ip_src));
                        ksnprintf(SNPARGS(proto, len), " %s",
                                  inet_ntoa(ip->ip_dst));
                        break;

                default:
                        len = ksnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
                                        inet_ntoa(ip->ip_src));
                        ksnprintf(SNPARGS(proto, len), " %s",
                                  inet_ntoa(ip->ip_dst));
                        break;
                }

                if (ip_off & (IP_MF | IP_OFFMASK)) {
                        ksnprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
                                  ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
                                  offset << 3, (ip_off & IP_MF) ? "+" : "");
                }
        }

        if (oif || m->m_pkthdr.rcvif) {
                log(LOG_SECURITY | LOG_INFO,
                    "ipfw: %d %s %s %s via %s%s\n",
                    f ? f->rulenum : -1,
                    action, proto, oif ? "out" : "in",
                    oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
                    fragment);
        } else {
                log(LOG_SECURITY | LOG_INFO,
                    "ipfw: %d %s %s [no if info]%s\n",
                    f ? f->rulenum : -1,
                    action, proto, fragment);
        }

        if (limit_reached) {
                log(LOG_SECURITY | LOG_NOTICE,
                    "ipfw: limit %d reached on entry %d\n",
                    limit_reached, f ? f->rulenum : -1);
        }
}
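/*
 * A typical line produced above looks roughly like:
 *
 *	ipfw: 1000 Deny TCP 10.0.0.5:34567 192.168.1.1:22 in via em0
 *
 * i.e. rule number, action, protocol with addresses/ports, direction,
 * and the interface the packet was seen on.
 */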
/*
 * IMPORTANT: the hash function for dynamic rules must be commutative
 * in source and destination (ip,port), because rules are bidirectional
 * and we want to find both in the same bucket.
 */
static __inline int
hash_packet(struct ipfw_flow_id *id)
{
        uint32_t i;

        i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
        i &= (curr_dyn_buckets - 1);
        return i;
}
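/*
 * XOR is commutative, so swapping the (ip,port) pairs cannot change
 * the result: the forward flow 10.0.0.1:1234 -> 10.0.0.2:80 and the
 * reverse flow 10.0.0.2:80 -> 10.0.0.1:1234 land in the same bucket,
 * which is what lets lookup_dyn_rule() find a state in either
 * direction with a single hash probe.
 */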
/*
 * Unlink a dynamic rule from a chain.  prev is a pointer to
 * the previous one, q is a pointer to the rule to delete,
 * head is a pointer to the head of the queue.
 * Modifies q and potentially also head.
 */
#define UNLINK_DYN_RULE(prev, head, q)					\
do {									\
        ipfw_dyn_rule *old_q = q;					\
									\
        /* remove a refcount to the parent */				\
        if (q->dyn_type == O_LIMIT)					\
                q->parent->count--;					\
        DPRINTF("-- unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",	\
                q->id.src_ip, q->id.src_port,				\
                q->id.dst_ip, q->id.dst_port, dyn_count - 1);		\
        if (prev != NULL)						\
                prev->next = q = q->next;				\
        else								\
                head = q = q->next;					\
        KASSERT(dyn_count > 0, ("invalid dyn count %u\n", dyn_count));	\
        dyn_count--;							\
        kfree(old_q, M_IPFW);						\
} while (0)
#define TIME_LEQ(a, b)	((int)((a) - (b)) <= 0)
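/*
 * The signed cast makes TIME_LEQ robust against time_second wrapping:
 * e.g. with a = 0xfffffffeU and b = 2 the difference is -4 as an int,
 * so "a" is still correctly treated as not later than "b" across the
 * 32-bit rollover.
 */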
/*
 * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
 *
 * If keep_me == NULL, rules are deleted even if not expired,
 * otherwise only expired rules are removed.
 *
 * The value of the second parameter is also used to identify
 * a rule we absolutely do not want to remove (e.g. because we are
 * holding a reference to it -- this is the case with O_LIMIT_PARENT
 * rules).  The pointer is only used for comparison, so any non-null
 * value will do.
 */
static void
remove_dyn_rule_locked(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
{
        static uint32_t last_remove = 0; /* XXX */

#define FORCE	(keep_me == NULL)

        ipfw_dyn_rule *prev, *q;
        int i, pass = 0, max_pass = 0, unlinked = 0;

        if (ipfw_dyn_v == NULL || dyn_count == 0)
                return;
        /* do not expire more than once per second, it is useless */
        if (!FORCE && last_remove == time_second)
                return;
        last_remove = time_second;

        /*
         * Because O_LIMIT rules refer to parent rules, during the first
         * pass only remove child rules and mark any pending LIMIT_PARENT;
         * remove the parents in a second pass.
         */
next_pass:
        for (i = 0; i < curr_dyn_buckets; i++) {
                for (prev = NULL, q = ipfw_dyn_v[i]; q;) {
                        /*
                         * Logic can become complex here, so we split tests.
                         */
                        if (q == keep_me)
                                goto next;
                        if (rule != NULL && rule->stub != q->stub)
                                goto next; /* not the one we are looking for */
                        if (q->dyn_type == O_LIMIT_PARENT) {
                                /*
                                 * handle parent in the second pass,
                                 * record we need one.
                                 */
                                max_pass = 1;
                                if (pass == 0)
                                        goto next;
                                if (FORCE && q->count != 0) {
                                        /* XXX should not happen! */
                                        kprintf("OUCH! cannot remove rule, "
                                                "count %d\n", q->count);
                                }
                        } else {
                                if (!FORCE &&
                                    !TIME_LEQ(q->expire, time_second))
                                        goto next;
                        }
                        unlinked = 1;
                        UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
                        continue;
next:
                        prev = q;
                        q = q->next;
                }
        }
        if (pass++ < max_pass)
                goto next_pass;

        if (unlinked)
                ++dyn_buckets_gen;

#undef FORCE
}
/*
 * Lookup a dynamic rule.
 */
static ipfw_dyn_rule *
lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
                struct tcphdr *tcp)
{
        /*
         * stateful ipfw extensions.
         * Lookup into dynamic session queue
         */
#define MATCH_REVERSE	0
#define MATCH_FORWARD	1
#define MATCH_NONE	2
#define MATCH_UNKNOWN	3
        int i, dir = MATCH_NONE;
        ipfw_dyn_rule *prev, *q = NULL;

        if (ipfw_dyn_v == NULL)
                goto done;	/* not found */

        i = hash_packet(pkt);
        for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
                if (q->dyn_type == O_LIMIT_PARENT)
                        goto next;

                if (TIME_LEQ(q->expire, time_second)) {
                        /*
                         * Entry expired; skip.
                         * Let ipfw_tick() take care of it
                         */
                        goto next;
                }

                if (pkt->proto == q->id.proto) {
                        if (pkt->src_ip == q->id.src_ip &&
                            pkt->dst_ip == q->id.dst_ip &&
                            pkt->src_port == q->id.src_port &&
                            pkt->dst_port == q->id.dst_port) {
                                dir = MATCH_FORWARD;
                                break;
                        }
                        if (pkt->src_ip == q->id.dst_ip &&
                            pkt->dst_ip == q->id.src_ip &&
                            pkt->src_port == q->id.dst_port &&
                            pkt->dst_port == q->id.src_port) {
                                dir = MATCH_REVERSE;
                                break;
                        }
                }
next:
                prev = q;
                q = q->next;
        }
        if (q == NULL)
                goto done;	/* q = NULL, not found */

        if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
                u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST);

#define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define BOTH_FIN	(TH_FIN | (TH_FIN << 8))

                q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
                switch (q->state) {
                case TH_SYN:				/* opening */
                        q->expire = time_second + dyn_syn_lifetime;
                        break;

                case BOTH_SYN:			/* move to established */
                case BOTH_SYN | TH_FIN:		/* one side tries to close */
                case BOTH_SYN | (TH_FIN << 8):
                        if (tcp) {
                                uint32_t ack = ntohl(tcp->th_ack);

#define _SEQ_GE(a, b)	((int)(a) - (int)(b) >= 0)

                                if (dir == MATCH_FORWARD) {
                                        if (q->ack_fwd == 0 ||
                                            _SEQ_GE(ack, q->ack_fwd))
                                                q->ack_fwd = ack;
                                        else /* ignore out-of-sequence */
                                                break;
                                } else {
                                        if (q->ack_rev == 0 ||
                                            _SEQ_GE(ack, q->ack_rev))
                                                q->ack_rev = ack;
                                        else /* ignore out-of-sequence */
                                                break;
                                }
#undef _SEQ_GE
                        }
                        q->expire = time_second + dyn_ack_lifetime;
                        break;

                case BOTH_SYN | BOTH_FIN:	/* both sides closed */
                        KKASSERT(dyn_fin_lifetime < dyn_keepalive_period);
                        q->expire = time_second + dyn_fin_lifetime;
                        break;

                default:
                        /*
                         * reset or some invalid combination, but can also
                         * occur if we use keep-state the wrong way.
                         */
                        if ((q->state & ((TH_RST << 8) | TH_RST)) == 0)
                                kprintf("invalid state: 0x%x\n", q->state);
                        KKASSERT(dyn_rst_lifetime < dyn_keepalive_period);
                        q->expire = time_second + dyn_rst_lifetime;
                        break;
                }
        } else if (pkt->proto == IPPROTO_UDP) {
                q->expire = time_second + dyn_udp_lifetime;
        } else {
                /* other protocols */
                q->expire = time_second + dyn_short_lifetime;
        }
done:
        if (match_direction)
                *match_direction = dir;
        return q;
}
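/*
 * State-tracking example: q->state accumulates the forward-direction
 * TCP flags in its low byte and the reverse-direction flags in its
 * high byte.  After the handshake (SYN forward, SYN|ACK reverse)
 * q->state == BOTH_SYN, so the entry graduates from the short
 * dyn_syn_lifetime to the much longer dyn_ack_lifetime; later FIN/RST
 * bits shorten it again via dyn_fin_lifetime/dyn_rst_lifetime.
 */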
static struct ip_fw *
lookup_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp,
            uint16_t len, int *deny)
{
        struct ip_fw *rule = NULL;
        ipfw_dyn_rule *q;
        struct ipfw_context *ctx = ipfw_ctx[mycpuid];
        uint32_t gen;

        *deny = 0;
        gen = ctx->ipfw_gen;

        lockmgr(&dyn_lock, LK_SHARED);

        if (ctx->ipfw_gen != gen) {
                /*
                 * Static rules were changed while we were waiting
                 * for the dynamic hash table lock; deny this packet,
                 * since it is _not_ known whether it is safe to keep
                 * iterating the static rules.
                 */
                *deny = 1;
                goto back;
        }

        q = lookup_dyn_rule(pkt, match_direction, tcp);
        if (q != NULL) {
                rule = q->stub->rule[mycpuid];
                KKASSERT(rule->stub == q->stub && rule->cpuid == mycpuid);

                /* XXX */
                q->pcnt++;
                q->bcnt += len;
        }
back:
        lockmgr(&dyn_lock, LK_RELEASE);
        return rule;
}
static void
realloc_dynamic_table(void)
{
        ipfw_dyn_rule **old_dyn_v;
        uint32_t old_curr_dyn_buckets;

        KASSERT(dyn_buckets <= 65536 && (dyn_buckets & (dyn_buckets - 1)) == 0,
                ("invalid dyn_buckets %d\n", dyn_buckets));

        /* Save the current buckets array for later error recovery */
        old_dyn_v = ipfw_dyn_v;
        old_curr_dyn_buckets = curr_dyn_buckets;

        curr_dyn_buckets = dyn_buckets;
        for (;;) {
                ipfw_dyn_v = kmalloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
                                     M_IPFW, M_NOWAIT | M_ZERO);
                if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
                        break;

                curr_dyn_buckets /= 2;
                if (curr_dyn_buckets <= old_curr_dyn_buckets &&
                    old_dyn_v != NULL) {
                        /*
                         * Don't try allocating a smaller buckets array;
                         * reuse the old one, which already contains
                         * enough buckets.
                         */
                        break;
                }
        }

        if (ipfw_dyn_v != NULL) {
                if (old_dyn_v != NULL)
                        kfree(old_dyn_v, M_IPFW);
        } else {
                /* Allocation failed, restore old buckets array */
                ipfw_dyn_v = old_dyn_v;
                curr_dyn_buckets = old_curr_dyn_buckets;
        }

        if (ipfw_dyn_v != NULL)
                ++dyn_buckets_gen;
}
/*
 * Install state of type 'type' for a dynamic session.
 * The hash table contains two types of rules:
 * - regular rules (O_KEEP_STATE)
 * - rules for sessions with a limited number of sessions per user
 *   (O_LIMIT).  When they are created, the parent's count is
 *   increased by 1, and decreased on delete.  In this case,
 *   the third parameter is the parent rule and not the chain.
 * - "parent" rules for the above (O_LIMIT_PARENT).
 */
static ipfw_dyn_rule *
add_dyn_rule(struct ipfw_flow_id *id, uint8_t dyn_type, struct ip_fw *rule)
{
        ipfw_dyn_rule *r;
        int i;

        if (ipfw_dyn_v == NULL ||
            (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
                realloc_dynamic_table();
                if (ipfw_dyn_v == NULL)
                        return NULL; /* failed ! */
        }
        i = hash_packet(id);

        r = kmalloc(sizeof(*r), M_IPFW, M_NOWAIT | M_ZERO);
        if (r == NULL) {
                kprintf("sorry cannot allocate state\n");
                return NULL;
        }

        /* increase refcount on parent, and set pointer */
        if (dyn_type == O_LIMIT) {
                ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;

                if (parent->dyn_type != O_LIMIT_PARENT)
                        panic("invalid parent");
                parent->count++;
                r->parent = parent;
                rule = parent->stub->rule[mycpuid];
                KKASSERT(rule->stub == parent->stub);
        } else {
                KKASSERT(rule->cpuid == mycpuid && rule->stub != NULL);
        }

        r->id = *id;
        r->expire = time_second + dyn_syn_lifetime;
        r->stub = rule->stub;
        r->dyn_type = dyn_type;
        r->pcnt = r->bcnt = 0;

        r->next = ipfw_dyn_v[i];
        ipfw_dyn_v[i] = r;
        dyn_count++;
        DPRINTF("-- add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
                dyn_type,
                r->id.src_ip, r->id.src_port,
                r->id.dst_ip, r->id.dst_port, dyn_count);
        return r;
}
/*
 * Lookup a dynamic parent rule using pkt and rule as search keys.
 * If the lookup fails, then install one.
 */
static ipfw_dyn_rule *
lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
{
        ipfw_dyn_rule *q;
        int i;

        i = hash_packet(pkt);
        for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) {
                if (q->dyn_type == O_LIMIT_PARENT &&
                    rule->stub == q->stub &&
                    pkt->proto == q->id.proto &&
                    pkt->src_ip == q->id.src_ip &&
                    pkt->dst_ip == q->id.dst_ip &&
                    pkt->src_port == q->id.src_port &&
                    pkt->dst_port == q->id.dst_port) {
                        q->expire = time_second + dyn_short_lifetime;
                        DPRINTF("lookup_dyn_parent found 0x%p\n", q);
                        return q;
                }
        }
        return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
}
/*
 * Install dynamic state for rule type cmd->o.opcode
 *
 * Returns 1 (failure) if state is not installed because of errors or because
 * session limitations are enforced.
 */
static int
install_state_locked(struct ip_fw *rule, ipfw_insn_limit *cmd,
                     struct ip_fw_args *args)
{
        static int last_log; /* XXX */
        ipfw_dyn_rule *q;

        DPRINTF("-- install state type %d 0x%08x %u -> 0x%08x %u\n",
                cmd->o.opcode,
                args->f_id.src_ip, args->f_id.src_port,
                args->f_id.dst_ip, args->f_id.dst_port);

        q = lookup_dyn_rule(&args->f_id, NULL, NULL);
        if (q != NULL) { /* should never occur */
                if (last_log != time_second) {
                        last_log = time_second;
                        kprintf(" install_state: entry already present, done\n");
                }
                return 0;
        }

        if (dyn_count >= dyn_max) {
                /*
                 * Run out of slots, try to remove any expired rule.
                 */
                remove_dyn_rule_locked(NULL, (ipfw_dyn_rule *)1);
                if (dyn_count >= dyn_max) {
                        if (last_log != time_second) {
                                last_log = time_second;
                                kprintf("install_state: "
                                        "Too many dynamic rules\n");
                        }
                        return 1; /* cannot install, notify caller */
                }
        }

        switch (cmd->o.opcode) {
        case O_KEEP_STATE: /* bidir rule */
                if (add_dyn_rule(&args->f_id, O_KEEP_STATE, rule) == NULL)
                        return 1;
                break;

        case O_LIMIT: /* limit number of sessions */
                {
                uint16_t limit_mask = cmd->limit_mask;
                struct ipfw_flow_id id;
                ipfw_dyn_rule *parent;

                DPRINTF("installing dyn-limit rule %d\n",
                        cmd->conn_limit);

                id.dst_ip = id.src_ip = 0;
                id.dst_port = id.src_port = 0;
                id.proto = args->f_id.proto;

                if (limit_mask & DYN_SRC_ADDR)
                        id.src_ip = args->f_id.src_ip;
                if (limit_mask & DYN_DST_ADDR)
                        id.dst_ip = args->f_id.dst_ip;
                if (limit_mask & DYN_SRC_PORT)
                        id.src_port = args->f_id.src_port;
                if (limit_mask & DYN_DST_PORT)
                        id.dst_port = args->f_id.dst_port;

                parent = lookup_dyn_parent(&id, rule);
                if (parent == NULL) {
                        kprintf("add parent failed\n");
                        return 1;
                }

                if (parent->count >= cmd->conn_limit) {
                        /*
                         * See if we can remove some expired rule.
                         */
                        remove_dyn_rule_locked(rule, parent);
                        if (parent->count >= cmd->conn_limit) {
                                if (fw_verbose &&
                                    last_log != time_second) {
                                        last_log = time_second;
                                        log(LOG_SECURITY | LOG_DEBUG,
                                            "drop session, "
                                            "too many entries\n");
                                }
                                return 1;
                        }
                }
                if (add_dyn_rule(&args->f_id, O_LIMIT,
                                 (struct ip_fw *)parent) == NULL)
                        return 1;
                }
                break;

        default:
                kprintf("unknown dynamic rule type %u\n", cmd->o.opcode);
                return 1;
        }
        lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
        return 0;
}
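/*
 * Example: a rule such as
 *
 *	ipfw add 1000 allow tcp from any to me 22 setup limit src-addr 4
 *
 * takes the O_LIMIT path above: an O_LIMIT_PARENT entry keyed only on
 * the masked field (here the source address) counts the sessions, and
 * a 5th concurrent connection from the same address is refused once
 * parent->count reaches cmd->conn_limit.
 */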
static int
install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
              struct ip_fw_args *args, int *deny)
{
        struct ipfw_context *ctx = ipfw_ctx[mycpuid];
        uint32_t gen;
        int ret = 0;

        *deny = 0;
        gen = ctx->ipfw_gen;

        lockmgr(&dyn_lock, LK_EXCLUSIVE);
        if (ctx->ipfw_gen != gen) {
                /* See the comment in lookup_rule() */
                *deny = 1;
        } else {
                ret = install_state_locked(rule, cmd, args);
        }
        lockmgr(&dyn_lock, LK_RELEASE);
        return ret;
}
/*
 * Transmit a TCP packet, containing either a RST or a keepalive.
 * When flags & TH_RST, we are sending a RST packet, because a
 * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and flags & TH_SYN determines
 * the direction (see below).
 */
static void
send_pkt(struct ipfw_flow_id *id, uint32_t seq, uint32_t ack, int flags)
{
        struct mbuf *m;
        struct ip *ip;
        struct tcphdr *tcp;
        struct route sro;	/* fake route */

        MGETHDR(m, MB_DONTWAIT, MT_HEADER);
        if (m == NULL)
                return;
        m->m_pkthdr.rcvif = NULL;
        m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
        m->m_data += max_linkhdr;

        ip = mtod(m, struct ip *);
        bzero(ip, m->m_len);
        tcp = (struct tcphdr *)(ip + 1); /* no IP options */
        ip->ip_p = IPPROTO_TCP;
        tcp->th_off = 5;

        /*
         * Assume we are sending a RST (or a keepalive in the reverse
         * direction), swap src and destination addresses and ports.
         */
        ip->ip_src.s_addr = htonl(id->dst_ip);
        ip->ip_dst.s_addr = htonl(id->src_ip);
        tcp->th_sport = htons(id->dst_port);
        tcp->th_dport = htons(id->src_port);
        if (flags & TH_RST) {	/* we are sending a RST */
                if (flags & TH_ACK) {
                        tcp->th_seq = htonl(ack);
                        tcp->th_ack = htonl(0);
                        tcp->th_flags = TH_RST;
                } else {
                        tcp->th_seq = htonl(0);
                        tcp->th_ack = htonl(seq);
                        tcp->th_flags = TH_RST | TH_ACK;
                }
        } else {
                /*
                 * We are sending a keepalive.  flags & TH_SYN determines
                 * the direction, forward if set, reverse if clear.
                 * NOTE: seq and ack are always assumed to be correct
                 * as set by the caller.  This may be confusing...
                 */
                if (flags & TH_SYN) {
                        /*
                         * we have to rewrite the correct addresses!
                         */
                        ip->ip_dst.s_addr = htonl(id->dst_ip);
                        ip->ip_src.s_addr = htonl(id->src_ip);
                        tcp->th_dport = htons(id->dst_port);
                        tcp->th_sport = htons(id->src_port);
                }
                tcp->th_seq = htonl(seq);
                tcp->th_ack = htonl(ack);
                tcp->th_flags = TH_ACK;
        }

        /*
         * set ip_len to the payload size so we can compute
         * the tcp checksum on the pseudoheader
         * XXX check this, could save a couple of words?
         */
        ip->ip_len = htons(sizeof(struct tcphdr));
        tcp->th_sum = in_cksum(m, m->m_pkthdr.len);

        /*
         * now fill fields left out earlier
         */
        ip->ip_ttl = ip_defttl;
        ip->ip_len = m->m_pkthdr.len;

        bzero(&sro, sizeof(sro));
        ip_rtaddr(ip->ip_dst, &sro);

        m->m_pkthdr.fw_flags |= IPFW_MBUF_GENERATED;
        ip_output(m, NULL, &sro, 0, NULL, NULL);
}
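/*
 * Keepalive usage sketch: for a dynamic entry close to expiry, the
 * timer code calls send_pkt() once per direction with plain ACK
 * probes, roughly using the recorded q->ack_fwd/q->ack_rev values as
 * seq/ack, so either endpoint's reply refreshes the entry (the TH_SYN
 * flag above only selects the direction; no SYN goes on the wire).
 */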
/*
 * Send a reject message, consuming the mbuf passed as an argument.
 */
static void
send_reject(struct ip_fw_args *args, int code, int offset, int ip_len)
{
        if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
                /* We need the IP header in host order for icmp_error(). */
                if (args->eh != NULL) {
                        struct ip *ip = mtod(args->m, struct ip *);

                        ip->ip_len = ntohs(ip->ip_len);
                        ip->ip_off = ntohs(ip->ip_off);
                }
                icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
        } else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
                struct tcphdr *const tcp =
                    L3HDR(struct tcphdr, mtod(args->m, struct ip *));

                if ((tcp->th_flags & TH_RST) == 0) {
                        send_pkt(&args->f_id, ntohl(tcp->th_seq),
                                 ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
                }
                m_freem(args->m);
        } else {
                m_freem(args->m);
        }
        args->m = NULL;
}
/*
 * Given an ip_fw *, lookup_next_rule will return a pointer
 * to the next rule, which can be either the jump
 * target (for skipto instructions) or the next one in the list (in
 * all other cases including a missing jump target).
 * The result is also written in the "next_rule" field of the rule.
 * Backward jumps are not allowed, so we start looking from the next
 * rule.
 *
 * This never returns NULL -- in case we do not have an exact match,
 * the next rule is returned.  When the ruleset is changed,
 * pointers are flushed so we are always correct.
 */
static struct ip_fw *
lookup_next_rule(struct ip_fw *me)
{
        struct ip_fw *rule = NULL;
        ipfw_insn *cmd;

        /* look for action, in case it is a skipto */
        cmd = ACTION_PTR(me);
        if (cmd->opcode == O_LOG)
                cmd += F_LEN(cmd);
        if (cmd->opcode == O_SKIPTO) {
                for (rule = me->next; rule; rule = rule->next) {
                        if (rule->rulenum >= cmd->arg1)
                                break;
                }
        }
        if (rule == NULL)	/* failure or not a skipto */
                rule = me->next;
        me->next_rule = rule;
        return rule;
}
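/*
 * Example: for "ipfw add 100 skipto 500 udp from any to any", the
 * first matching packet walks the list once, caches the first rule
 * with rulenum >= 500 in me->next_rule, and subsequent packets jump
 * there directly until a ruleset change flushes the cached pointers.
 */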
static int
_ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
                enum ipfw_opcodes opcode, uid_t uid)
{
        struct in_addr src_ip, dst_ip;
        struct inpcbinfo *pi;
        int wildcard;
        struct inpcb *pcb;

        if (fid->proto == IPPROTO_TCP) {
                wildcard = 0;
                pi = &tcbinfo[mycpuid];
        } else if (fid->proto == IPPROTO_UDP) {
                wildcard = 1;
                pi = &udbinfo;
        } else {
                return 0;
        }

        /*
         * Values in 'fid' are in host byte order
         */
        dst_ip.s_addr = htonl(fid->dst_ip);
        src_ip.s_addr = htonl(fid->src_ip);
        if (oif) {
                pcb = in_pcblookup_hash(pi,
                        dst_ip, htons(fid->dst_port),
                        src_ip, htons(fid->src_port),
                        wildcard, oif);
        } else {
                pcb = in_pcblookup_hash(pi,
                        src_ip, htons(fid->src_port),
                        dst_ip, htons(fid->dst_port),
                        wildcard, NULL);
        }
        if (pcb == NULL || pcb->inp_socket == NULL)
                return 0;

        if (opcode == O_UID) {
#define socheckuid(a,b)	((a)->so_cred->cr_uid != (b))
                return !socheckuid(pcb->inp_socket, uid);
#undef socheckuid
        } else {
                return groupmember(uid, pcb->inp_socket->so_cred);
        }
}
static int
ipfw_match_uid(const struct ipfw_flow_id *fid, struct ifnet *oif,
               enum ipfw_opcodes opcode, uid_t uid, int *deny)
{
        struct ipfw_context *ctx = ipfw_ctx[mycpuid];
        uint32_t gen;
        int match = 0;

        *deny = 0;
        gen = ctx->ipfw_gen;

        if (gen != ctx->ipfw_gen) {
                /* See the comment in lookup_rule() */
                *deny = 1;
        } else {
                match = _ipfw_match_uid(fid, oif, opcode, uid);
        }
        return match;
}
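/*
 * Example: "ipfw add deny tcp from any to any uid 1001" ends up here:
 * the packet's addresses and ports are looked up in the pcb hash, and
 * the owning socket's credential is compared against uid 1001.
 */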
/*
 * The main check routine for the firewall.
 *
 * All arguments are in args so we can modify them and return them
 * back to the caller.
 *
 * Parameters:
 *
 *	args->m	(in/out) The packet; we set to NULL when/if we nuke it.
 *		Starts with the IP header.
 *	args->eh (in)	Mac header if present, or NULL for layer3 packet.
 *	args->oif	Outgoing interface, or NULL if packet is incoming.
 *		The incoming interface is in the mbuf. (in)
 *
 *	args->rule	Pointer to the last matching rule (in/out)
 *	args->f_id	Addresses grabbed from the packet (out)
 *
 * Return value:
 *
 *	If the packet was denied/rejected and has been dropped, *m is equal
 *	to NULL upon return.
 *
 *	IP_FW_DENY	the packet must be dropped.
 *	IP_FW_PASS	The packet is to be accepted and routed normally.
 *	IP_FW_DIVERT	Divert the packet to port (args->cookie)
 *	IP_FW_TEE	Tee the packet to port (args->cookie)
 *	IP_FW_DUMMYNET	Send the packet to pipe/queue (args->cookie)
 */
static int
ipfw_chk(struct ip_fw_args *args)
{
        /*
         * Local variables hold state during the processing of a packet.
         *
         * IMPORTANT NOTE: to speed up the processing of rules, there
         * are some assumptions about the values of the variables, which
         * are documented here.  Should you change them, please check
         * the implementation of the various instructions to make sure
         * that they still work.
         *
         * args->eh	The MAC header.  It is non-null for a layer2
         *	packet, it is NULL for a layer-3 packet.
         *
         * m | args->m	Pointer to the mbuf, as received from the caller.
         *	It may change if ipfw_chk() does an m_pullup, or if it
         *	consumes the packet because it calls send_reject().
         *	XXX This has to change, so that ipfw_chk() never modifies
         *	or consumes the buffer.
         *	ip is simply an alias of the value of m, and it is kept
         *	in sync with it (the packet is supposed to start with
         *	the ip header).
         */
        struct mbuf *m = args->m;
        struct ip *ip = mtod(m, struct ip *);

        /*
         * oif | args->oif	If NULL, ipfw_chk has been called on the
         *	inbound path (ether_input, ip_input).
         *	If non-NULL, ipfw_chk has been called on the outbound path
         *	(ether_output, ip_output).
         */
        struct ifnet *oif = args->oif;

        struct ip_fw *f = NULL;		/* matching rule */
        int retval = IP_FW_PASS;
        struct m_tag *mtag;
        struct divert_info *divinfo;

        /*
         * hlen	The length of the IPv4 header.
         *	hlen >0 means we have an IPv4 packet.
         */
        u_int hlen = 0;		/* hlen >0 means we have an IP pkt */

        /*
         * offset	The offset of a fragment.  offset != 0 means that
         *	we have a fragment at this offset of an IPv4 packet.
         *	offset == 0 means that (if this is an IPv4 packet)
         *	this is the first or only fragment.
         */
        u_short offset = 0;

        /*
         * Local copies of addresses.  They are only valid if we have
         * an IP packet.
         *
         * proto	The protocol.  Set to 0 for non-ip packets,
         *	or to the protocol read from the packet otherwise.
         *	proto != 0 means that we have an IPv4 packet.
         *
         * src_port, dst_port	port numbers, in HOST format.  Only
         *	valid for TCP and UDP packets.
         *
         * src_ip, dst_ip	ip addresses, in NETWORK format.
         *	Only valid for IPv4 packets.
         */
        uint8_t proto;
        uint16_t src_port = 0, dst_port = 0;	/* NOTE: host format */
        struct in_addr src_ip, dst_ip;		/* NOTE: network format */
        uint16_t ip_len = 0;
        int deny;

        /*
         * dyn_dir = MATCH_UNKNOWN when rules unchecked,
         *	MATCH_NONE when checked and not matched (dyn_f = NULL),
         *	MATCH_FORWARD or MATCH_REVERSE otherwise (dyn_f != NULL)
         */
        int dyn_dir = MATCH_UNKNOWN;
        struct ip_fw *dyn_f = NULL;
        struct ipfw_context *ctx = ipfw_ctx[mycpuid];

        if (m->m_pkthdr.fw_flags & IPFW_MBUF_GENERATED)
                return IP_FW_PASS;	/* accept */

        if (args->eh == NULL ||		/* layer 3 packet */
            (m->m_pkthdr.len >= sizeof(struct ip) &&
             ntohs(args->eh->ether_type) == ETHERTYPE_IP))
                hlen = ip->ip_hl << 2;

        /*
         * Collect parameters into local variables for faster matching.
         */
        if (hlen == 0) {	/* do not grab addresses for non-ip pkts */
                proto = args->f_id.proto = 0;	/* mark f_id invalid */
                goto after_ip_checks;
        }

        proto = args->f_id.proto = ip->ip_p;
        src_ip = ip->ip_src;
        dst_ip = ip->ip_dst;
        if (args->eh != NULL) {	/* layer 2 packets are as on the wire */
                offset = ntohs(ip->ip_off) & IP_OFFMASK;
                ip_len = ntohs(ip->ip_len);
        } else {
                offset = ip->ip_off & IP_OFFMASK;
                ip_len = ip->ip_len;
        }

#define PULLUP_TO(len)					\
do {							\
        if (m->m_len < (len)) {				\
                args->m = m = m_pullup(m, (len));	\
                if (m == NULL)				\
                        goto pullup_failed;		\
                ip = mtod(m, struct ip *);		\
        }						\
} while (0)

        if (offset == 0) {
                switch (proto) {
                case IPPROTO_TCP:
                        {
                        struct tcphdr *tcp;

                        PULLUP_TO(hlen + sizeof(struct tcphdr));
                        tcp = L3HDR(struct tcphdr, ip);
                        dst_port = tcp->th_dport;
                        src_port = tcp->th_sport;
                        args->f_id.flags = tcp->th_flags;
                        }
                        break;

                case IPPROTO_UDP:
                        {
                        struct udphdr *udp;

                        PULLUP_TO(hlen + sizeof(struct udphdr));
                        udp = L3HDR(struct udphdr, ip);
                        dst_port = udp->uh_dport;
                        src_port = udp->uh_sport;
                        }
                        break;

                case IPPROTO_ICMP:
                        PULLUP_TO(hlen + 4);	/* type, code and checksum. */
                        args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
                        break;

                default:
                        break;
                }
        }
#undef PULLUP_TO

        args->f_id.src_ip = ntohl(src_ip.s_addr);
        args->f_id.dst_ip = ntohl(dst_ip.s_addr);
        args->f_id.src_port = src_port = ntohs(src_port);
        args->f_id.dst_port = dst_port = ntohs(dst_port);
after_ip_checks:
        if (args->rule) {
                /*
                 * Packet has already been tagged.  Look for the next rule
                 * to restart processing.
                 *
                 * If fw_one_pass != 0 then just accept it.
                 * XXX should not happen here, but optimized out in
                 * the caller.
                 */
                if (fw_one_pass)
                        return IP_FW_PASS;

                /* This rule is being/has been flushed */
                if (ipfw_flushing)
                        return IP_FW_DENY;

                KASSERT(args->rule->cpuid == mycpuid,
                        ("rule used on cpu%d\n", mycpuid));

                /* This rule was deleted */
                if (args->rule->rule_flags & IPFW_RULE_F_INVALID)
                        return IP_FW_DENY;

                f = args->rule->next_rule;
                if (f == NULL)
                        f = lookup_next_rule(args->rule);
        } else {
                /*
                 * Find the starting rule.  It can be either the first
                 * one, or the one after divert_rule if asked so.
                 */
                int skipto;

                mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL);
                if (mtag != NULL) {
                        divinfo = m_tag_data(mtag);
                        skipto = divinfo->skipto;
                } else {
                        skipto = 0;
                }

                f = ctx->ipfw_layer3_chain;
                if (args->eh == NULL && skipto != 0) {
                        /* No skipto during rule flushing */
                        if (ipfw_flushing)
                                return IP_FW_DENY;

                        if (skipto >= IPFW_DEFAULT_RULE)
                                return IP_FW_DENY; /* invalid */

                        while (f && f->rulenum <= skipto)
                                f = f->next;
                        if (f == NULL)	/* drop packet */
                                return IP_FW_DENY;
                } else if (ipfw_flushing) {
                        /* Rules are being flushed; skip to default rule */
                        f = ctx->ipfw_default_rule;
                }
        }
        if ((mtag = m_tag_find(m, PACKET_TAG_IPFW_DIVERT, NULL)) != NULL)
                m_tag_delete(m, mtag);
        /*
         * Now scan the rules, and parse microinstructions for each rule.
         */
        for (; f; f = f->next) {
                int l, cmdlen;
                ipfw_insn *cmd;
                int skip_or; /* skip rest of OR block */

again:
                if (ctx->ipfw_set_disable & (1 << f->set))
                        continue;

                skip_or = 0;
                for (l = f->cmd_len, cmd = f->cmd; l > 0;
                     l -= cmdlen, cmd += cmdlen) {
                        int match;

                        /*
                         * check_body is a jump target used when we find a
                         * CHECK_STATE, and need to jump to the body of
                         * the target rule.
                         */
check_body:
                        cmdlen = F_LEN(cmd);
                        /*
                         * An OR block (insn_1 || .. || insn_n) has the
                         * F_OR bit set in all but the last instruction.
                         * The first match will set "skip_or", and cause
                         * the following instructions to be skipped until
                         * past the one with the F_OR bit clear.
                         */
                        if (skip_or) { /* skip this instruction */
                                if ((cmd->len & F_OR) == 0)
                                        skip_or = 0; /* next one is good */
                                continue;
                        }
                        match = 0; /* set to 1 if we succeed */

                        switch (cmd->opcode) {
                        /*
                         * The first set of opcodes compares the packet's
                         * fields with some pattern, setting 'match' if a
                         * match is found.  At the end of the loop there is
                         * logic to deal with F_NOT and F_OR flags associated
                         * with the opcode.
                         */
                        case O_NOP:
                                match = 1;
                                break;

                        case O_FORWARD_MAC:
                                kprintf("ipfw: opcode %d unimplemented\n",
                                        cmd->opcode);
                                break;

                        case O_GID:
                        case O_UID:
                                /*
                                 * We only check offset == 0 && proto != 0,
                                 * as this ensures that we have an IPv4
                                 * packet with the ports info.
                                 */
                                if (offset != 0)
                                        break;

                                match = ipfw_match_uid(&args->f_id, oif,
                                        cmd->opcode,
                                        (uid_t)((ipfw_insn_u32 *)cmd)->d[0],
                                        &deny);
                                if (deny)
                                        return IP_FW_DENY;
                                break;

                        case O_RECV:
                                match = iface_match(m->m_pkthdr.rcvif,
                                                    (ipfw_insn_if *)cmd);
                                break;

                        case O_XMIT:
                                match = iface_match(oif, (ipfw_insn_if *)cmd);
                                break;

                        case O_VIA:
                                match = iface_match(oif ? oif :
                                    m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
                                break;

                        case O_MACADDR2:
                                if (args->eh != NULL) { /* have MAC header */
                                        uint32_t *want = (uint32_t *)
                                                ((ipfw_insn_mac *)cmd)->addr;
                                        uint32_t *mask = (uint32_t *)
                                                ((ipfw_insn_mac *)cmd)->mask;
                                        uint32_t *hdr = (uint32_t *)args->eh;

                                        match =
                                            (want[0] == (hdr[0] & mask[0]) &&
                                             want[1] == (hdr[1] & mask[1]) &&
                                             want[2] == (hdr[2] & mask[2]));
                                }
                                break;

                        case O_MAC_TYPE:
                                if (args->eh != NULL) {
                                        uint16_t t =
                                            ntohs(args->eh->ether_type);
                                        uint16_t *p =
                                            ((ipfw_insn_u16 *)cmd)->ports;
                                        int i;

                                        /* Special vlan handling */
                                        if (m->m_flags & M_VLANTAG)
                                                t = ETHERTYPE_VLAN;

                                        for (i = cmdlen - 1; !match && i > 0;
                                             i--, p += 2) {
                                                match =
                                                    (t >= p[0] && t <= p[1]);
                                        }
                                }
                                break;

                        case O_FRAG:
                                match = (hlen > 0 && offset != 0);
                                break;

                        case O_IN: /* "out" is "not in" */
                                match = (oif == NULL);
                                break;

                        case O_LAYER2:
                                match = (args->eh != NULL);
                                break;

                        case O_PROTO:
                                /*
                                 * We do not allow an arg of 0 so the
                                 * check of "proto" only suffices.
                                 */
                                match = (proto == cmd->arg1);
                                break;

                        case O_IP_SRC:
                                match = (hlen > 0 &&
                                    ((ipfw_insn_ip *)cmd)->addr.s_addr ==
                                    src_ip.s_addr);
                                break;

                        case O_IP_SRC_MASK:
                                match = (hlen > 0 &&
                                    ((ipfw_insn_ip *)cmd)->addr.s_addr ==
                                    (src_ip.s_addr &
                                     ((ipfw_insn_ip *)cmd)->mask.s_addr));
                                break;

                        case O_IP_SRC_ME:
                                if (hlen > 0) {
                                        struct ifnet *tif;

                                        tif = INADDR_TO_IFP(&src_ip);
                                        match = (tif != NULL);
                                }
                                break;

                        case O_IP_DST_SET:
                        case O_IP_SRC_SET:
                                if (hlen > 0) {
                                        uint32_t *d = (uint32_t *)(cmd + 1);
                                        uint32_t addr =
                                            cmd->opcode == O_IP_DST_SET ?
                                                args->f_id.dst_ip :
                                                args->f_id.src_ip;

                                        if (addr < d[0])
                                                break;
                                        addr -= d[0]; /* subtract base */
                                        match =
                                            (addr < cmd->arg1) &&
                                             (d[1 + (addr >> 5)] &
                                              (1 << (addr & 0x1f)));
                                }
                                break;

                        case O_IP_DST:
                                match = (hlen > 0 &&
                                    ((ipfw_insn_ip *)cmd)->addr.s_addr ==
                                    dst_ip.s_addr);
                                break;

                        case O_IP_DST_MASK:
                                match = (hlen > 0) &&
                                    (((ipfw_insn_ip *)cmd)->addr.s_addr ==
                                     (dst_ip.s_addr &
                                      ((ipfw_insn_ip *)cmd)->mask.s_addr));
                                break;

                        case O_IP_DST_ME:
                                if (hlen > 0) {
                                        struct ifnet *tif;

                                        tif = INADDR_TO_IFP(&dst_ip);
                                        match = (tif != NULL);
                                }
                                break;

                        case O_IP_SRCPORT:
                        case O_IP_DSTPORT:
                                /*
                                 * offset == 0 && proto != 0 is enough
                                 * to guarantee that we have an IPv4
                                 * packet with port info.
                                 */
                                if ((proto == IPPROTO_UDP ||
                                     proto == IPPROTO_TCP) &&
                                    offset == 0) {
                                        uint16_t x =
                                            (cmd->opcode == O_IP_SRCPORT) ?
                                                src_port : dst_port;
                                        uint16_t *p =
                                            ((ipfw_insn_u16 *)cmd)->ports;
                                        int i;

                                        for (i = cmdlen - 1; !match && i > 0;
                                             i--, p += 2) {
                                                match =
                                                    (x >= p[0] && x <= p[1]);
                                        }
                                }
                                break;

                        case O_ICMPTYPE:
                                match = (offset == 0 && proto == IPPROTO_ICMP &&
                                         icmptype_match(ip, (ipfw_insn_u32 *)cmd));
                                break;

                        case O_IPOPT:
                                match = (hlen > 0 && ipopts_match(ip, cmd));
                                break;

                        case O_IPVER:
                                match = (hlen > 0 && cmd->arg1 == ip->ip_v);
                                break;

                        case O_IPTTL:
                                match = (hlen > 0 && cmd->arg1 == ip->ip_ttl);
                                break;

                        case O_IPID:
                                match = (hlen > 0 &&
                                         cmd->arg1 == ntohs(ip->ip_id));
                                break;

                        case O_IPLEN:
                                match = (hlen > 0 && cmd->arg1 == ip_len);
                                break;

                        case O_IPPRECEDENCE:
                                match = (hlen > 0 &&
                                         (cmd->arg1 == (ip->ip_tos & 0xe0)));
                                break;

                        case O_IPTOS:
                                match = (hlen > 0 &&
                                         flags_match(cmd, ip->ip_tos));
                                break;

                        case O_TCPFLAGS:
                                match = (proto == IPPROTO_TCP && offset == 0 &&
                                         flags_match(cmd,
                                             L3HDR(struct tcphdr, ip)->th_flags));
                                break;

                        case O_TCPOPTS:
                                match = (proto == IPPROTO_TCP && offset == 0 &&
                                         tcpopts_match(ip, cmd));
                                break;

                        case O_TCPSEQ:
                                match = (proto == IPPROTO_TCP && offset == 0 &&
                                         ((ipfw_insn_u32 *)cmd)->d[0] ==
                                             L3HDR(struct tcphdr, ip)->th_seq);
                                break;

                        case O_TCPACK:
                                match = (proto == IPPROTO_TCP && offset == 0 &&
                                         ((ipfw_insn_u32 *)cmd)->d[0] ==
                                             L3HDR(struct tcphdr, ip)->th_ack);
                                break;

                        case O_TCPWIN:
                                match = (proto == IPPROTO_TCP && offset == 0 &&
                                         cmd->arg1 ==
                                             L3HDR(struct tcphdr, ip)->th_win);
                                break;

                        case O_ESTAB:
                                /* reject packets which have SYN only */
                                /* XXX should I also check for TH_ACK? */
                                match = (proto == IPPROTO_TCP && offset == 0 &&
                                         (L3HDR(struct tcphdr, ip)->th_flags &
                                          (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
                                break;

                        case O_LOG:
                                if (fw_verbose)
                                        ipfw_log(f, hlen, args->eh, m, oif);
                                match = 1;
                                break;

                        case O_PROB:
                                match = (krandom() <
                                         ((ipfw_insn_u32 *)cmd)->d[0]);
                                break;

                        /*
                         * The second set of opcodes represents 'actions',
                         * i.e. the terminal part of a rule once the packet
                         * matches all previous patterns.
                         * Typically there is only one action for each rule,
                         * and the opcode is stored at the end of the rule
                         * (but there are exceptions -- see below).
                         *
                         * In general, here we set retval and terminate the
                         * outer loop (would be a 'break 3' in some language,
                         * but we need to do a 'goto done').
                         *
                         * Exceptions:
                         * O_COUNT and O_SKIPTO actions:
                         *   instead of terminating, we jump to the next rule
                         *   ('goto next_rule', equivalent to a 'break 2'),
                         *   or to the SKIPTO target ('goto again' after
                         *   having set f, cmd and l), respectively.
                         *
                         * O_LIMIT and O_KEEP_STATE: these opcodes are
                         *   not real 'actions', and are stored right
                         *   before the 'action' part of the rule.
                         *   These opcodes try to install an entry in the
                         *   state tables; if successful, we continue with
                         *   the next opcode (match=1; break;), otherwise
                         *   the packet must be dropped ('goto done' after
                         *   setting retval).  If static rules are changed
                         *   during the state installation, the packet will
                         *   be dropped and the rule's stats will not be
                         *   updated ('return IP_FW_DENY').
                         *
                         * O_PROBE_STATE and O_CHECK_STATE: these opcodes
                         *   cause a lookup of the state table, and a jump
                         *   to the 'action' part of the parent rule
                         *   ('goto check_body') if an entry is found, or
                         *   (CHECK_STATE only) a jump to the next rule if
                         *   the entry is not found ('goto next_rule').
                         *   The result of the lookup is cached so that
                         *   further instances of these opcodes become
                         *   effectively NOPs.  If static rules are changed
                         *   during the state lookup, the packet will
                         *   be dropped and the rule's stats will not be
                         *   updated ('return IP_FW_DENY').
                         */
                        case O_LIMIT:
                        case O_KEEP_STATE:
                                if (!(f->rule_flags & IPFW_RULE_F_STATE)) {
                                        kprintf("%s rule (%d) is not ready "
                                                "on cpu%d\n",
                                                cmd->opcode == O_LIMIT ?
                                                "limit" : "keep state",
                                                f->rulenum, f->cpuid);
                                        goto next_rule;
                                }
                                if (install_state(f,
                                    (ipfw_insn_limit *)cmd, args, &deny)) {
                                        if (deny)
                                                return IP_FW_DENY;

                                        retval = IP_FW_DENY;
                                        goto done; /* error/limit violation */
                                }
                                if (deny)
                                        return IP_FW_DENY;
                                match = 1;
                                break;

                        case O_PROBE_STATE:
                        case O_CHECK_STATE:
                                /*
                                 * dynamic rules are checked at the first
                                 * keep-state or check-state occurrence,
                                 * with the result being stored in dyn_dir.
                                 * The compiler introduces a PROBE_STATE
                                 * instruction for us when we have a
                                 * KEEP_STATE (because PROBE_STATE needs
                                 * to be run first).
                                 */
                                if (dyn_dir == MATCH_UNKNOWN) {
                                        dyn_f = lookup_rule(&args->f_id,
                                                &dyn_dir,
                                                proto == IPPROTO_TCP ?
                                                L3HDR(struct tcphdr, ip) : NULL,
                                                ip_len, &deny);
                                        if (deny)
                                                return IP_FW_DENY;
                                        if (dyn_f != NULL) {
                                                /*
                                                 * Found a rule from a dynamic
                                                 * entry; jump to the 'action'
                                                 * part of the rule.
                                                 */
                                                f = dyn_f;
                                                cmd = ACTION_PTR(f);
                                                l = f->cmd_len - f->act_ofs;
                                                goto check_body;
                                        }
                                }
                                /*
                                 * Dynamic entry not found.  If CHECK_STATE,
                                 * skip to next rule, if PROBE_STATE just
                                 * ignore and continue with next opcode.
                                 */
                                if (cmd->opcode == O_CHECK_STATE)
                                        goto next_rule;
                                else if (!(f->rule_flags & IPFW_RULE_F_STATE))
                                        goto next_rule; /* not ready yet */
                                match = 1;
                                break;

                        case O_ACCEPT:
                                retval = IP_FW_PASS;	/* accept */
                                goto done;

                        case O_PIPE:
                        case O_QUEUE:
                                args->rule = f; /* report matching rule */
                                args->cookie = cmd->arg1;
                                retval = IP_FW_DUMMYNET;
                                goto done;

                        case O_DIVERT:
                        case O_TEE:
                                if (args->eh) /* not on layer 2 */
                                        break;

                                mtag = m_tag_get(PACKET_TAG_IPFW_DIVERT,
                                                 sizeof(*divinfo), MB_DONTWAIT);
                                if (mtag == NULL) {
                                        retval = IP_FW_DENY;
                                        goto done;
                                }
                                divinfo = m_tag_data(mtag);

                                divinfo->skipto = f->rulenum;
                                divinfo->port = cmd->arg1;
                                divinfo->tee = (cmd->opcode == O_TEE);
                                m_tag_prepend(m, mtag);

                                args->cookie = cmd->arg1;
                                retval = (cmd->opcode == O_DIVERT) ?
                                         IP_FW_DIVERT : IP_FW_TEE;
                                goto done;

                        case O_COUNT:
                        case O_SKIPTO:
                                f->pcnt++;	/* update stats */
                                f->bcnt += ip_len;
                                f->timestamp = time_second;
                                if (cmd->opcode == O_COUNT)
                                        goto next_rule;
                                /* handle skipto */
                                if (f->next_rule == NULL)
                                        lookup_next_rule(f);
                                f = f->next_rule;
                                goto again;

                        case O_REJECT:
                                /*
                                 * Drop the packet and send a reject notice
                                 * if the packet is not ICMP (or is an ICMP
                                 * query), and it is not multicast/broadcast.
                                 */
                                if (hlen > 0 &&
                                    (proto != IPPROTO_ICMP ||
                                     is_icmp_query(ip)) &&
                                    !(m->m_flags & (M_BCAST|M_MCAST)) &&
                                    !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
                                        /*
                                         * Update statistics before the
                                         * possibly blocking 'send_reject'
                                         */
                                        f->pcnt++;
                                        f->bcnt += ip_len;
                                        f->timestamp = time_second;

                                        send_reject(args, cmd->arg1,
                                                    offset, ip_len);
                                        m = args->m;

                                        /*
                                         * Return directly here, rule stats
                                         * have been updated above.
                                         */
                                        return IP_FW_DENY;
                                }
                                /* FALLTHROUGH */
                        case O_DENY:
                                retval = IP_FW_DENY;
                                goto done;

                        case O_FORWARD_IP:
                                if (args->eh) /* not valid on layer2 pkts */
                                        break;
                                if (!dyn_f || dyn_dir == MATCH_FORWARD) {
                                        struct sockaddr_in *sin;

                                        mtag = m_tag_get(PACKET_TAG_IPFORWARD,
                                                         sizeof(*sin),
                                                         MB_DONTWAIT);
                                        if (mtag == NULL) {
                                                retval = IP_FW_DENY;
                                                goto done;
                                        }
                                        sin = m_tag_data(mtag);

                                        /* Structure copy */
                                        *sin = ((ipfw_insn_sa *)cmd)->sa;

                                        m_tag_prepend(m, mtag);
                                        m->m_pkthdr.fw_flags |=
                                                IPFORWARD_MBUF_TAGGED;
                                }
                                retval = IP_FW_PASS;
                                goto done;

                        default:
                                panic("-- unknown opcode %d\n", cmd->opcode);
                        } /* end of switch() on opcodes */

                        if (cmd->len & F_NOT)
                                match = !match;

                        if (match) {
                                if (cmd->len & F_OR)
                                        skip_or = 1;
                        } else {
                                if (!(cmd->len & F_OR)) /* not an OR block, */
                                        break;		/* try next rule */
                        }
                } /* end of inner for, scan opcodes */

next_rule:;	/* try next rule */
        } /* end of outer for, scan rules */
        kprintf("+++ ipfw: ouch!, skip past end of rules, denying packet\n");
        return IP_FW_DENY;

done:
        /* Update statistics */
        f->pcnt++;
        f->bcnt += ip_len;
        f->timestamp = time_second;
        return retval;

pullup_failed:
        if (fw_verbose)
                kprintf("pullup failed\n");
        return IP_FW_DENY;
}
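/*
 * Compiled-rule layout example: a userland rule like
 *
 *	ipfw add allow ip from { 1.2.3.4 or 5.6.7.8 } to any
 *
 * compiles to two O_IP_SRC instructions with F_OR set on the first, so
 * a match on 1.2.3.4 sets skip_or and the 5.6.7.8 test is skipped,
 * exactly as described in the OR-block comment in ipfw_chk() above.
 */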

static void
ipfw_dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt *pkt;
	struct m_tag *mtag;
	ipfw_insn *cmd;
	const struct ipfw_flow_id *id;
	struct dn_flow_id *fid;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT);
	if (mtag == NULL) {
		m_freem(m);
		return;
	}
	m_tag_prepend(m, mtag);

	pkt = m_tag_data(mtag);
	bzero(pkt, sizeof(*pkt));

	cmd = fwa->rule->cmd + fwa->rule->act_ofs;
	if (cmd->opcode == O_LOG)
		cmd += F_LEN(cmd);
	KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
		("Rule is not PIPE or QUEUE, opcode %d\n", cmd->opcode));

	pkt->dn_flags = (dir & DN_FLAGS_DIR_MASK);
	pkt->ifp = fwa->oif;
	pkt->pipe_nr = pipe_nr;

	pkt->cpuid = mycpuid;
	pkt->msgport = curnetport;

	id = &fwa->f_id;
	fid = &pkt->id;
	fid->fid_dst_ip = id->dst_ip;
	fid->fid_src_ip = id->src_ip;
	fid->fid_dst_port = id->dst_port;
	fid->fid_src_port = id->src_port;
	fid->fid_proto = id->proto;
	fid->fid_flags = id->flags;

	ipfw_ref_rule(fwa->rule);
	pkt->dn_priv = fwa->rule;
	pkt->dn_unref_priv = ipfw_unref_rule;

	if (cmd->opcode == O_PIPE)
		pkt->dn_flags |= DN_FLAGS_IS_PIPE;

	m->m_pkthdr.fw_flags |= DUMMYNET_MBUF_TAGGED;
}
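
/*
 * Note on ownership: the dn_pkt tag constructed above carries its own
 * reference on the rule (ipfw_ref_rule/dn_unref_priv), presumably so the
 * rule can be unlinked from the chain while packets still sit in a pipe;
 * dummynet drops the reference when the packet finally leaves the queue.
 */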

/*
 * When a rule is added/deleted, clear the next_rule pointers in all rules.
 * These will be reconstructed on the fly as packets are matched.
 * Must be called at splimp().
 */
static void
ipfw_flush_rule_ptrs(struct ipfw_context *ctx)
{
	struct ip_fw *rule;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
		rule->next_rule = NULL;
}

static __inline void
ipfw_inc_static_count(struct ip_fw *rule)
{
	/* Static rule's counts are updated only on CPU0 */
	KKASSERT(mycpuid == 0);

	static_count++;
	static_ioc_len += IOC_RULESIZE(rule);
}

static __inline void
ipfw_dec_static_count(struct ip_fw *rule)
{
	int l = IOC_RULESIZE(rule);

	/* Static rule's counts are updated only on CPU0 */
	KKASSERT(mycpuid == 0);

	KASSERT(static_count > 0, ("invalid static count %u\n", static_count));
	static_count--;

	KASSERT(static_ioc_len >= l,
		("invalid static len %u\n", static_ioc_len));
	static_ioc_len -= l;
}

static void
ipfw_link_sibling(struct netmsg_ipfw *fwmsg, struct ip_fw *rule)
{
	if (fwmsg->sibling != NULL) {
		KKASSERT(mycpuid > 0 && fwmsg->sibling->cpuid == mycpuid - 1);
		fwmsg->sibling->sibling = rule;
	}
	fwmsg->sibling = rule;
}

static struct ip_fw *
ipfw_create_rule(const struct ipfw_ioc_rule *ioc_rule, struct ip_fw_stub *stub)
{
	struct ip_fw *rule;

	rule = kmalloc(RULESIZE(ioc_rule), M_IPFW, M_WAITOK | M_ZERO);

	rule->act_ofs = ioc_rule->act_ofs;
	rule->cmd_len = ioc_rule->cmd_len;
	rule->rulenum = ioc_rule->rulenum;
	rule->set = ioc_rule->set;
	rule->usr_flags = ioc_rule->usr_flags;

	bcopy(ioc_rule->cmd, rule->cmd, rule->cmd_len * 4 /* XXX */);

	rule->refcnt = 1;
	rule->cpuid = mycpuid;

	if (stub != NULL) {
		rule->stub = stub;
		stub->rule[mycpuid] = rule;
	}
	return rule;
}

static void
ipfw_add_rule_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	rule = ipfw_create_rule(fwmsg->ioc_rule, fwmsg->stub);

	/*
	 * Bump generation after ipfw_create_rule(),
	 * since this function is blocking
	 */
	ctx->ipfw_gen++;

	/*
	 * Insert rule into the pre-determined position
	 */
	if (fwmsg->prev_rule != NULL) {
		struct ip_fw *prev, *next;

		prev = fwmsg->prev_rule;
		KKASSERT(prev->cpuid == mycpuid);

		next = fwmsg->next_rule;
		KKASSERT(next->cpuid == mycpuid);

		rule->next = next;
		prev->next = rule;

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		fwmsg->prev_rule = prev->sibling;
		fwmsg->next_rule = next->sibling;
	} else {
		KKASSERT(fwmsg->next_rule == NULL);
		rule->next = ctx->ipfw_layer3_chain;
		ctx->ipfw_layer3_chain = rule;
	}

	/* Link rule CPU sibling */
	ipfw_link_sibling(fwmsg, rule);

	ipfw_flush_rule_ptrs(ctx);

	if (mycpuid == 0) {
		/* Statistics only need to be updated once */
		ipfw_inc_static_count(rule);

		/* Return the rule on CPU0 */
		nmsg->nm_lmsg.u.ms_resultp = rule;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static void
ipfw_enable_state_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct ip_fw *rule = lmsg->u.ms_resultp;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	ctx->ipfw_gen++;

	KKASSERT(rule->cpuid == mycpuid);
	KKASSERT(rule->stub != NULL && rule->stub->rule[mycpuid] == rule);
	KKASSERT(!(rule->rule_flags & IPFW_RULE_F_STATE));
	rule->rule_flags |= IPFW_RULE_F_STATE;
	lmsg->u.ms_resultp = rule->sibling;

	ifnet_forwardmsg(lmsg, mycpuid + 1);
}

/*
 * Add a new rule to the list.  Copy the rule into a malloc'ed area,
 * then possibly create a rule number and add the rule to the list.
 * Update the rule_number in the input struct so the caller knows
 * it as well.
 */
static void
ipfw_add_rule(struct ipfw_ioc_rule *ioc_rule, uint32_t rule_flags)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct netmsg_ipfw fwmsg;
	struct netmsg *nmsg;
	struct ip_fw *f, *prev, *rule;
	struct ip_fw_stub *stub;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	/*
	 * If rulenum is 0, find highest numbered rule before the
	 * default rule, and add rule number incremental step.
	 */
	if (ioc_rule->rulenum == 0) {
		int step = autoinc_step;

		KKASSERT(step >= IPFW_AUTOINC_STEP_MIN &&
			 step <= IPFW_AUTOINC_STEP_MAX);

		/*
		 * Locate the highest numbered rule before default
		 */
		for (f = ctx->ipfw_layer3_chain; f; f = f->next) {
			if (f->rulenum == IPFW_DEFAULT_RULE)
				break;
			ioc_rule->rulenum = f->rulenum;
		}
		if (ioc_rule->rulenum < IPFW_DEFAULT_RULE - step)
			ioc_rule->rulenum += step;
	}
	KASSERT(ioc_rule->rulenum != IPFW_DEFAULT_RULE &&
		ioc_rule->rulenum != 0,
		("invalid rule num %d\n", ioc_rule->rulenum));

	/*
	 * Now find the right place for the new rule in the sorted list.
	 */
	for (prev = NULL, f = ctx->ipfw_layer3_chain; f;
	     prev = f, f = f->next) {
		if (f->rulenum > ioc_rule->rulenum) {
			/* Found the location */
			break;
		}
	}
	KASSERT(f != NULL, ("no default rule?!\n"));

	if (rule_flags & IPFW_RULE_F_STATE) {
		int size;

		/*
		 * If the new rule will create states, then allocate
		 * a rule stub, which will be referenced by states
		 * (dyn rules)
		 */
		size = sizeof(*stub) + ((ncpus - 1) * sizeof(struct ip_fw *));
		stub = kmalloc(size, M_IPFW, M_WAITOK | M_ZERO);
	} else {
		stub = NULL;
	}

	/*
	 * Duplicate the rule onto each CPU.
	 * The rule duplicated on CPU0 will be returned.
	 */
	bzero(&fwmsg, sizeof(fwmsg));
	nmsg = &fwmsg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, 0, ipfw_add_rule_dispatch);
	fwmsg.ioc_rule = ioc_rule;
	fwmsg.stub = stub;
	fwmsg.prev_rule = prev;
	fwmsg.next_rule = prev == NULL ? NULL : f;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	KKASSERT(fwmsg.prev_rule == NULL && fwmsg.next_rule == NULL);

	rule = nmsg->nm_lmsg.u.ms_resultp;
	KKASSERT(rule != NULL && rule->cpuid == mycpuid);

	if (rule_flags & IPFW_RULE_F_STATE) {
		/*
		 * Turn on state flag, _after_ everything on all
		 * CPUs have been setup.
		 */
		bzero(nmsg, sizeof(*nmsg));
		netmsg_init(nmsg, &curthread->td_msgport, 0,
			    ipfw_enable_state_dispatch);
		nmsg->nm_lmsg.u.ms_resultp = rule;

		ifnet_domsg(&nmsg->nm_lmsg, 0);
		KKASSERT(nmsg->nm_lmsg.u.ms_resultp == NULL);
	}

	DPRINTF("++ installed rule %d, static count now %d\n",
		rule->rulenum, static_count);
}
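
/*
 * Recap of the installation sequence implemented above:
 * 1) the cfg thread computes the insertion position (prev/next) on CPU0;
 * 2) ipfw_add_rule_dispatch runs on each CPU in turn, duplicating the
 *    rule and stepping prev/next forward through the sibling links;
 * 3) for stateful rules a second message sweep sets IPFW_RULE_F_STATE,
 *    only after every CPU's duplication is in place.
 */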

/*
 * Free storage associated with a static rule (including derived
 * states/dyn rules).
 * The caller is in charge of clearing rule pointers to avoid
 * dangling pointers.
 * @return a pointer to the next entry.
 * Arguments are not checked, so they better be correct.
 * Must be called at splimp().
 */
static struct ip_fw *
ipfw_delete_rule(struct ipfw_context *ctx,
		 struct ip_fw *prev, struct ip_fw *rule)
{
	struct ip_fw *n;
	struct ip_fw_stub *stub;

	/* STATE flag should have been cleared before we reach here */
	KKASSERT((rule->rule_flags & IPFW_RULE_F_STATE) == 0);

	stub = rule->stub;

	n = rule->next;
	if (prev == NULL)
		ctx->ipfw_layer3_chain = n;
	else
		prev->next = n;

	/* Mark the rule as invalid */
	rule->rule_flags |= IPFW_RULE_F_INVALID;
	rule->next_rule = NULL;
	rule->sibling = NULL;
	rule->stub = NULL;
	/* Don't reset cpuid here; keep various assertion working */

	/* Statistics only need to be updated once */
	if (mycpuid == 0)
		ipfw_dec_static_count(rule);

	/* Free 'stub' on the last CPU */
	if (stub != NULL && mycpuid == ncpus - 1)
		kfree(stub, M_IPFW);

	/* Try to free this rule */
	ipfw_free_rule(rule);

	/* Return the next rule */
	return n;
}

static void
ipfw_flush_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	int kill_default = lmsg->u.ms_result;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	ipfw_flush_rule_ptrs(ctx); /* more efficient to do outside the loop */

	while ((rule = ctx->ipfw_layer3_chain) != NULL &&
	       (kill_default || rule->rulenum != IPFW_DEFAULT_RULE))
		ipfw_delete_rule(ctx, NULL, rule);

	ifnet_forwardmsg(lmsg, mycpuid + 1);
}

static void
ipfw_disable_rule_state_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	rule = dmsg->start_rule;
	if (rule != NULL) {
		KKASSERT(rule->cpuid == mycpuid);

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		dmsg->start_rule = rule->sibling;
	} else {
		KKASSERT(dmsg->rulenum == 0);
		rule = ctx->ipfw_layer3_chain;
	}

	while (rule != NULL) {
		if (dmsg->rulenum && rule->rulenum != dmsg->rulenum)
			break;
		rule->rule_flags &= ~IPFW_RULE_F_STATE;
		rule = rule->next;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

/*
 * Deletes all rules from a chain (including the default rule
 * if the second argument is set).
 * Must be called at splimp().
 */
static void
ipfw_flush(int kill_default)
{
	struct netmsg_del dmsg;
	struct netmsg nmsg;
	struct lwkt_msg *lmsg;
	struct ip_fw *rule;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	/*
	 * If 'kill_default' then caller has done the necessary
	 * msgport syncing; unnecessary to do it again.
	 */
	if (!kill_default) {
		/*
		 * Let ipfw_chk() know the rules are going to
		 * be flushed, so it could jump directly to
		 * the default rule.
		 */
		netmsg_service_sync();
	}

	/*
	 * Clear STATE flag on rules, so no more states (dyn rules)
	 * will be created.
	 */
	bzero(&dmsg, sizeof(dmsg));
	netmsg_init(&dmsg.nmsg, &curthread->td_msgport, 0,
		    ipfw_disable_rule_state_dispatch);
	ifnet_domsg(&dmsg.nmsg.nm_lmsg, 0);

	/*
	 * This actually nukes all states (dyn rules)
	 */
	lockmgr(&dyn_lock, LK_EXCLUSIVE);
	for (rule = ctx->ipfw_layer3_chain; rule != NULL; rule = rule->next) {
		/*
		 * Can't check IPFW_RULE_F_STATE here,
		 * since it has been cleared previously.
		 * Check 'stub' instead.
		 */
		if (rule->stub != NULL) {
			/* Force removal */
			remove_dyn_rule_locked(rule, NULL);
		}
	}
	lockmgr(&dyn_lock, LK_RELEASE);

	/*
	 * Press the 'flush' button
	 */
	bzero(&nmsg, sizeof(nmsg));
	netmsg_init(&nmsg, &curthread->td_msgport, 0, ipfw_flush_dispatch);
	lmsg = &nmsg.nm_lmsg;
	lmsg->u.ms_result = kill_default;
	ifnet_domsg(lmsg, 0);

	KASSERT(dyn_count == 0, ("%u dyn rule remains\n", dyn_count));

	if (kill_default) {
		if (ipfw_dyn_v != NULL) {
			/*
			 * Free dynamic rules(state) hash table
			 */
			kfree(ipfw_dyn_v, M_IPFW);
			ipfw_dyn_v = NULL;
		}

		KASSERT(static_count == 0,
			("%u static rules remains\n", static_count));
		KASSERT(static_ioc_len == 0,
			("%u bytes of static rules remains\n", static_ioc_len));
	} else {
		KASSERT(static_count == 1,
			("%u static rules remains\n", static_count));
		KASSERT(static_ioc_len == IOC_RULESIZE(ctx->ipfw_default_rule),
			("%u bytes of static rules remains, should be %lu\n",
			 static_ioc_len,
			 (u_long)IOC_RULESIZE(ctx->ipfw_default_rule)));
	}
}
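
/*
 * Ordering recap for the flush above: first clear IPFW_RULE_F_STATE on
 * every CPU so no new states are created, then remove existing states
 * under dyn_lock, and only then delete the per-CPU rule duplications
 * CPU by CPU through ipfw_flush_dispatch.
 */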

static void
ipfw_alt_delete_rule_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule, *prev;

	rule = dmsg->start_rule;
	KKASSERT(rule->cpuid == mycpuid);
	dmsg->start_rule = rule->sibling;

	prev = dmsg->prev_rule;
	if (prev != NULL) {
		KKASSERT(prev->cpuid == mycpuid);

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		dmsg->prev_rule = prev->sibling;
	}

	/*
	 * flush pointers outside the loop, then delete all matching
	 * rules.  'prev' remains the same throughout the cycle.
	 */
	ipfw_flush_rule_ptrs(ctx);
	while (rule && rule->rulenum == dmsg->rulenum)
		rule = ipfw_delete_rule(ctx, prev, rule);

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static int
ipfw_alt_delete_rule(uint16_t rulenum)
{
	struct ip_fw *prev, *rule, *f;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct netmsg_del dmsg;
	struct netmsg *nmsg;
	int state;

	/*
	 * Locate first rule to delete
	 */
	for (prev = NULL, rule = ctx->ipfw_layer3_chain;
	     rule && rule->rulenum < rulenum;
	     prev = rule, rule = rule->next)
		; /* EMPTY */
	if (rule->rulenum != rulenum)
		return EINVAL;

	/*
	 * Check whether any rules with the given number will
	 * create states.
	 */
	state = 0;
	for (f = rule; f && f->rulenum == rulenum; f = f->next) {
		if (f->rule_flags & IPFW_RULE_F_STATE) {
			state = 1;
			break;
		}
	}

	if (state) {
		/*
		 * Clear the STATE flag, so no more states will be
		 * created based the rules numbered 'rulenum'.
		 */
		bzero(&dmsg, sizeof(dmsg));
		nmsg = &dmsg.nmsg;
		netmsg_init(nmsg, &curthread->td_msgport, 0,
			    ipfw_disable_rule_state_dispatch);
		dmsg.start_rule = rule;
		dmsg.rulenum = rulenum;

		ifnet_domsg(&nmsg->nm_lmsg, 0);
		KKASSERT(dmsg.start_rule == NULL);

		/*
		 * Nuke all related states
		 */
		lockmgr(&dyn_lock, LK_EXCLUSIVE);
		for (f = rule; f && f->rulenum == rulenum; f = f->next) {
			/*
			 * Can't check IPFW_RULE_F_STATE here,
			 * since it has been cleared previously.
			 * Check 'stub' instead.
			 */
			if (f->stub != NULL) {
				/* Force removal */
				remove_dyn_rule_locked(f, NULL);
			}
		}
		lockmgr(&dyn_lock, LK_RELEASE);
	}

	/*
	 * Get rid of the rule duplications on all CPUs
	 */
	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, 0,
		    ipfw_alt_delete_rule_dispatch);
	dmsg.prev_rule = prev;
	dmsg.start_rule = rule;
	dmsg.rulenum = rulenum;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	KKASSERT(dmsg.prev_rule == NULL && dmsg.start_rule == NULL);
	return 0;
}

static void
ipfw_alt_delete_ruleset_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *prev, *rule;
	int del = 0;

	ipfw_flush_rule_ptrs(ctx);

	prev = NULL;
	rule = ctx->ipfw_layer3_chain;
	while (rule != NULL) {
		if (rule->set == dmsg->from_set) {
			rule = ipfw_delete_rule(ctx, prev, rule);
			del = 1;
		} else {
			prev = rule;
			rule = rule->next;
		}
	}
	KASSERT(del, ("no match set?!\n"));

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static void
ipfw_disable_ruleset_state_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;
	int cleared = 0;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
		if (rule->set == dmsg->from_set) {
			cleared = 1;
			rule->rule_flags &= ~IPFW_RULE_F_STATE;
		}
	}
	KASSERT(cleared, ("no match set?!\n"));

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static int
ipfw_alt_delete_ruleset(uint8_t set)
{
	struct netmsg_del dmsg;
	struct netmsg *nmsg;
	int state, del;
	struct ip_fw *rule;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	/*
	 * Check whether the 'set' exists.  If it exists,
	 * then check whether any rules within the set will
	 * try to create states.
	 */
	state = 0;
	del = 0;
	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
		if (rule->set == set) {
			del = 1;
			if (rule->rule_flags & IPFW_RULE_F_STATE) {
				state = 1;
				break;
			}
		}
	}
	if (!del)
		return 0; /* XXX EINVAL? */

	if (state) {
		/*
		 * Clear the STATE flag, so no more states will be
		 * created based the rules in this set.
		 */
		bzero(&dmsg, sizeof(dmsg));
		nmsg = &dmsg.nmsg;
		netmsg_init(nmsg, &curthread->td_msgport, 0,
			    ipfw_disable_ruleset_state_dispatch);
		dmsg.from_set = set;

		ifnet_domsg(&nmsg->nm_lmsg, 0);

		/*
		 * Nuke all related states
		 */
		lockmgr(&dyn_lock, LK_EXCLUSIVE);
		for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
			if (rule->set != set)
				continue;

			/*
			 * Can't check IPFW_RULE_F_STATE here,
			 * since it has been cleared previously.
			 * Check 'stub' instead.
			 */
			if (rule->stub != NULL) {
				/* Force removal */
				remove_dyn_rule_locked(rule, NULL);
			}
		}
		lockmgr(&dyn_lock, LK_RELEASE);
	}

	/*
	 * Delete the set on all CPUs
	 */
	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, 0,
		    ipfw_alt_delete_ruleset_dispatch);
	dmsg.from_set = set;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	return 0;
}

static void
ipfw_alt_move_rule_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ip_fw *rule;

	rule = dmsg->start_rule;
	KKASSERT(rule->cpuid == mycpuid);

	/*
	 * Move to the position on the next CPU
	 * before the msg is forwarded.
	 */
	dmsg->start_rule = rule->sibling;

	while (rule && rule->rulenum <= dmsg->rulenum) {
		if (rule->rulenum == dmsg->rulenum)
			rule->set = dmsg->to_set;
		rule = rule->next;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static int
ipfw_alt_move_rule(uint16_t rulenum, uint8_t set)
{
	struct netmsg_del dmsg;
	struct netmsg *nmsg;
	struct ip_fw *rule;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	/*
	 * Locate first rule to move
	 */
	for (rule = ctx->ipfw_layer3_chain; rule && rule->rulenum <= rulenum;
	     rule = rule->next) {
		if (rule->rulenum == rulenum && rule->set != set)
			break;
	}
	if (rule == NULL || rule->rulenum > rulenum)
		return 0; /* XXX error? */

	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, 0,
		    ipfw_alt_move_rule_dispatch);
	dmsg.start_rule = rule;
	dmsg.rulenum = rulenum;
	dmsg.to_set = set;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	KKASSERT(dmsg.start_rule == NULL);
	return 0;
}

static void
ipfw_alt_move_ruleset_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
		if (rule->set == dmsg->from_set)
			rule->set = dmsg->to_set;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static int
ipfw_alt_move_ruleset(uint8_t from_set, uint8_t to_set)
{
	struct netmsg_del dmsg;
	struct netmsg *nmsg;

	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, 0,
		    ipfw_alt_move_ruleset_dispatch);
	dmsg.from_set = from_set;
	dmsg.to_set = to_set;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	return 0;
}

static void
ipfw_alt_swap_ruleset_dispatch(struct netmsg *nmsg)
{
	struct netmsg_del *dmsg = (struct netmsg_del *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
		if (rule->set == dmsg->from_set)
			rule->set = dmsg->to_set;
		else if (rule->set == dmsg->to_set)
			rule->set = dmsg->from_set;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static int
ipfw_alt_swap_ruleset(uint8_t set1, uint8_t set2)
{
	struct netmsg_del dmsg;
	struct netmsg *nmsg;

	bzero(&dmsg, sizeof(dmsg));
	nmsg = &dmsg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, 0,
		    ipfw_alt_swap_ruleset_dispatch);
	dmsg.from_set = set1;
	dmsg.to_set = set2;

	ifnet_domsg(&nmsg->nm_lmsg, 0);
	return 0;
}

/*
 * Remove all rules with given number, and also do set manipulation.
 *
 * The argument is an uint32_t.  The low 16 bits are the rule or set number,
 * the next 8 bits are the new set, the top 8 bits are the command:
 *
 *	0	delete rules with given number
 *	1	delete rules with given set number
 *	2	move rules with given number to new set
 *	3	move rules with given set number to new set
 *	4	swap sets with given numbers
 */
static int
ipfw_ctl_alter(uint32_t arg)
{
	uint16_t rulenum;
	uint8_t cmd, new_set;
	int error = 0;

	rulenum = arg & 0xffff;
	cmd = (arg >> 24) & 0xff;
	new_set = (arg >> 16) & 0xff;

	if (cmd > 4)
		return EINVAL;
	if (new_set >= IPFW_DEFAULT_SET)
		return EINVAL;
	if (cmd == 0 || cmd == 2) {
		if (rulenum == IPFW_DEFAULT_RULE)
			return EINVAL;
	} else {
		if (rulenum >= IPFW_DEFAULT_SET)
			return EINVAL;
	}

	switch (cmd) {
	case 0:	/* delete rules with given number */
		error = ipfw_alt_delete_rule(rulenum);
		break;

	case 1:	/* delete all rules with given set number */
		error = ipfw_alt_delete_ruleset(rulenum);
		break;

	case 2:	/* move rules with given number to new set */
		error = ipfw_alt_move_rule(rulenum, new_set);
		break;

	case 3: /* move rules with given set number to new set */
		error = ipfw_alt_move_ruleset(rulenum, new_set);
		break;

	case 4: /* swap two sets */
		error = ipfw_alt_swap_ruleset(rulenum, new_set);
		break;
	}
	return error;
}
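
/*
 * A minimal userland-side sketch of how the 32-bit argument decoded above
 * could be packed; the helper name is hypothetical and the block is
 * compiled out.
 */
#if 0
static uint32_t
ipfw_pack_alter_arg(uint8_t cmd, uint8_t new_set, uint16_t rulenum)
{
	/* top 8 bits: command, next 8 bits: new set, low 16 bits: number */
	return ((uint32_t)cmd << 24) | ((uint32_t)new_set << 16) | rulenum;
}
#endif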

/*
 * Clear counters for a specific rule.
 */
static void
clear_counters(struct ip_fw *rule, int log_only)
{
	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);

	if (log_only == 0) {
		rule->bcnt = rule->pcnt = 0;
		rule->timestamp = 0;
	}
	if (l->o.opcode == O_LOG)
		l->log_left = l->max_log;
}

static void
ipfw_zero_entry_dispatch(struct netmsg *nmsg)
{
	struct netmsg_zent *zmsg = (struct netmsg_zent *)nmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;

	if (zmsg->rulenum == 0) {
		KKASSERT(zmsg->start_rule == NULL);

		ctx->ipfw_norule_counter = 0;
		for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
			clear_counters(rule, zmsg->log_only);
	} else {
		struct ip_fw *start = zmsg->start_rule;

		KKASSERT(start->cpuid == mycpuid);
		KKASSERT(start->rulenum == zmsg->rulenum);

		/*
		 * We can have multiple rules with the same number, so we
		 * need to clear them all.
		 */
		for (rule = start; rule && rule->rulenum == zmsg->rulenum;
		     rule = rule->next)
			clear_counters(rule, zmsg->log_only);

		/*
		 * Move to the position on the next CPU
		 * before the msg is forwarded.
		 */
		zmsg->start_rule = start->sibling;
	}

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

/*
 * Reset some or all counters on firewall rules.
 * @arg frwl is null to clear all entries, or contains a specific
 * rule number.
 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
 */
static int
ipfw_ctl_zero_entry(int rulenum, int log_only)
{
	struct netmsg_zent zmsg;
	struct netmsg *nmsg;
	const char *msg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	bzero(&zmsg, sizeof(zmsg));
	nmsg = &zmsg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, 0, ipfw_zero_entry_dispatch);
	zmsg.log_only = log_only;

	if (rulenum == 0) {
		msg = log_only ? "ipfw: All logging counts reset.\n"
			       : "ipfw: Accounting cleared.\n";
	} else {
		struct ip_fw *rule;

		/*
		 * Locate the first rule with 'rulenum'
		 */
		for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next) {
			if (rule->rulenum == rulenum)
				break;
		}
		if (rule == NULL) /* we did not find any matching rules */
			return EINVAL;
		zmsg.start_rule = rule;
		zmsg.rulenum = rulenum;

		msg = log_only ? "ipfw: Entry %d logging count reset.\n"
			       : "ipfw: Entry %d cleared.\n";
	}
	ifnet_domsg(&nmsg->nm_lmsg, 0);
	KKASSERT(zmsg.start_rule == NULL);

	if (fw_verbose)
		log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
	return 0;
}

/*
 * Check validity of the structure before insert.
 * Fortunately rules are simple, so this mostly need to check rule sizes.
 */
static int
ipfw_check_ioc_rule(struct ipfw_ioc_rule *rule, int size, uint32_t *rule_flags)
{
	int l, cmdlen = 0;
	int have_action = 0;
	ipfw_insn *cmd;

	*rule_flags = 0;

	/* Check for valid size */
	if (size < sizeof(*rule)) {
		kprintf("ipfw: rule too short\n");
		return EINVAL;
	}
	l = IOC_RULESIZE(rule);
	if (size != l) {
		kprintf("ipfw: size mismatch (have %d want %d)\n", size, l);
		return EINVAL;
	}

	/* Check rule number */
	if (rule->rulenum == IPFW_DEFAULT_RULE) {
		kprintf("ipfw: invalid rule number\n");
		return EINVAL;
	}

	/*
	 * Now go for the individual checks. Very simple ones, basically only
	 * instruction sizes.
	 */
	for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
	     l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);
		if (cmdlen > l) {
			kprintf("ipfw: opcode %d size truncated\n",
				cmd->opcode);
			return EINVAL;
		}

		DPRINTF("ipfw: opcode %d\n", cmd->opcode);

		if (cmd->opcode == O_KEEP_STATE || cmd->opcode == O_LIMIT) {
			/* This rule will create states */
			*rule_flags |= IPFW_RULE_F_STATE;
		}

		switch (cmd->opcode) {
		case O_NOP:
		case O_PROBE_STATE:
		case O_KEEP_STATE:
		case O_PROTO:
		case O_IP_SRC_ME:
		case O_IP_DST_ME:
		case O_LAYER2:
		case O_IN:
		case O_FRAG:
		case O_IPOPT:
		case O_IPLEN:
		case O_IPID:
		case O_IPTOS:
		case O_IPPRECEDENCE:
		case O_IPTTL:
		case O_IPVER:
		case O_TCPWIN:
		case O_TCPFLAGS:
		case O_TCPOPTS:
		case O_ESTAB:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
			break;

		case O_UID:
		case O_GID:
		case O_IP_SRC:
		case O_IP_DST:
		case O_TCPSEQ:
		case O_TCPACK:
		case O_PROB:
		case O_ICMPTYPE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
				goto bad_size;
			break;

		case O_LIMIT:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
				goto bad_size;
			break;

		case O_LOG:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
				goto bad_size;

			((ipfw_insn_log *)cmd)->log_left =
				((ipfw_insn_log *)cmd)->max_log;
			break;

		case O_IP_SRC_MASK:
		case O_IP_DST_MASK:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_ip))
				goto bad_size;
			if (((ipfw_insn_ip *)cmd)->mask.s_addr == 0) {
				kprintf("ipfw: opcode %d, useless rule\n",
					cmd->opcode);
				return EINVAL;
			}
			break;

		case O_IP_SRC_SET:
		case O_IP_DST_SET:
			if (cmd->arg1 == 0 || cmd->arg1 > 256) {
				kprintf("ipfw: invalid set size %d\n",
					cmd->arg1);
				return EINVAL;
			}
			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
			    (cmd->arg1 + 31) / 32)
				goto bad_size;
			break;

		case O_MACADDR2:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
				goto bad_size;
			break;

		case O_MAC_TYPE:
		case O_IP_SRCPORT:
		case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
			if (cmdlen < 2 || cmdlen > 31)
				goto bad_size;
			break;

		case O_RECV:
		case O_XMIT:
		case O_VIA:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
				goto bad_size;
			break;

		case O_PIPE:
		case O_QUEUE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
				goto bad_size;
			goto check_action;

		case O_FORWARD_IP:
			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) {
				goto bad_size;
			} else {
				in_addr_t fwd_addr;

				fwd_addr = ((ipfw_insn_sa *)cmd)->
					   sa.sin_addr.s_addr;
				if (IN_MULTICAST(ntohl(fwd_addr))) {
					kprintf("ipfw: try forwarding to "
						"multicast address\n");
					return EINVAL;
				}
			}
			goto check_action;

		case O_FORWARD_MAC: /* XXX not implemented yet */
		case O_CHECK_STATE:
		case O_COUNT:
		case O_ACCEPT:
		case O_DENY:
		case O_REJECT:
		case O_SKIPTO:
		case O_DIVERT:
		case O_TEE:
			if (cmdlen != F_INSN_SIZE(ipfw_insn))
				goto bad_size;
check_action:
			if (have_action) {
				kprintf("ipfw: opcode %d, multiple actions"
					" not allowed\n", cmd->opcode);
				return EINVAL;
			}
			have_action = 1;
			if (l != cmdlen) {
				kprintf("ipfw: opcode %d, action must be"
					" last opcode\n", cmd->opcode);
				return EINVAL;
			}
			break;

		default:
			kprintf("ipfw: opcode %d, unknown opcode\n",
				cmd->opcode);
			return EINVAL;
		}
	}
	if (have_action == 0) {
		kprintf("ipfw: missing action\n");
		return EINVAL;
	}
	return 0;

bad_size:
	kprintf("ipfw: opcode %d size %d wrong\n",
		cmd->opcode, cmdlen);
	return EINVAL;
}

static int
ipfw_ctl_add_rule(struct sockopt *sopt)
{
	struct ipfw_ioc_rule *ioc_rule;
	size_t size;
	uint32_t rule_flags;
	int error;

	size = sopt->sopt_valsize;
	if (size > (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX) ||
	    size < sizeof(*ioc_rule)) {
		return EINVAL;
	}
	if (size != (sizeof(uint32_t) * IPFW_RULE_SIZE_MAX)) {
		sopt->sopt_val = krealloc(sopt->sopt_val, sizeof(uint32_t) *
					  IPFW_RULE_SIZE_MAX, M_TEMP, M_WAITOK);
	}
	ioc_rule = sopt->sopt_val;

	error = ipfw_check_ioc_rule(ioc_rule, size, &rule_flags);
	if (error)
		return error;

	ipfw_add_rule(ioc_rule, rule_flags);

	if (sopt->sopt_dir == SOPT_GET)
		sopt->sopt_valsize = IOC_RULESIZE(ioc_rule);
	return 0;
}

static void *
ipfw_copy_rule(const struct ip_fw *rule, struct ipfw_ioc_rule *ioc_rule)
{
	const struct ip_fw *sibling;
	int i;

	KKASSERT(rule->cpuid == IPFW_CFGCPUID);

	ioc_rule->act_ofs = rule->act_ofs;
	ioc_rule->cmd_len = rule->cmd_len;
	ioc_rule->rulenum = rule->rulenum;
	ioc_rule->set = rule->set;
	ioc_rule->usr_flags = rule->usr_flags;

	ioc_rule->set_disable = ipfw_ctx[mycpuid]->ipfw_set_disable;
	ioc_rule->static_count = static_count;
	ioc_rule->static_len = static_ioc_len;

	/*
	 * Visit (read-only) all of the rule's duplications to get
	 * the necessary statistics
	 */
	i = 0;
	ioc_rule->pcnt = 0;
	ioc_rule->bcnt = 0;
	ioc_rule->timestamp = 0;
	for (sibling = rule; sibling != NULL; sibling = sibling->sibling) {
		ioc_rule->pcnt += sibling->pcnt;
		ioc_rule->bcnt += sibling->bcnt;
		if (sibling->timestamp > ioc_rule->timestamp)
			ioc_rule->timestamp = sibling->timestamp;
		++i;
	}
	KASSERT(i == ncpus, ("static rule is not duplicated on every cpu\n"));

	bcopy(rule->cmd, ioc_rule->cmd, ioc_rule->cmd_len * 4 /* XXX */);

	return ((uint8_t *)ioc_rule + IOC_RULESIZE(ioc_rule));
}

static void
ipfw_copy_state(const ipfw_dyn_rule *dyn_rule,
		struct ipfw_ioc_state *ioc_state)
{
	const struct ipfw_flow_id *id;
	struct ipfw_ioc_flowid *ioc_id;

	ioc_state->expire = TIME_LEQ(dyn_rule->expire, time_second) ?
			    0 : dyn_rule->expire - time_second;
	ioc_state->pcnt = dyn_rule->pcnt;
	ioc_state->bcnt = dyn_rule->bcnt;

	ioc_state->dyn_type = dyn_rule->dyn_type;
	ioc_state->count = dyn_rule->count;

	ioc_state->rulenum = dyn_rule->stub->rule[mycpuid]->rulenum;

	id = &dyn_rule->id;
	ioc_id = &ioc_state->id;

	ioc_id->type = ETHERTYPE_IP;
	ioc_id->u.ip.dst_ip = id->dst_ip;
	ioc_id->u.ip.src_ip = id->src_ip;
	ioc_id->u.ip.dst_port = id->dst_port;
	ioc_id->u.ip.src_port = id->src_port;
	ioc_id->u.ip.proto = id->proto;
}

static int
ipfw_ctl_get_rules(struct sockopt *sopt)
{
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];
	struct ip_fw *rule;
	void *bp;
	size_t size;
	uint32_t dcount = 0;

	/*
	 * pass up a copy of the current rules. Static rules
	 * come first (the last of which has number IPFW_DEFAULT_RULE),
	 * followed by a possibly empty list of dynamic rule.
	 */

	size = static_ioc_len;	/* size of static rules */
	if (ipfw_dyn_v) {	/* add size of dyn.rules */
		dcount = dyn_count;
		size += dcount * sizeof(struct ipfw_ioc_state);
	}

	if (sopt->sopt_valsize < size) {
		/* short length, no need to return incomplete rules */
		/* XXX: if superuser, no need to zero buffer */
		bzero(sopt->sopt_val, sopt->sopt_valsize);
		return 0;
	}
	bp = sopt->sopt_val;

	for (rule = ctx->ipfw_layer3_chain; rule; rule = rule->next)
		bp = ipfw_copy_rule(rule, bp);

	if (ipfw_dyn_v && dcount != 0) {
		struct ipfw_ioc_state *ioc_state = bp;
		uint32_t dcount2 = 0;
		size_t old_size = size;
		int i;

		lockmgr(&dyn_lock, LK_SHARED);

		/* Check 'ipfw_dyn_v' again with lock held */
		if (ipfw_dyn_v == NULL)
			goto skip;

		for (i = 0; i < curr_dyn_buckets; i++) {
			ipfw_dyn_rule *p;

			/*
			 * The # of dynamic rules may have grown after the
			 * snapshot of 'dyn_count' was taken, so we will have
			 * to check 'dcount' (snapshot of dyn_count) here to
			 * make sure that we don't overflow the pre-allocated
			 * buffer.
			 */
			for (p = ipfw_dyn_v[i]; p != NULL && dcount != 0;
			     p = p->next, ioc_state++, dcount--, dcount2++)
				ipfw_copy_state(p, ioc_state);
		}
skip:
		lockmgr(&dyn_lock, LK_RELEASE);

		/*
		 * The # of dynamic rules may have shrunk after the
		 * snapshot of 'dyn_count' was taken.  To give user a
		 * correct dynamic rule count, we use the 'dcount2'
		 * calculated above (with shared lockmgr lock held).
		 */
		size = static_ioc_len +
		       (dcount2 * sizeof(struct ipfw_ioc_state));
		KKASSERT(size <= old_size);
	}

	sopt->sopt_valsize = size;
	return 0;
}

static void
ipfw_set_disable_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct ipfw_context *ctx = ipfw_ctx[mycpuid];

	ctx->ipfw_set_disable = lmsg->u.ms_result32;

	ifnet_forwardmsg(lmsg, mycpuid + 1);
}

static void
ipfw_ctl_set_disable(uint32_t disable, uint32_t enable)
{
	struct netmsg nmsg;
	struct lwkt_msg *lmsg;
	uint32_t set_disable;

	/* IPFW_DEFAULT_SET is always enabled */
	enable |= (1 << IPFW_DEFAULT_SET);
	set_disable = (ipfw_ctx[mycpuid]->ipfw_set_disable | disable) & ~enable;

	bzero(&nmsg, sizeof(nmsg));
	netmsg_init(&nmsg, &curthread->td_msgport, 0, ipfw_set_disable_dispatch);
	lmsg = &nmsg.nm_lmsg;
	lmsg->u.ms_result32 = set_disable;

	ifnet_domsg(lmsg, 0);
}

/*
 * {set|get}sockopt parser.
 */
static int
ipfw_ctl(struct sockopt *sopt)
{
	int error, rulenum;
	uint32_t *masks;
	size_t size;

	error = 0;

	switch (sopt->sopt_name) {
	case IP_FW_GET:
		error = ipfw_ctl_get_rules(sopt);
		break;

	case IP_FW_FLUSH:
		ipfw_flush(0 /* keep default rule */);
		break;

	case IP_FW_ADD:
		error = ipfw_ctl_add_rule(sopt);
		break;

	case IP_FW_DEL:
		/*
		 * IP_FW_DEL is used for deleting single rules or sets,
		 * and (ab)used to atomically manipulate sets.
		 * Argument size is used to distinguish between the two:
		 *    sizeof(uint32_t)
		 *	delete single rule or set of rules,
		 *	or reassign rules (or sets) to a different set.
		 *    2 * sizeof(uint32_t)
		 *	atomic disable/enable sets.
		 *	first uint32_t contains sets to be disabled,
		 *	second uint32_t contains sets to be enabled.
		 */
		masks = sopt->sopt_val;
		size = sopt->sopt_valsize;
		if (size == sizeof(*masks)) {
			/*
			 * Delete or reassign static rule
			 */
			error = ipfw_ctl_alter(masks[0]);
		} else if (size == (2 * sizeof(*masks))) {
			/*
			 * Set enable/disable
			 */
			ipfw_ctl_set_disable(masks[0], masks[1]);
		} else {
			error = EINVAL;
		}
		break;

	case IP_FW_ZERO:
	case IP_FW_RESETLOG: /* argument is an int, the rule number */
		rulenum = 0;
		if (sopt->sopt_val != 0) {
			error = soopt_to_kbuf(sopt, &rulenum,
					      sizeof(int), sizeof(int));
			if (error)
				break;
		}
		error = ipfw_ctl_zero_entry(rulenum,
			sopt->sopt_name == IP_FW_RESETLOG);
		break;

	default:
		kprintf("ipfw_ctl invalid option %d\n", sopt->sopt_name);
		error = EINVAL;
	}
	return error;
}
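
/*
 * A hedged userland sketch (not part of this module) of the two IP_FW_DEL
 * forms parsed above; the raw socket 's' is assumed to be set up already.
 */
#if 0
	uint32_t masks[2];

	/* one uint32_t: delete rule 1000 (command 0 in the top byte) */
	masks[0] = 1000;
	setsockopt(s, IPPROTO_IP, IP_FW_DEL, masks, sizeof(masks[0]));

	/* two uint32_t's: disable set 1, enable set 2 atomically */
	masks[0] = 1 << 1;
	masks[1] = 1 << 2;
	setsockopt(s, IPPROTO_IP, IP_FW_DEL, masks, sizeof(masks));
#endif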

/*
 * This procedure is only used to handle keepalives. It is invoked
 * every dyn_keepalive_period
 */
static void
ipfw_tick_dispatch(struct netmsg *nmsg)
{
	time_t keep_alive;
	uint32_t gen;
	int i;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);
	KKASSERT(IPFW_LOADED);

	/* Reply ASAP */
	lwkt_replymsg(&nmsg->nm_lmsg, 0);

	if (ipfw_dyn_v == NULL || dyn_count == 0)
		goto done;

	keep_alive = time_second;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);
again:
	if (ipfw_dyn_v == NULL || dyn_count == 0) {
		lockmgr(&dyn_lock, LK_RELEASE);
		goto done;
	}
	gen = dyn_buckets_gen;

	for (i = 0; i < curr_dyn_buckets; i++) {
		ipfw_dyn_rule *q, *prev;

		for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) {
			uint32_t ack_rev, ack_fwd;
			struct ipfw_flow_id id;

			if (q->dyn_type == O_LIMIT_PARENT)
				goto next;

			if (TIME_LEQ(q->expire, time_second)) {
				/* State expired */
				UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
				continue;
			}

			/*
			 * Keep alive processing
			 */

			if (!dyn_keepalive)
				goto next;
			if (q->id.proto != IPPROTO_TCP)
				goto next;
			if ((q->state & BOTH_SYN) != BOTH_SYN)
				goto next;
			if (TIME_LEQ(time_second + dyn_keepalive_interval,
			    q->expire))
				goto next;	/* too early */
			if (q->keep_alive == keep_alive)
				goto next;	/* already done */

			/*
			 * Save necessary information, so that they could
			 * survive after possible blocking in send_pkt()
			 */
			id = q->id;
			ack_rev = q->ack_rev;
			ack_fwd = q->ack_fwd;

			/* Sending has been started */
			q->keep_alive = keep_alive;

			/* Release lock to avoid possible dead lock */
			lockmgr(&dyn_lock, LK_RELEASE);
			send_pkt(&id, ack_rev - 1, ack_fwd, TH_SYN);
			send_pkt(&id, ack_fwd - 1, ack_rev, 0);
			lockmgr(&dyn_lock, LK_EXCLUSIVE);

			if (gen != dyn_buckets_gen) {
				/*
				 * Dyn bucket array has been changed during
				 * the above two sending; reiterate.
				 */
				goto again;
			}
next:
			prev = q;
			q = q->next;
		}
	}
	lockmgr(&dyn_lock, LK_RELEASE);
done:
	callout_reset(&ipfw_timeout_h, dyn_keepalive_period * hz,
		      ipfw_tick, NULL);
}

/*
 * This procedure is only used to handle keepalives. It is invoked
 * every dyn_keepalive_period
 */
static void
ipfw_tick(void *dummy __unused)
{
	struct lwkt_msg *lmsg = &ipfw_timeout_netmsg.nm_lmsg;

	KKASSERT(mycpuid == IPFW_CFGCPUID);

	KKASSERT(lmsg->ms_flags & MSGF_DONE);
	lwkt_sendmsg(IPFW_CFGPORT, lmsg);
	/* ipfw_timeout_netmsg's handler resets this callout */
}
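
/*
 * ipfw_tick() runs in callout context, so it only fires the pre-allocated
 * netmsg at IPFW_CFGPORT; the real keepalive work happens in
 * ipfw_tick_dispatch() above.  The MSGF_DONE assertion holds because the
 * single message is reused and must have been replied to before the
 * callout can fire again.
 */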

static int
ipfw_check_in(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
{
	struct ip_fw_args args;
	struct mbuf *m = *m0;
	struct m_tag *mtag;
	int tee = 0, error = 0, ret;

	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
		/* Extract info from dummynet tag */
		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
		KKASSERT(mtag != NULL);
		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
		KKASSERT(args.rule != NULL);

		m_tag_delete(m, mtag);
		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
	} else {
		args.rule = NULL;
	}

	args.eh = NULL;
	args.oif = NULL;
	args.m = m;
	ret = ipfw_chk(&args);
	m = args.m;

	if (m == NULL) {
		error = EACCES;
		goto back;
	}

	switch (ret) {
	case IP_FW_PASS:
		break;

	case IP_FW_DENY:
		m_freem(m);
		m = NULL;
		error = EACCES;
		break;

	case IP_FW_DUMMYNET:
		/* Send packet to the appropriate pipe */
		ipfw_dummynet_io(m, args.cookie, DN_TO_IP_IN, &args);
		break;

	case IP_FW_TEE:
		tee = 1;
		/* FALLTHROUGH */

	case IP_FW_DIVERT:
		if (ip_divert_p != NULL) {
			m = ip_divert_p(m, tee, 1);
		} else {
			m_freem(m);
			m = NULL;
			/* not sure this is the right error msg */
			error = EACCES;
		}
		break;

	default:
		panic("unknown ipfw return value: %d\n", ret);
	}
back:
	*m0 = m;
	return error;
}

static int
ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir)
{
	struct ip_fw_args args;
	struct mbuf *m = *m0;
	struct m_tag *mtag;
	int tee = 0, error = 0, ret;

	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
		/* Extract info from dummynet tag */
		mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
		KKASSERT(mtag != NULL);
		args.rule = ((struct dn_pkt *)m_tag_data(mtag))->dn_priv;
		KKASSERT(args.rule != NULL);

		m_tag_delete(m, mtag);
		m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
	} else {
		args.rule = NULL;
	}

	args.eh = NULL;
	args.m = m;
	args.oif = ifp;
	ret = ipfw_chk(&args);
	m = args.m;

	if (m == NULL) {
		error = EACCES;
		goto back;
	}

	switch (ret) {
	case IP_FW_PASS:
		break;

	case IP_FW_DENY:
		m_freem(m);
		m = NULL;
		error = EACCES;
		break;

	case IP_FW_DUMMYNET:
		ipfw_dummynet_io(m, args.cookie, DN_TO_IP_OUT, &args);
		break;

	case IP_FW_TEE:
		tee = 1;
		/* FALLTHROUGH */

	case IP_FW_DIVERT:
		if (ip_divert_p != NULL) {
			m = ip_divert_p(m, tee, 0);
		} else {
			m_freem(m);
			m = NULL;
			/* not sure this is the right error msg */
			error = EACCES;
		}
		break;

	default:
		panic("unknown ipfw return value: %d\n", ret);
	}
back:
	*m0 = m;
	return error;
}

static void
ipfw_hook(void)
{
	struct pfil_head *pfh;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh == NULL)
		return;

	pfil_add_hook(ipfw_check_in, NULL, PFIL_IN | PFIL_MPSAFE, pfh);
	pfil_add_hook(ipfw_check_out, NULL, PFIL_OUT | PFIL_MPSAFE, pfh);
}

static void
ipfw_dehook(void)
{
	struct pfil_head *pfh;

	IPFW_ASSERT_CFGPORT(&curthread->td_msgport);

	pfh = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh == NULL)
		return;

	pfil_remove_hook(ipfw_check_in, NULL, PFIL_IN, pfh);
	pfil_remove_hook(ipfw_check_out, NULL, PFIL_OUT, pfh);
}

static void
ipfw_sysctl_enable_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	int enable = lmsg->u.ms_result;

	if (fw_enable == enable)
		goto reply;

	fw_enable = enable;
	if (fw_enable)
		ipfw_hook();
	else
		ipfw_dehook();
reply:
	lwkt_replymsg(lmsg, 0);
}

static int
ipfw_sysctl_enable(SYSCTL_HANDLER_ARGS)
{
	struct netmsg nmsg;
	struct lwkt_msg *lmsg;
	int enable, error;

	enable = fw_enable;
	error = sysctl_handle_int(oidp, &enable, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&nmsg, &curthread->td_msgport, 0,
		    ipfw_sysctl_enable_dispatch);
	lmsg = &nmsg.nm_lmsg;
	lmsg->u.ms_result = enable;

	return lwkt_domsg(IPFW_CFGPORT, lmsg, 0);
}

static int
ipfw_sysctl_autoinc_step(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				IPFW_AUTOINC_STEP_MIN, IPFW_AUTOINC_STEP_MAX);
}

static int
ipfw_sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	lockmgr(&dyn_lock, LK_EXCLUSIVE);

	value = dyn_buckets;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		goto back;

	/*
	 * Make sure we have a power of 2 and
	 * do not allow more than 64k entries.
	 */
	error = EINVAL;
	if (value <= 1 || value > 65536)
		goto back;
	if ((value & (value - 1)) != 0)	/* clears lowest set bit: 0 iff power of 2 */
		goto back;

	error = 0;
	dyn_buckets = value;
back:
	lockmgr(&dyn_lock, LK_RELEASE);
	return error;
}

static int
ipfw_sysctl_dyn_fin(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				1, dyn_keepalive_period - 1);
}

static int
ipfw_sysctl_dyn_rst(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
				1, dyn_keepalive_period - 1);
}

static void
ipfw_ctx_init_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ipfw *fwmsg = (struct netmsg_ipfw *)nmsg;
	struct ipfw_context *ctx;
	struct ip_fw *def_rule;

	ctx = kmalloc(sizeof(*ctx), M_IPFW, M_WAITOK | M_ZERO);
	ipfw_ctx[mycpuid] = ctx;

	def_rule = kmalloc(sizeof(*def_rule), M_IPFW, M_WAITOK | M_ZERO);

	def_rule->act_ofs = 0;
	def_rule->rulenum = IPFW_DEFAULT_RULE;
	def_rule->cmd_len = 1;
	def_rule->set = IPFW_DEFAULT_SET;

	def_rule->cmd[0].len = 1;
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
	def_rule->cmd[0].opcode = O_ACCEPT;
#else
	def_rule->cmd[0].opcode = O_DENY;
#endif

	def_rule->refcnt = 1;
	def_rule->cpuid = mycpuid;

	/* Install the default rule */
	ctx->ipfw_default_rule = def_rule;
	ctx->ipfw_layer3_chain = def_rule;

	/* Link rule CPU sibling */
	ipfw_link_sibling(fwmsg, def_rule);

	/* Statistics only need to be updated once */
	if (mycpuid == 0)
		ipfw_inc_static_count(def_rule);

	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}

static void
ipfw_init_dispatch(struct netmsg *nmsg)
{
	struct netmsg_ipfw fwmsg;
	int error = 0;

	if (IPFW_LOADED) {
		kprintf("IP firewall already loaded\n");
		error = EEXIST;
		goto reply;
	}

	bzero(&fwmsg, sizeof(fwmsg));
	netmsg_init(&fwmsg.nmsg, &curthread->td_msgport, 0,
		    ipfw_ctx_init_dispatch);
	ifnet_domsg(&fwmsg.nmsg.nm_lmsg, 0);

	ip_fw_chk_ptr = ipfw_chk;
	ip_fw_ctl_ptr = ipfw_ctl;
	ip_fw_dn_io_ptr = ipfw_dummynet_io;

	kprintf("ipfw2 initialized, default to %s, logging ",
		ipfw_ctx[mycpuid]->ipfw_default_rule->cmd[0].opcode ==
		O_ACCEPT ? "accept" : "deny");

#ifdef IPFIREWALL_VERBOSE
	fw_verbose = 1;
#endif
#ifdef IPFIREWALL_VERBOSE_LIMIT
	verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
#endif
	if (fw_verbose == 0) {
		kprintf("disabled\n");
	} else if (verbose_limit == 0) {
		kprintf("unlimited\n");
	} else {
		kprintf("limited to %d packets/entry by default\n",
			verbose_limit);
	}

	callout_init_mp(&ipfw_timeout_h);
	netmsg_init(&ipfw_timeout_netmsg, &netisr_adone_rport,
		    MSGF_MPSAFE | MSGF_DROPABLE | MSGF_PRIORITY,
		    ipfw_tick_dispatch);
	lockinit(&dyn_lock, "ipfw_dyn", 0, 0);

	callout_reset(&ipfw_timeout_h, hz, ipfw_tick, NULL);

	if (fw_enable)
		ipfw_hook();
reply:
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

static int
ipfw_init(void)
{
	struct netmsg smsg;

	netmsg_init(&smsg, &curthread->td_msgport, 0, ipfw_init_dispatch);
	return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0);
}

#ifdef KLD_MODULE

static void
ipfw_fini_dispatch(struct netmsg *nmsg)
{
	int error = 0, cpu;

	if (ipfw_refcnt != 0) {
		error = EBUSY;
		goto reply;
	}

	ipfw_dehook();
	callout_stop(&ipfw_timeout_h);

	netmsg_service_sync();

	if ((ipfw_timeout_netmsg.nm_lmsg.ms_flags & MSGF_DONE) == 0) {
		/*
		 * Callout message is pending; drop it
		 */
		lwkt_dropmsg(&ipfw_timeout_netmsg.nm_lmsg);
	}

	ip_fw_chk_ptr = NULL;
	ip_fw_ctl_ptr = NULL;
	ip_fw_dn_io_ptr = NULL;
	ipfw_flush(1 /* kill default rule */);

	/* Free per-cpu context */
	for (cpu = 0; cpu < ncpus; ++cpu)
		kfree(ipfw_ctx[cpu], M_IPFW);

	kprintf("IP firewall unloaded\n");
reply:
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

static int
ipfw_fini(void)
{
	struct netmsg smsg;

	netmsg_init(&smsg, &curthread->td_msgport, 0, ipfw_fini_dispatch);
	return lwkt_domsg(IPFW_CFGPORT, &smsg.nm_lmsg, 0);
}

#endif	/* KLD_MODULE */

static int
ipfw_modevent(module_t mod, int type, void *unused)
{
	int err = 0;

	switch (type) {
	case MOD_LOAD:
		err = ipfw_init();
		break;

	case MOD_UNLOAD:
#ifndef KLD_MODULE
		kprintf("ipfw statically compiled, cannot unload\n");
		err = EBUSY;
#else
		err = ipfw_fini();
#endif
		break;

	default:
		break;
	}
	return err;
}

static moduledata_t ipfwmod = {
	"ipfw",
	ipfw_modevent,
	0
};
DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PROTO_END, SI_ORDER_ANY);
MODULE_VERSION(ipfw, 1);