/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_flow.c,v 1.9.2.2 2001/11/04 17:35:31 luigi Exp $
 * $DragonFly: src/sys/netinet/ip_flow.c,v 1.27 2008/10/28 07:09:26 sephe Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#include <sys/in_cksum.h>

#include <machine/smp.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>
#define IPFLOW_TIMER            (5 * PR_SLOWHZ)
#define IPFLOW_HASHBITS         6       /* should not be a multiple of 8 */
#define IPFLOW_HASHSIZE         (1 << IPFLOW_HASHBITS)
#define IPFLOW_MAX              256

#define IPFLOW_RTENTRY_ISDOWN(rt) \
        (((rt)->rt_flags & RTF_UP) == 0 || \
         ((rt)->rt_ifp->if_flags & IFF_UP) == 0)
struct netmsg_ipfaddr {
        struct netmsg   ipf_nmsg;
        struct in_addr  ipf_addr;
};
struct ipflow {
        LIST_ENTRY(ipflow) ipf_hash;    /* next ipflow in hash bucket */
        LIST_ENTRY(ipflow) ipf_list;    /* next ipflow in list */

        struct in_addr ipf_dst;         /* destination address */
        struct in_addr ipf_src;         /* source address */
        uint8_t ipf_tos;                /* type-of-service */

        uint8_t ipf_flags;              /* see IPFLOW_FLAG_ */
        uint8_t ipf_pad[2];             /* explicit pad */
        int ipf_refcnt;                 /* reference count */

        struct route ipf_ro;            /* associated route entry */
        u_long ipf_uses;                /* number of uses in this period */

        int ipf_timer;                  /* remaining lifetime of this entry */
        u_long ipf_dropped;             /* ENOBUFS returned by if_output */
        u_long ipf_errors;              /* other errors returned by if_output */
        u_long ipf_last_uses;           /* number of uses in last period */
};

LIST_HEAD(ipflowhead, ipflow);
#define IPFLOW_FLAG_ONLIST      0x1

#define ipflow_inuse            ipflow_inuse_pcpu[mycpuid]
#define ipflowtable             ipflowtable_pcpu[mycpuid]
#define ipflowlist              ipflowlist_pcpu[mycpuid]
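
/*
 * All flow state is replicated per cpu: each cpu owns its own hash
 * table, flow list and in-use counter, and the ipflow_inuse/
 * ipflowtable/ipflowlist macros above always select the current
 * cpu's instance via mycpuid.
 */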
static struct ipflowhead        ipflowtable_pcpu[MAXCPU][IPFLOW_HASHSIZE];
static struct ipflowhead        ipflowlist_pcpu[MAXCPU];
static int                      ipflow_inuse_pcpu[MAXCPU];
static struct netmsg            ipflow_timo_netmsgs[MAXCPU];
static int                      ipflow_active = 0;
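
/*
 * Reference counting convention: a flow that merely sits in the table
 * holds the single base reference (IPFLOW_REFCNT_INIT).  Any extra
 * reference -- e.g. the one taken around ifnet.if_output() in the
 * fast-forward path -- marks the flow as actively in use, which is
 * what the two macros below test.
 */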
#define IPFLOW_REFCNT_INIT      1

/* ipflow is alive and active */
#define IPFLOW_IS_ACTIVE(ipf)   ((ipf)->ipf_refcnt > IPFLOW_REFCNT_INIT)

/* ipflow is alive but not active */
#define IPFLOW_NOT_ACTIVE(ipf)  ((ipf)->ipf_refcnt == IPFLOW_REFCNT_INIT)
#define IPFLOW_REF(ipf) \
do { \
        KKASSERT((ipf)->ipf_refcnt > 0); \
        (ipf)->ipf_refcnt++; \
} while (0)

#define IPFLOW_FREE(ipf) \
do { \
        KKASSERT((ipf)->ipf_refcnt > 0); \
        (ipf)->ipf_refcnt--; \
        if ((ipf)->ipf_refcnt == 0) \
                ipflow_free((ipf)); \
} while (0)

#define IPFLOW_INSERT(bucket, ipf) \
do { \
        KKASSERT(((ipf)->ipf_flags & IPFLOW_FLAG_ONLIST) == 0); \
        (ipf)->ipf_flags |= IPFLOW_FLAG_ONLIST; \
        LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
        LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (0)

#define IPFLOW_REMOVE(ipf) \
do { \
        KKASSERT((ipf)->ipf_flags & IPFLOW_FLAG_ONLIST); \
        (ipf)->ipf_flags &= ~IPFLOW_FLAG_ONLIST; \
        LIST_REMOVE((ipf), ipf_hash); \
        LIST_REMOVE((ipf), ipf_list); \
} while (0)
SYSCTL_NODE(_net_inet_ip, OID_AUTO, ipflow, CTLFLAG_RW, 0, "ip flow");
SYSCTL_INT(_net_inet_ip, IPCTL_FASTFORWARDING, fastforwarding, CTLFLAG_RW,
           &ipflow_active, 0, "Enable flow-based IP forwarding");

static MALLOC_DEFINE(M_IPFLOW, "ip_flow", "IP flow");
static void     ipflow_free(struct ipflow *);
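
/*
 * Fold (dst, src, tos) into a hash bucket index, IPFLOW_HASHBITS bits
 * of the addresses at a time.  IPFLOW_HASHBITS is kept away from a
 * multiple of 8 (see its definition above), presumably so successive
 * folds do not line up on byte boundaries and more of the address
 * bits end up influencing the bucket choice.
 */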
static unsigned
ipflow_hash(struct in_addr dst, struct in_addr src, unsigned tos)
{
        unsigned hash = tos;
        int idx;

        for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS)
                hash += (dst.s_addr >> (32 - idx)) + (src.s_addr >> idx);
        return hash & (IPFLOW_HASHSIZE - 1);
}
static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
        unsigned hash;
        struct ipflow *ipf;

        hash = ipflow_hash(ip->ip_dst, ip->ip_src, ip->ip_tos);
        LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
                if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr &&
                    ip->ip_src.s_addr == ipf->ipf_src.s_addr &&
                    ip->ip_tos == ipf->ipf_tos)
                        break;
        }
        return ipf;
}
int
ipflow_fastforward(struct mbuf *m)
{
        struct ip *ip;
        struct ipflow *ipf;
        struct rtentry *rt;
        struct sockaddr *dst;
        struct ifnet *ifp;
        int error, iplen;

        /*
         * Are we forwarding packets?
         */
        if (!ipforwarding || !ipflow_active)
                return 0;

        /*
         * Was the packet received as a link-level multicast or broadcast?
         * If so, don't try to fast forward.
         */
        if (m->m_flags & (M_BCAST | M_MCAST))
                return 0;

        /* length checks already done in ip_mport() */
        KASSERT(m->m_len >= sizeof(struct ip), ("IP header not in one mbuf"));
        ip = mtod(m, struct ip *);

        /*
         * IP header with no options and valid version
         */
        if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2))
                return 0;

        iplen = ntohs(ip->ip_len);
        /* length checks already done in ip_mport() */
        KASSERT(iplen >= sizeof(struct ip),
                ("total length less than header length"));
        KASSERT(m->m_pkthdr.len >= iplen, ("mbuf too short"));

        ipf = ipflow_lookup(ip);
        if (ipf == NULL)
                return 0;

        /*
         * Verify the IP header checksum.
         */
        if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
                if (!(m->m_pkthdr.csum_flags & CSUM_IP_VALID))
                        return 0;
        } else {
                /* Must compute it ourselves. */
                if (in_cksum_hdr(ip) != 0)
                        return 0;
        }

        /*
         * Route and interface still up?
         */
        rt = ipf->ipf_ro.ro_rt;
        if (IPFLOW_RTENTRY_ISDOWN(rt))
                return 0;
        ifp = rt->rt_ifp;

        /*
         * Packet size OK?  TTL?
         */
        if (m->m_pkthdr.len > ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
                return 0;

        /*
         * Clear any in-bound checksum flags for this packet.
         */
        m->m_pkthdr.csum_flags = 0;

        /*
         * Everything checks out and so we can forward this packet.
         * Modify the TTL and incrementally change the checksum.
         *
         * This method of adding the checksum works on either endian CPU.
         * If htons() is inlined, all the arithmetic is folded; otherwise
         * the htons()s are combined by CSE due to the __const__ attribute.
         *
         * Don't bother using HW checksumming here -- the incremental
         * update is pretty fast.
         */
        ip->ip_ttl -= IPTTLDEC;
        if (ip->ip_sum >= (uint16_t)~htons(IPTTLDEC << 8))
                ip->ip_sum -= ~htons(IPTTLDEC << 8);
        else
                ip->ip_sum += htons(IPTTLDEC << 8);
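
        /*
         * Worked example of the incremental update above: decrementing
         * the TTL lowers the 16-bit TTL/protocol word of the header by
         * IPTTLDEC << 8 (htons(IPTTLDEC << 8) in network byte order),
         * so the one's-complement checksum must be raised by the same
         * amount.  Adding a value to a 16-bit one's-complement number
         * is equivalent to subtracting its bitwise complement; the
         * comparison above simply picks whichever form stays within
         * 16 bits, which is how the end-around carry is handled.
         */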

        /*
         * Trim the packet in case it's too long.
         */
        if (m->m_pkthdr.len > iplen) {
                if (m->m_len == m->m_pkthdr.len) {
                        m->m_len = iplen;
                        m->m_pkthdr.len = iplen;
                } else {
                        m_adj(m, iplen - m->m_pkthdr.len);
                }
        }

        /*
         * Send the packet on its way.  All we can get back is ENOBUFS.
         */
        ipf->ipf_uses++;
        ipf->ipf_timer = IPFLOW_TIMER;

        if (rt->rt_flags & RTF_GATEWAY)
                dst = rt->rt_gateway;
        else
                dst = &ipf->ipf_ro.ro_dst;

        /*
         * Reference count this ipflow, before the possibly blocking
         * ifnet.if_output(), so this ipflow will not be changed or
         * reaped behind our back.
         */
        IPFLOW_REF(ipf);

        error = ifp->if_output(ifp, m, dst, rt);
        if (error) {
                if (error == ENOBUFS)
                        ipf->ipf_dropped++;
                else
                        ipf->ipf_errors++;
        }

        IPFLOW_FREE(ipf);
        return 1;
}
static void
ipflow_addstats(struct ipflow *ipf)
{
        ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses;
        ipstat.ips_cantforward += ipf->ipf_errors + ipf->ipf_dropped;
        ipstat.ips_total += ipf->ipf_uses;
        ipstat.ips_forward += ipf->ipf_uses;
        ipstat.ips_fastforward += ipf->ipf_uses;
}
static void
ipflow_free(struct ipflow *ipf)
{
        KKASSERT(ipf->ipf_refcnt == 0);
        KKASSERT((ipf->ipf_flags & IPFLOW_FLAG_ONLIST) == 0);

        KKASSERT(ipflow_inuse > 0);
        ipflow_inuse--;

        ipflow_addstats(ipf);
        RTFREE(ipf->ipf_ro.ro_rt);
        kfree(ipf, M_IPFLOW);
}
static void
ipflow_reset(struct ipflow *ipf)
{
        ipflow_addstats(ipf);
        RTFREE(ipf->ipf_ro.ro_rt);
        ipf->ipf_uses = ipf->ipf_last_uses = 0;
        ipf->ipf_errors = ipf->ipf_dropped = 0;
}
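
/*
 * Pick a victim flow to recycle when the per-cpu table is full: prefer
 * an entry whose route has already gone down, otherwise take the one
 * closest to expiry with the fewest recent uses.  Actively referenced
 * entries are never reclaimed.  The chosen entry is unhooked and reset
 * so the caller can reuse it.
 */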
static struct ipflow *
ipflow_reap(void)
{
        struct ipflow *ipf, *maybe_ipf = NULL;

        LIST_FOREACH(ipf, &ipflowlist, ipf_list) {
                /*
                 * Skip actively used ipflow
                 */
                if (IPFLOW_IS_ACTIVE(ipf))
                        continue;

                /*
                 * If this no longer points to a valid route,
                 * reclaim it.
                 */
                if ((ipf->ipf_ro.ro_rt->rt_flags & RTF_UP) == 0)
                        goto done;

                /*
                 * Choose the one that's been least recently used
                 * or has had the least uses in the last 1.5
                 * intervals.
                 */
                if (maybe_ipf == NULL ||
                    ipf->ipf_timer < maybe_ipf->ipf_timer ||
                    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
                     ipf->ipf_last_uses + ipf->ipf_uses <
                     maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses))
                        maybe_ipf = ipf;
        }
        if (maybe_ipf == NULL)
                return NULL;

        ipf = maybe_ipf;
done:
        /*
         * Remove the entry from the flow table and reset its state.
         */
        IPFLOW_REMOVE(ipf);
        ipflow_reset(ipf);
        return ipf;
}
static void
ipflow_timo_dispatch(struct netmsg *nmsg)
{
        struct ipflow *ipf, *next_ipf;

        lwkt_replymsg(&nmsg->nm_lmsg, 0);       /* reply ASAP */

        LIST_FOREACH_MUTABLE(ipf, &ipflowlist, ipf_list, next_ipf) {
                if (--ipf->ipf_timer == 0) {
                        IPFLOW_REMOVE(ipf);
                        IPFLOW_FREE(ipf);
                } else {
                        ipf->ipf_last_uses = ipf->ipf_uses;
                        ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses;
                        ipstat.ips_total += ipf->ipf_uses;
                        ipstat.ips_forward += ipf->ipf_uses;
                        ipstat.ips_fastforward += ipf->ipf_uses;
                        ipf->ipf_uses = 0;
                }
        }
}
static void
ipflow_timo_ipi(void *arg __unused)
{
        struct lwkt_msg *msg = &ipflow_timo_netmsgs[mycpuid].nm_lmsg;

        if (msg->ms_flags & MSGF_DONE)
                lwkt_sendmsg(cpu_portfn(mycpuid), msg);
}
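
/*
 * Driven from the IP slow timeout.  Build a mask of the cpus that
 * currently own flows and send each of them an IPI; the IPI handler
 * above then queues that cpu's timer netmsg (unless one is still
 * pending), so flow aging always runs on the owning cpu.
 */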
void
ipflow_slowtimo(void)
{
#ifdef SMP
        cpumask_t mask = 0;
        int i;

        for (i = 0; i < ncpus; ++i) {
                if (ipflow_inuse_pcpu[i])
                        mask |= 1 << i;
        }
        mask &= smp_active_mask;
        if (mask != 0)
                lwkt_send_ipiq_mask(mask, ipflow_timo_ipi, NULL);
#else
        if (ipflow_inuse)
                ipflow_timo_ipi(NULL);
#endif
}
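
/*
 * Called from the normal (slow) forwarding path to record or refresh
 * a flow for this packet's (dst, src, tos) using the route that path
 * just resolved, so that subsequent packets of the flow can be handled
 * by ipflow_fastforward() without a routing lookup.
 */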
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
        const struct ip *const ip = mtod(m, struct ip *);
        struct ipflow *ipf;
        unsigned hash;

        /*
         * Don't create cache entries for ICMP messages.
         */
        if (!ipflow_active || ip->ip_p == IPPROTO_ICMP)
                return;

        /*
         * See if a flow already exists.  If so, remove it from its
         * list and free the old route.  If not, try to allocate a
         * new one (if we aren't at our limit).
         */
        ipf = ipflow_lookup(ip);
        if (ipf == NULL) {
                if (ipflow_inuse == IPFLOW_MAX) {
                        ipf = ipflow_reap();
                        if (ipf == NULL)
                                return;
                } else {
                        ipf = kmalloc(sizeof(*ipf), M_IPFLOW,
                                      M_INTWAIT | M_NULLOK | M_ZERO);
                        if (ipf == NULL)
                                return;
                        ipf->ipf_refcnt = IPFLOW_REFCNT_INIT;

                        ipflow_inuse++;
                }
        } else {
                if (IPFLOW_NOT_ACTIVE(ipf)) {
                        IPFLOW_REMOVE(ipf);
                        ipflow_reset(ipf);
                } else {
                        /* This ipflow is being used; don't change it */
                        KKASSERT(IPFLOW_IS_ACTIVE(ipf));
                        return;
                }
        }

        /* This ipflow should not be actively used */
        KKASSERT(IPFLOW_NOT_ACTIVE(ipf));

        /*
         * Fill in the updated information.
         */
        ipf->ipf_ro = *ro;
        ro->ro_rt->rt_refcnt++;
        ipf->ipf_dst = ip->ip_dst;
        ipf->ipf_src = ip->ip_src;
        ipf->ipf_tos = ip->ip_tos;
        ipf->ipf_timer = IPFLOW_TIMER;

        /*
         * Insert into the appropriate bucket of the flow table.
         */
        hash = ipflow_hash(ip->ip_dst, ip->ip_src, ip->ip_tos);
        IPFLOW_INSERT(&ipflowtable[hash], ipf);
}
static void
ipflow_flush_oncpu(void)
{
        struct ipflow *ipf;

        while ((ipf = LIST_FIRST(&ipflowlist)) != NULL) {
                IPFLOW_REMOVE(ipf);
                IPFLOW_FREE(ipf);
        }
}
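
/*
 * Interface address change handling: when an address is added to or
 * changed on an interface, drop every flow whose source or destination
 * matches that address.  The netmsg is forwarded from cpu to cpu
 * (mycpuid + 1) so each cpu cleans its own flow table in turn.
 */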
static void
ipflow_ifaddr_handler(struct netmsg *nmsg)
{
        struct netmsg_ipfaddr *amsg = (struct netmsg_ipfaddr *)nmsg;
        struct ipflow *ipf, *next_ipf;

        LIST_FOREACH_MUTABLE(ipf, &ipflowlist, ipf_list, next_ipf) {
                if (ipf->ipf_dst.s_addr == amsg->ipf_addr.s_addr ||
                    ipf->ipf_src.s_addr == amsg->ipf_addr.s_addr) {
                        IPFLOW_REMOVE(ipf);
                        IPFLOW_FREE(ipf);
                }
        }
        ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
static void
ipflow_ifaddr(void *arg __unused, struct ifnet *ifp __unused,
              enum ifaddr_event event, struct ifaddr *ifa)
{
        struct netmsg_ipfaddr amsg;

        if (ifa->ifa_addr->sa_family != AF_INET)
                return;

        /* Only add/change events need to be handled */
        switch (event) {
        case IFADDR_EVENT_ADD:
        case IFADDR_EVENT_CHANGE:
                break;

        case IFADDR_EVENT_DELETE:
                return;
        }

        netmsg_init(&amsg.ipf_nmsg, NULL, &curthread->td_msgport,
                    MSGF_PRIORITY, ipflow_ifaddr_handler);
        amsg.ipf_addr = ifatoia(ifa)->ia_addr.sin_addr;

        ifnet_domsg(&amsg.ipf_nmsg.nm_lmsg, 0);
}
static void
ipflow_init(void)
{
        char oid_name[32];
        int i;

        for (i = 0; i < ncpus; ++i) {
                netmsg_init(&ipflow_timo_netmsgs[i], NULL, &netisr_adone_rport,
                            MSGF_MPSAFE, ipflow_timo_dispatch);

                ksnprintf(oid_name, sizeof(oid_name), "inuse%d", i);

                SYSCTL_ADD_INT(NULL,
                               SYSCTL_STATIC_CHILDREN(_net_inet_ip_ipflow),
                               OID_AUTO, oid_name, CTLFLAG_RD,
                               &ipflow_inuse_pcpu[i], 0,
                               "# of ip flow being used");
        }
        EVENTHANDLER_REGISTER(ifaddr_event, ipflow_ifaddr, NULL,
                              EVENTHANDLER_PRI_ANY);
}
SYSINIT(arp, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipflow_init, 0);