/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 * $FreeBSD: src/sys/netgraph/netflow/netflow.c,v 1.29 2008/05/09 23:02:57 julian Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "ng_message.h"
#include "netgraph.h"

#include "netflow/netflow.h"
#include "netflow/ng_netflow.h"
#define	NBUCKETS	(65536)		/* must be power of 2 */

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))
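/*
 * Both hashes fold each 32-bit address to 16 bits by XORing its halves
 * and then mask the result with (NBUCKETS - 1).  The mask is the reason
 * NBUCKETS must be a power of 2: for any other size, (x & (NBUCKETS - 1))
 * would never select some buckets at all.
 */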
/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magical number: statistically, the number of 4-packet flows is
 * bigger than the number of 5-, 6-, 7-...packet flows by an order of
 * magnitude.  Most UDP/ICMP scans are 1 packet (~90% of flow cache).
 * TCP scans are 2 packets for a reachable host and 4 packets otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)
/*
 * Cisco uses milliseconds for uptime.  Bad idea, since it overflows
 * every 48+ days.  But we will do the same to keep compatibility.  This
 * macro does an overflowable multiplication by 1000.
 */
#define	MILLIUPTIME(t)	(((t) << 9) +	/* 512 */	\
			 ((t) << 8) +	/* 256 */	\
			 ((t) << 7) +	/* 128 */	\
			 ((t) << 6) +	/* 64 */	\
			 ((t) << 5) +	/* 32 */	\
			 ((t) << 3))	/* 8 */
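/*
 * 512 + 256 + 128 + 64 + 32 + 8 == 1000, so MILLIUPTIME(t) == t * 1000
 * computed with shifts and adds only.  The result wraps modulo 2^32,
 * i.e. every 2^32 milliseconds -- the "48+ days" (~49.7) noted above.
 */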
MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p, int flags);
/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}
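/*
 * Note that NetFlow flows are unidirectional: flow records are keyed on
 * the full flow_rec, and the hashes above do not treat source and
 * destination symmetrically, so the two directions of one connection
 * are accounted as two separate flows.
 */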
/* This is callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p )arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}
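/*
 * The ctor/dtor pair keeps nfinfo_used equal to the number of live flow
 * entries, so the cache-full check above is a single atomic load.
 */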
/* This is callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p )arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}
/*
 * Detach export datagram from priv, if there is any.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
	item_p	item = NULL;

	mtx_lock(&priv->export_mtx);
	if (priv->export_item != NULL) {
		item = priv->export_item;
		priv->export_item = NULL;
	}
	mtx_unlock(&priv->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
	}

	return (item);
}
/*
 * Re-attach incomplete datagram back to priv.
 * If there is already another one, then send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, item_p item, int flags)
{
	/*
	 * It may happen on SMP that some thread has already
	 * put its item there; in this case we bail out and
	 * send what we have to the collector.
	 */
	mtx_lock(&priv->export_mtx);
	if (priv->export_item == NULL) {
		priv->export_item = item;
		mtx_unlock(&priv->export_mtx);
	} else {
		mtx_unlock(&priv->export_mtx);
		export_send(priv, item, flags);
	}
}
/*
 * The flow is over.  Call export_add() and free it.  If the datagram is
 * full, then call export_send().
 */
static void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle, int flags)
{
	if (*item == NULL)
		*item = get_export_dgram(priv);
	if (*item == NULL) {
		atomic_add_32(&priv->info.nfinfo_export_failed, 1);
		uma_zfree_arg(priv->zone, fle, priv);
		return;
	}
	if (export_add(*item, fle) > 0) {
		export_send(priv, *item, flags);
		*item = NULL;
	}
	uma_zfree_arg(priv->zone, fle, priv);
}
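/*
 * The datagram is passed by reference so that one partially filled
 * datagram can accumulate records across many expire_flow() calls; the
 * caller must eventually hand a leftover *item to export_send() or
 * return_export_dgram().
 */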
/* Get a snapshot of node statistics */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}
/*
 * Insert a record into defined slot.
 *
 * First we get for us a free flow entry, then fill in all
 * possible fields in it.
 *
 * TODO: consider dropping hash mutex while filling in datagram,
 * as this was done in previous version.  Need to test & profile.
 */
static int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry *fle;
	struct sockaddr_in sin;
	struct rtentry *rt;

	KKASSERT(mtx_owned(&hsh->mtx));

	fle = uma_zalloc_arg(priv->zone, priv, M_WAITOK | M_NULLOK);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours.  It is detached from all lists,
	 * we can safely edit it.
	 */

	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do route table lookup on destination address.  So we can
	 * fill in out_ifx, dst_mask, nexthop, and dst_as in future releases.
	 */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_dst;
	/* XXX MRT 0 as a default.. need the m here to get fib */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, RTF_CLONING, 0);
	if (rt != NULL) {
		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up.  We can't determine mask :( */
			fle->f.dst_mask = 32;

		RTFREE_LOCKED(rt);
	}
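	/*
	 * bitcount32() turns a netmask into a prefix length here and
	 * below: IPv4 netmasks are contiguous runs of one bits, so the
	 * population count equals the CIDR width (e.g. 255.255.255.0 -> 24).
	 */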
	/* Do route lookup on source address, to fill in src_mask. */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_src;
	/* XXX MRT 0 as a default, revisit.  Need the mbuf for fib. */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, RTF_CLONING, 0);
	if (rt != NULL) {
		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up.  We can't determine mask :( */
			fle->f.src_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Push new flow at the end of hash. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}
/*
 * Non-static functions called from ng_netflow.c.
 */

/* Allocate memory and set up flow cache */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry *hsh;
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
	    uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);

	/* Allocate hash. */
	priv->hash = kmalloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

	mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

	return (0);
}
/* Free all flow cache memory.  Called from node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	item_p item = NULL;
	int i;

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required since callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
		}

	if (item != NULL)
		export_send(priv, item, NG_QUEUE);

	uma_zdestroy(priv->zone);

	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash != NULL)
		kfree(priv->hash, M_NETFLOW_HASH);

	mtx_destroy(&priv->export_mtx);
}
/* Insert packet into flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, iface_p iface,
	struct ifnet *ifp)
{
	register struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	struct flow_rec r;
	item_p item = NULL;
	int hlen, plen;
	int error = 0;
	uint8_t tcp_flags = 0;

	/* Try to fill flow_rec r */
	bzero(&r, sizeof(r));
	/* check version */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* verify min header length */
	hlen = ip->ip_hl << 2;

	if (hlen < sizeof(struct ip))
		return (EINVAL);

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;

	/* save packet length */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	/* Configured in_ifx overrides mbuf's */
	if (iface->info.ifinfo_index == 0) {
		if (ifp != NULL)
			r.r_i_ifx = ifp->if_index;
	} else
		r.r_i_ifx = iface->info.ifinfo_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Following fragments will be recorded simply as an IP packet with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know, it looks like a bug.  But I don't want to re-implement
	 * IP packet assembling here.  Anyway, the (in)famous trafd works
	 * this way - and nobody complains yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t )ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t )ip + hlen);
			break;
		}

	/* Update node statistics.  XXX: race... */
	priv->info.nfinfo_packets++;
	priv->info.nfinfo_bytes += plen;

	/* Find hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through hash and find our entry.  If we encounter an
	 * entry that should be expired, purge it.  We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire flow in active way:
		 * - it hit active timeout
		 * - a TCP connection closed
		 * - it is going to overflow counter
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest, move it to the tail,
			 * if it isn't there already.  Next search will
			 * locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	if (item != NULL)
		return_export_dgram(priv, item, NG_QUEUE);

	return (error);
}
/*
 * Return records from cache to userland.
 *
 * TODO: matching particular IP should be done in kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry *hsh;
	struct flow_entry *fle;
	struct ngnf_flows *data;
	int i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is a first run */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS - 1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer not more than NREC_AT_ONCE.  More data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us.  Then, we will restart with the new entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 * - we skip locked hash entries
	 * - we bail out, if someone wants our entry
	 * - we skip the rest of the entry, when we hit NREC_AT_ONCE
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (mtx_contested(&hsh->mtx))
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}
/* We have full datagram in privdata.  Send it to export hook. */
static int
export_send(priv_p priv, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	    header->count + sizeof(struct netflow_v5_header);

	/* Fill export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = 0;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&priv->flow_seq,
	    header->count));
	header->count = htons(header->count);

	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}
/* Add export record to dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;

	/* Not supported fields. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1);	/* end of datagram */
	else
		return (0);
}
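/*
 * Returning 1 once NETFLOW_V5_MAX_RECORDS records are packed tells
 * expire_flow() to export_send() the datagram and start a fresh one,
 * which keeps each export small enough for a single unfragmented UDP
 * packet on common MTUs.
 */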
/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	priv_p priv = (priv_p )arg;
	item_p item = NULL;
	uint32_t used;
	int i;

	/*
	 * Going through all the cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (mtx_contested(&hsh->mtx))
				break;

			/*
			 * Don't expire aggressively while the hash collision
			 * ratio is predicted to be small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, &item, fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	if (item != NULL)
		return_export_dgram(priv, item, NG_NOFLAGS);

	/* Schedule next expire. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}