/*
 * This is a module which is used for logging packets to userspace via
 * nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ipt_ULOG.c:
 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_log.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nfnetlink_log.h>

#include <linux/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif
#define NFULNL_NLBUFSIZ_DEFAULT	NLMSG_GOODSIZE
#define NFULNL_TIMEOUT_DEFAULT	100	/* every second */
#define NFULNL_QTHRESH_DEFAULT	100	/* 100 packets */
#define NFULNL_COPY_RANGE_MAX	0xFFFF	/* max packet size is limited by 16-bit struct nfattr nfa_len field */

/* no trailing semicolon inside the do-while, so the macro expands safely
 * in if/else bodies */
#define PRINTR(x, args...)	do { if (net_ratelimit()) \
				     printk(x, ## args); } while (0)
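/*
 * Note on units: the flush timeout is expressed in 1/100ths of a second
 * (see the "inst->flushtimeout*HZ/100" conversion in nfulnl_log_packet()
 * below), so NFULNL_TIMEOUT_DEFAULT == 100 means "flush every second".
 */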
struct nfulnl_instance {
	struct hlist_node hlist;	/* global list of instances */
	spinlock_t lock;
	atomic_t use;			/* use count */

	unsigned int qlen;		/* number of nlmsgs in skb */
	struct sk_buff *skb;		/* pre-allocated skb */
	struct timer_list timer;
	int peer_pid;			/* PID of the peer process */

	/* configurable parameters */
	unsigned int flushtimeout;	/* timeout until queue flush */
	unsigned int nlbufsiz;		/* netlink buffer allocation size */
	unsigned int qthreshold;	/* threshold of the queue */
	u_int32_t copy_range;
	u_int32_t seq;			/* instance-local sequential counter */
	u_int16_t group_num;		/* number of this queue */
	u_int16_t flags;
	u_int8_t copy_mode;
	struct rcu_head rcu;
};
static DEFINE_SPINLOCK(instances_lock);
static atomic_t global_seq;

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS];
static unsigned int hash_init;
static inline u_int8_t instance_hashfn(u_int16_t group_num)
{
	return ((group_num & 0xff) % INSTANCE_BUCKETS);
}
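/*
 * Worked example: group_num 0x0112 gives (0x12 & 0xff) % 16 == 2, so the
 * instance lands in bucket 2; only the low byte of the group number
 * contributes to the bucket index.
 */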
static struct nfulnl_instance *
__instance_lookup(u_int16_t group_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfulnl_instance *inst;

	head = &instance_table[instance_hashfn(group_num)];
	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
		if (inst->group_num == group_num)
			return inst;
	}
	return NULL;
}

static inline void
instance_get(struct nfulnl_instance *inst)
{
	atomic_inc(&inst->use);
}

static struct nfulnl_instance *
instance_lookup_get(u_int16_t group_num)
{
	struct nfulnl_instance *inst;

	rcu_read_lock_bh();
	inst = __instance_lookup(group_num);
	if (inst && !atomic_inc_not_zero(&inst->use))
		inst = NULL;
	rcu_read_unlock_bh();

	return inst;
}
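/*
 * Refcounting sketch: lookups run under rcu_read_lock_bh() and take a
 * reference with atomic_inc_not_zero(), so an instance whose use count
 * has already dropped to zero cannot be resurrected while its
 * RCU-deferred free is pending.
 */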
static void nfulnl_instance_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct nfulnl_instance, rcu));
	module_put(THIS_MODULE);
}

static void
instance_put(struct nfulnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use))
		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
}
static void nfulnl_timer(unsigned long data);
static struct nfulnl_instance *
instance_create(u_int16_t group_num, int pid)
{
	struct nfulnl_instance *inst;
	int err;

	spin_lock_bh(&instances_lock);
	if (__instance_lookup(group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);

	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

	inst->peer_pid = pid;
	inst->group_num = group_num;

	inst->qthreshold	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode		= NFULNL_COPY_PACKET;
	inst->copy_range	= NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
			   &instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&instances_lock);
	return ERR_PTR(err);
}
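/*
 * instance_create() runs entirely under instances_lock with BHs
 * disabled, hence the GFP_ATOMIC allocation above; callers distinguish
 * failure modes via the ERR_PTR() return value.
 */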
static void __nfulnl_flush(struct nfulnl_instance *inst);

/* called with BH disabled */
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	hlist_del_rcu(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock(&inst->lock);

	/* lockless readers won't be able to use us */
	inst->copy_mode = NFULNL_COPY_DISABLED;

	if (inst->skb)
		__nfulnl_flush(inst);
	spin_unlock(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}

static inline void
instance_destroy(struct nfulnl_instance *inst)
{
	spin_lock_bh(&instances_lock);
	__instance_destroy(inst);
	spin_unlock_bh(&instances_lock);
}
static int
nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
		unsigned int range)
{
	int status = 0;

	spin_lock_bh(&inst->lock);

	switch (mode) {
	case NFULNL_COPY_NONE:
	case NFULNL_COPY_META:
		inst->copy_mode = mode;
		inst->copy_range = 0;
		break;

	case NFULNL_COPY_PACKET:
		inst->copy_mode = mode;
		inst->copy_range = min_t(unsigned int,
					 range, NFULNL_COPY_RANGE_MAX);
		break;

	default:
		status = -EINVAL;
		break;
	}

	spin_unlock_bh(&inst->lock);

	return status;
}
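/*
 * Copy-mode semantics, for reference: NFULNL_COPY_NONE and
 * NFULNL_COPY_META log only metadata (copy_range is forced to 0), while
 * NFULNL_COPY_PACKET additionally copies up to copy_range bytes of
 * packet payload, clamped to NFULNL_COPY_RANGE_MAX.
 */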
static int
nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz)
{
	int status;

	spin_lock_bh(&inst->lock);
	if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT)
		status = -ERANGE;
	else if (nlbufsiz > 131072)
		status = -ERANGE;
	else {
		inst->nlbufsiz = nlbufsiz;
		status = 0;
	}
	spin_unlock_bh(&inst->lock);

	return status;
}
static int
nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout)
{
	spin_lock_bh(&inst->lock);
	inst->flushtimeout = timeout;
	spin_unlock_bh(&inst->lock);

	return 0;
}

static int
nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh)
{
	spin_lock_bh(&inst->lock);
	inst->qthreshold = qthresh;
	spin_unlock_bh(&inst->lock);

	return 0;
}

static int
nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
{
	spin_lock_bh(&inst->lock);
	inst->flags = flags;
	spin_unlock_bh(&inst->lock);

	return 0;
}
static struct sk_buff *
nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
{
	struct sk_buff *skb;
	unsigned int n;

	/* alloc skb which should be big enough for a whole multipart
	 * message.  WARNING: has to be <= 128k due to slab restrictions */

	n = max(inst_size, pkt_size);
	skb = alloc_skb(n, GFP_ATOMIC);
	if (!skb) {
		if (n > pkt_size) {
			/* try to allocate only as much as we need for current
			 * packet */

			skb = alloc_skb(pkt_size, GFP_ATOMIC);
			if (!skb)
				pr_err("nfnetlink_log: can't even alloc %u bytes\n",
				       pkt_size);
		}
	}

	return skb;
}
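/*
 * The 131072-byte cap enforced in nfulnl_set_nlbufsiz() above matches
 * this constraint: the batch buffer allocated here must stay <= 128k
 * due to the slab restriction noted in the comment.
 */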
static int
__nfulnl_send(struct nfulnl_instance *inst)
{
	int status = -1;

	if (inst->qlen > 1)
		NLMSG_PUT(inst->skb, 0, 0,
			  NLMSG_DONE,
			  sizeof(struct nfgenmsg));

	status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
				   MSG_DONTWAIT);

	inst->qlen = 0;
	inst->skb = NULL;

nlmsg_failure:
	return status;
}
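/*
 * When more than one log message was batched into the skb, a trailing
 * NLMSG_DONE header is appended above so userspace sees a properly
 * terminated multipart netlink message.
 */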
static void
__nfulnl_flush(struct nfulnl_instance *inst)
{
	/* timer holds a reference */
	if (del_timer(&inst->timer))
		instance_put(inst);
	if (inst->skb)
		__nfulnl_send(inst);
}

static void
nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
}
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix)
		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
			     htonl(indev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physindev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					     htonl(skb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
			     htonl(outdev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					     htonl(skb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (skb->mark)
		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
			     htons(skb->dev->hard_header_len));
		NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
			skb_mac_header(skb));
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			struct file *file = skb->sk->sk_socket->file;
			__be32 uid = htonl(file->f_cred->fsuid);
			__be32 gid = htonl(file->f_cred->fsgid);
			/* need to unlock here since NLA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));

	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
			     htonl(atomic_inc_return(&global_seq)));

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static struct nf_loginfo default_loginfo = {
	.type =		NF_LOG_TYPE_ULOG,
	.u = {
		.ulog = {
			.copy_len	= 0xffff,
			.group		= 0,
			.qthreshold	= 1,
		},
	},
};
/* log handler for internal netfilter logging api */
void
nfulnl_log_packet(u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));

	if (in && skb_mac_header_was_set(skb)) {
		size +=   nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	}

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;

	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;

		size += nla_total_size(data_len);
		break;

	case NFULNL_COPY_DISABLED:
	default:
		goto unlock_and_release;
	}

	if (inst->skb &&
	    size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, prefix, plen);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
EXPORT_SYMBOL_GPL(nfulnl_log_packet);
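/*
 * Usage sketch (illustrative, not part of this module): once a protocol
 * family is bound with NFULNL_CFG_CMD_PF_BIND, in-kernel callers reach
 * nfulnl_log_packet() indirectly through the nf_log core, e.g.
 *
 *	nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL,
 *		      "input drop: ");
 *
 * The "input drop: " prefix here is made up for the example.
 */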
static int
nfulnl_rcv_nl_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this pid */
		spin_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfulnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((net_eq(n->net, &init_net)) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		spin_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfulnl_rtnl_notifier = {
	.notifier_call	= nfulnl_rcv_nl_event,
};
static int
nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}

static struct nf_logger nfulnl_logger __read_mostly = {
	.name	= "nfnetlink_log",
	.logfn	= &nfulnl_log_packet,
	.me	= THIS_MODULE,
};
static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
	[NFULA_CFG_CMD]		= { .len = sizeof(struct nfulnl_msg_config_cmd) },
	[NFULA_CFG_MODE]	= { .len = sizeof(struct nfulnl_msg_config_mode) },
	[NFULA_CFG_TIMEOUT]	= { .type = NLA_U32 },
	[NFULA_CFG_QTHRESH]	= { .type = NLA_U32 },
	[NFULA_CFG_NLBUFSIZ]	= { .type = NLA_U32 },
	[NFULA_CFG_FLAGS]	= { .type = NLA_U16 },
};
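/*
 * Policy note: for the two binary attributes, .len is the minimum
 * payload size accepted, while the NLA_U32/NLA_U16 entries make
 * nfnetlink validate the attribute length before nfulnl_recv_config()
 * ever sees the message.
 */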
static int
nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfula[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t group_num = ntohs(nfmsg->res_id);
	struct nfulnl_instance *inst;
	struct nfulnl_msg_config_cmd *cmd = NULL;
	int ret = 0;

	if (nfula[NFULA_CFG_CMD]) {
		u_int8_t pf = nfmsg->nfgen_family;
		cmd = nla_data(nfula[NFULA_CFG_CMD]);

		/* Commands without queue context */
		switch (cmd->command) {
		case NFULNL_CFG_CMD_PF_BIND:
			return nf_log_bind_pf(pf, &nfulnl_logger);
		case NFULNL_CFG_CMD_PF_UNBIND:
			nf_log_unbind_pf(pf);
			return 0;
		}
	}

	inst = instance_lookup_get(group_num);
	if (inst && inst->peer_pid != NETLINK_CB(skb).pid) {
		ret = -EPERM;
		goto out_put;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFULNL_CFG_CMD_BIND:
			if (inst) {
				ret = -EBUSY;
				goto out_put;
			}

			inst = instance_create(group_num,
					       NETLINK_CB(skb).pid);
			if (IS_ERR(inst)) {
				ret = PTR_ERR(inst);
				goto out;
			}
			break;
		case NFULNL_CFG_CMD_UNBIND:
			if (!inst) {
				ret = -ENODEV;
				goto out;
			}

			instance_destroy(inst);
			goto out_put;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfula[NFULA_CFG_MODE]) {
		struct nfulnl_msg_config_mode *params;
		params = nla_data(nfula[NFULA_CFG_MODE]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_mode(inst, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfula[NFULA_CFG_TIMEOUT]) {
		__be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_timeout(inst, ntohl(timeout));
	}

	if (nfula[NFULA_CFG_NLBUFSIZ]) {
		__be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
	}

	if (nfula[NFULA_CFG_QTHRESH]) {
		__be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_qthresh(inst, ntohl(qthresh));
	}

	if (nfula[NFULA_CFG_FLAGS]) {
		__be16 flags = nla_get_be16(nfula[NFULA_CFG_FLAGS]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_flags(inst, ntohs(flags));
	}

out_put:
	instance_put(inst);
out:
	return ret;
}
[NFULNL_MSG_MAX
] = {
846 [NFULNL_MSG_PACKET
] = { .call
= nfulnl_recv_unsupp
,
847 .attr_count
= NFULA_MAX
, },
848 [NFULNL_MSG_CONFIG
] = { .call
= nfulnl_recv_config
,
849 .attr_count
= NFULA_CFG_MAX
,
850 .policy
= nfula_cfg_policy
},
853 static const struct nfnetlink_subsystem nfulnl_subsys
= {
855 .subsys_id
= NFNL_SUBSYS_ULOG
,
856 .cb_count
= NFULNL_MSG_MAX
,
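/*
 * Dispatch note: nfnetlink routes a message whose type is
 * (NFNL_SUBSYS_ULOG << 8 | msgtype) to this subsystem and indexes
 * nfulnl_cb[] by the low byte, so NFULNL_MSG_PACKET messages sent *by*
 * userspace land in nfulnl_recv_unsupp() and are rejected.
 */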
#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct iter_state *st)
{
	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
	}
	return NULL;
}

static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
{
	h = rcu_dereference_bh(hlist_next_rcu(h));
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
	}
	return h;
}

static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(st);

	if (head)
		while (pos && (head = get_next(st, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu_bh)
{
	rcu_read_lock_bh();
	return get_idx(seq->private, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s->private, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfulnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
			  inst->group_num,
			  inst->peer_pid, inst->qlen,
			  inst->copy_mode, inst->copy_range,
			  inst->flushtimeout, atomic_read(&inst->use));
}

static const struct seq_operations nful_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nful_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nful_seq_ops,
				sizeof(struct iter_state));
}

static const struct file_operations nful_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nful_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */
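/*
 * The resulting /proc/net/netfilter/nfnetlink_log columns are, in order:
 * group_num, peer_pid, qlen, copy_mode, copy_range, flushtimeout and the
 * instance use count (see seq_show() above).
 */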
static int __init nfnetlink_log_init(void)
{
	int i, status = -ENOMEM;

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	/* it's not really all that important to have a random value, so
	 * we can do this from the init function, even if there hasn't
	 * been that much entropy yet */
	get_random_bytes(&hash_init, sizeof(hash_init));

	netlink_register_notifier(&nfulnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfulnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "log: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
	if (status < 0) {
		printk(KERN_ERR "log: failed to register logger\n");
		goto cleanup_subsys;
	}

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_log", 0440,
			 proc_net_netfilter, &nful_file_ops)) {
		status = -ENOMEM;
		goto cleanup_logger;
	}
#endif
	return status;

#ifdef CONFIG_PROC_FS
cleanup_logger:
	nf_log_unregister(&nfulnl_logger);
#endif
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfulnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
	return status;
}
static void __exit nfnetlink_log_fini(void)
{
	nf_log_unregister(&nfulnl_logger);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_log", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfulnl_subsys);
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
}
MODULE_DESCRIPTION("netfilter userspace logging");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);

module_init(nfnetlink_log_init);
module_exit(nfnetlink_log_fini);