/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif
#define NFQNL_QMAX_DEFAULT 1024

#if 0
#define QDEBUG(x, args ...)	printk(KERN_DEBUG "%s(%d):%s(): " x,	  \
					__FILE__, __LINE__, __FUNCTION__, \
					## args)
#else
#define QDEBUG(x, ...)
#endif
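
/* Per-queue state.  Instances live in a small hash table keyed by
 * queue_num (the table is protected by instances_lock); the fields
 * below are protected by the per-instance lock, and instance lifetime
 * is managed by the 'use' refcount. */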
struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	atomic_t use;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	unsigned int id_sequence;		/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};
typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
static DEFINE_RWLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS];
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}
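
/* Look up an instance by queue number; the caller must hold
 * instances_lock (read or write). */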
static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}
static struct nfqnl_instance *
instance_lookup_get(u_int16_t queue_num)
{
	struct nfqnl_instance *inst;

	read_lock_bh(&instances_lock);
	inst = __instance_lookup(queue_num);
	if (inst)
		atomic_inc(&inst->use);
	read_unlock_bh(&instances_lock);

	return inst;
}
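
/* Drop a reference taken by instance_lookup_get() or instance_create();
 * the instance is freed once the last reference is gone. */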
static void
instance_put(struct nfqnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use)) {
		QDEBUG("kfree(inst=%p)\n", inst);
		kfree(inst);
	}
}
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;

	QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

	write_lock_bh(&instances_lock);
	if (__instance_lookup(queue_num)) {
		inst = NULL;
		QDEBUG("aborting, instance already exists\n");
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	hlist_add_head(&inst->hlist,
		       &instance_table[instance_hashfn(queue_num)]);

	write_unlock_bh(&instances_lock);

	QDEBUG("successfully created new instance\n");

	return inst;

out_free:
	kfree(inst);
out_unlock:
	write_unlock_bh(&instances_lock);
	return NULL;
}
static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);
static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
	/* first pull it out of the global list */
	if (lock)
		write_lock_bh(&instances_lock);

	QDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->queue_num);
	hlist_del(&inst->hlist);

	if (lock)
		write_unlock_bh(&instances_lock);

	/* then flush all pending skbs from the queue */
	nfqnl_flush(inst, NULL, 0);

	/* and finally put the refcount */
	instance_put(inst);

	module_put(THIS_MODULE);
}
static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 1);
}
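
/* Append an entry to the instance's packet list; the caller must hold
 * queue->lock. */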
static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}
static int
__nfqnl_set_mode(struct nfqnl_instance *queue,
		 unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nlattr which has 16bit nla_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;
	}

	return status;
}
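
/* Find the queued entry with the given packet id, unlink it from the
 * instance's list and return it; NULL if no entry matches.  Takes
 * queue->lock itself. */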
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue->queue_total--;
	}

	spin_unlock_bh(&queue->lock);

	return entry;
}
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}
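
/* Build the NFQNL_MSG_PACKET netlink message for one queued packet.
 * The resulting skb carries, in order: an nlmsghdr, an nfgenmsg
 * (family, version, queue number), an NFQA_PACKET_HDR attribute
 * (packet id, hw protocol, hook), indev/outdev ifindex attributes
 * (plus the physical devices when bridging), and optional NFQA_MARK,
 * NFQA_HWADDR, NFQA_TIMESTAMP and NFQA_PAYLOAD attributes.  How much
 * payload is copied depends on the instance's copy mode and range. */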
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	__be32 tmp_uint;

	QDEBUG("entered\n");

	size =    NLMSG_ALIGN(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	outdev = entry->outdev;

	spin_lock_bh(&queue->lock);

	switch (queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		data_len = 0;
		break;

	case NFQNL_COPY_PACKET:
		if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
		     entskb->ip_summed == CHECKSUM_COMPLETE) &&
		    (*errp = skb_checksum_help(entskb))) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0
		    || queue->copy_range > entskb->len)
			data_len = entskb->len;
		else
			data_len = queue->copy_range;

		size += nla_total_size(data_len);
		break;

	default:
		*errp = -EINVAL;
		spin_unlock_bh(&queue->lock);
		return NULL;
	}

	entry->id = queue->id_sequence++;

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= entskb->protocol;
	pmsg.hook		= entry->hook;

	NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	indev = entry->indev;
	if (indev) {
		tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(indev->br_port->br->dev->ifindex);
			NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entskb->nf_bridge
			    && entskb->nf_bridge->physindev) {
				tmp_uint = htonl(entskb->nf_bridge->physindev->ifindex);
				NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (outdev) {
		tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
			NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entskb->nf_bridge
			    && entskb->nf_bridge->physoutdev) {
				tmp_uint = htonl(entskb->nf_bridge->physoutdev->ifindex);
				NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (entskb->mark) {
		tmp_uint = htonl(entskb->mark);
		NLA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
	}

	if (indev && entskb->dev) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nla_put_failure:
	if (skb)
		kfree_skb(skb);
	*errp = -EINVAL;
	printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	int status = -EINVAL;
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;

	QDEBUG("entered\n");

	queue = instance_lookup_get(queuenum);
	if (!queue) {
		QDEBUG("no queue instance matching\n");
		return -EINVAL;
	}

	if (queue->copy_mode == NFQNL_COPY_NONE) {
		QDEBUG("mode COPY_NONE, aborting\n");
		status = -EAGAIN;
		goto err_out_put;
	}

	nskb = nfqnl_build_packet_message(queue, entry, &status);
	if (nskb == NULL)
		goto err_out_put;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	instance_put(queue);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue->lock);

err_out_put:
	instance_put(queue);
	return status;
}
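
/* Replace the payload of a queued packet with data supplied in the
 * verdict message, shrinking or growing the skb as needed. */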
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
	int diff;
	int err;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			err = pskb_expand_head(e->skb, 0,
					       diff - skb_tailroom(e->skb),
					       GFP_ATOMIC);
			if (err) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return err;
			}
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue->lock);
	status = __nfqnl_set_mode(queue, mode, range);
	spin_unlock_bh(&queue->lock);

	return status;
}
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}
/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	QDEBUG("entering for ifindex %u\n", ifindex);

	/* this only looks like we have to hold the readlock for a way too long
	 * time, issue_verdict(), nf_reinject(), ... - but we always only
	 * issue NF_DROP, which is processed directly in nf_reinject() */
	read_lock_bh(&instances_lock);

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry(inst, tmp, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	read_unlock_bh(&instances_lock);
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};
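
/* When a netlink peer closes its NETLINK_NETFILTER socket, tear down
 * every queue instance that was bound to that pid. */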
static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		write_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		write_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};
static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
};
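
/* Handle an NFQNL_MSG_VERDICT message: look up the queued entry by
 * packet id, optionally mangle the payload and set the mark, then
 * reinject the packet with the requested verdict. */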
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	int err;

	queue = instance_lookup_get(queue_num);
	if (!queue)
		return -ENODEV;

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_put;
	}

	if (!nfqa[NFQA_VERDICT_HDR]) {
		err = -EINVAL;
		goto err_out_put;
	}

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_put;
	}

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_put;
	}

	if (nfqa[NFQA_PAYLOAD]) {
		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(*(__be32 *)
					 nla_data(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	instance_put(queue);
	return 0;

err_out_put:
	instance_put(queue);
	return err;
}
static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	return -ENOTSUPP;
}
static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};
static const struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};
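
/* Handle an NFQNL_MSG_CONFIG message.  As an illustration (not part of
 * this file), a userspace binding such as libnetfilter_queue typically
 * configures a queue with a sequence along these lines:
 *
 *	NFQNL_CFG_CMD_PF_BIND  (pf = AF_INET)    register handler for IPv4
 *	NFQNL_CFG_CMD_BIND     (queue_num = n)   create instance, bind pid
 *	NFQA_CFG_PARAMS        (mode, range)     e.g. NFQNL_COPY_PACKET, 0xffff
 *
 * after which NFQNL_MSG_PACKET messages start flowing and are answered
 * with NFQNL_MSG_VERDICT. */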
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	int ret = 0;

	QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

	queue = instance_lookup_get(queue_num);
	if (nfqa[NFQA_CFG_CMD]) {
		struct nfqnl_msg_config_cmd *cmd;
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
		QDEBUG("found CFG_CMD\n");

		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto out_put;
			}

			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (!queue)
				return -EINVAL;
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue)
				return -ENODEV;

			if (queue->peer_pid != NETLINK_CB(skb).pid) {
				ret = -EPERM;
				goto out_put;
			}

			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
			QDEBUG("registering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		case NFQNL_CFG_CMD_PF_UNBIND:
			QDEBUG("unregistering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			ret = nf_unregister_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		if (!queue) {
			QDEBUG("no config command, and no instance ENOENT\n");
			ret = -ENODEV;
			goto out_put;
		}

		if (queue->peer_pid != NETLINK_CB(skb).pid) {
			QDEBUG("no config command, and wrong pid\n");
			ret = -EPERM;
			goto out_put;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENOENT;
			goto out_put;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;

		if (!queue) {
			ret = -ENOENT;
			goto out_put;
		}
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

out_put:
	instance_put(queue);
	return ret;
}
static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
};
static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};
#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}
static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}
static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock_bh(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
	read_unlock_bh(&instances_lock);
}
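
/* /proc/net/netfilter/nfnetlink_queue: one line per instance, columns
 * are queue_num, peer_pid, queue_total, copy_mode, copy_range,
 * queue_dropped, queue_user_dropped, id_sequence, use.  Illustrative
 * line (values are made up, not from a real system):
 *
 *	    0   1234     0 2 65535     0     0       42  1
 */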
static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence,
			  atomic_read(&inst->use));
}
static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */
static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_nfqueue;
#endif

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
					 proc_net_netfilter);
	if (!proc_nfqueue)
		goto cleanup_subsys;
	proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}
static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}
MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);