[NETFILTER]: nfnetlink_queue: avoid unnecessary atomic operation
net/netfilter/nfnetlink_queue.c
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

#if 0
#define QDEBUG(x, args ...)	printk(KERN_DEBUG "%s(%d):%s(): " x,	\
					__FILE__, __LINE__, __FUNCTION__,	\
					## args)
#else
#define QDEBUG(x, ...)
#endif
struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	atomic_t use;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	unsigned int id_sequence;		/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_RWLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS];

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}
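/*
 * Note on the data structures above: every active queue lives in one of the
 * INSTANCE_BUCKETS hash chains of instance_table, keyed by queue_num, and
 * instances_lock (a plain rwlock) protects the table.  The 'use' refcount
 * keeps an instance alive while queued packets or netlink requests still
 * reference it.
 */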
static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_lookup_get(u_int16_t queue_num)
{
	struct nfqnl_instance *inst;

	read_lock_bh(&instances_lock);
	inst = __instance_lookup(queue_num);
	if (inst)
		atomic_inc(&inst->use);
	read_unlock_bh(&instances_lock);

	return inst;
}

static void
instance_put(struct nfqnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use)) {
		QDEBUG("kfree(inst=%p)\n", inst);
		kfree(inst);
	}
}
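/*
 * Every access to a queue instance goes through instance_lookup_get(), which
 * takes the read lock and bumps the 'use' refcount, and is paired with an
 * instance_put() that drops it again.  On the packet fast path this means one
 * atomic inc/dec pair per queued packet.
 */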
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;

	QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

	write_lock_bh(&instances_lock);
	if (__instance_lookup(queue_num)) {
		inst = NULL;
		QDEBUG("aborting, instance already exists\n");
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	hlist_add_head(&inst->hlist,
		       &instance_table[instance_hashfn(queue_num)]);

	write_unlock_bh(&instances_lock);

	QDEBUG("successfully created new instance\n");

	return inst;

out_free:
	kfree(inst);
out_unlock:
	write_unlock_bh(&instances_lock);
	return NULL;
}
static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
	/* first pull it out of the global list */
	if (lock)
		write_lock_bh(&instances_lock);

	QDEBUG("removing instance %p (queuenum=%u) from hash\n",
	       inst, inst->queue_num);
	hlist_del(&inst->hlist);

	if (lock)
		write_unlock_bh(&instances_lock);

	/* then flush all pending skbs from the queue */
	nfqnl_flush(inst, NULL, 0);

	/* and finally put the refcount */
	instance_put(inst);

	module_put(THIS_MODULE);
}

static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 1);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}
static inline int
__nfqnl_set_mode(struct nfqnl_instance *queue,
		 unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nlattr which has 16bit nla_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;
	}

	return status;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue->queue_total--;
	}

	spin_unlock_bh(&queue->lock);

	return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}
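/*
 * find_dequeue_entry() and nfqnl_flush() are the only places that remove
 * entries from queue_list; both hold queue->lock, keep queue_total in sync,
 * and either hand the entry back to the caller (verdict path) or reinject it
 * with NF_DROP (flush path).
 */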
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	__be32 tmp_uint;

	QDEBUG("entered\n");

	size =    NLMSG_ALIGN(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	outdev = entry->outdev;

	spin_lock_bh(&queue->lock);

	switch (queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		data_len = 0;
		break;

	case NFQNL_COPY_PACKET:
		if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
		     entskb->ip_summed == CHECKSUM_COMPLETE) &&
		    (*errp = skb_checksum_help(entskb))) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0
		    || queue->copy_range > entskb->len)
			data_len = entskb->len;
		else
			data_len = queue->copy_range;

		size += nla_total_size(data_len);
		break;

	default:
		*errp = -EINVAL;
		spin_unlock_bh(&queue->lock);
		return NULL;
	}

	entry->id = queue->id_sequence++;

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= entskb->protocol;
	pmsg.hook		= entry->hook;

	NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	indev = entry->indev;
	if (indev) {
		tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(indev->br_port->br->dev->ifindex);
			NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entskb->nf_bridge
			    && entskb->nf_bridge->physindev) {
				tmp_uint = htonl(entskb->nf_bridge->physindev->ifindex);
				NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (outdev) {
		tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
			NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entskb->nf_bridge
			    && entskb->nf_bridge->physoutdev) {
				tmp_uint = htonl(entskb->nf_bridge->physoutdev->ifindex);
				NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (entskb->mark) {
		tmp_uint = htonl(entskb->mark);
		NLA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
	}

	if (indev && entskb->dev) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nla_put_failure:
	if (skb)
		kfree_skb(skb);
	*errp = -EINVAL;
	if (net_ratelimit())
		printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}
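/*
 * The skb built above carries a single NFQNL_MSG_PACKET netlink message: an
 * nfgenmsg header (family, version, queue number as res_id) followed by
 * NFQA_PACKET_HDR, the optional ifindex, mark, hardware-address and timestamp
 * attributes, and finally NFQA_PAYLOAD when the queue is in
 * NFQNL_COPY_PACKET mode.  data_len is bounded by copy_range, which
 * __nfqnl_set_mode() caps at 0xffff to fit the 16 bit nla_len field.
 */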
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	int status = -EINVAL;
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;

	QDEBUG("entered\n");

	queue = instance_lookup_get(queuenum);
	if (!queue) {
		QDEBUG("no queue instance matching\n");
		return -EINVAL;
	}

	if (queue->copy_mode == NFQNL_COPY_NONE) {
		QDEBUG("mode COPY_NONE, aborting\n");
		status = -EAGAIN;
		goto err_out_put;
	}

	nskb = nfqnl_build_packet_message(queue, entry, &status);
	if (nskb == NULL)
		goto err_out_put;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	instance_put(queue);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue->lock);

err_out_put:
	instance_put(queue);
	return status;
}
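/*
 * nfqnl_enqueue_packet() is the nf_queue hook entry point: it looks up the
 * target queue, builds the netlink message, and only links the entry into
 * queue_list after nfnetlink_unicast() has accepted the skb, so a failed
 * send never leaves a dangling entry.  Dropped packets are accounted in
 * queue_dropped (queue full) or queue_user_dropped (unicast failed).
 */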
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
	int diff;
	int err;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			err = pskb_expand_head(e->skb, 0,
					       diff - skb_tailroom(e->skb),
					       GFP_ATOMIC);
			if (err) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return err;
			}
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue->lock);
	status = __nfqnl_set_mode(queue, mode, range);
	spin_unlock_bh(&queue->lock);

	return status;
}
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	QDEBUG("entering for ifindex %u\n", ifindex);

	/* this only looks like we have to hold the readlock for a way too long
	 * time, issue_verdict(),  nf_reinject(), ... - but we always only
	 * issue NF_DROP, which is processed directly in nf_reinject() */
	read_lock_bh(&instances_lock);

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry(inst, tmp, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	read_unlock_bh(&instances_lock);
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		write_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		write_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};
static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
};

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	int err;

	queue = instance_lookup_get(queue_num);
	if (!queue)
		return -ENODEV;

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_put;
	}

	if (!nfqa[NFQA_VERDICT_HDR]) {
		err = -EINVAL;
		goto err_out_put;
	}

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_put;
	}

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_put;
	}

	if (nfqa[NFQA_PAYLOAD]) {
		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(*(__be32 *)
					 nla_data(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	instance_put(queue);
	return 0;

err_out_put:
	instance_put(queue);
	return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};
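/*
 * nfqh is the nf_queue_handler that nfqnl_recv_config() registers per
 * protocol family on NFQNL_CFG_CMD_PF_BIND; once registered, packets queued
 * for that family (e.g. by the iptables NFQUEUE target) are handed to
 * nfqnl_enqueue_packet() together with the target queue number.
 */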
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	int ret = 0;

	QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

	queue = instance_lookup_get(queue_num);
	if (nfqa[NFQA_CFG_CMD]) {
		struct nfqnl_msg_config_cmd *cmd;
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
		QDEBUG("found CFG_CMD\n");

		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue)
				return -EBUSY;

			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (!queue)
				return -EINVAL;
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue)
				return -ENODEV;

			if (queue->peer_pid != NETLINK_CB(skb).pid) {
				ret = -EPERM;
				goto out_put;
			}

			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
			QDEBUG("registering queue handler for pf=%u\n",
			       ntohs(cmd->pf));
			ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		case NFQNL_CFG_CMD_PF_UNBIND:
			QDEBUG("unregistering queue handler for pf=%u\n",
			       ntohs(cmd->pf));
			ret = nf_unregister_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		if (!queue) {
			QDEBUG("no config command, and no instance ENOENT\n");
			ret = -ENOENT;
			goto out_put;
		}

		if (queue->peer_pid != NETLINK_CB(skb).pid) {
			QDEBUG("no config command, and wrong pid\n");
			ret = -EPERM;
			goto out_put;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENOENT;
			goto out_put;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

out_put:
	instance_put(queue);
	return ret;
}
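/*
 * For reference, a userspace consumer drives the config and verdict messages
 * handled above roughly like this (illustrative sketch using
 * libnetfilter_queue; not part of this file):
 *
 *	h  = nfq_open();                             // NETLINK_NETFILTER socket
 *	nfq_bind_pf(h, AF_INET);                     // NFQNL_CFG_CMD_PF_BIND
 *	qh = nfq_create_queue(h, 0, cb, NULL);       // NFQNL_CFG_CMD_BIND, queue 0
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff); // NFQA_CFG_PARAMS
 *	...
 *	nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL); // NFQNL_MSG_VERDICT
 */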
static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};
#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock_bh(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
	read_unlock_bh(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence,
			  atomic_read(&inst->use));
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */
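/*
 * The proc interface above exposes one line per queue instance in
 * /proc/net/netfilter/nfnetlink_queue; the columns printed by seq_show()
 * are: queue number, peer pid, queued packets, copy mode, copy range,
 * kernel-side drops, userspace-side drops, id sequence and refcount.
 */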
static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_nfqueue;
#endif

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
					 proc_net_netfilter);
	if (!proc_nfqueue)
		goto cleanup_subsys;
	proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);