[NETFILTER]: nf_queue: move queueing related functions/struct to separate header
net/netfilter/nfnetlink_queue.c
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

#if 0
#define QDEBUG(x, args ...)     printk(KERN_DEBUG "%s(%d):%s(): " x,      \
                                       __FILE__, __LINE__, __FUNCTION__, \
                                       ## args)
#else
#define QDEBUG(x, ...)
#endif

struct nfqnl_queue_entry {
        struct list_head list;
        struct nf_info *info;
        struct sk_buff *skb;
        unsigned int id;
};

struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        atomic_t use;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_total;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        atomic_t id_sequence;                   /* 'sequence' of pkt ids */

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;

        spinlock_t lock;

        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nfqnl_queue_entry *, unsigned long);

static DEFINE_RWLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS];

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

static struct nfqnl_instance *
instance_lookup_get(u_int16_t queue_num)
{
        struct nfqnl_instance *inst;

        read_lock_bh(&instances_lock);
        inst = __instance_lookup(queue_num);
        if (inst)
                atomic_inc(&inst->use);
        read_unlock_bh(&instances_lock);

        return inst;
}

static void
instance_put(struct nfqnl_instance *inst)
{
        if (inst && atomic_dec_and_test(&inst->use)) {
                QDEBUG("kfree(inst=%p)\n", inst);
                kfree(inst);
        }
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;

        QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

        write_lock_bh(&instances_lock);
        if (__instance_lookup(queue_num)) {
                inst = NULL;
                QDEBUG("aborting, instance already exists\n");
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst)
                goto out_unlock;

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        atomic_set(&inst->id_sequence, 0);
        /* needs to be two, since we _put() after creation */
        atomic_set(&inst->use, 2);
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE))
                goto out_free;

        hlist_add_head(&inst->hlist,
                       &instance_table[instance_hashfn(queue_num)]);

        write_unlock_bh(&instances_lock);

        QDEBUG("successfully created new instance\n");

        return inst;

out_free:
        kfree(inst);
out_unlock:
        write_unlock_bh(&instances_lock);
        return NULL;
}

static void nfqnl_flush(struct nfqnl_instance *queue, int verdict);

static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
        /* first pull it out of the global list */
        if (lock)
                write_lock_bh(&instances_lock);

        QDEBUG("removing instance %p (queuenum=%u) from hash\n",
               inst, inst->queue_num);
        hlist_del(&inst->hlist);

        if (lock)
                write_unlock_bh(&instances_lock);

        /* then flush all pending skbs from the queue */
        nfqnl_flush(inst, NF_DROP);

        /* and finally put the refcount */
        instance_put(inst);

        module_put(THIS_MODULE);
}

static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
        _instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
        _instance_destroy2(inst, 1);
}

static void
issue_verdict(struct nfqnl_queue_entry *entry, int verdict)
{
        QDEBUG("entering for entry %p, verdict %u\n", entry, verdict);

        /* The TCP input path (and probably other bits) assumes it is called
         * from softirq context, not from a syscall as issue_verdict() is.
         * The TCP input path can e.g. deadlock with locks taken from the
         * timer softirq, so emulate softirq context with local_bh_disable() */

        local_bh_disable();
        nf_reinject(entry->skb, entry->info, verdict);
        local_bh_enable();

        kfree(entry);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue,
                struct nfqnl_queue_entry *entry)
{
        list_add(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct nfqnl_queue_entry *
__find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
             unsigned long data)
{
        struct list_head *p;

        list_for_each_prev(p, &queue->queue_list) {
                struct nfqnl_queue_entry *entry = (struct nfqnl_queue_entry *)p;

                if (!cmpfn || cmpfn(entry, data))
                        return entry;
        }
        return NULL;
}

static inline void
__dequeue_entry(struct nfqnl_instance *q, struct nfqnl_queue_entry *entry)
{
        list_del(&entry->list);
        q->queue_total--;
}

static inline struct nfqnl_queue_entry *
__find_dequeue_entry(struct nfqnl_instance *queue,
                     nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nfqnl_queue_entry *entry;

        entry = __find_entry(queue, cmpfn, data);
        if (entry == NULL)
                return NULL;

        __dequeue_entry(queue, entry);
        return entry;
}

static inline void
__nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
        struct nfqnl_queue_entry *entry;

        while ((entry = __find_dequeue_entry(queue, NULL, 0)))
                issue_verdict(entry, verdict);
}

static inline int
__nfqnl_set_mode(struct nfqnl_instance *queue,
                 unsigned char mode, unsigned int range)
{
        int status = 0;

        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                /* we're using struct nlattr which has 16bit nla_len */
                if (range > 0xffff)
                        queue->copy_range = 0xffff;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;
        }
        return status;
}

static struct nfqnl_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue,
                   nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nfqnl_queue_entry *entry;

        spin_lock_bh(&queue->lock);
        entry = __find_dequeue_entry(queue, cmpfn, data);
        spin_unlock_bh(&queue->lock);

        return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
        spin_lock_bh(&queue->lock);
        __nfqnl_flush(queue, verdict);
        spin_unlock_bh(&queue->lock);
}

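/* Build the NFQNL_MSG_PACKET netlink message for one queued entry: the
 * metadata attributes (packet header, ifindexes, mark, hardware address,
 * timestamp) plus up to copy_range bytes of payload, depending on the
 * instance's copy mode. */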
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nfqnl_queue_entry *entry, int *errp)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nfqnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nf_info *entinf = entry->info;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;
        __be32 tmp_uint;

        QDEBUG("entered\n");

        size =    NLMSG_ALIGN(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entinf->outdev;

        spin_lock_bh(&queue->lock);

        switch (queue->copy_mode) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                data_len = 0;
                break;

        case NFQNL_COPY_PACKET:
                if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
                     entskb->ip_summed == CHECKSUM_COMPLETE) &&
                    (*errp = skb_checksum_help(entskb))) {
                        spin_unlock_bh(&queue->lock);
                        return NULL;
                }
                if (queue->copy_range == 0
                    || queue->copy_range > entskb->len)
                        data_len = entskb->len;
                else
                        data_len = queue->copy_range;

                size += nla_total_size(data_len);
                break;

        default:
                *errp = -EINVAL;
                spin_unlock_bh(&queue->lock);
                return NULL;
        }

        spin_unlock_bh(&queue->lock);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entinf->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        pmsg.packet_id          = htonl(entry->id);
        pmsg.hw_protocol        = entskb->protocol;
        pmsg.hook               = entinf->hook;

        NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

        indev = entinf->indev;
        if (indev) {
                tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
#else
                if (entinf->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        /* this is the bridge group "brX" */
                        tmp_uint = htonl(indev->br_port->br->dev->ifindex);
                        NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
                                &tmp_uint);
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        if (entskb->nf_bridge
                            && entskb->nf_bridge->physindev) {
                                tmp_uint = htonl(entskb->nf_bridge->physindev->ifindex);
                                NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
                                        sizeof(tmp_uint), &tmp_uint);
                        }
                }
#endif
        }

        if (outdev) {
                tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
#else
                if (entinf->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        /* this is the bridge group "brX" */
                        tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
                        NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
                                &tmp_uint);
                        if (entskb->nf_bridge
                            && entskb->nf_bridge->physoutdev) {
                                tmp_uint = htonl(entskb->nf_bridge->physoutdev->ifindex);
                                NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                        sizeof(tmp_uint), &tmp_uint);
                        }
                }
#endif
        }

        if (entskb->mark) {
                tmp_uint = htonl(entskb->mark);
                NLA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
        }

        if (indev && entskb->dev) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }

        if (data_len) {
                struct nlattr *nla;
                int size = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = size;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nla_put_failure:
        if (skb)
                kfree_skb(skb);
        *errp = -EINVAL;
        if (net_ratelimit())
                printk(KERN_ERR "nf_queue: error creating packet message\n");
        return NULL;
}

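/* Queue handler callback invoked by the nf_queue core for each packet a hook
 * returned NF_QUEUE for: build a netlink message, unicast it to the peer
 * process and keep the entry on the instance's list until a verdict arrives. */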
static int
nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
                     unsigned int queuenum)
{
        int status = -EINVAL;
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        struct nfqnl_queue_entry *entry;

        QDEBUG("entered\n");

        queue = instance_lookup_get(queuenum);
        if (!queue) {
                QDEBUG("no queue instance matching\n");
                return -EINVAL;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                QDEBUG("mode COPY_NONE, aborting\n");
                status = -EAGAIN;
                goto err_out_put;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL) {
                if (net_ratelimit())
                        printk(KERN_ERR
                               "nf_queue: OOM in nfqnl_enqueue_packet()\n");
                status = -ENOMEM;
                goto err_out_put;
        }

        entry->info = info;
        entry->skb = skb;
        entry->id = atomic_inc_return(&queue->id_sequence);

        nskb = nfqnl_build_packet_message(queue, entry, &status);
        if (nskb == NULL)
                goto err_out_free;

        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid)
                goto err_out_free_nskb;

        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                status = -ENOSPC;
                if (net_ratelimit())
                        printk(KERN_WARNING "nf_queue: full at %d entries, "
                               "dropping packet(s). Dropped: %d\n",
                               queue->queue_total, queue->queue_dropped);
                goto err_out_free_nskb;
        }

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
        if (status < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        instance_put(queue);
        return status;

err_out_free_nskb:
        kfree_skb(nskb);

err_out_unlock:
        spin_unlock_bh(&queue->lock);

err_out_free:
        kfree(entry);
err_out_put:
        instance_put(queue);
        return status;
}

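/* Replace the queued skb's payload with the data userspace sent back in
 * NFQA_PAYLOAD, trimming or growing the skb as needed. */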
static int
nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
{
        int diff;
        int err;

        diff = data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        err = pskb_expand_head(e->skb, 0,
                                               diff - skb_tailroom(e->skb),
                                               GFP_ATOMIC);
                        if (err) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                       "in mangle, dropping packet\n");
                                return err;
                        }
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

static inline int
id_cmp(struct nfqnl_queue_entry *e, unsigned long id)
{
        return (id == e->id);
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status;

        spin_lock_bh(&queue->lock);
        status = __nfqnl_set_mode(queue, mode, range);
        spin_unlock_bh(&queue->lock);

        return status;
}

static int
dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
{
        struct nf_info *entinf = entry->info;

        if (entinf->indev)
                if (entinf->indev->ifindex == ifindex)
                        return 1;
        if (entinf->outdev)
                if (entinf->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
        int i;

        QDEBUG("entering for ifindex %u\n", ifindex);

        /* It only looks like we hold the read lock for far too long here
         * (across issue_verdict(), nf_reinject(), ...) - we only ever issue
         * NF_DROP, which is processed directly in nf_reinject() */
        read_lock_bh(&instances_lock);

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];

                hlist_for_each_entry(inst, tmp, head, hlist) {
                        struct nfqnl_queue_entry *entry;
                        while ((entry = find_dequeue_entry(inst, dev_cmp,
                                                           ifindex)) != NULL)
                                issue_verdict(entry, NF_DROP);
                }
        }

        read_unlock_bh(&instances_lock);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (dev->nd_net != &init_net)
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE &&
            n->protocol == NETLINK_NETFILTER && n->pid) {
                int i;

                /* destroy all instances for this pid */
                write_lock_bh(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                write_unlock_bh(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
};

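/* NFQNL_MSG_VERDICT handler: look up the queued entry by packet id, apply an
 * optional payload replacement and mark, then reinject with the verdict. */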
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nfqnl_queue_entry *entry;
        int err;

        queue = instance_lookup_get(queue_num);
        if (!queue)
                return -ENODEV;

        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                err = -EPERM;
                goto err_out_put;
        }

        if (!nfqa[NFQA_VERDICT_HDR]) {
                err = -EINVAL;
                goto err_out_put;
        }

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict);

        if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
                err = -EINVAL;
                goto err_out_put;
        }

        entry = find_dequeue_entry(queue, id_cmp, ntohl(vhdr->id));
        if (entry == NULL) {
                err = -ENOENT;
                goto err_out_put;
        }

        if (nfqa[NFQA_PAYLOAD]) {
                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
                        verdict = NF_DROP;
        }

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(*(__be32 *)
                                         nla_data(nfqa[NFQA_MARK]));

        issue_verdict(entry, verdict);
        instance_put(queue);
        return 0;

err_out_put:
        instance_put(queue);
        return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};

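/* NFQNL_MSG_CONFIG handler: bind/unbind queue instances and per-protocol
 * queue handlers, and update copy mode and maximum queue length. */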
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        int ret = 0;

        QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

        queue = instance_lookup_get(queue_num);
        if (nfqa[NFQA_CFG_CMD]) {
                struct nfqnl_msg_config_cmd *cmd;
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);
                QDEBUG("found CFG_CMD\n");

                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue)
                                return -EBUSY;

                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (!queue)
                                return -EINVAL;
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue)
                                return -ENODEV;

                        if (queue->peer_pid != NETLINK_CB(skb).pid) {
                                ret = -EPERM;
                                goto out_put;
                        }

                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                        QDEBUG("registering queue handler for pf=%u\n",
                               ntohs(cmd->pf));
                        ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
                        break;
                case NFQNL_CFG_CMD_PF_UNBIND:
                        QDEBUG("unregistering queue handler for pf=%u\n",
                               ntohs(cmd->pf));
                        ret = nf_unregister_queue_handler(ntohs(cmd->pf), &nfqh);
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
        } else {
                if (!queue) {
                        QDEBUG("no config command, and no instance ENOENT\n");
                        ret = -ENOENT;
                        goto out_put;
                }

                if (queue->peer_pid != NETLINK_CB(skb).pid) {
                        QDEBUG("no config command, and wrong pid\n");
                        ret = -EPERM;
                        goto out_put;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENOENT;
                        goto out_put;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                               ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

out_put:
        instance_put(queue);
        return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

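/* /proc/net/netfilter/nfnetlink_queue: one line per instance showing peer
 * pid, queue fill level, copy settings and drop counters. */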
#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock_bh(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
        read_unlock_bh(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          atomic_read(&inst->id_sequence),
                          atomic_read(&inst->use));
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc_nfqueue;
#endif

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
                                         proc_net_netfilter);
        if (!proc_nfqueue)
                goto cleanup_subsys;
        proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);