net/netfilter/nfnetlink_queue.c (Linux 2.6.14.3)

/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

#if 0
#define QDEBUG(x, args ...)	printk(KERN_DEBUG "%s(%d):%s(): " x,	   \
					__FILE__, __LINE__, __FUNCTION__,  \
					## args)
#else
#define QDEBUG(x, ...)
#endif

struct nfqnl_queue_entry {
	struct list_head list;
	struct nf_info *info;
	struct sk_buff *skb;
	unsigned int id;
};

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	atomic_t use;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	atomic_t id_sequence;			/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

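/*
 * Locking and lifetime of the structures above: instances_lock guards
 * the global instance hash table; each nfqnl_instance serializes access
 * to its queue_list and counters with its own per-instance spinlock and
 * is reference-counted through 'use' (instance_lookup_get()/instance_put()).
 */
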
typedef int (*nfqnl_cmpfn)(struct nfqnl_queue_entry *, unsigned long);

static DEFINE_RWLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS];

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_lookup_get(u_int16_t queue_num)
{
	struct nfqnl_instance *inst;

	read_lock_bh(&instances_lock);
	inst = __instance_lookup(queue_num);
	if (inst)
		atomic_inc(&inst->use);
	read_unlock_bh(&instances_lock);

	return inst;
}

static void
instance_put(struct nfqnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use)) {
		QDEBUG("kfree(inst=%p)\n", inst);
		kfree(inst);
	}
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;

	QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

	write_lock_bh(&instances_lock);
	if (__instance_lookup(queue_num)) {
		inst = NULL;
		QDEBUG("aborting, instance already exists\n");
		goto out_unlock;
	}

	inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	memset(inst, 0, sizeof(*inst));
	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	atomic_set(&inst->id_sequence, 0);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);
	inst->lock = SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	hlist_add_head(&inst->hlist,
		       &instance_table[instance_hashfn(queue_num)]);

	write_unlock_bh(&instances_lock);

	QDEBUG("successfully created new instance\n");

	return inst;

out_free:
	kfree(inst);
out_unlock:
	write_unlock_bh(&instances_lock);
	return NULL;
}

static void nfqnl_flush(struct nfqnl_instance *queue, int verdict);

static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
	/* first pull it out of the global list */
	if (lock)
		write_lock_bh(&instances_lock);

	QDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->queue_num);
	hlist_del(&inst->hlist);

	if (lock)
		write_unlock_bh(&instances_lock);

	/* then flush all pending skbs from the queue */
	nfqnl_flush(inst, NF_DROP);

	/* and finally put the refcount */
	instance_put(inst);

	module_put(THIS_MODULE);
}

static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 1);
}

static void
issue_verdict(struct nfqnl_queue_entry *entry, int verdict)
{
	QDEBUG("entering for entry %p, verdict %u\n", entry, verdict);

	/* The TCP input path (and probably other bits) assumes it is
	 * called from softirq context, not from a syscall as issue_verdict
	 * is.  It can deadlock with locks taken from timer softirq, for
	 * example, so we emulate softirq context by local_bh_disable() */
	local_bh_disable();
	nf_reinject(entry->skb, entry->info, verdict);
	local_bh_enable();

	kfree(entry);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue,
		struct nfqnl_queue_entry *entry)
{
	list_add(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct nfqnl_queue_entry *
__find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
	     unsigned long data)
{
	struct list_head *p;

	list_for_each_prev(p, &queue->queue_list) {
		struct nfqnl_queue_entry *entry = (struct nfqnl_queue_entry *)p;

		if (!cmpfn || cmpfn(entry, data))
			return entry;
	}
	return NULL;
}

static inline void
__dequeue_entry(struct nfqnl_instance *q, struct nfqnl_queue_entry *entry)
{
	list_del(&entry->list);
	q->queue_total--;
}

static inline struct nfqnl_queue_entry *
__find_dequeue_entry(struct nfqnl_instance *queue,
		     nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nfqnl_queue_entry *entry;

	entry = __find_entry(queue, cmpfn, data);
	if (entry == NULL)
		return NULL;

	__dequeue_entry(queue, entry);
	return entry;
}

static inline void
__nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
	struct nfqnl_queue_entry *entry;

	while ((entry = __find_dequeue_entry(queue, NULL, 0)))
		issue_verdict(entry, verdict);
}

static inline int
__nfqnl_set_mode(struct nfqnl_instance *queue,
		 unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nfattr which has 16bit nfa_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;
	}

	return status;
}

static struct nfqnl_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue,
		   nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nfqnl_queue_entry *entry;

	spin_lock_bh(&queue->lock);
	entry = __find_dequeue_entry(queue, cmpfn, data);
	spin_unlock_bh(&queue->lock);

	return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
	spin_lock_bh(&queue->lock);
	__nfqnl_flush(queue, verdict);
	spin_unlock_bh(&queue->lock);
}

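/*
 * Build the NFQNL_MSG_PACKET netlink message that is sent to the bound
 * userspace listener: packet header, input/output interface indexes,
 * mark, hardware address, timestamp and, in NFQNL_COPY_PACKET mode, up
 * to copy_range bytes of the payload itself.
 */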
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nfqnl_queue_entry *entry, int *errp)
{
	unsigned char *old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	unsigned int tmp_uint;

	QDEBUG("entered\n");

	/* all macros expand to constant values at compile time */
	size =    NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_hdr))
		+ NLMSG_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NLMSG_SPACE(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ NLMSG_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NLMSG_SPACE(sizeof(u_int32_t))	/* ifindex */
#endif
		+ NLMSG_SPACE(sizeof(u_int32_t))	/* mark */
		+ NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_hw))
		+ NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_timestamp));

	spin_lock_bh(&queue->lock);

	switch (queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		data_len = 0;
		break;

	case NFQNL_COPY_PACKET:
		if (entry->skb->ip_summed == CHECKSUM_HW &&
		    (*errp = skb_checksum_help(entry->skb,
					       entry->info->outdev == NULL))) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0
		    || queue->copy_range > entry->skb->len)
			data_len = entry->skb->len;
		else
			data_len = queue->copy_range;

		size += NLMSG_SPACE(data_len);
		break;

	default:
		*errp = -EINVAL;
		spin_unlock_bh(&queue->lock);
		return NULL;
	}

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->info->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= htons(entry->skb->protocol);
	pmsg.hook		= entry->info->hook;

	NFA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (entry->info->indev) {
		tmp_uint = htonl(entry->info->indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entry->info->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(entry->info->indev->br_port->br->dev->ifindex);
			NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entry->skb->nf_bridge
			    && entry->skb->nf_bridge->physindev) {
				tmp_uint = htonl(entry->skb->nf_bridge->physindev->ifindex);
				NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (entry->info->outdev) {
		tmp_uint = htonl(entry->info->outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entry->info->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(entry->info->outdev->br_port->br->dev->ifindex);
			NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entry->skb->nf_bridge
			    && entry->skb->nf_bridge->physoutdev) {
				tmp_uint = htonl(entry->skb->nf_bridge->physoutdev->ifindex);
				NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (entry->skb->nfmark) {
		tmp_uint = htonl(entry->skb->nfmark);
		NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
	}

	if (entry->info->indev && entry->skb->dev
	    && entry->skb->dev->hard_header_parse) {
		struct nfqnl_msg_packet_hw phw;

		phw.hw_addrlen =
			entry->skb->dev->hard_header_parse(entry->skb,
							   phw.hw_addr);
		phw.hw_addrlen = htons(phw.hw_addrlen);
		NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
	}

	if (entry->skb->tstamp.off_sec) {
		struct nfqnl_msg_packet_timestamp ts;

		ts.sec = cpu_to_be64(entry->skb->tstamp.off_sec);
		ts.usec = cpu_to_be64(entry->skb->tstamp.off_usec);

		NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nfattr *nfa;
		int size = NFA_LENGTH(data_len);

		if (skb_tailroom(skb) < (int)NFA_SPACE(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size));
		nfa->nfa_type = NFQA_PAYLOAD;
		nfa->nfa_len = size;

		if (skb_copy_bits(entry->skb, 0, NFA_DATA(nfa), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nfattr_failure:
	if (skb)
		kfree_skb(skb);
	*errp = -EINVAL;
	if (net_ratelimit())
		printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}

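/*
 * nf_queue handler callback: allocate a queue entry for the packet,
 * build the netlink message and unicast it to the process bound to this
 * queue; the entry stays on queue_list until a verdict arrives.
 */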
static int
nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
		     unsigned int queuenum, void *data)
{
	int status = -EINVAL;
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;
	struct nfqnl_queue_entry *entry;

	QDEBUG("entered\n");

	queue = instance_lookup_get(queuenum);
	if (!queue) {
		QDEBUG("no queue instance matching\n");
		return -EINVAL;
	}

	if (queue->copy_mode == NFQNL_COPY_NONE) {
		QDEBUG("mode COPY_NONE, aborting\n");
		status = -EAGAIN;
		goto err_out_put;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL) {
		if (net_ratelimit())
			printk(KERN_ERR
				"nf_queue: OOM in nfqnl_enqueue_packet()\n");
		status = -ENOMEM;
		goto err_out_put;
	}

	entry->info = info;
	entry->skb = skb;
	entry->id = atomic_inc_return(&queue->id_sequence);

	nskb = nfqnl_build_packet_message(queue, entry, &status);
	if (nskb == NULL)
		goto err_out_free;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "ip_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	instance_put(queue);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue->lock);

err_out_free:
	kfree(entry);
err_out_put:
	instance_put(queue);
	return status;
}

static int
nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
{
	int diff;

	diff = data_len - e->skb->len;
	if (diff < 0)
		skb_trim(e->skb, data_len);
	else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			struct sk_buff *newskb;

			newskb = skb_copy_expand(e->skb,
						 skb_headroom(e->skb),
						 diff,
						 GFP_ATOMIC);
			if (newskb == NULL) {
				printk(KERN_WARNING "ip_queue: OOM "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			if (e->skb->sk)
				skb_set_owner_w(newskb, e->skb->sk);
			kfree_skb(e->skb);
			e->skb = newskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(&e->skb, data_len))
		return -ENOMEM;
	memcpy(e->skb->data, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static inline int
id_cmp(struct nfqnl_queue_entry *e, unsigned long id)
{
	return (id == e->id);
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue->lock);
	status = __nfqnl_set_mode(queue, mode, range);
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
{
	if (entry->info->indev)
		if (entry->info->indev->ifindex == ifindex)
			return 1;

	if (entry->info->outdev)
		if (entry->info->outdev->ifindex == ifindex)
			return 1;

	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	QDEBUG("entering for ifindex %u\n", ifindex);

	/* this only looks like we have to hold the readlock for a way too long
	 * time, issue_verdict(), nf_reinject(), ... - but we always only
	 * issue NF_DROP, which is processed directly in nf_reinject() */
	read_lock_bh(&instances_lock);

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry(inst, tmp, head, hlist) {
			struct nfqnl_queue_entry *entry;
			while ((entry = find_dequeue_entry(inst, dev_cmp,
							   ifindex)) != NULL)
				issue_verdict(entry, NF_DROP);
		}
	}

	read_unlock_bh(&instances_lock);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		write_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if (n->pid == inst->peer_pid)
					__instance_destroy(inst);
			}
		}
		write_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const int nfqa_verdict_min[NFQA_MAX] = {
	[NFQA_VERDICT_HDR-1]	= sizeof(struct nfqnl_msg_verdict_hdr),
	[NFQA_MARK-1]		= sizeof(u_int32_t),
	[NFQA_PAYLOAD-1]	= 0,
};

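/*
 * Handle an NFQNL_MSG_VERDICT message from userspace: check that the
 * sender owns the queue, find the queued entry by packet id, optionally
 * mangle the payload, then reinject the packet with the given verdict.
 */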
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nfqnl_queue_entry *entry;
	int err;

	if (nfattr_bad_size(nfqa, NFQA_MAX, nfqa_verdict_min)) {
		QDEBUG("bad attribute size\n");
		return -EINVAL;
	}

	queue = instance_lookup_get(queue_num);
	if (!queue)
		return -ENODEV;

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_put;
	}

	if (!nfqa[NFQA_VERDICT_HDR-1]) {
		err = -EINVAL;
		goto err_out_put;
	}

	vhdr = NFA_DATA(nfqa[NFQA_VERDICT_HDR-1]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_put;
	}

	entry = find_dequeue_entry(queue, id_cmp, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_put;
	}

	if (nfqa[NFQA_PAYLOAD-1]) {
		if (nfqnl_mangle(NFA_DATA(nfqa[NFQA_PAYLOAD-1]),
				 NFA_PAYLOAD(nfqa[NFQA_PAYLOAD-1]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK-1])
		skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1]));

	issue_verdict(entry, verdict);
	instance_put(queue);
	return 0;

err_out_put:
	instance_put(queue);
	return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
	return -ENOTSUPP;
}

static const int nfqa_cfg_min[NFQA_CFG_MAX] = {
	[NFQA_CFG_CMD-1]	= sizeof(struct nfqnl_msg_config_cmd),
	[NFQA_CFG_PARAMS-1]	= sizeof(struct nfqnl_msg_config_params),
};

static struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

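/*
 * Handle an NFQNL_MSG_CONFIG message: bind or unbind a queue instance
 * to/from the sending socket, register or unregister the protocol
 * family queue handler, and update the copy mode/range parameters.
 */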
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	int ret = 0;

	QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

	if (nfattr_bad_size(nfqa, NFQA_CFG_MAX, nfqa_cfg_min)) {
		QDEBUG("bad attribute size\n");
		return -EINVAL;
	}

	queue = instance_lookup_get(queue_num);
	if (nfqa[NFQA_CFG_CMD-1]) {
		struct nfqnl_msg_config_cmd *cmd;
		cmd = NFA_DATA(nfqa[NFQA_CFG_CMD-1]);
		QDEBUG("found CFG_CMD\n");

		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue)
				return -EBUSY;

			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (!queue)
				return -EINVAL;
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue)
				return -ENODEV;

			if (queue->peer_pid != NETLINK_CB(skb).pid) {
				ret = -EPERM;
				goto out_put;
			}

			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
			QDEBUG("registering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		case NFQNL_CFG_CMD_PF_UNBIND:
			QDEBUG("unregistering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			/* This is a bug and a feature.  We can unregister
			 * other handlers(!) */
			ret = nf_unregister_queue_handler(ntohs(cmd->pf));
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		if (!queue) {
			QDEBUG("no config command, and no instance ENOENT\n");
			ret = -ENOENT;
			goto out_put;
		}

		if (queue->peer_pid != NETLINK_CB(skb).pid) {
			QDEBUG("no config command, and wrong pid\n");
			ret = -EPERM;
			goto out_put;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS-1]) {
		struct nfqnl_msg_config_params *params;
		params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);

		nfqnl_set_mode(queue, params->copy_mode,
				ntohl(params->copy_range));
	}

out_put:
	instance_put(queue);
	return ret;
}

static struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX,
				    .cap_required = CAP_NET_ADMIN },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .cap_required = CAP_NET_ADMIN },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .cap_required = CAP_NET_ADMIN },
};

static struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock_bh(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
	read_unlock_bh(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  atomic_read(&inst->id_sequence),
			  atomic_read(&inst->use));
}

static struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct iter_state *is;
	int ret;

	is = kmalloc(sizeof(*is), GFP_KERNEL);
	if (!is)
		return -ENOMEM;
	memset(is, 0, sizeof(*is));
	ret = seq_open(file, &nfqnl_seq_ops);
	if (ret < 0)
		goto out_free;
	seq = file->private_data;
	seq->private = is;
	return ret;
out_free:
	kfree(is);
	return ret;
}

static struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */

static int
init_or_cleanup(int init)
{
	int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_nfqueue;
#endif

	if (!init)
		goto cleanup;

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
					 proc_net_netfilter);
	if (!proc_nfqueue)
		goto cleanup_subsys;
	proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);

	return status;

cleanup:
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
cleanup_subsys:
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static int __init init(void)
{
	return init_or_cleanup(1);
}

static void __exit fini(void)
{
	init_or_cleanup(0);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(init);
module_exit(fini);