/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
	struct list_head tables;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
#endif
};

static struct xt_af *xt;

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

enum {
	TABLE,
	TARGET,
	MATCH,
};

static const char *xt_prefix[NPROTO] = {
	[AF_INET]	= "ip",
	[AF_INET6]	= "ip6",
	[NF_ARP]	= "arp",
};
/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
	int ret, af = target->family;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	int af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_target(&target[i]);
}
EXPORT_SYMBOL(xt_unregister_targets);
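/*
 * Illustrative usage sketch, not part of this file: an extension module
 * that provides the same target for several families typically registers
 * all of its entries with one xt_register_targets() call and undoes it on
 * module exit.  The "FOO" target and the foo_tg_* names below are made up
 * for the example; only the xt_register_targets()/xt_unregister_targets()
 * calls are real API.
 *
 *	static struct xt_target foo_tg_reg[] = {
 *		{ .name = "FOO", .family = AF_INET,  ... },
 *		{ .name = "FOO", .family = AF_INET6, ... },
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 *
 *	static void __exit foo_tg_exit(void)
 *	{
 *		xt_unregister_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 */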
int
xt_register_match(struct xt_match *match)
{
	int ret, af = match->family;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	int af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_match(&match[i]);
}
EXPORT_SYMBOL(xt_unregister_matches);
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use try_then_request_module().
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(int af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(int af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(int af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = try_then_request_module(xt_find_target(af, name, revision),
					 "%st_%s", xt_prefix[af], name);
	if (IS_ERR(target) || !target)
		return NULL;
	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
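/*
 * Illustrative usage sketch, not part of this file: a table parser looks
 * the target up by name and revision (auto-loading the extension module
 * if needed) and must drop the module reference once it is done with it.
 * The surrounding error handling is an assumption for the example.
 *
 *	struct xt_target *target;
 *
 *	target = xt_request_find_target(AF_INET, name, revision);
 *	if (IS_ERR(target) || !target)
 *		return target ? PTR_ERR(target) : -ENOENT;
 *	...
 *	module_put(target->me);		(release the reference taken above)
 */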
static int match_revfn(int af, const char *name, u8 revision, int *bestp)
{
	struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}
	return have_rev;
}

static int target_revfn(int af, const char *name, u8 revision, int *bestp)
{
	struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}
	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(int af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
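/*
 * Illustrative usage sketch, not part of this file: per-family
 * GET_REVISION getsockopt handlers call this through
 * try_then_request_module() so that a missing extension is loaded and the
 * query retried.  The option names and the rev structure belong to the
 * caller and are assumed here only for the example.
 *
 *	if (cmd == IPT_SO_GET_REVISION_TARGET)
 *		target = 1;
 *	else
 *		target = 0;
 *
 *	try_then_request_module(xt_find_revision(AF_INET, rev.name,
 *						 rev.revision, target, &ret),
 *				"ipt_%s", rev.name);
 */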
int xt_check_match(const struct xt_match *match, unsigned short family,
		   unsigned int size, const char *table, unsigned int hook_mask,
		   unsigned short proto, int inv_proto)
{
	if (XT_ALIGN(match->matchsize) != size) {
		printk("%s_tables: %s match: invalid size %Zu != %u\n",
		       xt_prefix[family], match->name,
		       XT_ALIGN(match->matchsize), size);
		return -EINVAL;
	}
	if (match->table && strcmp(match->table, table)) {
		printk("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[family], match->name, match->table, table);
		return -EINVAL;
	}
	if (match->hooks && (hook_mask & ~match->hooks) != 0) {
		printk("%s_tables: %s match: bad hook_mask %u/%u\n",
		       xt_prefix[family], match->name, hook_mask, match->hooks);
		return -EINVAL;
	}
	if (match->proto && (match->proto != proto || inv_proto)) {
		printk("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[family], match->name, match->proto);
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
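/*
 * Illustrative usage sketch, not part of this file: a per-family front end
 * is expected to call this from its checkentry path with the user-supplied
 * match size minus the header, roughly as below.  The variable names
 * (m, e, hook_mask, table_name) are assumptions for the example.
 *
 *	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
 *			     table_name, hook_mask, e->ip.proto,
 *			     e->ip.invflags & IPT_INV_PROTO);
 *	if (ret)
 *		return ret;
 */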
#ifdef CONFIG_COMPAT
int xt_compat_match_offset(struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       int *size)
{
	struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
			    int *size)
{
	struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */
int xt_check_target(const struct xt_target *target, unsigned short family,
		    unsigned int size, const char *table, unsigned int hook_mask,
		    unsigned short proto, int inv_proto)
{
	if (XT_ALIGN(target->targetsize) != size) {
		printk("%s_tables: %s target: invalid size %Zu != %u\n",
		       xt_prefix[family], target->name,
		       XT_ALIGN(target->targetsize), size);
		return -EINVAL;
	}
	if (target->table && strcmp(target->table, table)) {
		printk("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[family], target->name, target->table, table);
		return -EINVAL;
	}
	if (target->hooks && (hook_mask & ~target->hooks) != 0) {
		printk("%s_tables: %s target: bad hook_mask %u/%u\n",
		       xt_prefix[family], target->name, hook_mask,
		       target->hooks);
		return -EINVAL;
	}
	if (target->proto && (target->proto != proto || inv_proto)) {
		printk("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[family], target->name, target->proto);
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				int *size)
{
	struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
			     int *size)
{
	struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif /* CONFIG_COMPAT */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
		return NULL;

	newinfo = kzalloc(sizeof(struct xt_table_info), GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							     GFP_KERNEL,
							     cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							     cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}
	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
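/*
 * Illustrative usage sketch, not part of this file: the per-family
 * do_replace path allocates a blob sized for the ruleset copied in from
 * userspace and frees it again on any error (or frees the old blob after
 * a successful swap).  The tmp.size name is an assumption for the example.
 *
 *	struct xt_table_info *newinfo;
 *
 *	newinfo = xt_alloc_table_info(tmp.size);
 *	if (!newinfo)
 *		return -ENOMEM;
 *	...
 *	xt_free_table_info(newinfo);
 */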
/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(int af, const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].tables, list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
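/*
 * Illustrative usage sketch, not part of this file: a GETINFO-style
 * handler pairs the lookup (which leaves the per-family mutex held and a
 * module reference taken) with xt_table_unlock() and module_put().  The
 * surrounding control flow is an assumption for the example.
 *
 *	struct xt_table *t;
 *
 *	t = xt_find_table_lock(AF_INET, name);
 *	if (t && !IS_ERR(t)) {
 *		...			(use t->private under the mutex)
 *		xt_table_unlock(t);
 *		module_put(t->me);
 *	} else
 *		ret = t ? PTR_ERR(t) : -ENOENT;
 */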
#ifdef CONFIG_COMPAT
void xt_compat_lock(int af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(int af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *oldinfo, *private;

	/* Do the substitution. */
	write_lock_bh(&table->lock);
	private = table->private;
	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		duprintf("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		write_unlock_bh(&table->lock);
		*error = -EAGAIN;
		return NULL;
	}
	oldinfo = private;
	table->private = newinfo;
	newinfo->initial_entries = oldinfo->initial_entries;
	write_unlock_bh(&table->lock);

	return oldinfo;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

int xt_register_table(struct xt_table *table,
		      struct xt_table_info *bootstrap,
		      struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t;

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		return ret;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &xt[table->af].tables, list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;
	rwlock_init(&table->lock);
	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	duprintf("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &xt[table->af].tables);

	ret = 0;
 unlock:
	mutex_unlock(&xt[table->af].mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xt_register_table);
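/*
 * Illustrative usage sketch, not part of this file: a family wrapper
 * registers its table with an all-zero bootstrap info and the freshly
 * translated ruleset, and frees newinfo itself if registration fails.
 * The bootstrap initializer shown is an assumption for the example.
 *
 *	static struct xt_table_info bootstrap = { 0 };
 *
 *	ret = xt_register_table(table, &bootstrap, newinfo);
 *	if (ret != 0) {
 *		xt_free_table_info(newinfo);
 *		return ret;
 *	}
 */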
void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
static struct list_head *xt_get_idx(struct list_head *list, struct seq_file *seq, loff_t pos)
{
	struct list_head *head = list->next;

	if (!head || list_empty(list))
		return NULL;

	while (pos && (head = head->next)) {
		if (head == list)
			return NULL;
		pos--;
	}
	return pos ? NULL : head;
}

static struct list_head *type2list(u_int16_t af, u_int16_t type)
{
	struct list_head *list;

	switch (type) {
	case TARGET:
		list = &xt[af].target;
		break;
	case MATCH:
		list = &xt[af].match;
		break;
	case TABLE:
		list = &xt[af].tables;
		break;
	default:
		list = NULL;
		break;
	}

	return list;
}

static void *xt_tgt_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = (struct proc_dir_entry *) seq->private;
	u_int16_t af = (unsigned long)pde->data & 0xffff;
	u_int16_t type = (unsigned long)pde->data >> 16;
	struct list_head *list;

	if (af >= NPROTO)
		return NULL;

	list = type2list(af, type);
	if (!list)
		return NULL;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return NULL;

	return xt_get_idx(list, seq, *pos);
}

static void *xt_tgt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	u_int16_t af = (unsigned long)pde->data & 0xffff;
	u_int16_t type = (unsigned long)pde->data >> 16;
	struct list_head *list;

	if (af >= NPROTO)
		return NULL;

	list = type2list(af, type);
	if (!list)
		return NULL;

	(*pos)++;
	return xt_get_idx(list, seq, *pos);
}

static void xt_tgt_seq_stop(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	u_int16_t af = (unsigned long)pde->data & 0xffff;

	mutex_unlock(&xt[af].mutex);
}

static int xt_name_seq_show(struct seq_file *seq, void *v)
{
	char *name = (char *)v + sizeof(struct list_head);

	if (strlen(name))
		return seq_printf(seq, "%s\n", name);
	else
		return 0;
}

static const struct seq_operations xt_tgt_seq_ops = {
	.start	= xt_tgt_seq_start,
	.next	= xt_tgt_seq_next,
	.stop	= xt_tgt_seq_stop,
	.show	= xt_name_seq_show,
};

static int xt_tgt_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &xt_tgt_seq_ops);
	if (!ret) {
		struct seq_file *seq = file->private_data;
		struct proc_dir_entry *pde = PDE(inode);

		seq->private = pde;
	}

	return ret;
}

static const struct file_operations xt_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_tgt_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */
int xt_proto_init(int af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= NPROTO)
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
	if (!proc)
		goto out;
	proc->data = (void *) ((unsigned long) af | (TABLE << 16));

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
	if (!proc)
		goto out_remove_tables;
	proc->data = (void *) ((unsigned long) af | (MATCH << 16));

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
	if (!proc)
		goto out_remove_matches;
	proc->data = (void *) ((unsigned long) af | (TARGET << 16));
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(buf);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(buf);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
void xt_proto_fini(int af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc_net_remove(buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(buf);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
static int __init xt_init(void)
{
	int i;

	xt = kmalloc(sizeof(struct xt_af) * NPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
		INIT_LIST_HEAD(&xt[i].tables);
	}
	return 0;
}

static void __exit xt_fini(void)
{
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);