GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / net / bridge / netfilter / ebtables.c
blobbcc102e3be4daa2c5f09ee27710cc5ed03b096fa
1 /*
2 * ebtables
4 * Author:
5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <net/sock.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
/* Loud diagnostic for conditions that indicate malformed ruleset data or an
 * internal inconsistency; compiled out by switching to the empty variant. */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
                                         "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */

/*
 * Each cpu has its own set of counters, so there is no need for write_lock in
 * the softirq.
 * For reading or updating the counters, the user context needs to
 * get a write_lock.
 */

/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* Byte size of one cpu's cache-aligned set of n counters. */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* Address of @cpu's private counter array within the shared block @c. */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
   COUNTER_OFFSET(n) * cpu))

/* Serializes table registration, replacement and counter updates
 * from user context. */
static DEFINE_MUTEX(ebt_mutex);
54 #ifdef CONFIG_COMPAT
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
57 int v = *(compat_int_t *)src;
59 if (v >= 0)
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66 compat_int_t cv = *(int *)src;
68 if (cv >= 0)
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
72 #endif
/* The built-in "standard" target: its payload is just a verdict integer
 * (ACCEPT/DROP/CONTINUE/RETURN or a chain jump offset).  It has no
 * ->target hook; ebt_do_table() special-cases it. */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
87 static inline int
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 struct xt_action_param *par)
91 par->target = w->u.watcher;
92 par->targinfo = w->data;
93 w->u.watcher->target(skb, par);
94 /* watchers don't give a verdict */
95 return 0;
98 static inline int
99 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
100 struct xt_action_param *par)
102 par->match = m->u.match;
103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
/*
 * Compare a rule's interface-name pattern against a device.
 * Returns 0 when the device matches the pattern, 1 when it does not
 * (note the inverted sense: callers wrap this in FWINV2).
 * An empty pattern matches anything; a NULL device matches nothing.
 * The byte value 1 in the pattern is a "match any remainder" wildcard
 * placed there by the ebtables userspace tool.
 */
static inline int
ebt_dev_check(const char *entry, const struct net_device *device)
{
	int i = 0;
	const char *devname;

	if (*entry == '\0')
		return 0;
	if (!device)
		return 1;
	devname = device->name;
	/* 1 is the wildcard token */
	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
		i++;
	/* mismatch unless both strings ended together or we hit the wildcard */
	return (devname[i] != entry[i] && entry[i] != 1);
}
/* XOR the raw test result with the rule's per-field inversion flag;
 * relies on a variable 'e' (the entry) being in scope at the use site. */
#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))

/* process standard matches
 * Returns 0 when the entry's built-in criteria (protocol, in/out device,
 * logical bridge device, source/dest MAC) all match, 1 otherwise. */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
   const struct net_device *in, const struct net_device *out)
{
	int verdict, i;

	if (e->bitmask & EBT_802_3) {
		/* 802.3 frames have h_proto < 1536 (it is a length there) */
		if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
	   FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
		return 1;

	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
		return 1;
	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
		return 1;
	/* rcu_read_lock()ed by nf_hook_slow */
	if (in && br_port_exists(in) &&
	    FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
		   EBT_ILOGICALIN))
		return 1;
	if (out && br_port_exists(out) &&
	    FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
		   EBT_ILOGICALOUT))
		return 1;

	if (e->bitmask & EBT_SOURCEMAC) {
		/* accumulate masked per-byte differences; 0 means equal */
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
			   e->sourcemsk[i];
		if (FWINV2(verdict != 0, EBT_ISOURCE) )
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
			   e->destmsk[i];
		if (FWINV2(verdict != 0, EBT_IDEST) )
			return 1;
	}
	return 0;
}
172 static inline __pure
173 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
175 return (void *)entry + entry->next_offset;
/* Do some firewalling
 *
 * Walk the chain registered for @hook in @table and return an NF_* verdict
 * for @skb.  Jumps to user-defined chains are handled iteratively with a
 * per-cpu chain stack (private->chainstack); packet/byte counters live in
 * per-cpu sets indexed via COUNTER_BASE.  Runs under the table's read lock
 * in softirq context. */
unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
   const struct net_device *in, const struct net_device *out,
   struct ebt_table *table)
{
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	const char *base;
	const struct ebt_table_info *private;
	struct xt_action_param acpar;

	acpar.family  = NFPROTO_BRIDGE;
	acpar.in      = in;
	acpar.out     = out;
	acpar.hotdrop = false;
	acpar.hooknum = hook;

	read_lock_bh(&table->lock);
	private = table->private;
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	/* chainstack is only allocated when user-defined chains exist */
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		if (ebt_basic_match(point, eth_hdr(skb), in, out))
			goto letscontinue;

		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
			goto letscontinue;
		if (acpar.hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* increase counter */
		(*(counter_base + i)).pcnt++;
		(*(counter_base + i)).bcnt += skb->len;

		/* these should only watch: not modify, nor tell us
		   what to do with the packet */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);

		t = (struct ebt_entry_target *)
		   (((char *)point) + point->target_offset);
		/* standard target has no ->target hook; its verdict is inline */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			acpar.target   = t->u.target;
			acpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &acpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
#ifdef CONFIG_NETFILTER_DEBUG
			if (sp == 0) {
				BUGPRINT("RETURN on base chain");
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}
#endif
			/* pop the chain stack: resume the calling chain */
			sp--;
			/* put all the local variables right */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;
#ifdef CONFIG_NETFILTER_DEBUG
		if (verdict < 0) {
			BUGPRINT("bogus standard verdict\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		/* jump to a udc: non-negative verdict is an offset into base */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);
#ifdef CONFIG_NETFILTER_DEBUG
		if (chaininfo->distinguisher) {
			BUGPRINT("jump to non-chain\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* I actually like this :) */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
/* If it succeeds, returns element and locks mutex
 *
 * Searches @head for an element whose name equals @name.  Works for any of
 * ebt_table / xt_match / xt_target because the anonymous struct below
 * mirrors the common prefix (list head followed by a name array) of all
 * three.  On failure the mutex is released and *error is set. */
static inline void *
find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
   struct mutex *mutex)
{
	struct {
		struct list_head list;
		char name[EBT_FUNCTION_MAXNAMELEN];
	} *e;

	*error = mutex_lock_interruptible(mutex);
	if (*error != 0)
		return NULL;

	list_for_each_entry(e, head, list) {
		if (strcmp(e->name, name) == 0)
			return e;	/* found: mutex stays held */
	}
	*error = -ENOENT;
	mutex_unlock(mutex);
	return NULL;
}
/*
 * Like find_inlist_lock_noload(), but on a miss asks modprobe to load
 * the module named "<prefix><name>" and retries the lookup once.
 */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
   int *error, struct mutex *mutex)
{
	void *found;

	found = try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
	return found;
}
346 static inline struct ebt_table *
347 find_table_lock(struct net *net, const char *name, int *error,
348 struct mutex *mutex)
350 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
351 "ebtable_", error, mutex);
/*
 * Validate one match extension of an entry: bounds-check its size against
 * the space before the watchers, resolve (and pin) the xt_match module,
 * then run xt_check_match().  On success the module reference is kept and
 * *cnt is bumped so the caller knows how many matches to clean up on a
 * later failure.
 */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
   unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	/* grabs a module reference on success */
	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.match = match;

	par->match = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
/*
 * Validate one watcher extension of an entry: bounds-check its size against
 * the space before the target, resolve (and pin) the xt_target module used
 * as a watcher, then run xt_check_target().  Mirrors ebt_check_match();
 * *cnt counts validated watchers for later cleanup.
 */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
   unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	/* grabs a module reference on success */
	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);
	w->u.watcher = watcher;

	par->target = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
/*
 * First structural pass over the userspace blob: walk the entries area,
 * translate the user-side hook_entry pointers in @repl into kernel
 * pointers inside newinfo->entries, and verify that chain headers and
 * rule entries tile the blob exactly (no gaps, no overruns) and that
 * every valid hook got a chain.
 */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		if (left < sizeof(unsigned int))
			break;

		/* does this offset match one of the user's hook pointers? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* chain header (struct ebt_entries) */
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				   so there is no misunderstanding */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			/* rule entry (struct ebt_entry) */
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
/*
 * this one is very careful, as it is the first function
 * to parse the userspace data
 *
 * Called once per blob item by EBT_ENTRY_ITERATE.  For a chain header it
 * verifies the previous chain's entry count, the policy, and the running
 * counter offset, then resets the per-chain counters (*n expected, *cnt
 * seen).  For a rule entry it sanity-checks the internal offsets and bumps
 * *cnt / *totalcnt.  *udc_cnt accumulates the number of user defined chains.
 */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
   const struct ebt_table_info *newinfo,
   unsigned int *n, unsigned int *cnt,
   unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		   as it said it has */
		if (*n != *cnt) {
			BUGPRINT("nentries does not equal the nr of entries "
				 "in the chain\n");
			return -EINVAL;
		}
		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
				BUGPRINT("bad policy\n");
				return -EINVAL;
			}
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
			BUGPRINT("counter_offset != totalcnt");
			return -EINVAL;
		}
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset) {
		BUGPRINT("entry offsets not in right order\n");
		return -EINVAL;
	}
	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
		BUGPRINT("target size too small\n");
		return -EINVAL;
	}
	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
/* Bookkeeping for one user defined chain during loop detection. */
struct ebt_cl_stack
{
	struct ebt_chainstack cs;	/* resume point while "recursing" */
	int from;			/* index of calling chain, -1 = base */
	unsigned int hookmask;		/* base hooks that can reach this udc */
};
554 * we need these positions to check that the jumps to a different part of the
555 * entries is a jump to the beginning of a new chain.
557 static inline int
558 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
559 unsigned int *n, struct ebt_cl_stack *udc)
561 int i;
563 /* we're only interested in chain starts */
564 if (e->bitmask)
565 return 0;
566 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
567 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
568 break;
570 /* only care about udc */
571 if (i != NF_BR_NUMHOOKS)
572 return 0;
574 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
575 /* these initialisations are depended on later in check_chainloops() */
576 udc[*n].cs.n = 0;
577 udc[*n].hookmask = 0;
579 (*n)++;
580 return 0;
583 static inline int
584 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
586 struct xt_mtdtor_param par;
588 if (i && (*i)-- == 0)
589 return 1;
591 par.net = net;
592 par.match = m->u.match;
593 par.matchinfo = m->data;
594 par.family = NFPROTO_BRIDGE;
595 if (par.match->destroy != NULL)
596 par.match->destroy(&par);
597 module_put(par.match->me);
598 return 0;
601 static inline int
602 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
604 struct xt_tgdtor_param par;
606 if (i && (*i)-- == 0)
607 return 1;
609 par.net = net;
610 par.target = w->u.watcher;
611 par.targinfo = w->data;
612 par.family = NFPROTO_BRIDGE;
613 if (par.target->destroy != NULL)
614 par.target->destroy(&par);
615 module_put(par.target->me);
616 return 0;
/*
 * Tear down one rule entry: clean up all its watchers and matches, then
 * destroy its target and drop the target module reference.  Chain headers
 * (bitmask == 0) are skipped.  @cnt, when non-NULL, is a countdown so only
 * the entries that passed ebt_check_entry() are cleaned on error paths.
 */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net = net;
	par.target = t->u.target;
	par.targinfo = t->data;
	par.family = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
/*
 * Fully validate one rule entry: flag sanity, hook-mask computation (which
 * base chains can reach this rule, possibly through user defined chains),
 * then check all matches, watchers and finally the target.  On success a
 * module reference is held for each extension and *cnt is bumped; on
 * failure everything validated so far for this entry is cleaned up.
 */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
   const struct ebt_table_info *newinfo,
   const char *name, unsigned int *cnt,
   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to? (last base chain that starts before e) */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	   a base chain */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* rule lives in a udc: inherit that udc's reachability mask */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	gap = e->next_offset - e->target_offset;

	/* grabs a module reference on success */
	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
/*
 * checks for loops and sets the hook mask for udc
 * the hook mask for udc tells us from which base chains the udc can be
 * accessed. This mask is a parameter to the check() functions of the extensions
 *
 * Iterative depth-first walk of all jumps reachable from @chain (the base
 * chain of @hooknr).  cl_s[] doubles as the recursion stack: cs.n != 0
 * marks a chain as "currently on the path", which is how a loop is
 * detected.  Returns 0 if loop-free, -1 on any malformed jump or loop.
 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
   unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		/* only the standard target can jump */
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			/* already visited from this hook: subtree is clean */
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh
 *
 * Second pass after ebt_verify_pointers(): checks chain ordering and entry
 * accounting, allocates the per-cpu chain stacks when user defined chains
 * exist, rejects chain loops, and finally validates every entry (binding
 * match/watcher/target modules).  On entry-validation failure the already
 * validated entries are cleaned up here; earlier allocations
 * (newinfo->chainstack) are the caller's responsibility. */
static int translate_table(struct net *net, const char *name,
   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	   as their corresponding hooks */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
	          newinfo->nentries afterwards */
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	   while we're at it, allocate the chainstack */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		   if an error occurs */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	   - the nr of entries in each chain is right
	   - the size of the allocated space is right
	   - all valid hooks have a corresponding chain
	   - there are no loops
	   - wrong data can still be on the level of a single entry
	   - could be there are jumps to places that are not the
	     beginning of a chain. This can only occur in chains that
	     are not accessible from any base chains, so we don't care. */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
950 /* called under write_lock */
951 static void get_counters(const struct ebt_counter *oldcounters,
952 struct ebt_counter *counters, unsigned int nentries)
954 int i, cpu;
955 struct ebt_counter *counter_base;
957 /* counters of cpu 0 */
958 memcpy(counters, oldcounters,
959 sizeof(struct ebt_counter) * nentries);
961 /* add other counters to those of cpu 0 */
962 for_each_possible_cpu(cpu) {
963 if (cpu == 0)
964 continue;
965 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
966 for (i = 0; i < nentries; i++) {
967 counters[i].pcnt += counter_base[i].pcnt;
968 counters[i].bcnt += counter_base[i].bcnt;
/*
 * Second half of a table replace: validate the new ruleset, swap it in
 * under the table's write lock, hand the old counters back to userspace
 * (if requested), and tear down the old ruleset.  Module refcounting on
 * the table keeps a non-empty table pinned (can't rmmod).
 */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	   the check on the size is done later, when we have the lock */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	/* on success ebt_mutex is held until we unlock below */
	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	   allocation. Only reason why this is done is because this way the lock
	   is held only once, while this doesn't bring the kernel into a
	   dangerous state. */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		ret = -EFAULT;
	}
	else
		ret = 0;

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
	   ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);
	vfree(counterstmp);
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1077 /* replace the table */
1078 static int do_replace(struct net *net, const void __user *user,
1079 unsigned int len)
1081 int ret, countersize;
1082 struct ebt_table_info *newinfo;
1083 struct ebt_replace tmp;
1085 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1086 return -EFAULT;
1088 if (len != sizeof(tmp) + tmp.entries_size) {
1089 BUGPRINT("Wrong len argument\n");
1090 return -EINVAL;
1093 if (tmp.entries_size == 0) {
1094 BUGPRINT("Entries_size never zero\n");
1095 return -EINVAL;
1097 /* overflow check */
1098 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1099 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1100 return -ENOMEM;
1101 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1102 return -ENOMEM;
1104 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1105 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1106 if (!newinfo)
1107 return -ENOMEM;
1109 if (countersize)
1110 memset(newinfo->counters, 0, countersize);
1112 newinfo->entries = vmalloc(tmp.entries_size);
1113 if (!newinfo->entries) {
1114 ret = -ENOMEM;
1115 goto free_newinfo;
1117 if (copy_from_user(
1118 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1119 BUGPRINT("Couldn't copy entries from userspace\n");
1120 ret = -EFAULT;
1121 goto free_entries;
1124 ret = do_replace_finish(net, &tmp, newinfo);
1125 if (ret == 0)
1126 return ret;
1127 free_entries:
1128 vfree(newinfo->entries);
1129 free_newinfo:
1130 vfree(newinfo);
1131 return ret;
1134 struct ebt_table *
1135 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1137 struct ebt_table_info *newinfo;
1138 struct ebt_table *t, *table;
1139 struct ebt_replace_kernel *repl;
1140 int ret, i, countersize;
1141 void *p;
1143 if (input_table == NULL || (repl = input_table->table) == NULL ||
1144 repl->entries == 0 || repl->entries_size == 0 ||
1145 repl->counters != NULL || input_table->private != NULL) {
1146 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1147 return ERR_PTR(-EINVAL);
1150 /* Don't add one table to multiple lists. */
1151 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1152 if (!table) {
1153 ret = -ENOMEM;
1154 goto out;
1157 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1158 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1159 ret = -ENOMEM;
1160 if (!newinfo)
1161 goto free_table;
1163 p = vmalloc(repl->entries_size);
1164 if (!p)
1165 goto free_newinfo;
1167 memcpy(p, repl->entries, repl->entries_size);
1168 newinfo->entries = p;
1170 newinfo->entries_size = repl->entries_size;
1171 newinfo->nentries = repl->nentries;
1173 if (countersize)
1174 memset(newinfo->counters, 0, countersize);
1176 /* fill in newinfo and parse the entries */
1177 newinfo->chainstack = NULL;
1178 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1179 if ((repl->valid_hooks & (1 << i)) == 0)
1180 newinfo->hook_entry[i] = NULL;
1181 else
1182 newinfo->hook_entry[i] = p +
1183 ((char *)repl->hook_entry[i] - repl->entries);
1185 ret = translate_table(net, repl->name, newinfo);
1186 if (ret != 0) {
1187 BUGPRINT("Translate_table failed\n");
1188 goto free_chainstack;
1191 if (table->check && table->check(newinfo, table->valid_hooks)) {
1192 BUGPRINT("The table doesn't like its own initial data, lol\n");
1193 return ERR_PTR(-EINVAL);
1196 table->private = newinfo;
1197 rwlock_init(&table->lock);
1198 ret = mutex_lock_interruptible(&ebt_mutex);
1199 if (ret != 0)
1200 goto free_chainstack;
1202 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1203 if (strcmp(t->name, table->name) == 0) {
1204 ret = -EEXIST;
1205 BUGPRINT("Table name already exists\n");
1206 goto free_unlock;
1210 /* Hold a reference count if the chains aren't empty */
1211 if (newinfo->nentries && !try_module_get(table->me)) {
1212 ret = -ENOENT;
1213 goto free_unlock;
1215 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1216 mutex_unlock(&ebt_mutex);
1217 return table;
1218 free_unlock:
1219 mutex_unlock(&ebt_mutex);
1220 free_chainstack:
1221 if (newinfo->chainstack) {
1222 for_each_possible_cpu(i)
1223 vfree(newinfo->chainstack[i]);
1224 vfree(newinfo->chainstack);
1226 vfree(newinfo->entries);
1227 free_newinfo:
1228 vfree(newinfo);
1229 free_table:
1230 kfree(table);
1231 out:
1232 return ERR_PTR(ret);
/* Unlink a table from the per-net list and free everything owned by it:
 * the translated entries (running their destructors), the per-cpu
 * chainstacks, the private info block and the table struct itself. */
void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
	int i;

	if (!table) {
		BUGPRINT("Request to unregister NULL table!!!\n");
		return;
	}
	/* only the list manipulation needs the mutex; after list_del no new
	 * lookups can find this table */
	mutex_lock(&ebt_mutex);
	list_del(&table->list);
	mutex_unlock(&ebt_mutex);
	/* run cleanup (module_put etc.) for every match/watcher/target */
	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
			  ebt_cleanup_entry, net, NULL);
	/* drop the reference taken at registration for non-empty tables */
	if (table->private->nentries)
		module_put(table->me);
	vfree(table->private->entries);
	if (table->private->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->private->chainstack[i]);
		vfree(table->private->chainstack);
	}
	vfree(table->private);
	kfree(table);
}
/* userspace just supplied us with counters */
/* Add the user-provided counter deltas to the first cpu's counter set of
 * the named table. Returns 0 on success or a negative errno. */
static int do_update_counters(struct net *net, const char *name,
				struct ebt_counter __user *counters,
				unsigned int num_counters,
				const void __user *user, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(num_counters * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	/* on success this returns with ebt_mutex held */
	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	if (num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr of counters\n");
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++) {
		t->private->counters[i].pcnt += tmp[i].pcnt;
		t->private->counters[i].bcnt += tmp[i].bcnt;
	}

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}
1310 static int update_counters(struct net *net, const void __user *user,
1311 unsigned int len)
1313 struct ebt_replace hlp;
1315 if (copy_from_user(&hlp, user, sizeof(hlp)))
1316 return -EFAULT;
1318 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1319 return -EINVAL;
1321 return do_update_counters(net, hlp.name, hlp.counters,
1322 hlp.num_counters, user, len);
1325 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1326 const char *base, char __user *ubase)
1328 char __user *hlp = ubase + ((char *)m - base);
1329 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1330 return -EFAULT;
1331 return 0;
1334 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1335 const char *base, char __user *ubase)
1337 char __user *hlp = ubase + ((char *)w - base);
1338 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1339 return -EFAULT;
1340 return 0;
/* For one entry, replace the kernel-side match/watcher/target pointers in
 * the userspace copy with the corresponding textual names. */
static inline int
ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
{
	int ret;
	char __user *hlp;
	const struct ebt_entry_target *t;

	/* bitmask == 0 marks a struct ebt_entries chain header, not an
	 * entry; nothing to fix up */
	if (e->bitmask == 0)
		return 0;

	hlp = ubase + (((char *)e + e->target_offset) - base);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
	if (ret != 0)
		return ret;
	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
	if (ret != 0)
		return ret;
	if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
		return -EFAULT;
	return 0;
}
/* Snapshot the accumulated per-cpu counters under the table write lock
 * and copy them to userspace. num_counters == 0 means userspace does not
 * want them; otherwise it must match nentries exactly. */
static int copy_counters_to_user(struct ebt_table *t,
				 const struct ebt_counter *oldcounters,
				 void __user *user, unsigned int num_counters,
				 unsigned int nentries)
{
	struct ebt_counter *counterstmp;
	int ret = 0;

	/* userspace might not need the counters */
	if (num_counters == 0)
		return 0;

	if (num_counters != nentries) {
		BUGPRINT("Num_counters wrong\n");
		return -EINVAL;
	}

	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
	if (!counterstmp)
		return -ENOMEM;

	/* write lock keeps the softirq readers out while we sum per-cpu
	 * counters into the temporary buffer */
	write_lock_bh(&t->lock);
	get_counters(oldcounters, counterstmp, nentries);
	write_unlock_bh(&t->lock);

	/* copy_to_user may sleep, so it happens outside the lock */
	if (copy_to_user(user, counterstmp,
	   nentries * sizeof(struct ebt_counter)))
		ret = -EFAULT;
	vfree(counterstmp);
	return ret;
}
/* called with ebt_mutex locked */
/* Copy the table's counters and entries blob to userspace and rewrite
 * the match/watcher/target pointers into textual names.
 * EBT_SO_GET_ENTRIES reads the live table (t->private); the INIT variant
 * reads the pristine template (t->table). */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* userspace must have sized its buffer for header + entries
	 * (+ counters only when it asked for them) */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1452 static int do_ebt_set_ctl(struct sock *sk,
1453 int cmd, void __user *user, unsigned int len)
1455 int ret;
1457 if (!capable(CAP_NET_ADMIN))
1458 return -EPERM;
1460 switch(cmd) {
1461 case EBT_SO_SET_ENTRIES:
1462 ret = do_replace(sock_net(sk), user, len);
1463 break;
1464 case EBT_SO_SET_COUNTERS:
1465 ret = update_counters(sock_net(sk), user, len);
1466 break;
1467 default:
1468 ret = -EINVAL;
1470 return ret;
/* getsockopt() dispatcher for ebtables: return table metadata (INFO) or
 * the full ruleset (ENTRIES). find_table_lock() returns with ebt_mutex
 * held, so every exit path below must unlock it. */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch(cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)){
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		/* INFO describes the live table, INIT_INFO the template */
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0){
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1529 #ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
/* Same layout as struct ebt_replace but with 32-bit pointers
 * (compat_uptr_t), as seen by 32-bit userland on a 64-bit kernel. */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
/* struct ebt_entry_match, _target and _watcher have same layout */
/* 32-bit view of those three structures: a name-or-pointer union followed
 * by the payload size and flexible payload data. */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;
	compat_uint_t data[0];		/* payload follows the header */
};
/* account for possible padding between match_size and ->data */
/* Size difference between the native and the compat match header after
 * their respective alignments; the BUILD_BUG_ON guarantees the native
 * header is never smaller, so the result is non-negative. */
static int ebt_compat_entry_padsize(void)
{
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}
/* Payload size delta between compat and native representation of a match. */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
{
	/*
	 * ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
}
/* Convert one native match to its compat form at *dstptr, advancing the
 * destination pointer and shrinking *size by the native/compat delta. */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	/* compat payload is smaller by 'off' bytes */
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
/* Convert one native target to its compat form at *dstptr; same contract
 * as compat_match_to_user(). */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	/* the compat layout stores the size in the shared match_size field */
	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1631 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1632 void __user **dstptr,
1633 unsigned int *size)
1635 return compat_target_to_user((struct ebt_entry_target *)w,
1636 dstptr, size);
/* Convert one kernel ebt_entry (or chain header) to compat layout at
 * *dstptr, recalculating the internal watcher/target/next offsets to
 * account for the size shrinkage of each match/watcher/target. */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				     unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* bitmask == 0 marks a struct ebt_entries chain header: identical
	 * layout on 32 and 64 bit, copy verbatim */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	/* each iterate shrinks *size; origsize - *size is the total
	 * shrinkage so far, which the offsets must be reduced by */
	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* patch the corrected offsets into the already-copied header */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	   put_user(target_offset, &ce->target_offset) ||
	   put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1695 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1697 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1698 *off += ebt_compat_entry_padsize();
1699 return 0;
1702 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1704 *off += xt_compat_target_offset(w->u.watcher);
1705 *off += ebt_compat_entry_padsize();
1706 return 0;
/* For one entry, compute how much smaller it is in compat layout, record
 * that delta with xt_compat_add_offset() and shrink entries_size and any
 * hook entry points that lie beyond this entry. */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers have identical size on 32/64 bit */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): 'base - hookptr' looks like it should be
		 * 'base + hookptr' or a plain pointer compare against
		 * hookptr — confirm against upstream before touching;
		 * left byte-identical here. */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1753 static int compat_table_info(const struct ebt_table_info *info,
1754 struct compat_ebt_replace *newinfo)
1756 unsigned int size = info->entries_size;
1757 const void *entries = info->entries;
1759 newinfo->entries_size = size;
1761 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1762 entries, newinfo);
/* Compat counterpart of copy_everything_to_user(): hand the table's
 * counters and entries back to 32-bit userland, converting every entry
 * to compat layout on the way out. */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	/* ENTRIES reads the live table, INIT_ENTRIES the template */
	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* repl gets the compat-shrunk entries_size for the length check */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
/* Running state for the two-pass compat->native entry translation:
 * pass 1 (buf_kern_start == NULL) only measures, pass 2 copies. */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
/* Advance the kernel-side offset by sz; returns -EINVAL when the u32
 * addition wrapped (after += sz the offset must still be >= sz). */
static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
{
	state->buf_kern_offset += sz;
	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
}
/* Copy sz bytes into the kernel buffer at the current offset. During the
 * measuring pass (buf_kern_start == NULL) only the offsets advance. */
static int ebt_buf_add(struct ebt_entries_buf_state *state,
		       void *data, unsigned int sz)
{
	if (state->buf_kern_start == NULL)
		goto count_only;

	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);

	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);

 count_only:
	/* data consumed from the user blob in either pass */
	state->buf_user_offset += sz;
	return ebt_buf_count(state, sz);
}
/* Append sz zero bytes of kernel-only padding (written in pass 2 only). */
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
{
	char *b = state->buf_kern_start;

	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);

	if (b != NULL && sz > 0)
		memset(b + state->buf_kern_offset, 0, sz);
	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
	return ebt_buf_count(state, sz);
}
/* Which kind of compat_ebt_entry_mwt a translation call is handling. */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
/* Translate one compat match/watcher/target into native layout (or, on
 * the measuring pass, only record its size delta via
 * xt_compat_add_offset). Returns the native payload size consumed from
 * the user blob plus the growth, or a negative errno. */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0, ret = 0;
	unsigned int size_kern, entry_offset, match_size = mwt->match_size;

	/* bound the (userland-supplied) name before module lookup */
	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	entry_offset = (unsigned char *) mwt - base;
	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (match == NULL)
			return -ENOENT;
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* ebt_among registers matchsize == -1; see
		 * ebt_compat_match_offset() */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (wt == NULL)
			return -ENOENT;
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;
	}

	/* measuring pass: record the per-entry growth for later jump
	 * recalculation */
	if (!dst) {
		ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
					off + ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	/* walk the chained compat headers until the section is exhausted */
	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* remember where the native header lands so its size can
		 * be patched after translation (copy pass only) */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		if (match_kern)
			match_kern->match_size = ret;

		/* an entry has exactly one target; leftover bytes here
		 * indicate corrupt input */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
/* Apply fn to every compat watcher of entry e (the span between
 * watchers_offset and target_offset); stops at the first non-zero
 * return, and rejects a span that does not end exactly on
 * target_offset. */
#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...)          \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__watcher;             \
	                                                    \
	for (__i = e->watchers_offset;                      \
	     __i < (e)->target_offset;                      \
	     __i += __watcher->watcher_size +               \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__watcher = (void *)(e) + __i;              \
		__ret = fn(__watcher , ## args);            \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->target_offset)              \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
/* Apply fn to every compat match of entry e (the span between the entry
 * header and watchers_offset); same termination rules as
 * EBT_COMPAT_WATCHER_ITERATE. */
#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...)            \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__match;               \
	                                                    \
	for (__i = sizeof(struct ebt_entry);                \
	     __i < (e)->watchers_offset;                    \
	     __i += __match->match_size +                   \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__match = (void *)(e) + __i;                \
		__ret = fn(__match , ## args);              \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->watchers_offset)            \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
/* called for all ebt_entry structures. */
/* Translate (or, on the measuring pass, size) one compat ebt_entry into
 * native layout via the buf state, fixing up the three section offsets
 * stored in the entry header. *total tracks the unconsumed user bytes. */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	/* chain header: same layout on 32/64 bit, copy as-is */
	if (!entry->bitmask) {
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		/* the three offsets we are about to copy will be patched
		 * in place below once the growth is known */
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt section type */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
/*
 * repl->entries_size is the size of the ebt_entry blob in userspace.
 * It might need more memory when copied to a 64 bit kernel in case
 * userspace is 32-bit. So, first task: find out how much memory is needed.
 *
 * Called before validation is performed.
 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
			       struct ebt_entries_buf_state *state)
{
	unsigned int size_remaining = size_user;
	int ret;

	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
					&size_remaining, state);
	if (ret < 0)
		return ret;

	/* every byte of the user blob must have been consumed */
	WARN_ON(size_remaining);
	/* native-layout size needed (measuring pass) or written (copy pass) */
	return state->buf_kern_offset;
}
/* Read a 32-bit struct compat_ebt_replace from userspace, validate its
 * sizes and widen it into a native struct ebt_replace. */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					     void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* same overflow bounds as the native do_replace() path */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* fields up to hook_entry are layout-identical */
	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
/* Compat counterpart of do_replace(): widen the header, then run the
 * two-pass entry translation (measure, allocate, copy) before handing
 * the native-layout result to do_replace_finish(). */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	xt_compat_lock(NFPROTO_BRIDGE);

	/* pass 1: buf_kern_start == NULL, only computes the native size */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* pass 2: actually translate into the freshly sized buffer */
	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* shift each hook entry point by the accumulated growth before it */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2288 static int compat_update_counters(struct net *net, void __user *user,
2289 unsigned int len)
2291 struct compat_ebt_replace hlp;
2293 if (copy_from_user(&hlp, user, sizeof(hlp)))
2294 return -EFAULT;
2296 /* try real handler in case userland supplied needed padding */
2297 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2298 return update_counters(net, user, len);
2300 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2301 hlp.num_counters, user, len);
2304 static int compat_do_ebt_set_ctl(struct sock *sk,
2305 int cmd, void __user *user, unsigned int len)
2307 int ret;
2309 if (!capable(CAP_NET_ADMIN))
2310 return -EPERM;
2312 switch (cmd) {
2313 case EBT_SO_SET_ENTRIES:
2314 ret = compat_do_replace(sock_net(sk), user, len);
2315 break;
2316 case EBT_SO_SET_COUNTERS:
2317 ret = compat_update_counters(sock_net(sk), user, len);
2318 break;
2319 default:
2320 ret = -EINVAL;
2322 return ret;
2325 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2326 void __user *user, int *len)
2328 int ret;
2329 struct compat_ebt_replace tmp;
2330 struct ebt_table *t;
2332 if (!capable(CAP_NET_ADMIN))
2333 return -EPERM;
2335 /* try real handler in case userland supplied needed padding */
2336 if ((cmd == EBT_SO_GET_INFO ||
2337 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2338 return do_ebt_get_ctl(sk, cmd, user, len);
2340 if (copy_from_user(&tmp, user, sizeof(tmp)))
2341 return -EFAULT;
2343 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2344 if (!t)
2345 return ret;
2347 xt_compat_lock(NFPROTO_BRIDGE);
2348 switch (cmd) {
2349 case EBT_SO_GET_INFO:
2350 tmp.nentries = t->private->nentries;
2351 ret = compat_table_info(t->private, &tmp);
2352 if (ret)
2353 goto out;
2354 tmp.valid_hooks = t->valid_hooks;
2356 if (copy_to_user(user, &tmp, *len) != 0) {
2357 ret = -EFAULT;
2358 break;
2360 ret = 0;
2361 break;
2362 case EBT_SO_GET_INIT_INFO:
2363 tmp.nentries = t->table->nentries;
2364 tmp.entries_size = t->table->entries_size;
2365 tmp.valid_hooks = t->table->valid_hooks;
2367 if (copy_to_user(user, &tmp, *len) != 0) {
2368 ret = -EFAULT;
2369 break;
2371 ret = 0;
2372 break;
2373 case EBT_SO_GET_ENTRIES:
2374 case EBT_SO_GET_INIT_ENTRIES:
2376 * try real handler first in case of userland-side padding.
2377 * in case we are dealing with an 'ordinary' 32 bit binary
2378 * without 64bit compatibility padding, this will fail right
2379 * after copy_from_user when the *len argument is validated.
2381 * the compat_ variant needs to do one pass over the kernel
2382 * data set to adjust for size differences before it the check.
2384 if (copy_everything_to_user(t, user, len, cmd) == 0)
2385 ret = 0;
2386 else
2387 ret = compat_copy_everything_to_user(t, user, len, cmd);
2388 break;
2389 default:
2390 ret = -EINVAL;
2392 out:
2393 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2394 xt_compat_unlock(NFPROTO_BRIDGE);
2395 mutex_unlock(&ebt_mutex);
2396 return ret;
2398 #endif
/*
 * Sockopt registration for ebtables.  The control interface historically
 * lives on PF_INET sockets (not PF_BRIDGE); the set/get ranges cover
 * EBT_BASE_CTL..EBT_SO_{SET,GET}_MAX.
 */
static struct nf_sockopt_ops ebt_sockopts =
{
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2418 static int __init ebtables_init(void)
2420 int ret;
2422 ret = xt_register_target(&ebt_standard_target);
2423 if (ret < 0)
2424 return ret;
2425 ret = nf_register_sockopt(&ebt_sockopts);
2426 if (ret < 0) {
2427 xt_unregister_target(&ebt_standard_target);
2428 return ret;
2431 printk(KERN_INFO "Ebtables v2.0 registered\n");
2432 return 0;
/*
 * Module exit: tear down in strict reverse order of ebtables_init() —
 * sockopt interface first so no new control requests arrive, then the
 * standard target.
 */
static void __exit ebtables_fini(void)
{
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
}
/* Public API for ebtable_* table modules (filter, nat, broute). */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
/* Module entry/exit hooks and license. */
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");