netfilter: x_table: speedup compat operations
linux-2.6/cjktty.git: net/bridge/netfilter/ebtables.c
blob 5f1825df9dcad7cb546ae7b4b8182210cd60d7a5
1 /*
2 * ebtables
4 * Author:
5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired by the iptables code, which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <net/sock.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args)
35 /* #define BUGPRINT(format, args...) */
38 * Each cpu has its own set of counters, so there is no need for write_lock in
39 * the softirq
40 * For reading or updating the counters, the user context needs to
41 * get a write_lock
44 /* The size of each set of counters is altered to get cache alignment */
45 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
46 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
47 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
48 COUNTER_OFFSET(n) * cpu))
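/* A worked example of the per-cpu counter layout (illustrative; assumes
 * SMP_CACHE_BYTES == 64 and sizeof(struct ebt_counter) == 16, i.e. two
 * 64-bit members):
 *
 *   COUNTER_OFFSET(3)     == SMP_ALIGN(3 * 16) == SMP_ALIGN(48) == 64
 *   COUNTER_BASE(c, 3, 0) == (struct ebt_counter *)((char *)c + 0)
 *   COUNTER_BASE(c, 3, 2) == (struct ebt_counter *)((char *)c + 128)
 *
 * Every cpu thus gets its own cache-aligned block of nentries counters,
 * which is why the softirq path can update them without a write_lock. */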
52 static DEFINE_MUTEX(ebt_mutex);
54 #ifdef CONFIG_COMPAT
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
57 int v = *(compat_int_t *)src;
59 if (v >= 0)
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66 compat_int_t cv = *(int *)src;
68 if (cv >= 0)
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
72 #endif
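/* Sketch of what the two compat helpers above achieve (illustrative
 * numbers): a standard target's verdict is either a negative constant
 * (EBT_ACCEPT, EBT_DROP, EBT_CONTINUE, EBT_RETURN) or a non-negative byte
 * offset of the user-defined chain to jump to.  Since the 32-bit and
 * 64-bit entry layouts differ in size, a positive offset has to be shifted
 * by the cumulative size delta recorded via xt_compat_add_offset().
 * Assuming a jump target at 32-bit offset 200 whose preceding entries grew
 * by 24 bytes in the kernel representation:
 *
 *   ebt_standard_compat_from_user:  v  = 200  ->  200 + 24 = 224
 *   ebt_standard_compat_to_user:    cv = 224  ->  224 - 24 = 200
 *
 * Negative verdicts pass through unchanged. */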
75 static struct xt_target ebt_standard_target = {
76 .name = "standard",
77 .revision = 0,
78 .family = NFPROTO_BRIDGE,
79 .targetsize = sizeof(int),
80 #ifdef CONFIG_COMPAT
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
84 #endif
87 static inline int
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 struct xt_action_param *par)
91 par->target = w->u.watcher;
92 par->targinfo = w->data;
93 w->u.watcher->target(skb, par);
94 /* watchers don't give a verdict */
95 return 0;
98 static inline int
99 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
100 struct xt_action_param *par)
102 par->match = m->u.match;
103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
107 static inline int
108 ebt_dev_check(const char *entry, const struct net_device *device)
110 int i = 0;
111 const char *devname;
113 if (*entry == '\0')
114 return 0;
115 if (!device)
116 return 1;
117 devname = device->name;
118 /* 1 is the wildcard token */
119 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
120 i++;
121 return (devname[i] != entry[i] && entry[i] != 1);
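/* How ebt_dev_check() behaves (illustrative, with a hypothetical device
 * named "eth12"); 0 means "matches", 1 means "does not match", and the
 * byte value 1 inside the configured name is a match-any-suffix wildcard:
 *
 *   entry == ""        -> 0  (no name configured, always matches)
 *   entry == "eth12"   -> 0  (exact match)
 *   entry == "eth\1"   -> 0  ("eth" prefix plus wildcard token)
 *   entry == "eth1"    -> 1  ("eth1" != "eth12" and no wildcard)
 *   device == NULL     -> 1  (a name was asked for but no device exists) */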
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
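/* FWINV2() xors the raw test result with the matching EBT_I* inversion
 * flag; a non-zero result makes ebt_basic_match() report "no match".
 * With "failed" meaning the plain condition did not hold:
 *
 *   test failed  flag set  FWINV2   effect
 *        0          0        0      rule keeps matching
 *        1          0        1      no match
 *        0          1        1      no match ('!' requested)
 *        1          1        0      rule keeps matching ('!' satisfied) */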
125 /* process standard matches */
126 static inline int
127 ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
128 const struct net_device *in, const struct net_device *out)
130 const struct ethhdr *h = eth_hdr(skb);
131 const struct net_bridge_port *p;
132 __be16 ethproto;
133 int verdict, i;
135 if (vlan_tx_tag_present(skb))
136 ethproto = htons(ETH_P_8021Q);
137 else
138 ethproto = h->h_proto;
140 if (e->bitmask & EBT_802_3) {
141 if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO))
142 return 1;
143 } else if (!(e->bitmask & EBT_NOPROTO) &&
144 FWINV2(e->ethproto != ethproto, EBT_IPROTO))
145 return 1;
147 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
148 return 1;
149 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
150 return 1;
151 /* rcu_read_lock()ed by nf_hook_slow */
152 if (in && (p = br_port_get_rcu(in)) != NULL &&
153 FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
154 return 1;
155 if (out && (p = br_port_get_rcu(out)) != NULL &&
156 FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
157 return 1;
159 if (e->bitmask & EBT_SOURCEMAC) {
160 verdict = 0;
161 for (i = 0; i < 6; i++)
162 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
163 e->sourcemsk[i];
164 if (FWINV2(verdict != 0, EBT_ISOURCE) )
165 return 1;
167 if (e->bitmask & EBT_DESTMAC) {
168 verdict = 0;
169 for (i = 0; i < 6; i++)
170 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
171 e->destmsk[i];
172 if (FWINV2(verdict != 0, EBT_IDEST) )
173 return 1;
175 return 0;
178 static inline __pure
179 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
181 return (void *)entry + entry->next_offset;
184 /* Do some firewalling */
185 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
186 const struct net_device *in, const struct net_device *out,
187 struct ebt_table *table)
189 int i, nentries;
190 struct ebt_entry *point;
191 struct ebt_counter *counter_base, *cb_base;
192 const struct ebt_entry_target *t;
193 int verdict, sp = 0;
194 struct ebt_chainstack *cs;
195 struct ebt_entries *chaininfo;
196 const char *base;
197 const struct ebt_table_info *private;
198 struct xt_action_param acpar;
200 acpar.family = NFPROTO_BRIDGE;
201 acpar.in = in;
202 acpar.out = out;
203 acpar.hotdrop = false;
204 acpar.hooknum = hook;
206 read_lock_bh(&table->lock);
207 private = table->private;
208 cb_base = COUNTER_BASE(private->counters, private->nentries,
209 smp_processor_id());
210 if (private->chainstack)
211 cs = private->chainstack[smp_processor_id()];
212 else
213 cs = NULL;
214 chaininfo = private->hook_entry[hook];
215 nentries = private->hook_entry[hook]->nentries;
216 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
217 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
218 /* base for chain jumps */
219 base = private->entries;
220 i = 0;
221 while (i < nentries) {
222 if (ebt_basic_match(point, skb, in, out))
223 goto letscontinue;
225 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
226 goto letscontinue;
227 if (acpar.hotdrop) {
228 read_unlock_bh(&table->lock);
229 return NF_DROP;
232 /* increase counter */
233 (*(counter_base + i)).pcnt++;
234 (*(counter_base + i)).bcnt += skb->len;
236 /* these should only watch: not modify, nor tell us
237 what to do with the packet */
238 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
240 t = (struct ebt_entry_target *)
241 (((char *)point) + point->target_offset);
242 /* standard target */
243 if (!t->u.target->target)
244 verdict = ((struct ebt_standard_target *)t)->verdict;
245 else {
246 acpar.target = t->u.target;
247 acpar.targinfo = t->data;
248 verdict = t->u.target->target(skb, &acpar);
250 if (verdict == EBT_ACCEPT) {
251 read_unlock_bh(&table->lock);
252 return NF_ACCEPT;
254 if (verdict == EBT_DROP) {
255 read_unlock_bh(&table->lock);
256 return NF_DROP;
258 if (verdict == EBT_RETURN) {
259 letsreturn:
260 #ifdef CONFIG_NETFILTER_DEBUG
261 if (sp == 0) {
262 BUGPRINT("RETURN on base chain");
263 /* act like this is EBT_CONTINUE */
264 goto letscontinue;
266 #endif
267 sp--;
268 /* put all the local variables right */
269 i = cs[sp].n;
270 chaininfo = cs[sp].chaininfo;
271 nentries = chaininfo->nentries;
272 point = cs[sp].e;
273 counter_base = cb_base +
274 chaininfo->counter_offset;
275 continue;
277 if (verdict == EBT_CONTINUE)
278 goto letscontinue;
279 #ifdef CONFIG_NETFILTER_DEBUG
280 if (verdict < 0) {
281 BUGPRINT("bogus standard verdict\n");
282 read_unlock_bh(&table->lock);
283 return NF_DROP;
285 #endif
286 /* jump to a udc */
287 cs[sp].n = i + 1;
288 cs[sp].chaininfo = chaininfo;
289 cs[sp].e = ebt_next_entry(point);
290 i = 0;
291 chaininfo = (struct ebt_entries *) (base + verdict);
292 #ifdef CONFIG_NETFILTER_DEBUG
293 if (chaininfo->distinguisher) {
294 BUGPRINT("jump to non-chain\n");
295 read_unlock_bh(&table->lock);
296 return NF_DROP;
298 #endif
299 nentries = chaininfo->nentries;
300 point = (struct ebt_entry *)chaininfo->data;
301 counter_base = cb_base + chaininfo->counter_offset;
302 sp++;
303 continue;
304 letscontinue:
305 point = ebt_next_entry(point);
306 i++;
309 /* I actually like this :) */
310 if (chaininfo->policy == EBT_RETURN)
311 goto letsreturn;
312 if (chaininfo->policy == EBT_ACCEPT) {
313 read_unlock_bh(&table->lock);
314 return NF_ACCEPT;
316 read_unlock_bh(&table->lock);
317 return NF_DROP;
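/* Rough sketch of the traversal above: the per-cpu chainstack cs[] acts as
 * a call stack for jumps into user-defined chains.  With hypothetical
 * chains BASE -> FOO -> BAR:
 *
 *   jump to FOO:  cs[0] = { n = i + 1, chaininfo = BASE, e = next rule }, sp = 1
 *   jump to BAR:  cs[1] = { n = i + 1, chaininfo = FOO,  e = next rule }, sp = 2
 *   EBT_RETURN:   sp--, then i/chaininfo/point/counter_base are restored
 *                 from cs[sp], resuming right after the jump.
 *
 * Falling off the end of a user-defined chain applies that chain's policy;
 * an EBT_RETURN policy pops a frame exactly like the explicit verdict. */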
320 /* If it succeeds, returns element and locks mutex */
321 static inline void *
322 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
323 struct mutex *mutex)
325 struct {
326 struct list_head list;
327 char name[EBT_FUNCTION_MAXNAMELEN];
328 } *e;
330 *error = mutex_lock_interruptible(mutex);
331 if (*error != 0)
332 return NULL;
334 list_for_each_entry(e, head, list) {
335 if (strcmp(e->name, name) == 0)
336 return e;
338 *error = -ENOENT;
339 mutex_unlock(mutex);
340 return NULL;
343 static void *
344 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
345 int *error, struct mutex *mutex)
347 return try_then_request_module(
348 find_inlist_lock_noload(head, name, error, mutex),
349 "%s%s", prefix, name);
352 static inline struct ebt_table *
353 find_table_lock(struct net *net, const char *name, int *error,
354 struct mutex *mutex)
356 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
357 "ebtable_", error, mutex);
360 static inline int
361 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
362 unsigned int *cnt)
364 const struct ebt_entry *e = par->entryinfo;
365 struct xt_match *match;
366 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
367 int ret;
369 if (left < sizeof(struct ebt_entry_match) ||
370 left - sizeof(struct ebt_entry_match) < m->match_size)
371 return -EINVAL;
373 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
374 if (IS_ERR(match))
375 return PTR_ERR(match);
376 m->u.match = match;
378 par->match = match;
379 par->matchinfo = m->data;
380 ret = xt_check_match(par, m->match_size,
381 e->ethproto, e->invflags & EBT_IPROTO);
382 if (ret < 0) {
383 module_put(match->me);
384 return ret;
387 (*cnt)++;
388 return 0;
391 static inline int
392 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
393 unsigned int *cnt)
395 const struct ebt_entry *e = par->entryinfo;
396 struct xt_target *watcher;
397 size_t left = ((char *)e + e->target_offset) - (char *)w;
398 int ret;
400 if (left < sizeof(struct ebt_entry_watcher) ||
401 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
402 return -EINVAL;
404 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
405 if (IS_ERR(watcher))
406 return PTR_ERR(watcher);
407 w->u.watcher = watcher;
409 par->target = watcher;
410 par->targinfo = w->data;
411 ret = xt_check_target(par, w->watcher_size,
412 e->ethproto, e->invflags & EBT_IPROTO);
413 if (ret < 0) {
414 module_put(watcher->me);
415 return ret;
418 (*cnt)++;
419 return 0;
422 static int ebt_verify_pointers(const struct ebt_replace *repl,
423 struct ebt_table_info *newinfo)
425 unsigned int limit = repl->entries_size;
426 unsigned int valid_hooks = repl->valid_hooks;
427 unsigned int offset = 0;
428 int i;
430 for (i = 0; i < NF_BR_NUMHOOKS; i++)
431 newinfo->hook_entry[i] = NULL;
433 newinfo->entries_size = repl->entries_size;
434 newinfo->nentries = repl->nentries;
436 while (offset < limit) {
437 size_t left = limit - offset;
438 struct ebt_entry *e = (void *)newinfo->entries + offset;
440 if (left < sizeof(unsigned int))
441 break;
443 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
444 if ((valid_hooks & (1 << i)) == 0)
445 continue;
446 if ((char __user *)repl->hook_entry[i] ==
447 repl->entries + offset)
448 break;
451 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
452 if (e->bitmask != 0) {
453 /* we make userspace set this right,
454 so there is no misunderstanding */
455 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
456 "in distinguisher\n");
457 return -EINVAL;
459 if (i != NF_BR_NUMHOOKS)
460 newinfo->hook_entry[i] = (struct ebt_entries *)e;
461 if (left < sizeof(struct ebt_entries))
462 break;
463 offset += sizeof(struct ebt_entries);
464 } else {
465 if (left < sizeof(struct ebt_entry))
466 break;
467 if (left < e->next_offset)
468 break;
469 if (e->next_offset < sizeof(struct ebt_entry))
470 return -EINVAL;
471 offset += e->next_offset;
474 if (offset != limit) {
475 BUGPRINT("entries_size too small\n");
476 return -EINVAL;
479 /* check if all valid hooks have a chain */
480 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
481 if (!newinfo->hook_entry[i] &&
482 (valid_hooks & (1 << i))) {
483 BUGPRINT("Valid hook without chain\n");
484 return -EINVAL;
487 return 0;
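/* Shape of the userspace blob that ebt_verify_pointers() just walked
 * (illustrative, two base chains with one rule each):
 *
 *   offset 0  struct ebt_entries   chain header, repl->hook_entry[0] points here
 *   offset A  struct ebt_entry     rule, bitmask has EBT_ENTRY_OR_ENTRIES set
 *   offset B  struct ebt_entries   chain header, repl->hook_entry[1] points here
 *   offset C  struct ebt_entry     rule
 *   offset D  == repl->entries_size, the walk must end exactly here
 *
 * Chain headers are recognised by their first word (the distinguisher,
 * aliased by e->bitmask) being zero; real rules must carry
 * EBT_ENTRY_OR_ENTRIES. */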
491 * this one is very careful, as it is the first function
492 * to parse the userspace data
494 static inline int
495 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
496 const struct ebt_table_info *newinfo,
497 unsigned int *n, unsigned int *cnt,
498 unsigned int *totalcnt, unsigned int *udc_cnt)
500 int i;
502 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
503 if ((void *)e == (void *)newinfo->hook_entry[i])
504 break;
506 /* beginning of a new chain
507 if i == NF_BR_NUMHOOKS it must be a user defined chain */
508 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
509 /* this checks if the previous chain has as many entries
510 as it said it has */
511 if (*n != *cnt) {
512 BUGPRINT("nentries does not equal the nr of entries "
513 "in the chain\n");
514 return -EINVAL;
516 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
517 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
518 /* only RETURN from udc */
519 if (i != NF_BR_NUMHOOKS ||
520 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
521 BUGPRINT("bad policy\n");
522 return -EINVAL;
525 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
526 (*udc_cnt)++;
527 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
528 BUGPRINT("counter_offset != totalcnt");
529 return -EINVAL;
531 *n = ((struct ebt_entries *)e)->nentries;
532 *cnt = 0;
533 return 0;
535 /* a plain old entry, heh */
536 if (sizeof(struct ebt_entry) > e->watchers_offset ||
537 e->watchers_offset > e->target_offset ||
538 e->target_offset >= e->next_offset) {
539 BUGPRINT("entry offsets not in right order\n");
540 return -EINVAL;
542 /* this is not checked anywhere else */
543 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
544 BUGPRINT("target size too small\n");
545 return -EINVAL;
547 (*cnt)++;
548 (*totalcnt)++;
549 return 0;
552 struct ebt_cl_stack
554 struct ebt_chainstack cs;
555 int from;
556 unsigned int hookmask;
560 * we need these positions to check that a jump to a different part of the
561 * entries is a jump to the beginning of a new chain.
563 static inline int
564 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
565 unsigned int *n, struct ebt_cl_stack *udc)
567 int i;
569 /* we're only interested in chain starts */
570 if (e->bitmask)
571 return 0;
572 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
573 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
574 break;
576 /* only care about udc */
577 if (i != NF_BR_NUMHOOKS)
578 return 0;
580 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
581 /* these initialisations are depended on later in check_chainloops() */
582 udc[*n].cs.n = 0;
583 udc[*n].hookmask = 0;
585 (*n)++;
586 return 0;
589 static inline int
590 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
592 struct xt_mtdtor_param par;
594 if (i && (*i)-- == 0)
595 return 1;
597 par.net = net;
598 par.match = m->u.match;
599 par.matchinfo = m->data;
600 par.family = NFPROTO_BRIDGE;
601 if (par.match->destroy != NULL)
602 par.match->destroy(&par);
603 module_put(par.match->me);
604 return 0;
607 static inline int
608 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
610 struct xt_tgdtor_param par;
612 if (i && (*i)-- == 0)
613 return 1;
615 par.net = net;
616 par.target = w->u.watcher;
617 par.targinfo = w->data;
618 par.family = NFPROTO_BRIDGE;
619 if (par.target->destroy != NULL)
620 par.target->destroy(&par);
621 module_put(par.target->me);
622 return 0;
625 static inline int
626 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
628 struct xt_tgdtor_param par;
629 struct ebt_entry_target *t;
631 if (e->bitmask == 0)
632 return 0;
633 /* we're done */
634 if (cnt && (*cnt)-- == 0)
635 return 1;
636 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
637 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
638 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
640 par.net = net;
641 par.target = t->u.target;
642 par.targinfo = t->data;
643 par.family = NFPROTO_BRIDGE;
644 if (par.target->destroy != NULL)
645 par.target->destroy(&par);
646 module_put(par.target->me);
647 return 0;
650 static inline int
651 ebt_check_entry(struct ebt_entry *e, struct net *net,
652 const struct ebt_table_info *newinfo,
653 const char *name, unsigned int *cnt,
654 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
656 struct ebt_entry_target *t;
657 struct xt_target *target;
658 unsigned int i, j, hook = 0, hookmask = 0;
659 size_t gap;
660 int ret;
661 struct xt_mtchk_param mtpar;
662 struct xt_tgchk_param tgpar;
664 /* don't mess with the struct ebt_entries */
665 if (e->bitmask == 0)
666 return 0;
668 if (e->bitmask & ~EBT_F_MASK) {
669 BUGPRINT("Unknown flag for bitmask\n");
670 return -EINVAL;
672 if (e->invflags & ~EBT_INV_MASK) {
673 BUGPRINT("Unknown flag for inv bitmask\n");
674 return -EINVAL;
676 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
677 BUGPRINT("NOPROTO & 802_3 not allowed\n");
678 return -EINVAL;
680 /* what hook do we belong to? */
681 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
682 if (!newinfo->hook_entry[i])
683 continue;
684 if ((char *)newinfo->hook_entry[i] < (char *)e)
685 hook = i;
686 else
687 break;
689 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
690 a base chain */
691 if (i < NF_BR_NUMHOOKS)
692 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
693 else {
694 for (i = 0; i < udc_cnt; i++)
695 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
696 break;
697 if (i == 0)
698 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
699 else
700 hookmask = cl_s[i - 1].hookmask;
702 i = 0;
704 mtpar.net = tgpar.net = net;
705 mtpar.table = tgpar.table = name;
706 mtpar.entryinfo = tgpar.entryinfo = e;
707 mtpar.hook_mask = tgpar.hook_mask = hookmask;
708 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
709 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
710 if (ret != 0)
711 goto cleanup_matches;
712 j = 0;
713 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
714 if (ret != 0)
715 goto cleanup_watchers;
716 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
717 gap = e->next_offset - e->target_offset;
719 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
720 if (IS_ERR(target)) {
721 ret = PTR_ERR(target);
722 goto cleanup_watchers;
725 t->u.target = target;
726 if (t->u.target == &ebt_standard_target) {
727 if (gap < sizeof(struct ebt_standard_target)) {
728 BUGPRINT("Standard target size too big\n");
729 ret = -EFAULT;
730 goto cleanup_watchers;
732 if (((struct ebt_standard_target *)t)->verdict <
733 -NUM_STANDARD_TARGETS) {
734 BUGPRINT("Invalid standard target\n");
735 ret = -EFAULT;
736 goto cleanup_watchers;
738 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
739 module_put(t->u.target->me);
740 ret = -EFAULT;
741 goto cleanup_watchers;
744 tgpar.target = target;
745 tgpar.targinfo = t->data;
746 ret = xt_check_target(&tgpar, t->target_size,
747 e->ethproto, e->invflags & EBT_IPROTO);
748 if (ret < 0) {
749 module_put(target->me);
750 goto cleanup_watchers;
752 (*cnt)++;
753 return 0;
754 cleanup_watchers:
755 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
756 cleanup_matches:
757 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
758 return ret;
762 * checks for loops and sets the hook mask for udc
763 * the hook mask for udc tells us from which base chains the udc can be
764 * accessed. This mask is a parameter to the check() functions of the extensions
766 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
767 unsigned int udc_cnt, unsigned int hooknr, char *base)
769 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
770 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
771 const struct ebt_entry_target *t;
773 while (pos < nentries || chain_nr != -1) {
774 /* end of udc, go back one 'recursion' step */
775 if (pos == nentries) {
776 /* put back values of the time when this chain was called */
777 e = cl_s[chain_nr].cs.e;
778 if (cl_s[chain_nr].from != -1)
779 nentries =
780 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
781 else
782 nentries = chain->nentries;
783 pos = cl_s[chain_nr].cs.n;
784 /* make sure we won't see a loop that isn't one */
785 cl_s[chain_nr].cs.n = 0;
786 chain_nr = cl_s[chain_nr].from;
787 if (pos == nentries)
788 continue;
790 t = (struct ebt_entry_target *)
791 (((char *)e) + e->target_offset);
792 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
793 goto letscontinue;
794 if (e->target_offset + sizeof(struct ebt_standard_target) >
795 e->next_offset) {
796 BUGPRINT("Standard target size too big\n");
797 return -1;
799 verdict = ((struct ebt_standard_target *)t)->verdict;
800 if (verdict >= 0) { /* jump to another chain */
801 struct ebt_entries *hlp2 =
802 (struct ebt_entries *)(base + verdict);
803 for (i = 0; i < udc_cnt; i++)
804 if (hlp2 == cl_s[i].cs.chaininfo)
805 break;
806 /* bad destination or loop */
807 if (i == udc_cnt) {
808 BUGPRINT("bad destination\n");
809 return -1;
811 if (cl_s[i].cs.n) {
812 BUGPRINT("loop\n");
813 return -1;
815 if (cl_s[i].hookmask & (1 << hooknr))
816 goto letscontinue;
817 /* this can't be 0, so the loop test is correct */
818 cl_s[i].cs.n = pos + 1;
819 pos = 0;
820 cl_s[i].cs.e = ebt_next_entry(e);
821 e = (struct ebt_entry *)(hlp2->data);
822 nentries = hlp2->nentries;
823 cl_s[i].from = chain_nr;
824 chain_nr = i;
825 /* this udc is accessible from the base chain for hooknr */
826 cl_s[i].hookmask |= (1 << hooknr);
827 continue;
829 letscontinue:
830 e = ebt_next_entry(e);
831 pos++;
833 return 0;
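/* Loop detection above in a nutshell: when the walk enters a user-defined
 * chain i, cl_s[i].cs.n is set to the non-zero value pos + 1, and it is
 * cleared again once the walk returns from that chain.  Jumping to a chain
 * whose cs.n is still non-zero therefore means that chain is already on the
 * current path, i.e. a loop.  cl_s[i].hookmask accumulates the base chains
 * from which chain i is reachable; a chain already marked for this hooknr
 * is not walked again, and the final mask is what the extensions later see
 * as par->hook_mask in their checkentry functions. */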
836 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
837 static int translate_table(struct net *net, const char *name,
838 struct ebt_table_info *newinfo)
840 unsigned int i, j, k, udc_cnt;
841 int ret;
842 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
844 i = 0;
845 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
846 i++;
847 if (i == NF_BR_NUMHOOKS) {
848 BUGPRINT("No valid hooks specified\n");
849 return -EINVAL;
851 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
852 BUGPRINT("Chains don't start at beginning\n");
853 return -EINVAL;
855 /* make sure chains are ordered after each other in the same order
856 as their corresponding hooks */
857 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
858 if (!newinfo->hook_entry[j])
859 continue;
860 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
861 BUGPRINT("Hook order must be followed\n");
862 return -EINVAL;
864 i = j;
867 /* do some early checks and initialize some things */
868 i = 0; /* holds the expected nr. of entries for the chain */
869 j = 0; /* holds the up to now counted entries for the chain */
870 k = 0; /* holds the total nr. of entries, should equal
871 newinfo->nentries afterwards */
872 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
873 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
874 ebt_check_entry_size_and_hooks, newinfo,
875 &i, &j, &k, &udc_cnt);
877 if (ret != 0)
878 return ret;
880 if (i != j) {
881 BUGPRINT("nentries does not equal the nr of entries in the "
882 "(last) chain\n");
883 return -EINVAL;
885 if (k != newinfo->nentries) {
886 BUGPRINT("Total nentries is wrong\n");
887 return -EINVAL;
890 /* get the location of the udc, put them in an array
891 while we're at it, allocate the chainstack */
892 if (udc_cnt) {
893 /* this will get free'd in do_replace()/ebt_register_table()
894 if an error occurs */
895 newinfo->chainstack =
896 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
897 if (!newinfo->chainstack)
898 return -ENOMEM;
899 for_each_possible_cpu(i) {
900 newinfo->chainstack[i] =
901 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
902 if (!newinfo->chainstack[i]) {
903 while (i)
904 vfree(newinfo->chainstack[--i]);
905 vfree(newinfo->chainstack);
906 newinfo->chainstack = NULL;
907 return -ENOMEM;
911 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
912 if (!cl_s)
913 return -ENOMEM;
914 i = 0; /* the i'th udc */
915 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
916 ebt_get_udc_positions, newinfo, &i, cl_s);
917 /* sanity check */
918 if (i != udc_cnt) {
919 BUGPRINT("i != udc_cnt\n");
920 vfree(cl_s);
921 return -EFAULT;
925 /* Check for loops */
926 for (i = 0; i < NF_BR_NUMHOOKS; i++)
927 if (newinfo->hook_entry[i])
928 if (check_chainloops(newinfo->hook_entry[i],
929 cl_s, udc_cnt, i, newinfo->entries)) {
930 vfree(cl_s);
931 return -EINVAL;
934 /* we now know the following (along with E=mc²):
935 - the nr of entries in each chain is right
936 - the size of the allocated space is right
937 - all valid hooks have a corresponding chain
938 - there are no loops
939 - wrong data can still be on the level of a single entry
940 - could be there are jumps to places that are not the
941 beginning of a chain. This can only occur in chains that
942 are not accessible from any base chains, so we don't care. */
944 /* used to know what we need to clean up if something goes wrong */
945 i = 0;
946 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
947 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
948 if (ret != 0) {
949 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
950 ebt_cleanup_entry, net, &i);
952 vfree(cl_s);
953 return ret;
956 /* called under write_lock */
957 static void get_counters(const struct ebt_counter *oldcounters,
958 struct ebt_counter *counters, unsigned int nentries)
960 int i, cpu;
961 struct ebt_counter *counter_base;
963 /* counters of cpu 0 */
964 memcpy(counters, oldcounters,
965 sizeof(struct ebt_counter) * nentries);
967 /* add other counters to those of cpu 0 */
968 for_each_possible_cpu(cpu) {
969 if (cpu == 0)
970 continue;
971 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
972 for (i = 0; i < nentries; i++) {
973 counters[i].pcnt += counter_base[i].pcnt;
974 counters[i].bcnt += counter_base[i].bcnt;
979 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
980 struct ebt_table_info *newinfo)
982 int ret, i;
983 struct ebt_counter *counterstmp = NULL;
984 /* used to be able to unlock earlier */
985 struct ebt_table_info *table;
986 struct ebt_table *t;
988 /* the user wants counters back
989 the check on the size is done later, when we have the lock */
990 if (repl->num_counters) {
991 unsigned long size = repl->num_counters * sizeof(*counterstmp);
992 counterstmp = vmalloc(size);
993 if (!counterstmp)
994 return -ENOMEM;
997 newinfo->chainstack = NULL;
998 ret = ebt_verify_pointers(repl, newinfo);
999 if (ret != 0)
1000 goto free_counterstmp;
1002 ret = translate_table(net, repl->name, newinfo);
1004 if (ret != 0)
1005 goto free_counterstmp;
1007 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1008 if (!t) {
1009 ret = -ENOENT;
1010 goto free_iterate;
1013 /* the table doesn't like it */
1014 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1015 goto free_unlock;
1017 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1018 BUGPRINT("Wrong nr. of counters requested\n");
1019 ret = -EINVAL;
1020 goto free_unlock;
1023 /* we have the mutex lock, so no danger in reading this pointer */
1024 table = t->private;
1025 /* make sure the table can only be rmmod'ed if it contains no rules */
1026 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1027 ret = -ENOENT;
1028 goto free_unlock;
1029 } else if (table->nentries && !newinfo->nentries)
1030 module_put(t->me);
1031 /* we need an atomic snapshot of the counters */
1032 write_lock_bh(&t->lock);
1033 if (repl->num_counters)
1034 get_counters(t->private->counters, counterstmp,
1035 t->private->nentries);
1037 t->private = newinfo;
1038 write_unlock_bh(&t->lock);
1039 mutex_unlock(&ebt_mutex);
1040 /* so, a user can change the chains while having messed up her counter
1041 allocation. Only reason why this is done is because this way the lock
1042 is held only once, while this doesn't bring the kernel into a
1043 dangerous state. */
1044 if (repl->num_counters &&
1045 copy_to_user(repl->counters, counterstmp,
1046 repl->num_counters * sizeof(struct ebt_counter))) {
1047 ret = -EFAULT;
1049 else
1050 ret = 0;
1052 /* decrease module count and free resources */
1053 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1054 ebt_cleanup_entry, net, NULL);
1056 vfree(table->entries);
1057 if (table->chainstack) {
1058 for_each_possible_cpu(i)
1059 vfree(table->chainstack[i]);
1060 vfree(table->chainstack);
1062 vfree(table);
1064 vfree(counterstmp);
1065 return ret;
1067 free_unlock:
1068 mutex_unlock(&ebt_mutex);
1069 free_iterate:
1070 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1071 ebt_cleanup_entry, net, NULL);
1072 free_counterstmp:
1073 vfree(counterstmp);
1074 /* can be initialized in translate_table() */
1075 if (newinfo->chainstack) {
1076 for_each_possible_cpu(i)
1077 vfree(newinfo->chainstack[i]);
1078 vfree(newinfo->chainstack);
1080 return ret;
1083 /* replace the table */
1084 static int do_replace(struct net *net, const void __user *user,
1085 unsigned int len)
1087 int ret, countersize;
1088 struct ebt_table_info *newinfo;
1089 struct ebt_replace tmp;
1091 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1092 return -EFAULT;
1094 if (len != sizeof(tmp) + tmp.entries_size) {
1095 BUGPRINT("Wrong len argument\n");
1096 return -EINVAL;
1099 if (tmp.entries_size == 0) {
1100 BUGPRINT("Entries_size never zero\n");
1101 return -EINVAL;
1103 /* overflow check */
1104 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1105 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1106 return -ENOMEM;
1107 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1108 return -ENOMEM;
1110 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1111 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1112 if (!newinfo)
1113 return -ENOMEM;
1115 if (countersize)
1116 memset(newinfo->counters, 0, countersize);
1118 newinfo->entries = vmalloc(tmp.entries_size);
1119 if (!newinfo->entries) {
1120 ret = -ENOMEM;
1121 goto free_newinfo;
1123 if (copy_from_user(
1124 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1125 BUGPRINT("Couldn't copy entries from userspace\n");
1126 ret = -EFAULT;
1127 goto free_entries;
1130 ret = do_replace_finish(net, &tmp, newinfo);
1131 if (ret == 0)
1132 return ret;
1133 free_entries:
1134 vfree(newinfo->entries);
1135 free_newinfo:
1136 vfree(newinfo);
1137 return ret;
1140 struct ebt_table *
1141 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1143 struct ebt_table_info *newinfo;
1144 struct ebt_table *t, *table;
1145 struct ebt_replace_kernel *repl;
1146 int ret, i, countersize;
1147 void *p;
1149 if (input_table == NULL || (repl = input_table->table) == NULL ||
1150 repl->entries == NULL || repl->entries_size == 0 ||
1151 repl->counters != NULL || input_table->private != NULL) {
1152 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1153 return ERR_PTR(-EINVAL);
1156 /* Don't add one table to multiple lists. */
1157 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1158 if (!table) {
1159 ret = -ENOMEM;
1160 goto out;
1163 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1164 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1165 ret = -ENOMEM;
1166 if (!newinfo)
1167 goto free_table;
1169 p = vmalloc(repl->entries_size);
1170 if (!p)
1171 goto free_newinfo;
1173 memcpy(p, repl->entries, repl->entries_size);
1174 newinfo->entries = p;
1176 newinfo->entries_size = repl->entries_size;
1177 newinfo->nentries = repl->nentries;
1179 if (countersize)
1180 memset(newinfo->counters, 0, countersize);
1182 /* fill in newinfo and parse the entries */
1183 newinfo->chainstack = NULL;
1184 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1185 if ((repl->valid_hooks & (1 << i)) == 0)
1186 newinfo->hook_entry[i] = NULL;
1187 else
1188 newinfo->hook_entry[i] = p +
1189 ((char *)repl->hook_entry[i] - repl->entries);
1191 ret = translate_table(net, repl->name, newinfo);
1192 if (ret != 0) {
1193 BUGPRINT("Translate_table failed\n");
1194 goto free_chainstack;
1197 if (table->check && table->check(newinfo, table->valid_hooks)) {
1198 BUGPRINT("The table doesn't like its own initial data, lol\n");
1199 return ERR_PTR(-EINVAL);
1202 table->private = newinfo;
1203 rwlock_init(&table->lock);
1204 ret = mutex_lock_interruptible(&ebt_mutex);
1205 if (ret != 0)
1206 goto free_chainstack;
1208 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1209 if (strcmp(t->name, table->name) == 0) {
1210 ret = -EEXIST;
1211 BUGPRINT("Table name already exists\n");
1212 goto free_unlock;
1216 /* Hold a reference count if the chains aren't empty */
1217 if (newinfo->nentries && !try_module_get(table->me)) {
1218 ret = -ENOENT;
1219 goto free_unlock;
1221 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1222 mutex_unlock(&ebt_mutex);
1223 return table;
1224 free_unlock:
1225 mutex_unlock(&ebt_mutex);
1226 free_chainstack:
1227 if (newinfo->chainstack) {
1228 for_each_possible_cpu(i)
1229 vfree(newinfo->chainstack[i]);
1230 vfree(newinfo->chainstack);
1232 vfree(newinfo->entries);
1233 free_newinfo:
1234 vfree(newinfo);
1235 free_table:
1236 kfree(table);
1237 out:
1238 return ERR_PTR(ret);
1241 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1243 int i;
1245 if (!table) {
1246 BUGPRINT("Request to unregister NULL table!!!\n");
1247 return;
1249 mutex_lock(&ebt_mutex);
1250 list_del(&table->list);
1251 mutex_unlock(&ebt_mutex);
1252 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1253 ebt_cleanup_entry, net, NULL);
1254 if (table->private->nentries)
1255 module_put(table->me);
1256 vfree(table->private->entries);
1257 if (table->private->chainstack) {
1258 for_each_possible_cpu(i)
1259 vfree(table->private->chainstack[i]);
1260 vfree(table->private->chainstack);
1262 vfree(table->private);
1263 kfree(table);
1266 /* userspace just supplied us with counters */
1267 static int do_update_counters(struct net *net, const char *name,
1268 struct ebt_counter __user *counters,
1269 unsigned int num_counters,
1270 const void __user *user, unsigned int len)
1272 int i, ret;
1273 struct ebt_counter *tmp;
1274 struct ebt_table *t;
1276 if (num_counters == 0)
1277 return -EINVAL;
1279 tmp = vmalloc(num_counters * sizeof(*tmp));
1280 if (!tmp)
1281 return -ENOMEM;
1283 t = find_table_lock(net, name, &ret, &ebt_mutex);
1284 if (!t)
1285 goto free_tmp;
1287 if (num_counters != t->private->nentries) {
1288 BUGPRINT("Wrong nr of counters\n");
1289 ret = -EINVAL;
1290 goto unlock_mutex;
1293 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1294 ret = -EFAULT;
1295 goto unlock_mutex;
1298 /* we want an atomic add of the counters */
1299 write_lock_bh(&t->lock);
1301 /* we add to the counters of the first cpu */
1302 for (i = 0; i < num_counters; i++) {
1303 t->private->counters[i].pcnt += tmp[i].pcnt;
1304 t->private->counters[i].bcnt += tmp[i].bcnt;
1307 write_unlock_bh(&t->lock);
1308 ret = 0;
1309 unlock_mutex:
1310 mutex_unlock(&ebt_mutex);
1311 free_tmp:
1312 vfree(tmp);
1313 return ret;
1316 static int update_counters(struct net *net, const void __user *user,
1317 unsigned int len)
1319 struct ebt_replace hlp;
1321 if (copy_from_user(&hlp, user, sizeof(hlp)))
1322 return -EFAULT;
1324 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1325 return -EINVAL;
1327 return do_update_counters(net, hlp.name, hlp.counters,
1328 hlp.num_counters, user, len);
1331 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1332 const char *base, char __user *ubase)
1334 char __user *hlp = ubase + ((char *)m - base);
1335 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1336 return -EFAULT;
1337 return 0;
1340 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1341 const char *base, char __user *ubase)
1343 char __user *hlp = ubase + ((char *)w - base);
1344 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1345 return -EFAULT;
1346 return 0;
1349 static inline int
1350 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1352 int ret;
1353 char __user *hlp;
1354 const struct ebt_entry_target *t;
1356 if (e->bitmask == 0)
1357 return 0;
1359 hlp = ubase + (((char *)e + e->target_offset) - base);
1360 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1362 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1363 if (ret != 0)
1364 return ret;
1365 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1366 if (ret != 0)
1367 return ret;
1368 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1369 return -EFAULT;
1370 return 0;
1373 static int copy_counters_to_user(struct ebt_table *t,
1374 const struct ebt_counter *oldcounters,
1375 void __user *user, unsigned int num_counters,
1376 unsigned int nentries)
1378 struct ebt_counter *counterstmp;
1379 int ret = 0;
1381 /* userspace might not need the counters */
1382 if (num_counters == 0)
1383 return 0;
1385 if (num_counters != nentries) {
1386 BUGPRINT("Num_counters wrong\n");
1387 return -EINVAL;
1390 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1391 if (!counterstmp)
1392 return -ENOMEM;
1394 write_lock_bh(&t->lock);
1395 get_counters(oldcounters, counterstmp, nentries);
1396 write_unlock_bh(&t->lock);
1398 if (copy_to_user(user, counterstmp,
1399 nentries * sizeof(struct ebt_counter)))
1400 ret = -EFAULT;
1401 vfree(counterstmp);
1402 return ret;
1405 /* called with ebt_mutex locked */
1406 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1407 const int *len, int cmd)
1409 struct ebt_replace tmp;
1410 const struct ebt_counter *oldcounters;
1411 unsigned int entries_size, nentries;
1412 int ret;
1413 char *entries;
1415 if (cmd == EBT_SO_GET_ENTRIES) {
1416 entries_size = t->private->entries_size;
1417 nentries = t->private->nentries;
1418 entries = t->private->entries;
1419 oldcounters = t->private->counters;
1420 } else {
1421 entries_size = t->table->entries_size;
1422 nentries = t->table->nentries;
1423 entries = t->table->entries;
1424 oldcounters = t->table->counters;
1427 if (copy_from_user(&tmp, user, sizeof(tmp)))
1428 return -EFAULT;
1430 if (*len != sizeof(struct ebt_replace) + entries_size +
1431 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1432 return -EINVAL;
1434 if (tmp.nentries != nentries) {
1435 BUGPRINT("Nentries wrong\n");
1436 return -EINVAL;
1439 if (tmp.entries_size != entries_size) {
1440 BUGPRINT("Wrong size\n");
1441 return -EINVAL;
1444 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1445 tmp.num_counters, nentries);
1446 if (ret)
1447 return ret;
1449 if (copy_to_user(tmp.entries, entries, entries_size)) {
1450 BUGPRINT("Couldn't copy entries to userspace\n");
1451 return -EFAULT;
1453 /* set the match/watcher/target names right */
1454 return EBT_ENTRY_ITERATE(entries, entries_size,
1455 ebt_make_names, entries, tmp.entries);
1458 static int do_ebt_set_ctl(struct sock *sk,
1459 int cmd, void __user *user, unsigned int len)
1461 int ret;
1463 if (!capable(CAP_NET_ADMIN))
1464 return -EPERM;
1466 switch(cmd) {
1467 case EBT_SO_SET_ENTRIES:
1468 ret = do_replace(sock_net(sk), user, len);
1469 break;
1470 case EBT_SO_SET_COUNTERS:
1471 ret = update_counters(sock_net(sk), user, len);
1472 break;
1473 default:
1474 ret = -EINVAL;
1476 return ret;
1479 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1481 int ret;
1482 struct ebt_replace tmp;
1483 struct ebt_table *t;
1485 if (!capable(CAP_NET_ADMIN))
1486 return -EPERM;
1488 if (copy_from_user(&tmp, user, sizeof(tmp)))
1489 return -EFAULT;
1491 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1492 if (!t)
1493 return ret;
1495 switch(cmd) {
1496 case EBT_SO_GET_INFO:
1497 case EBT_SO_GET_INIT_INFO:
1498 if (*len != sizeof(struct ebt_replace)){
1499 ret = -EINVAL;
1500 mutex_unlock(&ebt_mutex);
1501 break;
1503 if (cmd == EBT_SO_GET_INFO) {
1504 tmp.nentries = t->private->nentries;
1505 tmp.entries_size = t->private->entries_size;
1506 tmp.valid_hooks = t->valid_hooks;
1507 } else {
1508 tmp.nentries = t->table->nentries;
1509 tmp.entries_size = t->table->entries_size;
1510 tmp.valid_hooks = t->table->valid_hooks;
1512 mutex_unlock(&ebt_mutex);
1513 if (copy_to_user(user, &tmp, *len) != 0){
1514 BUGPRINT("c2u Didn't work\n");
1515 ret = -EFAULT;
1516 break;
1518 ret = 0;
1519 break;
1521 case EBT_SO_GET_ENTRIES:
1522 case EBT_SO_GET_INIT_ENTRIES:
1523 ret = copy_everything_to_user(t, user, len, cmd);
1524 mutex_unlock(&ebt_mutex);
1525 break;
1527 default:
1528 mutex_unlock(&ebt_mutex);
1529 ret = -EINVAL;
1532 return ret;
1535 #ifdef CONFIG_COMPAT
1536 /* 32-bit userspace compatibility definitions. */
1537 struct compat_ebt_replace {
1538 char name[EBT_TABLE_MAXNAMELEN];
1539 compat_uint_t valid_hooks;
1540 compat_uint_t nentries;
1541 compat_uint_t entries_size;
1542 /* start of the chains */
1543 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1544 /* nr of counters userspace expects back */
1545 compat_uint_t num_counters;
1546 /* where the kernel will put the old counters. */
1547 compat_uptr_t counters;
1548 compat_uptr_t entries;
1551 /* struct ebt_entry_match, _target and _watcher have same layout */
1552 struct compat_ebt_entry_mwt {
1553 union {
1554 char name[EBT_FUNCTION_MAXNAMELEN];
1555 compat_uptr_t ptr;
1556 } u;
1557 compat_uint_t match_size;
1558 compat_uint_t data[0];
1561 /* account for possible padding between match_size and ->data */
1562 static int ebt_compat_entry_padsize(void)
1564 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1565 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1566 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1567 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
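/* Illustrative numbers (typical 64-bit kernel with 32-bit userland and
 * EBT_FUNCTION_MAXNAMELEN == 32): the native ebt_entry_match carries a
 * kernel pointer in its union, so XT_ALIGN(sizeof(struct ebt_entry_match))
 * works out to 40 bytes, while COMPAT_XT_ALIGN(sizeof(struct
 * compat_ebt_entry_mwt)) stays at 36.  ebt_compat_entry_padsize() then
 * returns 4: the padding inserted before ->data of every match, watcher
 * and target when translating 32 -> 64 bit. */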
1570 static int ebt_compat_match_offset(const struct xt_match *match,
1571 unsigned int userlen)
1574 * ebt_among needs special handling. The kernel .matchsize is
1575 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1576 * value is expected.
1577 * Example: userspace sends 4500, ebt_among.c wants 4504.
1579 if (unlikely(match->matchsize == -1))
1580 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1581 return xt_compat_match_offset(match);
1584 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1585 unsigned int *size)
1587 const struct xt_match *match = m->u.match;
1588 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1589 int off = ebt_compat_match_offset(match, m->match_size);
1590 compat_uint_t msize = m->match_size - off;
1592 BUG_ON(off >= m->match_size);
1594 if (copy_to_user(cm->u.name, match->name,
1595 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1596 return -EFAULT;
1598 if (match->compat_to_user) {
1599 if (match->compat_to_user(cm->data, m->data))
1600 return -EFAULT;
1601 } else if (copy_to_user(cm->data, m->data, msize))
1602 return -EFAULT;
1604 *size -= ebt_compat_entry_padsize() + off;
1605 *dstptr = cm->data;
1606 *dstptr += msize;
1607 return 0;
1610 static int compat_target_to_user(struct ebt_entry_target *t,
1611 void __user **dstptr,
1612 unsigned int *size)
1614 const struct xt_target *target = t->u.target;
1615 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1616 int off = xt_compat_target_offset(target);
1617 compat_uint_t tsize = t->target_size - off;
1619 BUG_ON(off >= t->target_size);
1621 if (copy_to_user(cm->u.name, target->name,
1622 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1623 return -EFAULT;
1625 if (target->compat_to_user) {
1626 if (target->compat_to_user(cm->data, t->data))
1627 return -EFAULT;
1628 } else if (copy_to_user(cm->data, t->data, tsize))
1629 return -EFAULT;
1631 *size -= ebt_compat_entry_padsize() + off;
1632 *dstptr = cm->data;
1633 *dstptr += tsize;
1634 return 0;
1637 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1638 void __user **dstptr,
1639 unsigned int *size)
1641 return compat_target_to_user((struct ebt_entry_target *)w,
1642 dstptr, size);
1645 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1646 unsigned int *size)
1648 struct ebt_entry_target *t;
1649 struct ebt_entry __user *ce;
1650 u32 watchers_offset, target_offset, next_offset;
1651 compat_uint_t origsize;
1652 int ret;
1654 if (e->bitmask == 0) {
1655 if (*size < sizeof(struct ebt_entries))
1656 return -EINVAL;
1657 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1658 return -EFAULT;
1660 *dstptr += sizeof(struct ebt_entries);
1661 *size -= sizeof(struct ebt_entries);
1662 return 0;
1665 if (*size < sizeof(*ce))
1666 return -EINVAL;
1668 ce = (struct ebt_entry __user *)*dstptr;
1669 if (copy_to_user(ce, e, sizeof(*ce)))
1670 return -EFAULT;
1672 origsize = *size;
1673 *dstptr += sizeof(*ce);
1675 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1676 if (ret)
1677 return ret;
1678 watchers_offset = e->watchers_offset - (origsize - *size);
1680 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1681 if (ret)
1682 return ret;
1683 target_offset = e->target_offset - (origsize - *size);
1685 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1687 ret = compat_target_to_user(t, dstptr, size);
1688 if (ret)
1689 return ret;
1690 next_offset = e->next_offset - (origsize - *size);
1692 if (put_user(watchers_offset, &ce->watchers_offset) ||
1693 put_user(target_offset, &ce->target_offset) ||
1694 put_user(next_offset, &ce->next_offset))
1695 return -EFAULT;
1697 *size -= sizeof(*ce);
1698 return 0;
1701 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1703 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1704 *off += ebt_compat_entry_padsize();
1705 return 0;
1708 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1710 *off += xt_compat_target_offset(w->u.watcher);
1711 *off += ebt_compat_entry_padsize();
1712 return 0;
1715 static int compat_calc_entry(const struct ebt_entry *e,
1716 const struct ebt_table_info *info,
1717 const void *base,
1718 struct compat_ebt_replace *newinfo)
1720 const struct ebt_entry_target *t;
1721 unsigned int entry_offset;
1722 int off, ret, i;
1724 if (e->bitmask == 0)
1725 return 0;
1727 off = 0;
1728 entry_offset = (void *)e - base;
1730 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1731 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1733 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1735 off += xt_compat_target_offset(t->u.target);
1736 off += ebt_compat_entry_padsize();
1738 newinfo->entries_size -= off;
1740 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1741 if (ret)
1742 return ret;
1744 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1745 const void *hookptr = info->hook_entry[i];
1746 if (info->hook_entry[i] &&
1747 (e < (struct ebt_entry *)(base - hookptr))) {
1748 newinfo->hook_entry[i] -= off;
1749 pr_debug("0x%08X -> 0x%08X\n",
1750 newinfo->hook_entry[i] + off,
1751 newinfo->hook_entry[i]);
1755 return 0;
1759 static int compat_table_info(const struct ebt_table_info *info,
1760 struct compat_ebt_replace *newinfo)
1762 unsigned int size = info->entries_size;
1763 const void *entries = info->entries;
1765 newinfo->entries_size = size;
1767 xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1768 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1769 entries, newinfo);
1772 static int compat_copy_everything_to_user(struct ebt_table *t,
1773 void __user *user, int *len, int cmd)
1775 struct compat_ebt_replace repl, tmp;
1776 struct ebt_counter *oldcounters;
1777 struct ebt_table_info tinfo;
1778 int ret;
1779 void __user *pos;
1781 memset(&tinfo, 0, sizeof(tinfo));
1783 if (cmd == EBT_SO_GET_ENTRIES) {
1784 tinfo.entries_size = t->private->entries_size;
1785 tinfo.nentries = t->private->nentries;
1786 tinfo.entries = t->private->entries;
1787 oldcounters = t->private->counters;
1788 } else {
1789 tinfo.entries_size = t->table->entries_size;
1790 tinfo.nentries = t->table->nentries;
1791 tinfo.entries = t->table->entries;
1792 oldcounters = t->table->counters;
1795 if (copy_from_user(&tmp, user, sizeof(tmp)))
1796 return -EFAULT;
1798 if (tmp.nentries != tinfo.nentries ||
1799 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1800 return -EINVAL;
1802 memcpy(&repl, &tmp, sizeof(repl));
1803 if (cmd == EBT_SO_GET_ENTRIES)
1804 ret = compat_table_info(t->private, &repl);
1805 else
1806 ret = compat_table_info(&tinfo, &repl);
1807 if (ret)
1808 return ret;
1810 if (*len != sizeof(tmp) + repl.entries_size +
1811 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1812 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1813 *len, tinfo.entries_size, repl.entries_size);
1814 return -EINVAL;
1817 /* userspace might not need the counters */
1818 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1819 tmp.num_counters, tinfo.nentries);
1820 if (ret)
1821 return ret;
1823 pos = compat_ptr(tmp.entries);
1824 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1825 compat_copy_entry_to_user, &pos, &tmp.entries_size);
1828 struct ebt_entries_buf_state {
1829 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1830 u32 buf_kern_len; /* total size of kernel buffer */
1831 u32 buf_kern_offset; /* amount of data copied so far */
1832 u32 buf_user_offset; /* read position in userspace buffer */
1835 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1837 state->buf_kern_offset += sz;
1838 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1841 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1842 void *data, unsigned int sz)
1844 if (state->buf_kern_start == NULL)
1845 goto count_only;
1847 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1849 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1851 count_only:
1852 state->buf_user_offset += sz;
1853 return ebt_buf_count(state, sz);
1856 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1858 char *b = state->buf_kern_start;
1860 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1862 if (b != NULL && sz > 0)
1863 memset(b + state->buf_kern_offset, 0, sz);
1864 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1865 return ebt_buf_count(state, sz);
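/* How ebt_entries_buf_state is used (sketch): the 32-bit blob is walked
 * twice.  On the first pass buf_kern_start is NULL, so ebt_buf_add() and
 * ebt_buf_add_pad() only advance the offsets, which yields the size the
 * translated 64-bit image will need.  The caller can then allocate a
 * kernel buffer of that size and repeat the same walk with buf_kern_start
 * set, this time actually copying and zero-padding.  buf_user_offset
 * tracks how much of the 32-bit source has been consumed, buf_kern_offset
 * how much of the 64-bit destination has been produced; their difference
 * is the growth added so far. */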
1868 enum compat_mwt {
1869 EBT_COMPAT_MATCH,
1870 EBT_COMPAT_WATCHER,
1871 EBT_COMPAT_TARGET,
1874 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1875 enum compat_mwt compat_mwt,
1876 struct ebt_entries_buf_state *state,
1877 const unsigned char *base)
1879 char name[EBT_FUNCTION_MAXNAMELEN];
1880 struct xt_match *match;
1881 struct xt_target *wt;
1882 void *dst = NULL;
1883 int off, pad = 0, ret = 0;
1884 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1886 strlcpy(name, mwt->u.name, sizeof(name));
1888 if (state->buf_kern_start)
1889 dst = state->buf_kern_start + state->buf_kern_offset;
1891 entry_offset = (unsigned char *) mwt - base;
1892 switch (compat_mwt) {
1893 case EBT_COMPAT_MATCH:
1894 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1895 name, 0), "ebt_%s", name);
1896 if (match == NULL)
1897 return -ENOENT;
1898 if (IS_ERR(match))
1899 return PTR_ERR(match);
1901 off = ebt_compat_match_offset(match, match_size);
1902 if (dst) {
1903 if (match->compat_from_user)
1904 match->compat_from_user(dst, mwt->data);
1905 else
1906 memcpy(dst, mwt->data, match_size);
1909 size_kern = match->matchsize;
1910 if (unlikely(size_kern == -1))
1911 size_kern = match_size;
1912 module_put(match->me);
1913 break;
1914 case EBT_COMPAT_WATCHER: /* fallthrough */
1915 case EBT_COMPAT_TARGET:
1916 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1917 name, 0), "ebt_%s", name);
1918 if (wt == NULL)
1919 return -ENOENT;
1920 if (IS_ERR(wt))
1921 return PTR_ERR(wt);
1922 off = xt_compat_target_offset(wt);
1924 if (dst) {
1925 if (wt->compat_from_user)
1926 wt->compat_from_user(dst, mwt->data);
1927 else
1928 memcpy(dst, mwt->data, match_size);
1931 size_kern = wt->targetsize;
1932 module_put(wt->me);
1933 break;
1936 if (!dst) {
1937 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1938 off + ebt_compat_entry_padsize());
1939 if (ret < 0)
1940 return ret;
1943 state->buf_kern_offset += match_size + off;
1944 state->buf_user_offset += match_size;
1945 pad = XT_ALIGN(size_kern) - size_kern;
1947 if (pad > 0 && dst) {
1948 BUG_ON(state->buf_kern_len <= pad);
1949 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1950 memset(dst + size_kern, 0, pad);
1952 return off + match_size;
1956 * return size of all matches, watchers or target, including necessary
1957 * alignment and padding.
1959 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1960 unsigned int size_left, enum compat_mwt type,
1961 struct ebt_entries_buf_state *state, const void *base)
1963 int growth = 0;
1964 char *buf;
1966 if (size_left == 0)
1967 return 0;
1969 buf = (char *) match32;
1971 while (size_left >= sizeof(*match32)) {
1972 struct ebt_entry_match *match_kern;
1973 int ret;
1975 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1976 if (match_kern) {
1977 char *tmp;
1978 tmp = state->buf_kern_start + state->buf_kern_offset;
1979 match_kern = (struct ebt_entry_match *) tmp;
1981 ret = ebt_buf_add(state, buf, sizeof(*match32));
1982 if (ret < 0)
1983 return ret;
1984 size_left -= sizeof(*match32);
1986 /* add padding before match->data (if any) */
1987 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1988 if (ret < 0)
1989 return ret;
1991 if (match32->match_size > size_left)
1992 return -EINVAL;
1994 size_left -= match32->match_size;
1996 ret = compat_mtw_from_user(match32, type, state, base);
1997 if (ret < 0)
1998 return ret;
2000 BUG_ON(ret < match32->match_size);
2001 growth += ret - match32->match_size;
2002 growth += ebt_compat_entry_padsize();
2004 buf += sizeof(*match32);
2005 buf += match32->match_size;
2007 if (match_kern)
2008 match_kern->match_size = ret;
2010 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2011 match32 = (struct compat_ebt_entry_mwt *) buf;
2014 return growth;
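/* "growth" accounting (sketch): for every match/watcher/target the loop
 * above adds ebt_compat_entry_padsize() plus the structure-size delta
 * returned by compat_mtw_from_user(), i.e. how many bytes that extension
 * becomes larger once translated.  Its caller, size_entry_mwt(), then adds
 * this growth to the copied watchers/target/next offsets so that they keep
 * pointing at the right places inside the 64-bit image. */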
2017 #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2018 ({ \
2019 unsigned int __i; \
2020 int __ret = 0; \
2021 struct compat_ebt_entry_mwt *__watcher; \
2023 for (__i = e->watchers_offset; \
2024 __i < (e)->target_offset; \
2025 __i += __watcher->watcher_size + \
2026 sizeof(struct compat_ebt_entry_mwt)) { \
2027 __watcher = (void *)(e) + __i; \
2028 __ret = fn(__watcher , ## args); \
2029 if (__ret != 0) \
2030 break; \
2032 if (__ret == 0) { \
2033 if (__i != (e)->target_offset) \
2034 __ret = -EINVAL; \
2036 __ret; \
2039 #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2040 ({ \
2041 unsigned int __i; \
2042 int __ret = 0; \
2043 struct compat_ebt_entry_mwt *__match; \
2045 for (__i = sizeof(struct ebt_entry); \
2046 __i < (e)->watchers_offset; \
2047 __i += __match->match_size + \
2048 sizeof(struct compat_ebt_entry_mwt)) { \
2049 __match = (void *)(e) + __i; \
2050 __ret = fn(__match , ## args); \
2051 if (__ret != 0) \
2052 break; \
2054 if (__ret == 0) { \
2055 if (__i != (e)->watchers_offset) \
2056 __ret = -EINVAL; \
2058 __ret; \
2061 /* called for all ebt_entry structures. */
2062 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2063 unsigned int *total,
2064 struct ebt_entries_buf_state *state)
2066 unsigned int i, j, startoff, new_offset = 0;
2067 /* stores the offsets of matches, watchers, target and the next struct ebt_entry: */
2068 unsigned int offsets[4];
2069 unsigned int *offsets_update = NULL;
2070 int ret;
2071 char *buf_start;
2073 if (*total < sizeof(struct ebt_entries))
2074 return -EINVAL;
2076 if (!entry->bitmask) {
2077 *total -= sizeof(struct ebt_entries);
2078 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2080 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2081 return -EINVAL;
2083 startoff = state->buf_user_offset;
2084 /* pull in most of the ebt_entry; it does not need to be changed. */
2085 ret = ebt_buf_add(state, entry,
2086 offsetof(struct ebt_entry, watchers_offset));
2087 if (ret < 0)
2088 return ret;
2090 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2091 memcpy(&offsets[1], &entry->watchers_offset,
2092 sizeof(offsets) - sizeof(offsets[0]));
2094 if (state->buf_kern_start) {
2095 buf_start = state->buf_kern_start + state->buf_kern_offset;
2096 offsets_update = (unsigned int *) buf_start;
2098 ret = ebt_buf_add(state, &offsets[1],
2099 sizeof(offsets) - sizeof(offsets[0]));
2100 if (ret < 0)
2101 return ret;
2102 buf_start = (char *) entry;
2104 * 0: matches offset, always follows ebt_entry.
2105 * 1: watchers offset, from ebt_entry structure
2106 * 2: target offset, from ebt_entry structure
2107 * 3: next ebt_entry offset, from ebt_entry structure
2109 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2111 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2112 struct compat_ebt_entry_mwt *match32;
2113 unsigned int size;
2114 char *buf = buf_start;
2116 buf = buf_start + offsets[i];
2117 if (offsets[i] > offsets[j])
2118 return -EINVAL;
2120 match32 = (struct compat_ebt_entry_mwt *) buf;
2121 size = offsets[j] - offsets[i];
2122 ret = ebt_size_mwt(match32, size, i, state, base);
2123 if (ret < 0)
2124 return ret;
2125 new_offset += ret;
2126 if (offsets_update && new_offset) {
2127 pr_debug("change offset %d to %d\n",
2128 offsets_update[i], offsets[j] + new_offset);
2129 offsets_update[i] = offsets[j] + new_offset;
2133 startoff = state->buf_user_offset - startoff;
2135 BUG_ON(*total < startoff);
2136 *total -= startoff;
2137 return 0;
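/*
 * size_entry_mwt() processes a single struct ebt_entry from the 32-bit blob.
 * A zero bitmask marks a chain header (struct ebt_entries), which is copied
 * through unchanged.  For real entries the fixed part is copied, then the
 * matches, watchers and target are sized/converted in that order; on the
 * translation pass the watchers_offset/target_offset/next_offset fields in
 * the kernel copy are shifted by the growth accumulated so far.  *total is
 * reduced by the amount of user data consumed.
 */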
2141 * repl->entries_size is the size of the ebt_entry blob in userspace.
2142 * It may need more memory when copied to a 64-bit kernel if userspace
2143 * is 32-bit. So the first task is to find out how much memory is needed.
2145 * Called before validation is performed.
2147 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2148 struct ebt_entries_buf_state *state)
2150 unsigned int size_remaining = size_user;
2151 int ret;
2153 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2154 &size_remaining, state);
2155 if (ret < 0)
2156 return ret;
2158 WARN_ON(size_remaining);
2159 return state->buf_kern_offset;
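/*
 * compat_copy_entries() is run twice by compat_do_replace(): the first pass
 * starts with a zeroed state (buf_kern_start == NULL) and only computes how
 * big the 64-bit image will be; the second pass, with buf_kern_start and
 * buf_kern_len pointing at the freshly vmalloc()ed buffer, performs the
 * actual conversion.  The return value is the kernel-side size so far,
 * i.e. state->buf_kern_offset.
 */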
2163 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2164 void __user *user, unsigned int len)
2166 struct compat_ebt_replace tmp;
2167 int i;
2169 if (len < sizeof(tmp))
2170 return -EINVAL;
2172 if (copy_from_user(&tmp, user, sizeof(tmp)))
2173 return -EFAULT;
2175 if (len != sizeof(tmp) + tmp.entries_size)
2176 return -EINVAL;
2178 if (tmp.entries_size == 0)
2179 return -EINVAL;
2181 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2182 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2183 return -ENOMEM;
2184 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2185 return -ENOMEM;
2187 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2189 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2190 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2191 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2193 repl->num_counters = tmp.num_counters;
2194 repl->counters = compat_ptr(tmp.counters);
2195 repl->entries = compat_ptr(tmp.entries);
2196 return 0;
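/*
 * The 32-bit and 64-bit struct ebt_replace layouts agree up to hook_entry,
 * so that first part is copied verbatim and the remaining pointer members
 * are widened with compat_ptr().  The nentries/num_counters bounds above
 * keep the later countersize and counter-copy size calculations from
 * overflowing.
 */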
2199 static int compat_do_replace(struct net *net, void __user *user,
2200 unsigned int len)
2202 int ret, i, countersize, size64;
2203 struct ebt_table_info *newinfo;
2204 struct ebt_replace tmp;
2205 struct ebt_entries_buf_state state;
2206 void *entries_tmp;
2208 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2209 if (ret) {
2210 /* try real handler in case userland supplied needed padding */
2211 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2212 ret = 0;
2213 return ret;
2216 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2217 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2218 if (!newinfo)
2219 return -ENOMEM;
2221 if (countersize)
2222 memset(newinfo->counters, 0, countersize);
2224 memset(&state, 0, sizeof(state));
2226 newinfo->entries = vmalloc(tmp.entries_size);
2227 if (!newinfo->entries) {
2228 ret = -ENOMEM;
2229 goto free_newinfo;
2231 if (copy_from_user(
2232 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2233 ret = -EFAULT;
2234 goto free_entries;
2237 entries_tmp = newinfo->entries;
2239 xt_compat_lock(NFPROTO_BRIDGE);
2241 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2242 if (ret < 0)
2243 goto out_unlock;
2245 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2246 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2247 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2249 size64 = ret;
2250 newinfo->entries = vmalloc(size64);
2251 if (!newinfo->entries) {
2252 vfree(entries_tmp);
2253 ret = -ENOMEM;
2254 goto out_unlock;
2257 memset(&state, 0, sizeof(state));
2258 state.buf_kern_start = newinfo->entries;
2259 state.buf_kern_len = size64;
2261 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2262 BUG_ON(ret < 0); /* parses same data again */
2264 vfree(entries_tmp);
2265 tmp.entries_size = size64;
2267 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2268 char __user *usrptr;
2269 if (tmp.hook_entry[i]) {
2270 unsigned int delta;
2271 usrptr = (char __user *) tmp.hook_entry[i];
2272 delta = usrptr - tmp.entries;
2273 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2274 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2278 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2279 xt_compat_unlock(NFPROTO_BRIDGE);
2281 ret = do_replace_finish(net, &tmp, newinfo);
2282 if (ret == 0)
2283 return ret;
2284 free_entries:
2285 vfree(newinfo->entries);
2286 free_newinfo:
2287 vfree(newinfo);
2288 return ret;
2289 out_unlock:
2290 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2291 xt_compat_unlock(NFPROTO_BRIDGE);
2292 goto free_entries;
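/*
 * Note how the hook entry points are fixed up above: userspace passes them
 * as addresses inside its 32-bit blob, so each one is reduced to an offset
 * relative to tmp.entries and then moved forward by
 * xt_compat_calc_jump(NFPROTO_BRIDGE, delta), i.e. by the total growth of
 * the entries that precede it, before do_replace_finish() resolves it
 * against the 64-bit image.  The offset table only lives while
 * xt_compat_lock(NFPROTO_BRIDGE) is held and is flushed before the finish
 * step runs.
 */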
2295 static int compat_update_counters(struct net *net, void __user *user,
2296 unsigned int len)
2298 struct compat_ebt_replace hlp;
2300 if (copy_from_user(&hlp, user, sizeof(hlp)))
2301 return -EFAULT;
2303 /* try real handler in case userland supplied needed padding */
2304 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2305 return update_counters(net, user, len);
2307 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2308 hlp.num_counters, user, len);
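/*
 * Only the struct ebt_replace header differs between 32-bit and 64-bit
 * userland here; the counter array itself (struct ebt_counter is two u64s)
 * has the same layout in both, which is why do_update_counters() can be
 * handed compat_ptr(hlp.counters) directly.
 */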
2311 static int compat_do_ebt_set_ctl(struct sock *sk,
2312 int cmd, void __user *user, unsigned int len)
2314 int ret;
2316 if (!capable(CAP_NET_ADMIN))
2317 return -EPERM;
2319 switch (cmd) {
2320 case EBT_SO_SET_ENTRIES:
2321 ret = compat_do_replace(sock_net(sk), user, len);
2322 break;
2323 case EBT_SO_SET_COUNTERS:
2324 ret = compat_update_counters(sock_net(sk), user, len);
2325 break;
2326 default:
2327 ret = -EINVAL;
2329 return ret;
2332 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2333 void __user *user, int *len)
2335 int ret;
2336 struct compat_ebt_replace tmp;
2337 struct ebt_table *t;
2339 if (!capable(CAP_NET_ADMIN))
2340 return -EPERM;
2342 /* try real handler in case userland supplied needed padding */
2343 if ((cmd == EBT_SO_GET_INFO ||
2344 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2345 return do_ebt_get_ctl(sk, cmd, user, len);
2347 if (copy_from_user(&tmp, user, sizeof(tmp)))
2348 return -EFAULT;
2350 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2351 if (!t)
2352 return ret;
2354 xt_compat_lock(NFPROTO_BRIDGE);
2355 switch (cmd) {
2356 case EBT_SO_GET_INFO:
2357 tmp.nentries = t->private->nentries;
2358 ret = compat_table_info(t->private, &tmp);
2359 if (ret)
2360 goto out;
2361 tmp.valid_hooks = t->valid_hooks;
2363 if (copy_to_user(user, &tmp, *len) != 0) {
2364 ret = -EFAULT;
2365 break;
2367 ret = 0;
2368 break;
2369 case EBT_SO_GET_INIT_INFO:
2370 tmp.nentries = t->table->nentries;
2371 tmp.entries_size = t->table->entries_size;
2372 tmp.valid_hooks = t->table->valid_hooks;
2374 if (copy_to_user(user, &tmp, *len) != 0) {
2375 ret = -EFAULT;
2376 break;
2378 ret = 0;
2379 break;
2380 case EBT_SO_GET_ENTRIES:
2381 case EBT_SO_GET_INIT_ENTRIES:
2383 * try real handler first in case of userland-side padding.
2384 * in case we are dealing with an 'ordinary' 32-bit binary
2385 * without 64-bit compatibility padding, this will fail right
2386 * after copy_from_user when the *len argument is validated.
2388 * the compat_ variant needs to do one pass over the kernel
2389 * data set to adjust for size differences before the check.
2391 if (copy_everything_to_user(t, user, len, cmd) == 0)
2392 ret = 0;
2393 else
2394 ret = compat_copy_everything_to_user(t, user, len, cmd);
2395 break;
2396 default:
2397 ret = -EINVAL;
2399 out:
2400 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2401 xt_compat_unlock(NFPROTO_BRIDGE);
2402 mutex_unlock(&ebt_mutex);
2403 return ret;
2405 #endif
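/*
 * Everything between the CONFIG_COMPAT guards above is only built when the
 * kernel can run 32-bit userland.  The sockopt ops below hook both the
 * native handlers and, when compiled in, the compat_ handlers into the
 * IPv4 get/setsockopt path via nf_register_sockopt().
 */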
2407 static struct nf_sockopt_ops ebt_sockopts =
2409 .pf = PF_INET,
2410 .set_optmin = EBT_BASE_CTL,
2411 .set_optmax = EBT_SO_SET_MAX + 1,
2412 .set = do_ebt_set_ctl,
2413 #ifdef CONFIG_COMPAT
2414 .compat_set = compat_do_ebt_set_ctl,
2415 #endif
2416 .get_optmin = EBT_BASE_CTL,
2417 .get_optmax = EBT_SO_GET_MAX + 1,
2418 .get = do_ebt_get_ctl,
2419 #ifdef CONFIG_COMPAT
2420 .compat_get = compat_do_ebt_get_ctl,
2421 #endif
2422 .owner = THIS_MODULE,
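/*
 * Rough sketch of how the ebtables userspace tool reaches these handlers
 * (illustrative only; the exact calls live in the ebtables library):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	struct ebt_replace info = { .name = "filter" };
 *	socklen_t len = sizeof(info);
 *
 *	getsockopt(fd, IPPROTO_IP, EBT_SO_GET_INFO, &info, &len);
 *	...
 *	setsockopt(fd, IPPROTO_IP, EBT_SO_SET_ENTRIES, repl,
 *		   sizeof(*repl) + repl->entries_size);
 *
 * A 32-bit binary on a 64-bit kernel ends up in compat_do_ebt_get_ctl()/
 * compat_do_ebt_set_ctl() instead of the native handlers.
 */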
2425 static int __init ebtables_init(void)
2427 int ret;
2429 ret = xt_register_target(&ebt_standard_target);
2430 if (ret < 0)
2431 return ret;
2432 ret = nf_register_sockopt(&ebt_sockopts);
2433 if (ret < 0) {
2434 xt_unregister_target(&ebt_standard_target);
2435 return ret;
2438 printk(KERN_INFO "Ebtables v2.0 registered\n");
2439 return 0;
2442 static void __exit ebtables_fini(void)
2444 nf_unregister_sockopt(&ebt_sockopts);
2445 xt_unregister_target(&ebt_standard_target);
2446 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2449 EXPORT_SYMBOL(ebt_register_table);
2450 EXPORT_SYMBOL(ebt_unregister_table);
2451 EXPORT_SYMBOL(ebt_do_table);
2452 module_init(ebtables_init);
2453 module_exit(ebtables_fini);
2454 MODULE_LICENSE("GPL");