1 /*
2 * ebtables
4 * Author:
5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <asm/uaccess.h>
26 #include <linux/smp.h>
27 #include <linux/cpumask.h>
28 #include <net/sock.h>
29 /* needed for logical [in,out]-dev filtering */
30 #include "../br_private.h"
32 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
33 "report to author: "format, ## args)
34 /* #define BUGPRINT(format, args...) */
37 * Each cpu has its own set of counters, so there is no need for write_lock in
38 * the softirq
39 * For reading or updating the counters, the user context needs to
40 * get a write_lock
43 /* The size of each set of counters is altered to get cache alignment */
44 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
45 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
46 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
47 COUNTER_OFFSET(n) * cpu))
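/*
 * Layout of the counter area in struct ebt_table_info: one block per
 * possible CPU, each block holding 'nentries' struct ebt_counter and
 * rounded up to SMP_CACHE_BYTES so CPUs do not share cache lines.
 * COUNTER_BASE(c, n, cpu) returns the start of that cpu's block.
 */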
51 static DEFINE_MUTEX(ebt_mutex);
53 #ifdef CONFIG_COMPAT
54 static void ebt_standard_compat_from_user(void *dst, const void *src)
56 int v = *(compat_int_t *)src;
58 if (v >= 0)
59 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
60 memcpy(dst, &v, sizeof(v));
63 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
65 compat_int_t cv = *(int *)src;
67 if (cv >= 0)
68 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
69 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
71 #endif
74 static struct xt_target ebt_standard_target = {
75 .name = "standard",
76 .revision = 0,
77 .family = NFPROTO_BRIDGE,
78 .targetsize = sizeof(int),
79 #ifdef CONFIG_COMPAT
80 .compatsize = sizeof(compat_int_t),
81 .compat_from_user = ebt_standard_compat_from_user,
82 .compat_to_user = ebt_standard_compat_to_user,
83 #endif
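/*
 * Run a single watcher on the skb.  Watchers are target extensions that
 * only observe the packet (e.g. for logging) and never decide its fate,
 * hence the constant return value of 0 so EBT_WATCHER_ITERATE() keeps going.
 */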
86 static inline int
87 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
88 struct xt_target_param *par)
90 par->target = w->u.watcher;
91 par->targinfo = w->data;
92 w->u.watcher->target(skb, par);
93 /* watchers don't give a verdict */
94 return 0;
97 static inline int ebt_do_match (struct ebt_entry_match *m,
98 const struct sk_buff *skb, struct xt_match_param *par)
100 par->match = m->u.match;
101 par->matchinfo = m->data;
102 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
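/*
 * Match an interface name from a rule against a net_device.  An empty
 * rule string matches any device; the byte value 1 in the rule acts as
 * a wildcard terminator, turning the comparison into a prefix match.
 * Returns 0 on match so the result can be fed straight into FWINV2().
 */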
105 static inline int
106 ebt_dev_check(const char *entry, const struct net_device *device)
108 int i = 0;
109 const char *devname;
111 if (*entry == '\0')
112 return 0;
113 if (!device)
114 return 1;
115 devname = device->name;
116 /* 1 is the wildcard token */
117 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
118 i++;
119 return (devname[i] != entry[i] && entry[i] != 1);
122 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
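/*
 * FWINV2(cond, flag) inverts the result of 'cond' when the corresponding
 * inversion flag is set in e->invflags, so negated rule options share the
 * same tests below as their plain counterparts.
 */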
123 /* process standard matches */
124 static inline int
125 ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
126 const struct net_device *in, const struct net_device *out)
128 int verdict, i;
130 if (e->bitmask & EBT_802_3) {
131 if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
132 return 1;
133 } else if (!(e->bitmask & EBT_NOPROTO) &&
134 FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
135 return 1;
137 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
138 return 1;
139 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
140 return 1;
141 if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
142 e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
143 return 1;
144 if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
145 e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
146 return 1;
148 if (e->bitmask & EBT_SOURCEMAC) {
149 verdict = 0;
150 for (i = 0; i < 6; i++)
151 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
152 e->sourcemsk[i];
153 if (FWINV2(verdict != 0, EBT_ISOURCE) )
154 return 1;
156 if (e->bitmask & EBT_DESTMAC) {
157 verdict = 0;
158 for (i = 0; i < 6; i++)
159 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
160 e->destmsk[i];
161 if (FWINV2(verdict != 0, EBT_IDEST) )
162 return 1;
164 return 0;
167 static inline __pure
168 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
170 return (void *)entry + entry->next_offset;
173 /* Do some firewalling */
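/*
 * ebt_do_table() is the per-hook entry point: it walks the chain attached
 * to 'hook', evaluating the basic header matches, the match extensions,
 * the watchers and finally the target of every rule.  Jumps to user
 * defined chains are handled iteratively via the per-cpu chainstack
 * (cs[]), so an EBT_RETURN verdict simply pops one stack frame.
 */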
174 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
175 const struct net_device *in, const struct net_device *out,
176 struct ebt_table *table)
178 int i, nentries;
179 struct ebt_entry *point;
180 struct ebt_counter *counter_base, *cb_base;
181 const struct ebt_entry_target *t;
182 int verdict, sp = 0;
183 struct ebt_chainstack *cs;
184 struct ebt_entries *chaininfo;
185 const char *base;
186 const struct ebt_table_info *private;
187 bool hotdrop = false;
188 struct xt_match_param mtpar;
189 struct xt_target_param tgpar;
191 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
192 mtpar.in = tgpar.in = in;
193 mtpar.out = tgpar.out = out;
194 mtpar.hotdrop = &hotdrop;
195 mtpar.hooknum = tgpar.hooknum = hook;
197 read_lock_bh(&table->lock);
198 private = table->private;
199 cb_base = COUNTER_BASE(private->counters, private->nentries,
200 smp_processor_id());
201 if (private->chainstack)
202 cs = private->chainstack[smp_processor_id()];
203 else
204 cs = NULL;
205 chaininfo = private->hook_entry[hook];
206 nentries = private->hook_entry[hook]->nentries;
207 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
208 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
209 /* base for chain jumps */
210 base = private->entries;
211 i = 0;
212 while (i < nentries) {
213 if (ebt_basic_match(point, eth_hdr(skb), in, out))
214 goto letscontinue;
216 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &mtpar) != 0)
217 goto letscontinue;
218 if (hotdrop) {
219 read_unlock_bh(&table->lock);
220 return NF_DROP;
223 /* increase counter */
224 (*(counter_base + i)).pcnt++;
225 (*(counter_base + i)).bcnt += skb->len;
227 /* these should only watch: not modify, nor tell us
228 what to do with the packet */
229 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &tgpar);
231 t = (struct ebt_entry_target *)
232 (((char *)point) + point->target_offset);
233 /* standard target */
234 if (!t->u.target->target)
235 verdict = ((struct ebt_standard_target *)t)->verdict;
236 else {
237 tgpar.target = t->u.target;
238 tgpar.targinfo = t->data;
239 verdict = t->u.target->target(skb, &tgpar);
241 if (verdict == EBT_ACCEPT) {
242 read_unlock_bh(&table->lock);
243 return NF_ACCEPT;
245 if (verdict == EBT_DROP) {
246 read_unlock_bh(&table->lock);
247 return NF_DROP;
249 if (verdict == EBT_RETURN) {
250 letsreturn:
251 #ifdef CONFIG_NETFILTER_DEBUG
252 if (sp == 0) {
253 BUGPRINT("RETURN on base chain");
254 /* act like this is EBT_CONTINUE */
255 goto letscontinue;
257 #endif
258 sp--;
259 /* put all the local variables right */
260 i = cs[sp].n;
261 chaininfo = cs[sp].chaininfo;
262 nentries = chaininfo->nentries;
263 point = cs[sp].e;
264 counter_base = cb_base +
265 chaininfo->counter_offset;
266 continue;
268 if (verdict == EBT_CONTINUE)
269 goto letscontinue;
270 #ifdef CONFIG_NETFILTER_DEBUG
271 if (verdict < 0) {
272 BUGPRINT("bogus standard verdict\n");
273 read_unlock_bh(&table->lock);
274 return NF_DROP;
276 #endif
277 /* jump to a udc */
278 cs[sp].n = i + 1;
279 cs[sp].chaininfo = chaininfo;
280 cs[sp].e = ebt_next_entry(point);
281 i = 0;
282 chaininfo = (struct ebt_entries *) (base + verdict);
283 #ifdef CONFIG_NETFILTER_DEBUG
284 if (chaininfo->distinguisher) {
285 BUGPRINT("jump to non-chain\n");
286 read_unlock_bh(&table->lock);
287 return NF_DROP;
289 #endif
290 nentries = chaininfo->nentries;
291 point = (struct ebt_entry *)chaininfo->data;
292 counter_base = cb_base + chaininfo->counter_offset;
293 sp++;
294 continue;
295 letscontinue:
296 point = ebt_next_entry(point);
297 i++;
300 /* I actually like this :) */
301 if (chaininfo->policy == EBT_RETURN)
302 goto letsreturn;
303 if (chaininfo->policy == EBT_ACCEPT) {
304 read_unlock_bh(&table->lock);
305 return NF_ACCEPT;
307 read_unlock_bh(&table->lock);
308 return NF_DROP;
311 /* If it succeeds, returns element and locks mutex */
312 static inline void *
313 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
314 struct mutex *mutex)
316 struct {
317 struct list_head list;
318 char name[EBT_FUNCTION_MAXNAMELEN];
319 } *e;
321 *error = mutex_lock_interruptible(mutex);
322 if (*error != 0)
323 return NULL;
325 list_for_each_entry(e, head, list) {
326 if (strcmp(e->name, name) == 0)
327 return e;
329 *error = -ENOENT;
330 mutex_unlock(mutex);
331 return NULL;
334 static void *
335 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
336 int *error, struct mutex *mutex)
338 return try_then_request_module(
339 find_inlist_lock_noload(head, name, error, mutex),
340 "%s%s", prefix, name);
343 static inline struct ebt_table *
344 find_table_lock(struct net *net, const char *name, int *error,
345 struct mutex *mutex)
347 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
348 "ebtable_", error, mutex);
351 static inline int
352 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
353 unsigned int *cnt)
355 const struct ebt_entry *e = par->entryinfo;
356 struct xt_match *match;
357 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
358 int ret;
360 if (left < sizeof(struct ebt_entry_match) ||
361 left - sizeof(struct ebt_entry_match) < m->match_size)
362 return -EINVAL;
364 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
365 m->u.name, 0), "ebt_%s", m->u.name);
366 if (IS_ERR(match))
367 return PTR_ERR(match);
368 if (match == NULL)
369 return -ENOENT;
370 m->u.match = match;
372 par->match = match;
373 par->matchinfo = m->data;
374 ret = xt_check_match(par, m->match_size,
375 e->ethproto, e->invflags & EBT_IPROTO);
376 if (ret < 0) {
377 module_put(match->me);
378 return ret;
381 (*cnt)++;
382 return 0;
385 static inline int
386 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
387 unsigned int *cnt)
389 const struct ebt_entry *e = par->entryinfo;
390 struct xt_target *watcher;
391 size_t left = ((char *)e + e->target_offset) - (char *)w;
392 int ret;
394 if (left < sizeof(struct ebt_entry_watcher) ||
395 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
396 return -EINVAL;
398 watcher = try_then_request_module(
399 xt_find_target(NFPROTO_BRIDGE, w->u.name, 0),
400 "ebt_%s", w->u.name);
401 if (IS_ERR(watcher))
402 return PTR_ERR(watcher);
403 if (watcher == NULL)
404 return -ENOENT;
405 w->u.watcher = watcher;
407 par->target = watcher;
408 par->targinfo = w->data;
409 ret = xt_check_target(par, w->watcher_size,
410 e->ethproto, e->invflags & EBT_IPROTO);
411 if (ret < 0) {
412 module_put(watcher->me);
413 return ret;
416 (*cnt)++;
417 return 0;
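/*
 * First pass over the blob copied from userspace: translate the
 * hook_entry[] pointers supplied by userspace into pointers inside
 * newinfo->entries and verify that the chain headers and entries tile
 * the entries buffer exactly, with every valid hook owning a chain.
 */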
420 static int ebt_verify_pointers(const struct ebt_replace *repl,
421 struct ebt_table_info *newinfo)
423 unsigned int limit = repl->entries_size;
424 unsigned int valid_hooks = repl->valid_hooks;
425 unsigned int offset = 0;
426 int i;
428 for (i = 0; i < NF_BR_NUMHOOKS; i++)
429 newinfo->hook_entry[i] = NULL;
431 newinfo->entries_size = repl->entries_size;
432 newinfo->nentries = repl->nentries;
434 while (offset < limit) {
435 size_t left = limit - offset;
436 struct ebt_entry *e = (void *)newinfo->entries + offset;
438 if (left < sizeof(unsigned int))
439 break;
441 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
442 if ((valid_hooks & (1 << i)) == 0)
443 continue;
444 if ((char __user *)repl->hook_entry[i] ==
445 repl->entries + offset)
446 break;
449 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
450 if (e->bitmask != 0) {
451 /* we make userspace set this right,
452 so there is no misunderstanding */
453 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
454 "in distinguisher\n");
455 return -EINVAL;
457 if (i != NF_BR_NUMHOOKS)
458 newinfo->hook_entry[i] = (struct ebt_entries *)e;
459 if (left < sizeof(struct ebt_entries))
460 break;
461 offset += sizeof(struct ebt_entries);
462 } else {
463 if (left < sizeof(struct ebt_entry))
464 break;
465 if (left < e->next_offset)
466 break;
467 if (e->next_offset < sizeof(struct ebt_entry))
468 return -EINVAL;
469 offset += e->next_offset;
472 if (offset != limit) {
473 BUGPRINT("entries_size too small\n");
474 return -EINVAL;
477 /* check if all valid hooks have a chain */
478 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
479 if (!newinfo->hook_entry[i] &&
480 (valid_hooks & (1 << i))) {
481 BUGPRINT("Valid hook without chain\n");
482 return -EINVAL;
485 return 0;
489 * this one is very careful, as it is the first function
490 * to parse the userspace data
492 static inline int
493 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
494 const struct ebt_table_info *newinfo,
495 unsigned int *n, unsigned int *cnt,
496 unsigned int *totalcnt, unsigned int *udc_cnt)
498 int i;
500 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
501 if ((void *)e == (void *)newinfo->hook_entry[i])
502 break;
504 /* beginning of a new chain
505 if i == NF_BR_NUMHOOKS it must be a user defined chain */
506 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
507 /* this checks if the previous chain has as many entries
508 as it said it has */
509 if (*n != *cnt) {
510 BUGPRINT("nentries does not equal the nr of entries "
511 "in the chain\n");
512 return -EINVAL;
514 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
515 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
516 /* only RETURN from udc */
517 if (i != NF_BR_NUMHOOKS ||
518 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
519 BUGPRINT("bad policy\n");
520 return -EINVAL;
523 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
524 (*udc_cnt)++;
525 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
526 BUGPRINT("counter_offset != totalcnt");
527 return -EINVAL;
529 *n = ((struct ebt_entries *)e)->nentries;
530 *cnt = 0;
531 return 0;
533 /* a plain old entry, heh */
534 if (sizeof(struct ebt_entry) > e->watchers_offset ||
535 e->watchers_offset > e->target_offset ||
536 e->target_offset >= e->next_offset) {
537 BUGPRINT("entry offsets not in right order\n");
538 return -EINVAL;
540 /* this is not checked anywhere else */
541 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
542 BUGPRINT("target size too small\n");
543 return -EINVAL;
545 (*cnt)++;
546 (*totalcnt)++;
547 return 0;
550 struct ebt_cl_stack
552 struct ebt_chainstack cs;
553 int from;
554 unsigned int hookmask;
558 * we need these positions to check that a jump to a different part of the
559 * entries is a jump to the beginning of a new chain.
561 static inline int
562 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
563 unsigned int *n, struct ebt_cl_stack *udc)
565 int i;
567 /* we're only interested in chain starts */
568 if (e->bitmask)
569 return 0;
570 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
571 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
572 break;
574 /* only care about udc */
575 if (i != NF_BR_NUMHOOKS)
576 return 0;
578 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
579 /* these initialisations are relied on later in check_chainloops() */
580 udc[*n].cs.n = 0;
581 udc[*n].hookmask = 0;
583 (*n)++;
584 return 0;
587 static inline int
588 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
590 struct xt_mtdtor_param par;
592 if (i && (*i)-- == 0)
593 return 1;
595 par.net = net;
596 par.match = m->u.match;
597 par.matchinfo = m->data;
598 par.family = NFPROTO_BRIDGE;
599 if (par.match->destroy != NULL)
600 par.match->destroy(&par);
601 module_put(par.match->me);
602 return 0;
605 static inline int
606 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
608 struct xt_tgdtor_param par;
610 if (i && (*i)-- == 0)
611 return 1;
613 par.net = net;
614 par.target = w->u.watcher;
615 par.targinfo = w->data;
616 par.family = NFPROTO_BRIDGE;
617 if (par.target->destroy != NULL)
618 par.target->destroy(&par);
619 module_put(par.target->me);
620 return 0;
623 static inline int
624 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
626 struct xt_tgdtor_param par;
627 struct ebt_entry_target *t;
629 if (e->bitmask == 0)
630 return 0;
631 /* we're done */
632 if (cnt && (*cnt)-- == 0)
633 return 1;
634 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
635 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
636 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
638 par.net = net;
639 par.target = t->u.target;
640 par.targinfo = t->data;
641 par.family = NFPROTO_BRIDGE;
642 if (par.target->destroy != NULL)
643 par.target->destroy(&par);
644 module_put(par.target->me);
645 return 0;
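/*
 * Validate a single rule: check its flag bits, work out from which base
 * chains it is reachable (the hookmask passed to the extensions' check
 * routines), and bind and verify every match, watcher and the target.
 * On error, anything already bound is released again.
 */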
648 static inline int
649 ebt_check_entry(struct ebt_entry *e, struct net *net,
650 const struct ebt_table_info *newinfo,
651 const char *name, unsigned int *cnt,
652 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
654 struct ebt_entry_target *t;
655 struct xt_target *target;
656 unsigned int i, j, hook = 0, hookmask = 0;
657 size_t gap;
658 int ret;
659 struct xt_mtchk_param mtpar;
660 struct xt_tgchk_param tgpar;
662 /* don't mess with the struct ebt_entries */
663 if (e->bitmask == 0)
664 return 0;
666 if (e->bitmask & ~EBT_F_MASK) {
667 BUGPRINT("Unknown flag for bitmask\n");
668 return -EINVAL;
670 if (e->invflags & ~EBT_INV_MASK) {
671 BUGPRINT("Unknown flag for inv bitmask\n");
672 return -EINVAL;
674 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
675 BUGPRINT("NOPROTO & 802_3 not allowed\n");
676 return -EINVAL;
678 /* what hook do we belong to? */
679 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
680 if (!newinfo->hook_entry[i])
681 continue;
682 if ((char *)newinfo->hook_entry[i] < (char *)e)
683 hook = i;
684 else
685 break;
687 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
688 a base chain */
689 if (i < NF_BR_NUMHOOKS)
690 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
691 else {
692 for (i = 0; i < udc_cnt; i++)
693 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
694 break;
695 if (i == 0)
696 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
697 else
698 hookmask = cl_s[i - 1].hookmask;
700 i = 0;
702 mtpar.net = tgpar.net = net;
703 mtpar.table = tgpar.table = name;
704 mtpar.entryinfo = tgpar.entryinfo = e;
705 mtpar.hook_mask = tgpar.hook_mask = hookmask;
706 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
707 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
708 if (ret != 0)
709 goto cleanup_matches;
710 j = 0;
711 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
712 if (ret != 0)
713 goto cleanup_watchers;
714 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
715 gap = e->next_offset - e->target_offset;
717 target = try_then_request_module(
718 xt_find_target(NFPROTO_BRIDGE, t->u.name, 0),
719 "ebt_%s", t->u.name);
720 if (IS_ERR(target)) {
721 ret = PTR_ERR(target);
722 goto cleanup_watchers;
723 } else if (target == NULL) {
724 ret = -ENOENT;
725 goto cleanup_watchers;
728 t->u.target = target;
729 if (t->u.target == &ebt_standard_target) {
730 if (gap < sizeof(struct ebt_standard_target)) {
731 BUGPRINT("Standard target size too big\n");
732 ret = -EFAULT;
733 goto cleanup_watchers;
735 if (((struct ebt_standard_target *)t)->verdict <
736 -NUM_STANDARD_TARGETS) {
737 BUGPRINT("Invalid standard target\n");
738 ret = -EFAULT;
739 goto cleanup_watchers;
741 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
742 module_put(t->u.target->me);
743 ret = -EFAULT;
744 goto cleanup_watchers;
747 tgpar.target = target;
748 tgpar.targinfo = t->data;
749 ret = xt_check_target(&tgpar, t->target_size,
750 e->ethproto, e->invflags & EBT_IPROTO);
751 if (ret < 0) {
752 module_put(target->me);
753 goto cleanup_watchers;
755 (*cnt)++;
756 return 0;
757 cleanup_watchers:
758 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
759 cleanup_matches:
760 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
761 return ret;
765 * checks for loops and sets the hook mask for udc
766 * the hook mask for udc tells us from which base chains the udc can be
767 * accessed. This mask is a parameter to the check() functions of the extensions
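/*
 * The walk below is an iterative depth-first traversal: cl_s[i].cs.n != 0
 * marks chain i as being on the current path, so jumping to it again is a
 * loop; the mark is cleared when the walk returns from that chain.
 * cl_s[i].hookmask accumulates the base chains from which chain i is
 * reachable.
 */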
769 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
770 unsigned int udc_cnt, unsigned int hooknr, char *base)
772 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
773 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
774 const struct ebt_entry_target *t;
776 while (pos < nentries || chain_nr != -1) {
777 /* end of udc, go back one 'recursion' step */
778 if (pos == nentries) {
779 /* put back values of the time when this chain was called */
780 e = cl_s[chain_nr].cs.e;
781 if (cl_s[chain_nr].from != -1)
782 nentries =
783 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
784 else
785 nentries = chain->nentries;
786 pos = cl_s[chain_nr].cs.n;
787 /* make sure we won't see a loop that isn't one */
788 cl_s[chain_nr].cs.n = 0;
789 chain_nr = cl_s[chain_nr].from;
790 if (pos == nentries)
791 continue;
793 t = (struct ebt_entry_target *)
794 (((char *)e) + e->target_offset);
795 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
796 goto letscontinue;
797 if (e->target_offset + sizeof(struct ebt_standard_target) >
798 e->next_offset) {
799 BUGPRINT("Standard target size too big\n");
800 return -1;
802 verdict = ((struct ebt_standard_target *)t)->verdict;
803 if (verdict >= 0) { /* jump to another chain */
804 struct ebt_entries *hlp2 =
805 (struct ebt_entries *)(base + verdict);
806 for (i = 0; i < udc_cnt; i++)
807 if (hlp2 == cl_s[i].cs.chaininfo)
808 break;
809 /* bad destination or loop */
810 if (i == udc_cnt) {
811 BUGPRINT("bad destination\n");
812 return -1;
814 if (cl_s[i].cs.n) {
815 BUGPRINT("loop\n");
816 return -1;
818 if (cl_s[i].hookmask & (1 << hooknr))
819 goto letscontinue;
820 /* this can't be 0, so the loop test is correct */
821 cl_s[i].cs.n = pos + 1;
822 pos = 0;
823 cl_s[i].cs.e = ebt_next_entry(e);
824 e = (struct ebt_entry *)(hlp2->data);
825 nentries = hlp2->nentries;
826 cl_s[i].from = chain_nr;
827 chain_nr = i;
828 /* this udc is accessible from the base chain for hooknr */
829 cl_s[i].hookmask |= (1 << hooknr);
830 continue;
832 letscontinue:
833 e = ebt_next_entry(e);
834 pos++;
836 return 0;
839 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
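/*
 * translate_table() works in three passes over the entry blob:
 * 1) size/offset sanity checks plus counting of entries and user defined
 *    chains (ebt_check_entry_size_and_hooks), 2) locating the udcs and
 *    running the chain-loop check (ebt_get_udc_positions +
 *    check_chainloops), 3) per-entry validation that binds the
 *    match/watcher/target extensions (ebt_check_entry), undoing the
 *    already-checked entries if anything fails.
 */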
840 static int translate_table(struct net *net, const char *name,
841 struct ebt_table_info *newinfo)
843 unsigned int i, j, k, udc_cnt;
844 int ret;
845 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
847 i = 0;
848 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
849 i++;
850 if (i == NF_BR_NUMHOOKS) {
851 BUGPRINT("No valid hooks specified\n");
852 return -EINVAL;
854 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
855 BUGPRINT("Chains don't start at beginning\n");
856 return -EINVAL;
858 /* make sure the chains appear in the same order
859 as their corresponding hooks */
860 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
861 if (!newinfo->hook_entry[j])
862 continue;
863 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
864 BUGPRINT("Hook order must be followed\n");
865 return -EINVAL;
867 i = j;
870 /* do some early checkings and initialize some things */
871 i = 0; /* holds the expected nr. of entries for the chain */
872 j = 0; /* holds the up to now counted entries for the chain */
873 k = 0; /* holds the total nr. of entries, should equal
874 newinfo->nentries afterwards */
875 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
876 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
877 ebt_check_entry_size_and_hooks, newinfo,
878 &i, &j, &k, &udc_cnt);
880 if (ret != 0)
881 return ret;
883 if (i != j) {
884 BUGPRINT("nentries does not equal the nr of entries in the "
885 "(last) chain\n");
886 return -EINVAL;
888 if (k != newinfo->nentries) {
889 BUGPRINT("Total nentries is wrong\n");
890 return -EINVAL;
893 /* get the locations of the udcs and put them in an array;
894 while we're at it, allocate the chainstack */
895 if (udc_cnt) {
896 /* this will get free'd in do_replace()/ebt_register_table()
897 if an error occurs */
898 newinfo->chainstack =
899 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
900 if (!newinfo->chainstack)
901 return -ENOMEM;
902 for_each_possible_cpu(i) {
903 newinfo->chainstack[i] =
904 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
905 if (!newinfo->chainstack[i]) {
906 while (i)
907 vfree(newinfo->chainstack[--i]);
908 vfree(newinfo->chainstack);
909 newinfo->chainstack = NULL;
910 return -ENOMEM;
914 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
915 if (!cl_s)
916 return -ENOMEM;
917 i = 0; /* the i'th udc */
918 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
919 ebt_get_udc_positions, newinfo, &i, cl_s);
920 /* sanity check */
921 if (i != udc_cnt) {
922 BUGPRINT("i != udc_cnt\n");
923 vfree(cl_s);
924 return -EFAULT;
928 /* Check for loops */
929 for (i = 0; i < NF_BR_NUMHOOKS; i++)
930 if (newinfo->hook_entry[i])
931 if (check_chainloops(newinfo->hook_entry[i],
932 cl_s, udc_cnt, i, newinfo->entries)) {
933 vfree(cl_s);
934 return -EINVAL;
937 /* we now know the following (along with E=mc²):
938 - the nr of entries in each chain is right
939 - the size of the allocated space is right
940 - all valid hooks have a corresponding chain
941 - there are no loops
942 - wrong data can still be on the level of a single entry
943 - could be there are jumps to places that are not the
944 beginning of a chain. This can only occur in chains that
945 are not accessible from any base chains, so we don't care. */
947 /* used to know what we need to clean up if something goes wrong */
948 i = 0;
949 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
950 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
951 if (ret != 0) {
952 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
953 ebt_cleanup_entry, net, &i);
955 vfree(cl_s);
956 return ret;
959 /* called under write_lock */
960 static void get_counters(const struct ebt_counter *oldcounters,
961 struct ebt_counter *counters, unsigned int nentries)
963 int i, cpu;
964 struct ebt_counter *counter_base;
966 /* counters of cpu 0 */
967 memcpy(counters, oldcounters,
968 sizeof(struct ebt_counter) * nentries);
970 /* add other counters to those of cpu 0 */
971 for_each_possible_cpu(cpu) {
972 if (cpu == 0)
973 continue;
974 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
975 for (i = 0; i < nentries; i++) {
976 counters[i].pcnt += counter_base[i].pcnt;
977 counters[i].bcnt += counter_base[i].bcnt;
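/*
 * Install a freshly translated table: the actual pointer swap (plus a
 * counter snapshot for userspace) happens under the table write lock,
 * while copying the old counters out and tearing down the old entries is
 * done after the lock and ebt_mutex have been dropped again.
 */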
982 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
983 struct ebt_table_info *newinfo)
985 int ret, i;
986 struct ebt_counter *counterstmp = NULL;
987 /* used to be able to unlock earlier */
988 struct ebt_table_info *table;
989 struct ebt_table *t;
991 /* the user wants counters back;
992 the check on the size is done later, when we have the lock */
993 if (repl->num_counters) {
994 unsigned long size = repl->num_counters * sizeof(*counterstmp);
995 counterstmp = vmalloc(size);
996 if (!counterstmp)
997 return -ENOMEM;
1000 newinfo->chainstack = NULL;
1001 ret = ebt_verify_pointers(repl, newinfo);
1002 if (ret != 0)
1003 goto free_counterstmp;
1005 ret = translate_table(net, repl->name, newinfo);
1007 if (ret != 0)
1008 goto free_counterstmp;
1010 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1011 if (!t) {
1012 ret = -ENOENT;
1013 goto free_iterate;
1016 /* the table doesn't like it */
1017 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1018 goto free_unlock;
1020 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1021 BUGPRINT("Wrong nr. of counters requested\n");
1022 ret = -EINVAL;
1023 goto free_unlock;
1026 /* we have the mutex lock, so no danger in reading this pointer */
1027 table = t->private;
1028 /* make sure the table can only be rmmod'ed if it contains no rules */
1029 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1030 ret = -ENOENT;
1031 goto free_unlock;
1032 } else if (table->nentries && !newinfo->nentries)
1033 module_put(t->me);
1034 /* we need an atomic snapshot of the counters */
1035 write_lock_bh(&t->lock);
1036 if (repl->num_counters)
1037 get_counters(t->private->counters, counterstmp,
1038 t->private->nentries);
1040 t->private = newinfo;
1041 write_unlock_bh(&t->lock);
1042 mutex_unlock(&ebt_mutex);
1043 /* so, a user can change the chains while having messed up her counter
1044 allocation. The only reason this is done this way is that the lock then
1045 needs to be held only once, while it still doesn't bring the kernel into
1046 a dangerous state. */
1047 if (repl->num_counters &&
1048 copy_to_user(repl->counters, counterstmp,
1049 repl->num_counters * sizeof(struct ebt_counter))) {
1050 ret = -EFAULT;
1052 else
1053 ret = 0;
1055 /* decrease module count and free resources */
1056 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1057 ebt_cleanup_entry, net, NULL);
1059 vfree(table->entries);
1060 if (table->chainstack) {
1061 for_each_possible_cpu(i)
1062 vfree(table->chainstack[i]);
1063 vfree(table->chainstack);
1065 vfree(table);
1067 vfree(counterstmp);
1068 return ret;
1070 free_unlock:
1071 mutex_unlock(&ebt_mutex);
1072 free_iterate:
1073 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1074 ebt_cleanup_entry, net, NULL);
1075 free_counterstmp:
1076 vfree(counterstmp);
1077 /* can be initialized in translate_table() */
1078 if (newinfo->chainstack) {
1079 for_each_possible_cpu(i)
1080 vfree(newinfo->chainstack[i]);
1081 vfree(newinfo->chainstack);
1083 return ret;
1086 /* replace the table */
1087 static int do_replace(struct net *net, const void __user *user,
1088 unsigned int len)
1090 int ret, countersize;
1091 struct ebt_table_info *newinfo;
1092 struct ebt_replace tmp;
1094 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1095 return -EFAULT;
1097 if (len != sizeof(tmp) + tmp.entries_size) {
1098 BUGPRINT("Wrong len argument\n");
1099 return -EINVAL;
1102 if (tmp.entries_size == 0) {
1103 BUGPRINT("Entries_size never zero\n");
1104 return -EINVAL;
1106 /* overflow check */
1107 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1108 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1109 return -ENOMEM;
1110 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1111 return -ENOMEM;
1113 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1114 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1115 if (!newinfo)
1116 return -ENOMEM;
1118 if (countersize)
1119 memset(newinfo->counters, 0, countersize);
1121 newinfo->entries = vmalloc(tmp.entries_size);
1122 if (!newinfo->entries) {
1123 ret = -ENOMEM;
1124 goto free_newinfo;
1126 if (copy_from_user(
1127 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1128 BUGPRINT("Couldn't copy entries from userspace\n");
1129 ret = -EFAULT;
1130 goto free_entries;
1133 ret = do_replace_finish(net, &tmp, newinfo);
1134 if (ret == 0)
1135 return ret;
1136 free_entries:
1137 vfree(newinfo->entries);
1138 free_newinfo:
1139 vfree(newinfo);
1140 return ret;
1143 struct ebt_table *
1144 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1146 struct ebt_table_info *newinfo;
1147 struct ebt_table *t, *table;
1148 struct ebt_replace_kernel *repl;
1149 int ret, i, countersize;
1150 void *p;
1152 if (input_table == NULL || (repl = input_table->table) == NULL ||
1153 repl->entries == 0 || repl->entries_size == 0 ||
1154 repl->counters != NULL || input_table->private != NULL) {
1155 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1156 return ERR_PTR(-EINVAL);
1159 /* Don't add one table to multiple lists. */
1160 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1161 if (!table) {
1162 ret = -ENOMEM;
1163 goto out;
1166 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1167 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1168 ret = -ENOMEM;
1169 if (!newinfo)
1170 goto free_table;
1172 p = vmalloc(repl->entries_size);
1173 if (!p)
1174 goto free_newinfo;
1176 memcpy(p, repl->entries, repl->entries_size);
1177 newinfo->entries = p;
1179 newinfo->entries_size = repl->entries_size;
1180 newinfo->nentries = repl->nentries;
1182 if (countersize)
1183 memset(newinfo->counters, 0, countersize);
1185 /* fill in newinfo and parse the entries */
1186 newinfo->chainstack = NULL;
1187 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1188 if ((repl->valid_hooks & (1 << i)) == 0)
1189 newinfo->hook_entry[i] = NULL;
1190 else
1191 newinfo->hook_entry[i] = p +
1192 ((char *)repl->hook_entry[i] - repl->entries);
1194 ret = translate_table(net, repl->name, newinfo);
1195 if (ret != 0) {
1196 BUGPRINT("Translate_table failed\n");
1197 goto free_chainstack;
1200 if (table->check && table->check(newinfo, table->valid_hooks)) {
1201 BUGPRINT("The table doesn't like its own initial data, lol\n");
1202 return ERR_PTR(-EINVAL);
1205 table->private = newinfo;
1206 rwlock_init(&table->lock);
1207 ret = mutex_lock_interruptible(&ebt_mutex);
1208 if (ret != 0)
1209 goto free_chainstack;
1211 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1212 if (strcmp(t->name, table->name) == 0) {
1213 ret = -EEXIST;
1214 BUGPRINT("Table name already exists\n");
1215 goto free_unlock;
1219 /* Hold a reference count if the chains aren't empty */
1220 if (newinfo->nentries && !try_module_get(table->me)) {
1221 ret = -ENOENT;
1222 goto free_unlock;
1224 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1225 mutex_unlock(&ebt_mutex);
1226 return table;
1227 free_unlock:
1228 mutex_unlock(&ebt_mutex);
1229 free_chainstack:
1230 if (newinfo->chainstack) {
1231 for_each_possible_cpu(i)
1232 vfree(newinfo->chainstack[i]);
1233 vfree(newinfo->chainstack);
1235 vfree(newinfo->entries);
1236 free_newinfo:
1237 vfree(newinfo);
1238 free_table:
1239 kfree(table);
1240 out:
1241 return ERR_PTR(ret);
1244 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1246 int i;
1248 if (!table) {
1249 BUGPRINT("Request to unregister NULL table!!!\n");
1250 return;
1252 mutex_lock(&ebt_mutex);
1253 list_del(&table->list);
1254 mutex_unlock(&ebt_mutex);
1255 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1256 ebt_cleanup_entry, net, NULL);
1257 if (table->private->nentries)
1258 module_put(table->me);
1259 vfree(table->private->entries);
1260 if (table->private->chainstack) {
1261 for_each_possible_cpu(i)
1262 vfree(table->private->chainstack[i]);
1263 vfree(table->private->chainstack);
1265 vfree(table->private);
1266 kfree(table);
1269 /* userspace just supplied us with counters */
1270 static int do_update_counters(struct net *net, const char *name,
1271 struct ebt_counter __user *counters,
1272 unsigned int num_counters,
1273 const void __user *user, unsigned int len)
1275 int i, ret;
1276 struct ebt_counter *tmp;
1277 struct ebt_table *t;
1279 if (num_counters == 0)
1280 return -EINVAL;
1282 tmp = vmalloc(num_counters * sizeof(*tmp));
1283 if (!tmp)
1284 return -ENOMEM;
1286 t = find_table_lock(net, name, &ret, &ebt_mutex);
1287 if (!t)
1288 goto free_tmp;
1290 if (num_counters != t->private->nentries) {
1291 BUGPRINT("Wrong nr of counters\n");
1292 ret = -EINVAL;
1293 goto unlock_mutex;
1296 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1297 ret = -EFAULT;
1298 goto unlock_mutex;
1301 /* we want an atomic add of the counters */
1302 write_lock_bh(&t->lock);
1304 /* we add to the counters of the first cpu */
1305 for (i = 0; i < num_counters; i++) {
1306 t->private->counters[i].pcnt += tmp[i].pcnt;
1307 t->private->counters[i].bcnt += tmp[i].bcnt;
1310 write_unlock_bh(&t->lock);
1311 ret = 0;
1312 unlock_mutex:
1313 mutex_unlock(&ebt_mutex);
1314 free_tmp:
1315 vfree(tmp);
1316 return ret;
1319 static int update_counters(struct net *net, const void __user *user,
1320 unsigned int len)
1322 struct ebt_replace hlp;
1324 if (copy_from_user(&hlp, user, sizeof(hlp)))
1325 return -EFAULT;
1327 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1328 return -EINVAL;
1330 return do_update_counters(net, hlp.name, hlp.counters,
1331 hlp.num_counters, user, len);
1334 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1335 const char *base, char __user *ubase)
1337 char __user *hlp = ubase + ((char *)m - base);
1338 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1339 return -EFAULT;
1340 return 0;
1343 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1344 const char *base, char __user *ubase)
1346 char __user *hlp = ubase + ((char *)w - base);
1347 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1348 return -EFAULT;
1349 return 0;
1352 static inline int
1353 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1355 int ret;
1356 char __user *hlp;
1357 const struct ebt_entry_target *t;
1359 if (e->bitmask == 0)
1360 return 0;
1362 hlp = ubase + (((char *)e + e->target_offset) - base);
1363 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1365 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1366 if (ret != 0)
1367 return ret;
1368 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1369 if (ret != 0)
1370 return ret;
1371 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1372 return -EFAULT;
1373 return 0;
1376 static int copy_counters_to_user(struct ebt_table *t,
1377 const struct ebt_counter *oldcounters,
1378 void __user *user, unsigned int num_counters,
1379 unsigned int nentries)
1381 struct ebt_counter *counterstmp;
1382 int ret = 0;
1384 /* userspace might not need the counters */
1385 if (num_counters == 0)
1386 return 0;
1388 if (num_counters != nentries) {
1389 BUGPRINT("Num_counters wrong\n");
1390 return -EINVAL;
1393 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1394 if (!counterstmp)
1395 return -ENOMEM;
1397 write_lock_bh(&t->lock);
1398 get_counters(oldcounters, counterstmp, nentries);
1399 write_unlock_bh(&t->lock);
1401 if (copy_to_user(user, counterstmp,
1402 nentries * sizeof(struct ebt_counter)))
1403 ret = -EFAULT;
1404 vfree(counterstmp);
1405 return ret;
1408 /* called with ebt_mutex locked */
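/*
 * Serve EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES: copy the live or
 * initial entry blob and counters back to userspace and rewrite the
 * match/watcher/target fields so they carry extension names instead of
 * kernel pointers (ebt_make_names).
 */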
1409 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1410 const int *len, int cmd)
1412 struct ebt_replace tmp;
1413 const struct ebt_counter *oldcounters;
1414 unsigned int entries_size, nentries;
1415 int ret;
1416 char *entries;
1418 if (cmd == EBT_SO_GET_ENTRIES) {
1419 entries_size = t->private->entries_size;
1420 nentries = t->private->nentries;
1421 entries = t->private->entries;
1422 oldcounters = t->private->counters;
1423 } else {
1424 entries_size = t->table->entries_size;
1425 nentries = t->table->nentries;
1426 entries = t->table->entries;
1427 oldcounters = t->table->counters;
1430 if (copy_from_user(&tmp, user, sizeof(tmp)))
1431 return -EFAULT;
1433 if (*len != sizeof(struct ebt_replace) + entries_size +
1434 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1435 return -EINVAL;
1437 if (tmp.nentries != nentries) {
1438 BUGPRINT("Nentries wrong\n");
1439 return -EINVAL;
1442 if (tmp.entries_size != entries_size) {
1443 BUGPRINT("Wrong size\n");
1444 return -EINVAL;
1447 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1448 tmp.num_counters, nentries);
1449 if (ret)
1450 return ret;
1452 if (copy_to_user(tmp.entries, entries, entries_size)) {
1453 BUGPRINT("Couldn't copy entries to userspace\n");
1454 return -EFAULT;
1456 /* set the match/watcher/target names right */
1457 return EBT_ENTRY_ITERATE(entries, entries_size,
1458 ebt_make_names, entries, tmp.entries);
1461 static int do_ebt_set_ctl(struct sock *sk,
1462 int cmd, void __user *user, unsigned int len)
1464 int ret;
1466 if (!capable(CAP_NET_ADMIN))
1467 return -EPERM;
1469 switch(cmd) {
1470 case EBT_SO_SET_ENTRIES:
1471 ret = do_replace(sock_net(sk), user, len);
1472 break;
1473 case EBT_SO_SET_COUNTERS:
1474 ret = update_counters(sock_net(sk), user, len);
1475 break;
1476 default:
1477 ret = -EINVAL;
1479 return ret;
1482 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1484 int ret;
1485 struct ebt_replace tmp;
1486 struct ebt_table *t;
1488 if (!capable(CAP_NET_ADMIN))
1489 return -EPERM;
1491 if (copy_from_user(&tmp, user, sizeof(tmp)))
1492 return -EFAULT;
1494 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1495 if (!t)
1496 return ret;
1498 switch(cmd) {
1499 case EBT_SO_GET_INFO:
1500 case EBT_SO_GET_INIT_INFO:
1501 if (*len != sizeof(struct ebt_replace)){
1502 ret = -EINVAL;
1503 mutex_unlock(&ebt_mutex);
1504 break;
1506 if (cmd == EBT_SO_GET_INFO) {
1507 tmp.nentries = t->private->nentries;
1508 tmp.entries_size = t->private->entries_size;
1509 tmp.valid_hooks = t->valid_hooks;
1510 } else {
1511 tmp.nentries = t->table->nentries;
1512 tmp.entries_size = t->table->entries_size;
1513 tmp.valid_hooks = t->table->valid_hooks;
1515 mutex_unlock(&ebt_mutex);
1516 if (copy_to_user(user, &tmp, *len) != 0){
1517 BUGPRINT("c2u Didn't work\n");
1518 ret = -EFAULT;
1519 break;
1521 ret = 0;
1522 break;
1524 case EBT_SO_GET_ENTRIES:
1525 case EBT_SO_GET_INIT_ENTRIES:
1526 ret = copy_everything_to_user(t, user, len, cmd);
1527 mutex_unlock(&ebt_mutex);
1528 break;
1530 default:
1531 mutex_unlock(&ebt_mutex);
1532 ret = -EINVAL;
1535 return ret;
1538 #ifdef CONFIG_COMPAT
1539 /* 32 bit-userspace compatibility definitions. */
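/*
 * A 32-bit ebtables binary uses struct compat_ebt_replace and
 * compat_ebt_entry_mwt, whose pointer sizes and padding differ from the
 * native structures.  The helpers below translate in both directions:
 * compat_table_info()/compat_copy_entry_to_user() shrink a native table
 * for a 32-bit reader, while compat_copy_entries() and its helpers grow a
 * 32-bit blob into a native-sized kernel buffer, recording the per-entry
 * size deltas with xt_compat_add_offset().
 */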
1540 struct compat_ebt_replace {
1541 char name[EBT_TABLE_MAXNAMELEN];
1542 compat_uint_t valid_hooks;
1543 compat_uint_t nentries;
1544 compat_uint_t entries_size;
1545 /* start of the chains */
1546 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1547 /* nr of counters userspace expects back */
1548 compat_uint_t num_counters;
1549 /* where the kernel will put the old counters. */
1550 compat_uptr_t counters;
1551 compat_uptr_t entries;
1554 /* struct ebt_entry_match, _target and _watcher have same layout */
1555 struct compat_ebt_entry_mwt {
1556 union {
1557 char name[EBT_FUNCTION_MAXNAMELEN];
1558 compat_uptr_t ptr;
1559 } u;
1560 compat_uint_t match_size;
1561 compat_uint_t data[0];
1564 /* account for possible padding between match_size and ->data */
1565 static int ebt_compat_entry_padsize(void)
1567 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1568 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1569 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1570 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1573 static int ebt_compat_match_offset(const struct xt_match *match,
1574 unsigned int userlen)
1577 * ebt_among needs special handling. The kernel .matchsize is
1578 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1579 * value is expected.
1580 * Example: userspace sends 4500, ebt_among.c wants 4504.
1582 if (unlikely(match->matchsize == -1))
1583 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1584 return xt_compat_match_offset(match);
1587 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1588 unsigned int *size)
1590 const struct xt_match *match = m->u.match;
1591 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1592 int off = ebt_compat_match_offset(match, m->match_size);
1593 compat_uint_t msize = m->match_size - off;
1595 BUG_ON(off >= m->match_size);
1597 if (copy_to_user(cm->u.name, match->name,
1598 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1599 return -EFAULT;
1601 if (match->compat_to_user) {
1602 if (match->compat_to_user(cm->data, m->data))
1603 return -EFAULT;
1604 } else if (copy_to_user(cm->data, m->data, msize))
1605 return -EFAULT;
1607 *size -= ebt_compat_entry_padsize() + off;
1608 *dstptr = cm->data;
1609 *dstptr += msize;
1610 return 0;
1613 static int compat_target_to_user(struct ebt_entry_target *t,
1614 void __user **dstptr,
1615 unsigned int *size)
1617 const struct xt_target *target = t->u.target;
1618 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1619 int off = xt_compat_target_offset(target);
1620 compat_uint_t tsize = t->target_size - off;
1622 BUG_ON(off >= t->target_size);
1624 if (copy_to_user(cm->u.name, target->name,
1625 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1626 return -EFAULT;
1628 if (target->compat_to_user) {
1629 if (target->compat_to_user(cm->data, t->data))
1630 return -EFAULT;
1631 } else if (copy_to_user(cm->data, t->data, tsize))
1632 return -EFAULT;
1634 *size -= ebt_compat_entry_padsize() + off;
1635 *dstptr = cm->data;
1636 *dstptr += tsize;
1637 return 0;
1640 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1641 void __user **dstptr,
1642 unsigned int *size)
1644 return compat_target_to_user((struct ebt_entry_target *)w,
1645 dstptr, size);
1648 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1649 unsigned int *size)
1651 struct ebt_entry_target *t;
1652 struct ebt_entry __user *ce;
1653 u32 watchers_offset, target_offset, next_offset;
1654 compat_uint_t origsize;
1655 int ret;
1657 if (e->bitmask == 0) {
1658 if (*size < sizeof(struct ebt_entries))
1659 return -EINVAL;
1660 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1661 return -EFAULT;
1663 *dstptr += sizeof(struct ebt_entries);
1664 *size -= sizeof(struct ebt_entries);
1665 return 0;
1668 if (*size < sizeof(*ce))
1669 return -EINVAL;
1671 ce = (struct ebt_entry __user *)*dstptr;
1672 if (copy_to_user(ce, e, sizeof(*ce)))
1673 return -EFAULT;
1675 origsize = *size;
1676 *dstptr += sizeof(*ce);
1678 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1679 if (ret)
1680 return ret;
1681 watchers_offset = e->watchers_offset - (origsize - *size);
1683 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1684 if (ret)
1685 return ret;
1686 target_offset = e->target_offset - (origsize - *size);
1688 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1690 ret = compat_target_to_user(t, dstptr, size);
1691 if (ret)
1692 return ret;
1693 next_offset = e->next_offset - (origsize - *size);
1695 if (put_user(watchers_offset, &ce->watchers_offset) ||
1696 put_user(target_offset, &ce->target_offset) ||
1697 put_user(next_offset, &ce->next_offset))
1698 return -EFAULT;
1700 *size -= sizeof(*ce);
1701 return 0;
1704 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1706 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1707 *off += ebt_compat_entry_padsize();
1708 return 0;
1711 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1713 *off += xt_compat_target_offset(w->u.watcher);
1714 *off += ebt_compat_entry_padsize();
1715 return 0;
1718 static int compat_calc_entry(const struct ebt_entry *e,
1719 const struct ebt_table_info *info,
1720 const void *base,
1721 struct compat_ebt_replace *newinfo)
1723 const struct ebt_entry_target *t;
1724 unsigned int entry_offset;
1725 int off, ret, i;
1727 if (e->bitmask == 0)
1728 return 0;
1730 off = 0;
1731 entry_offset = (void *)e - base;
1733 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1734 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1736 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1738 off += xt_compat_target_offset(t->u.target);
1739 off += ebt_compat_entry_padsize();
1741 newinfo->entries_size -= off;
1743 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1744 if (ret)
1745 return ret;
1747 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1748 const void *hookptr = info->hook_entry[i];
1749 if (info->hook_entry[i] &&
1750 (e < (struct ebt_entry *)(base - hookptr))) {
1751 newinfo->hook_entry[i] -= off;
1752 pr_debug("0x%08X -> 0x%08X\n",
1753 newinfo->hook_entry[i] + off,
1754 newinfo->hook_entry[i]);
1758 return 0;
1762 static int compat_table_info(const struct ebt_table_info *info,
1763 struct compat_ebt_replace *newinfo)
1765 unsigned int size = info->entries_size;
1766 const void *entries = info->entries;
1768 newinfo->entries_size = size;
1770 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1771 entries, newinfo);
1774 static int compat_copy_everything_to_user(struct ebt_table *t,
1775 void __user *user, int *len, int cmd)
1777 struct compat_ebt_replace repl, tmp;
1778 struct ebt_counter *oldcounters;
1779 struct ebt_table_info tinfo;
1780 int ret;
1781 void __user *pos;
1783 memset(&tinfo, 0, sizeof(tinfo));
1785 if (cmd == EBT_SO_GET_ENTRIES) {
1786 tinfo.entries_size = t->private->entries_size;
1787 tinfo.nentries = t->private->nentries;
1788 tinfo.entries = t->private->entries;
1789 oldcounters = t->private->counters;
1790 } else {
1791 tinfo.entries_size = t->table->entries_size;
1792 tinfo.nentries = t->table->nentries;
1793 tinfo.entries = t->table->entries;
1794 oldcounters = t->table->counters;
1797 if (copy_from_user(&tmp, user, sizeof(tmp)))
1798 return -EFAULT;
1800 if (tmp.nentries != tinfo.nentries ||
1801 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1802 return -EINVAL;
1804 memcpy(&repl, &tmp, sizeof(repl));
1805 if (cmd == EBT_SO_GET_ENTRIES)
1806 ret = compat_table_info(t->private, &repl);
1807 else
1808 ret = compat_table_info(&tinfo, &repl);
1809 if (ret)
1810 return ret;
1812 if (*len != sizeof(tmp) + repl.entries_size +
1813 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1814 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1815 *len, tinfo.entries_size, repl.entries_size);
1816 return -EINVAL;
1819 /* userspace might not need the counters */
1820 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1821 tmp.num_counters, tinfo.nentries);
1822 if (ret)
1823 return ret;
1825 pos = compat_ptr(tmp.entries);
1826 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1827 compat_copy_entry_to_user, &pos, &tmp.entries_size);
1830 struct ebt_entries_buf_state {
1831 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1832 u32 buf_kern_len; /* total size of kernel buffer */
1833 u32 buf_kern_offset; /* amount of data copied so far */
1834 u32 buf_user_offset; /* read position in userspace buffer */
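/*
 * ebt_buf_add()/ebt_buf_add_pad() and compat_mtw_from_user() operate in
 * two modes: with buf_kern_start == NULL they only account for how much
 * room the translated entries will need; with a kernel buffer present
 * they also copy and translate the data into it.
 */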
1837 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1839 state->buf_kern_offset += sz;
1840 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1843 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1844 void *data, unsigned int sz)
1846 if (state->buf_kern_start == NULL)
1847 goto count_only;
1849 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1851 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1853 count_only:
1854 state->buf_user_offset += sz;
1855 return ebt_buf_count(state, sz);
1858 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1860 char *b = state->buf_kern_start;
1862 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1864 if (b != NULL && sz > 0)
1865 memset(b + state->buf_kern_offset, 0, sz);
1866 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1867 return ebt_buf_count(state, sz);
1870 enum compat_mwt {
1871 EBT_COMPAT_MATCH,
1872 EBT_COMPAT_WATCHER,
1873 EBT_COMPAT_TARGET,
1876 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1877 enum compat_mwt compat_mwt,
1878 struct ebt_entries_buf_state *state,
1879 const unsigned char *base)
1881 char name[EBT_FUNCTION_MAXNAMELEN];
1882 struct xt_match *match;
1883 struct xt_target *wt;
1884 void *dst = NULL;
1885 int off, pad = 0, ret = 0;
1886 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1888 strlcpy(name, mwt->u.name, sizeof(name));
1890 if (state->buf_kern_start)
1891 dst = state->buf_kern_start + state->buf_kern_offset;
1893 entry_offset = (unsigned char *) mwt - base;
1894 switch (compat_mwt) {
1895 case EBT_COMPAT_MATCH:
1896 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1897 name, 0), "ebt_%s", name);
1898 if (match == NULL)
1899 return -ENOENT;
1900 if (IS_ERR(match))
1901 return PTR_ERR(match);
1903 off = ebt_compat_match_offset(match, match_size);
1904 if (dst) {
1905 if (match->compat_from_user)
1906 match->compat_from_user(dst, mwt->data);
1907 else
1908 memcpy(dst, mwt->data, match_size);
1911 size_kern = match->matchsize;
1912 if (unlikely(size_kern == -1))
1913 size_kern = match_size;
1914 module_put(match->me);
1915 break;
1916 case EBT_COMPAT_WATCHER: /* fallthrough */
1917 case EBT_COMPAT_TARGET:
1918 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1919 name, 0), "ebt_%s", name);
1920 if (wt == NULL)
1921 return -ENOENT;
1922 if (IS_ERR(wt))
1923 return PTR_ERR(wt);
1924 off = xt_compat_target_offset(wt);
1926 if (dst) {
1927 if (wt->compat_from_user)
1928 wt->compat_from_user(dst, mwt->data);
1929 else
1930 memcpy(dst, mwt->data, match_size);
1933 size_kern = wt->targetsize;
1934 module_put(wt->me);
1935 break;
1938 if (!dst) {
1939 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1940 off + ebt_compat_entry_padsize());
1941 if (ret < 0)
1942 return ret;
1945 state->buf_kern_offset += match_size + off;
1946 state->buf_user_offset += match_size;
1947 pad = XT_ALIGN(size_kern) - size_kern;
1949 if (pad > 0 && dst) {
1950 BUG_ON(state->buf_kern_len <= pad);
1951 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1952 memset(dst + size_kern, 0, pad);
1954 return off + match_size;
1958 * return size of all matches, watchers or target, including necessary
1959 * alignment and padding.
1961 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1962 unsigned int size_left, enum compat_mwt type,
1963 struct ebt_entries_buf_state *state, const void *base)
1965 int growth = 0;
1966 char *buf;
1968 if (size_left == 0)
1969 return 0;
1971 buf = (char *) match32;
1973 while (size_left >= sizeof(*match32)) {
1974 struct ebt_entry_match *match_kern;
1975 int ret;
1977 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1978 if (match_kern) {
1979 char *tmp;
1980 tmp = state->buf_kern_start + state->buf_kern_offset;
1981 match_kern = (struct ebt_entry_match *) tmp;
1983 ret = ebt_buf_add(state, buf, sizeof(*match32));
1984 if (ret < 0)
1985 return ret;
1986 size_left -= sizeof(*match32);
1988 /* add padding before match->data (if any) */
1989 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1990 if (ret < 0)
1991 return ret;
1993 if (match32->match_size > size_left)
1994 return -EINVAL;
1996 size_left -= match32->match_size;
1998 ret = compat_mtw_from_user(match32, type, state, base);
1999 if (ret < 0)
2000 return ret;
2002 BUG_ON(ret < match32->match_size);
2003 growth += ret - match32->match_size;
2004 growth += ebt_compat_entry_padsize();
2006 buf += sizeof(*match32);
2007 buf += match32->match_size;
2009 if (match_kern)
2010 match_kern->match_size = ret;
2012 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2013 match32 = (struct compat_ebt_entry_mwt *) buf;
2016 return growth;
2019 #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2020 ({ \
2021 unsigned int __i; \
2022 int __ret = 0; \
2023 struct compat_ebt_entry_mwt *__watcher; \
2025 for (__i = e->watchers_offset; \
2026 __i < (e)->target_offset; \
2027 __i += __watcher->watcher_size + \
2028 sizeof(struct compat_ebt_entry_mwt)) { \
2029 __watcher = (void *)(e) + __i; \
2030 __ret = fn(__watcher , ## args); \
2031 if (__ret != 0) \
2032 break; \
2034 if (__ret == 0) { \
2035 if (__i != (e)->target_offset) \
2036 __ret = -EINVAL; \
2038 __ret; \
2041 #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2042 ({ \
2043 unsigned int __i; \
2044 int __ret = 0; \
2045 struct compat_ebt_entry_mwt *__match; \
2047 for (__i = sizeof(struct ebt_entry); \
2048 __i < (e)->watchers_offset; \
2049 __i += __match->match_size + \
2050 sizeof(struct compat_ebt_entry_mwt)) { \
2051 __match = (void *)(e) + __i; \
2052 __ret = fn(__match , ## args); \
2053 if (__ret != 0) \
2054 break; \
2056 if (__ret == 0) { \
2057 if (__i != (e)->watchers_offset) \
2058 __ret = -EINVAL; \
2060 __ret; \
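/*
 * Usage sketch for the two iterator macros above; the callback and the
 * entry32 variable are hypothetical and not part of this file. The macro
 * hands each struct compat_ebt_entry_mwt to fn() together with the extra
 * arguments, stops on the first non-zero return value, and yields -EINVAL
 * if the walk does not end exactly on the expected offset:
 *
 *	static int count_one(struct compat_ebt_entry_mwt *m, unsigned int *n)
 *	{
 *		(*n)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	int err = EBT_COMPAT_MATCH_ITERATE(entry32, count_one, &n);
 */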
2063 /* called for all ebt_entry structures. */
2064 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2065 unsigned int *total,
2066 struct ebt_entries_buf_state *state)
2068 unsigned int i, j, startoff, new_offset = 0;
2069 /* stores the offsets of matches/watchers/target & of the next struct ebt_entry: */
2070 unsigned int offsets[4];
2071 unsigned int *offsets_update = NULL;
2072 int ret;
2073 char *buf_start;
2075 if (*total < sizeof(struct ebt_entries))
2076 return -EINVAL;
2078 if (!entry->bitmask) {
2079 *total -= sizeof(struct ebt_entries);
2080 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2082 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2083 return -EINVAL;
2085 startoff = state->buf_user_offset;
2086 /* pull in most of the ebt_entry; it does not need to be changed. */
2087 ret = ebt_buf_add(state, entry,
2088 offsetof(struct ebt_entry, watchers_offset));
2089 if (ret < 0)
2090 return ret;
2092 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2093 memcpy(&offsets[1], &entry->watchers_offset,
2094 sizeof(offsets) - sizeof(offsets[0]));
2096 if (state->buf_kern_start) {
2097 buf_start = state->buf_kern_start + state->buf_kern_offset;
2098 offsets_update = (unsigned int *) buf_start;
2100 ret = ebt_buf_add(state, &offsets[1],
2101 sizeof(offsets) - sizeof(offsets[0]));
2102 if (ret < 0)
2103 return ret;
2104 buf_start = (char *) entry;
2106 * 0: matches offset, always follows ebt_entry.
2107 * 1: watchers offset, from ebt_entry structure
2108 * 2: target offset, from ebt_entry structure
2109 * 3: next ebt_entry offset, from ebt_entry structure
2111 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2113 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2114 struct compat_ebt_entry_mwt *match32;
2115 unsigned int size;
2116 char *buf = buf_start;
2118 buf = buf_start + offsets[i];
2119 if (offsets[i] > offsets[j])
2120 return -EINVAL;
2122 match32 = (struct compat_ebt_entry_mwt *) buf;
2123 size = offsets[j] - offsets[i];
2124 ret = ebt_size_mwt(match32, size, i, state, base);
2125 if (ret < 0)
2126 return ret;
2127 new_offset += ret;
2128 if (offsets_update && new_offset) {
2129 pr_debug("change offset %d to %d\n",
2130 offsets_update[i], offsets[j] + new_offset);
2131 offsets_update[i] = offsets[j] + new_offset;
2135 startoff = state->buf_user_offset - startoff;
2137 BUG_ON(*total < startoff);
2138 *total -= startoff;
2139 return 0;
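/*
 * Offset layout handled by the loop above, restated for one 32 bit entry:
 *
 *	offsets[0] = sizeof(struct ebt_entry)	matches start right behind it
 *	offsets[1] = entry->watchers_offset	start of the watchers
 *	offsets[2] = entry->target_offset	start of the single target
 *	offsets[3] = entry->next_offset		start of the next ebt_entry
 *
 * each iteration passes the region [offsets[i], offsets[j]) to
 * ebt_size_mwt() as matches (i == 0), watchers (i == 1) or the target
 * (i == 2), and accumulates the resulting growth in new_offset.
 */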
2143 * repl->entries_size is the size of the ebt_entry blob in userspace.
2144 * It might need more memory when copied to a 64 bit kernel if
2145 * userspace is 32-bit. So, first task: find out how much memory is needed.
2147 * Called before validation is performed.
2149 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2150 struct ebt_entries_buf_state *state)
2152 unsigned int size_remaining = size_user;
2153 int ret;
2155 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2156 &size_remaining, state);
2157 if (ret < 0)
2158 return ret;
2160 WARN_ON(size_remaining);
2161 return state->buf_kern_offset;
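/*
 * Minimal sketch of the two-pass pattern that compat_do_replace() below
 * uses with this function: a sizing pass with buf_kern_start left NULL
 * (nothing is written, only the offsets advance), then a copy pass into a
 * buffer of the size learned in pass one (kernbuf stands in for
 * newinfo->entries; error handling omitted):
 *
 *	memset(&state, 0, sizeof(state));
 *	size64 = compat_copy_entries(data, size_user, &state);
 *
 *	kernbuf = vmalloc(size64);
 *	memset(&state, 0, sizeof(state));
 *	state.buf_kern_start = kernbuf;
 *	state.buf_kern_len = size64;
 *	compat_copy_entries(data, size_user, &state);
 */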
2165 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2166 void __user *user, unsigned int len)
2168 struct compat_ebt_replace tmp;
2169 int i;
2171 if (len < sizeof(tmp))
2172 return -EINVAL;
2174 if (copy_from_user(&tmp, user, sizeof(tmp)))
2175 return -EFAULT;
2177 if (len != sizeof(tmp) + tmp.entries_size)
2178 return -EINVAL;
2180 if (tmp.entries_size == 0)
2181 return -EINVAL;
2183 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2184 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2185 return -ENOMEM;
2186 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2187 return -ENOMEM;
2189 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2191 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2192 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2193 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2195 repl->num_counters = tmp.num_counters;
2196 repl->counters = compat_ptr(tmp.counters);
2197 repl->entries = compat_ptr(tmp.entries);
2198 return 0;
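/*
 * Pointer widening note for the function above: in the 32 bit
 * compat_ebt_replace layout the user pointers are stored as compat_uptr_t
 * values, so each one is converted back into a real user pointer with
 * compat_ptr(), e.g.
 *
 *	repl->entries = compat_ptr(tmp.entries);
 *
 * while everything in front of hook_entry has an identical layout in both
 * variants and is taken over with a single memcpy().
 */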
2201 static int compat_do_replace(struct net *net, void __user *user,
2202 unsigned int len)
2204 int ret, i, countersize, size64;
2205 struct ebt_table_info *newinfo;
2206 struct ebt_replace tmp;
2207 struct ebt_entries_buf_state state;
2208 void *entries_tmp;
2210 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2211 if (ret) {
2212 /* try real handler in case userland supplied needed padding */
2213 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2214 ret = 0;
2215 return ret;
2218 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2219 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2220 if (!newinfo)
2221 return -ENOMEM;
2223 if (countersize)
2224 memset(newinfo->counters, 0, countersize);
2226 memset(&state, 0, sizeof(state));
2228 newinfo->entries = vmalloc(tmp.entries_size);
2229 if (!newinfo->entries) {
2230 ret = -ENOMEM;
2231 goto free_newinfo;
2233 if (copy_from_user(
2234 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2235 ret = -EFAULT;
2236 goto free_entries;
2239 entries_tmp = newinfo->entries;
2241 xt_compat_lock(NFPROTO_BRIDGE);
2243 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2244 if (ret < 0)
2245 goto out_unlock;
2247 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2248 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2249 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2251 size64 = ret;
2252 newinfo->entries = vmalloc(size64);
2253 if (!newinfo->entries) {
2254 vfree(entries_tmp);
2255 ret = -ENOMEM;
2256 goto out_unlock;
2259 memset(&state, 0, sizeof(state));
2260 state.buf_kern_start = newinfo->entries;
2261 state.buf_kern_len = size64;
2263 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2264 BUG_ON(ret < 0); /* parses same data again */
2266 vfree(entries_tmp);
2267 tmp.entries_size = size64;
2269 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2270 char __user *usrptr;
2271 if (tmp.hook_entry[i]) {
2272 unsigned int delta;
2273 usrptr = (char __user *) tmp.hook_entry[i];
2274 delta = usrptr - tmp.entries;
2275 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2276 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2280 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2281 xt_compat_unlock(NFPROTO_BRIDGE);
2283 ret = do_replace_finish(net, &tmp, newinfo);
2284 if (ret == 0)
2285 return ret;
2286 free_entries:
2287 vfree(newinfo->entries);
2288 free_newinfo:
2289 vfree(newinfo);
2290 return ret;
2291 out_unlock:
2292 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2293 xt_compat_unlock(NFPROTO_BRIDGE);
2294 goto free_entries;
2297 static int compat_update_counters(struct net *net, void __user *user,
2298 unsigned int len)
2300 struct compat_ebt_replace hlp;
2302 if (copy_from_user(&hlp, user, sizeof(hlp)))
2303 return -EFAULT;
2305 /* try real handler in case userland supplied needed padding */
2306 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2307 return update_counters(net, user, len);
2309 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2310 hlp.num_counters, user, len);
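/*
 * Worked example of the length check above: with hlp.num_counters == 3 and
 * sizeof(struct ebt_counter) == 16 (one 64 bit packet and one 64 bit byte
 * counter), the compat path only accepts
 *
 *	len == sizeof(struct compat_ebt_replace) + 3 * 16
 *
 * and any other length is treated as a native, already padded request and
 * forwarded to update_counters().
 */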
2313 static int compat_do_ebt_set_ctl(struct sock *sk,
2314 int cmd, void __user *user, unsigned int len)
2316 int ret;
2318 if (!capable(CAP_NET_ADMIN))
2319 return -EPERM;
2321 switch (cmd) {
2322 case EBT_SO_SET_ENTRIES:
2323 ret = compat_do_replace(sock_net(sk), user, len);
2324 break;
2325 case EBT_SO_SET_COUNTERS:
2326 ret = compat_update_counters(sock_net(sk), user, len);
2327 break;
2328 default:
2329 ret = -EINVAL;
2331 return ret;
2334 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2335 void __user *user, int *len)
2337 int ret;
2338 struct compat_ebt_replace tmp;
2339 struct ebt_table *t;
2341 if (!capable(CAP_NET_ADMIN))
2342 return -EPERM;
2344 /* try real handler in case userland supplied needed padding */
2345 if ((cmd == EBT_SO_GET_INFO ||
2346 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2347 return do_ebt_get_ctl(sk, cmd, user, len);
2349 if (copy_from_user(&tmp, user, sizeof(tmp)))
2350 return -EFAULT;
2352 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2353 if (!t)
2354 return ret;
2356 xt_compat_lock(NFPROTO_BRIDGE);
2357 switch (cmd) {
2358 case EBT_SO_GET_INFO:
2359 tmp.nentries = t->private->nentries;
2360 ret = compat_table_info(t->private, &tmp);
2361 if (ret)
2362 goto out;
2363 tmp.valid_hooks = t->valid_hooks;
2365 if (copy_to_user(user, &tmp, *len) != 0) {
2366 ret = -EFAULT;
2367 break;
2369 ret = 0;
2370 break;
2371 case EBT_SO_GET_INIT_INFO:
2372 tmp.nentries = t->table->nentries;
2373 tmp.entries_size = t->table->entries_size;
2374 tmp.valid_hooks = t->table->valid_hooks;
2376 if (copy_to_user(user, &tmp, *len) != 0) {
2377 ret = -EFAULT;
2378 break;
2380 ret = 0;
2381 break;
2382 case EBT_SO_GET_ENTRIES:
2383 case EBT_SO_GET_INIT_ENTRIES:
2385 * try real handler first in case of userland-side padding.
2386 * if we are dealing with an 'ordinary' 32 bit binary
2387 * without 64bit compatibility padding, this will fail right
2388 * after copy_from_user when the *len argument is validated.
2390 * the compat_ variant needs to do one pass over the kernel
2391 * data set to adjust for size differences before the check.
2393 if (copy_everything_to_user(t, user, len, cmd) == 0)
2394 ret = 0;
2395 else
2396 ret = compat_copy_everything_to_user(t, user, len, cmd);
2397 break;
2398 default:
2399 ret = -EINVAL;
2401 out:
2402 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2403 xt_compat_unlock(NFPROTO_BRIDGE);
2404 mutex_unlock(&ebt_mutex);
2405 return ret;
2407 #endif
2409 static struct nf_sockopt_ops ebt_sockopts =
2411 .pf = PF_INET,
2412 .set_optmin = EBT_BASE_CTL,
2413 .set_optmax = EBT_SO_SET_MAX + 1,
2414 .set = do_ebt_set_ctl,
2415 #ifdef CONFIG_COMPAT
2416 .compat_set = compat_do_ebt_set_ctl,
2417 #endif
2418 .get_optmin = EBT_BASE_CTL,
2419 .get_optmax = EBT_SO_GET_MAX + 1,
2420 .get = do_ebt_get_ctl,
2421 #ifdef CONFIG_COMPAT
2422 .compat_get = compat_do_ebt_get_ctl,
2423 #endif
2424 .owner = THIS_MODULE,
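/*
 * Hypothetical userspace sketch (not part of this file) of how these
 * sockopt handlers are reached: the ops are registered with .pf = PF_INET,
 * so a process with CAP_NET_ADMIN can use an ordinary IPv4 socket, level
 * IPPROTO_IP and an optname inside [EBT_BASE_CTL, EBT_SO_SET_MAX] to end
 * up in do_ebt_set_ctl() or its compat_ variant:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	setsockopt(fd, IPPROTO_IP, EBT_SO_SET_ENTRIES, repl,
 *		   sizeof(*repl) + repl->entries_size);
 *
 * where repl points to a struct ebt_replace immediately followed by the
 * entry blob, matching the len validation in the replace handlers above.
 */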
2427 static int __init ebtables_init(void)
2429 int ret;
2431 ret = xt_register_target(&ebt_standard_target);
2432 if (ret < 0)
2433 return ret;
2434 ret = nf_register_sockopt(&ebt_sockopts);
2435 if (ret < 0) {
2436 xt_unregister_target(&ebt_standard_target);
2437 return ret;
2440 printk(KERN_INFO "Ebtables v2.0 registered\n");
2441 return 0;
2444 static void __exit ebtables_fini(void)
2446 nf_unregister_sockopt(&ebt_sockopts);
2447 xt_unregister_target(&ebt_standard_target);
2448 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2451 EXPORT_SYMBOL(ebt_register_table);
2452 EXPORT_SYMBOL(ebt_unregister_table);
2453 EXPORT_SYMBOL(ebt_do_table);
2454 module_init(ebtables_init);
2455 module_exit(ebtables_fini);
2456 MODULE_LICENSE("GPL");