net/bridge/netfilter/ebtables.c
1 /*
2 * ebtables
4 * Author:
5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
19 #include <linux/kmod.h>
20 #include <linux/module.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netfilter/x_tables.h>
23 #include <linux/netfilter_bridge/ebtables.h>
24 #include <linux/spinlock.h>
25 #include <linux/mutex.h>
26 #include <linux/slab.h>
27 #include <asm/uaccess.h>
28 #include <linux/smp.h>
29 #include <linux/cpumask.h>
30 #include <net/sock.h>
31 /* needed for logical [in,out]-dev filtering */
32 #include "../br_private.h"
34 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
35 "report to author: "format, ## args)
36 /* #define BUGPRINT(format, args...) */
39 * Each cpu has its own set of counters, so there is no need for write_lock in
40 * the softirq
41 * For reading or updating the counters, the user context needs to
42 * get a write_lock
45 /* The size of each set of counters is altered to get cache alignment */
46 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
47 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
48 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
49 COUNTER_OFFSET(n) * cpu))
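/*
 * Illustrative sketch (not part of the original file): with n entries per
 * cpu, each cpu owns one cache-aligned block of counters, so cpu 2's copy
 * of entry 0 lives at COUNTER_BASE(base, n, 2)[0].  A hypothetical helper
 * that sums the packet count of one entry across all cpus could look like
 * the function below; get_counters() later in this file does the same for
 * a whole table.
 */
static inline u64 example_sum_pcnt(struct ebt_counter *base,
				   unsigned int nentries, unsigned int idx)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += COUNTER_BASE(base, nentries, cpu)[idx].pcnt;
	return sum;
}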
53 static DEFINE_MUTEX(ebt_mutex);
55 #ifdef CONFIG_COMPAT
56 static void ebt_standard_compat_from_user(void *dst, const void *src)
58 int v = *(compat_int_t *)src;
60 if (v >= 0)
61 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
62 memcpy(dst, &v, sizeof(v));
65 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
67 compat_int_t cv = *(int *)src;
69 if (cv >= 0)
70 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
71 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
73 #endif
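/*
 * Illustrative note (assumption, not in the original file): a positive
 * standard-target "verdict" is really the byte offset of the jump target
 * inside the entries blob, so a 32-bit ruleset loaded on a 64-bit kernel
 * needs its offsets shifted by however much the preceding entries grew
 * during translation.  That delta is what xt_compat_calc_jump() supplies,
 * roughly:
 *
 *	kernel_offset = user_offset + xt_compat_calc_jump(NFPROTO_BRIDGE,
 *							  user_offset);
 *
 * which is why it is added when copying from userspace above and
 * subtracted again when copying back to userspace.
 */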
76 static struct xt_target ebt_standard_target = {
77 .name = "standard",
78 .revision = 0,
79 .family = NFPROTO_BRIDGE,
80 .targetsize = sizeof(int),
81 #ifdef CONFIG_COMPAT
82 .compatsize = sizeof(compat_int_t),
83 .compat_from_user = ebt_standard_compat_from_user,
84 .compat_to_user = ebt_standard_compat_to_user,
85 #endif
88 static inline int
89 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
90 struct xt_target_param *par)
92 par->target = w->u.watcher;
93 par->targinfo = w->data;
94 w->u.watcher->target(skb, par);
95 /* watchers don't give a verdict */
96 return 0;
99 static inline int ebt_do_match (struct ebt_entry_match *m,
100 const struct sk_buff *skb, struct xt_match_param *par)
102 par->match = m->u.match;
103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
107 static inline int
108 ebt_dev_check(const char *entry, const struct net_device *device)
110 int i = 0;
111 const char *devname;
113 if (*entry == '\0')
114 return 0;
115 if (!device)
116 return 1;
117 devname = device->name;
118 /* 1 is the wildcard token */
119 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
120 i++;
121 return (devname[i] != entry[i] && entry[i] != 1);
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
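/*
 * Worked example (not part of the original file): FWINV2 just XORs the
 * plain test with the corresponding inversion flag, so for an entry with
 * EBT_IIN set in e->invflags,
 *
 *	FWINV2(ebt_dev_check(e->in, in), EBT_IIN)
 *		== ebt_dev_check(e->in, in) ^ 1
 *
 * i.e. the result of the device comparison is simply negated, which is
 * how inverted ("! -i ethX" style) rules reuse the same comparison code.
 */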
125 /* process standard matches */
126 static inline int
127 ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
128 const struct net_device *in, const struct net_device *out)
130 int verdict, i;
132 if (e->bitmask & EBT_802_3) {
133 if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
134 return 1;
135 } else if (!(e->bitmask & EBT_NOPROTO) &&
136 FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
137 return 1;
139 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
140 return 1;
141 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
142 return 1;
143 if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
144 e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
145 return 1;
146 if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
147 e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
148 return 1;
150 if (e->bitmask & EBT_SOURCEMAC) {
151 verdict = 0;
152 for (i = 0; i < 6; i++)
153 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
154 e->sourcemsk[i];
155 if (FWINV2(verdict != 0, EBT_ISOURCE) )
156 return 1;
158 if (e->bitmask & EBT_DESTMAC) {
159 verdict = 0;
160 for (i = 0; i < 6; i++)
161 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
162 e->destmsk[i];
163 if (FWINV2(verdict != 0, EBT_IDEST) )
164 return 1;
166 return 0;
169 static inline __pure
170 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
172 return (void *)entry + entry->next_offset;
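/*
 * Illustrative sketch (not part of the original file): entries are packed
 * back to back, so walking one chain is just pointer arithmetic on
 * next_offset.  A hypothetical visitor over a struct ebt_entries could
 * look like:
 *
 *	struct ebt_entry *e = (struct ebt_entry *)chain->data;
 *	unsigned int i;
 *
 *	for (i = 0; i < chain->nentries; i++, e = ebt_next_entry(e))
 *		visit(e);
 *
 * where chain and visit() are hypothetical; ebt_do_table() below uses the
 * same shape, with extra bookkeeping for jumps into user defined chains.
 */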
175 /* Do some firewalling */
176 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
177 const struct net_device *in, const struct net_device *out,
178 struct ebt_table *table)
180 int i, nentries;
181 struct ebt_entry *point;
182 struct ebt_counter *counter_base, *cb_base;
183 const struct ebt_entry_target *t;
184 int verdict, sp = 0;
185 struct ebt_chainstack *cs;
186 struct ebt_entries *chaininfo;
187 const char *base;
188 const struct ebt_table_info *private;
189 bool hotdrop = false;
190 struct xt_match_param mtpar;
191 struct xt_target_param tgpar;
193 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
194 mtpar.in = tgpar.in = in;
195 mtpar.out = tgpar.out = out;
196 mtpar.hotdrop = &hotdrop;
197 mtpar.hooknum = tgpar.hooknum = hook;
199 read_lock_bh(&table->lock);
200 private = table->private;
201 cb_base = COUNTER_BASE(private->counters, private->nentries,
202 smp_processor_id());
203 if (private->chainstack)
204 cs = private->chainstack[smp_processor_id()];
205 else
206 cs = NULL;
207 chaininfo = private->hook_entry[hook];
208 nentries = private->hook_entry[hook]->nentries;
209 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
210 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
211 /* base for chain jumps */
212 base = private->entries;
213 i = 0;
214 while (i < nentries) {
215 if (ebt_basic_match(point, eth_hdr(skb), in, out))
216 goto letscontinue;
218 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &mtpar) != 0)
219 goto letscontinue;
220 if (hotdrop) {
221 read_unlock_bh(&table->lock);
222 return NF_DROP;
225 /* increase counter */
226 (*(counter_base + i)).pcnt++;
227 (*(counter_base + i)).bcnt += skb->len;
229 /* these should only watch: not modify, nor tell us
230 what to do with the packet */
231 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &tgpar);
233 t = (struct ebt_entry_target *)
234 (((char *)point) + point->target_offset);
235 /* standard target */
236 if (!t->u.target->target)
237 verdict = ((struct ebt_standard_target *)t)->verdict;
238 else {
239 tgpar.target = t->u.target;
240 tgpar.targinfo = t->data;
241 verdict = t->u.target->target(skb, &tgpar);
243 if (verdict == EBT_ACCEPT) {
244 read_unlock_bh(&table->lock);
245 return NF_ACCEPT;
247 if (verdict == EBT_DROP) {
248 read_unlock_bh(&table->lock);
249 return NF_DROP;
251 if (verdict == EBT_RETURN) {
252 letsreturn:
253 #ifdef CONFIG_NETFILTER_DEBUG
254 if (sp == 0) {
255 BUGPRINT("RETURN on base chain");
256 /* act like this is EBT_CONTINUE */
257 goto letscontinue;
259 #endif
260 sp--;
261 /* put all the local variables right */
262 i = cs[sp].n;
263 chaininfo = cs[sp].chaininfo;
264 nentries = chaininfo->nentries;
265 point = cs[sp].e;
266 counter_base = cb_base +
267 chaininfo->counter_offset;
268 continue;
270 if (verdict == EBT_CONTINUE)
271 goto letscontinue;
272 #ifdef CONFIG_NETFILTER_DEBUG
273 if (verdict < 0) {
274 BUGPRINT("bogus standard verdict\n");
275 read_unlock_bh(&table->lock);
276 return NF_DROP;
278 #endif
279 /* jump to a udc */
280 cs[sp].n = i + 1;
281 cs[sp].chaininfo = chaininfo;
282 cs[sp].e = ebt_next_entry(point);
283 i = 0;
284 chaininfo = (struct ebt_entries *) (base + verdict);
285 #ifdef CONFIG_NETFILTER_DEBUG
286 if (chaininfo->distinguisher) {
287 BUGPRINT("jump to non-chain\n");
288 read_unlock_bh(&table->lock);
289 return NF_DROP;
291 #endif
292 nentries = chaininfo->nentries;
293 point = (struct ebt_entry *)chaininfo->data;
294 counter_base = cb_base + chaininfo->counter_offset;
295 sp++;
296 continue;
297 letscontinue:
298 point = ebt_next_entry(point);
299 i++;
302 /* I actually like this :) */
303 if (chaininfo->policy == EBT_RETURN)
304 goto letsreturn;
305 if (chaininfo->policy == EBT_ACCEPT) {
306 read_unlock_bh(&table->lock);
307 return NF_ACCEPT;
309 read_unlock_bh(&table->lock);
310 return NF_DROP;
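/*
 * Illustrative sketch (assumption, not from this file): table modules such
 * as ebtable_filter register netfilter hooks whose bodies reduce to one
 * call into the traverser above, roughly:
 *
 *	static unsigned int
 *	example_hook(unsigned int hook, struct sk_buff *skb,
 *		     const struct net_device *in,
 *		     const struct net_device *out,
 *		     int (*okfn)(struct sk_buff *))
 *	{
 *		return ebt_do_table(hook, skb, in, out, example_table);
 *	}
 *
 * where example_table is a hypothetical struct ebt_table pointer owned by
 * that module; all verdict handling stays in ebt_do_table().
 */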
313 /* If it succeeds, returns element and locks mutex */
314 static inline void *
315 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
316 struct mutex *mutex)
318 struct {
319 struct list_head list;
320 char name[EBT_FUNCTION_MAXNAMELEN];
321 } *e;
323 *error = mutex_lock_interruptible(mutex);
324 if (*error != 0)
325 return NULL;
327 list_for_each_entry(e, head, list) {
328 if (strcmp(e->name, name) == 0)
329 return e;
331 *error = -ENOENT;
332 mutex_unlock(mutex);
333 return NULL;
336 static void *
337 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
338 int *error, struct mutex *mutex)
340 return try_then_request_module(
341 find_inlist_lock_noload(head, name, error, mutex),
342 "%s%s", prefix, name);
345 static inline struct ebt_table *
346 find_table_lock(struct net *net, const char *name, int *error,
347 struct mutex *mutex)
349 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
350 "ebtable_", error, mutex);
353 static inline int
354 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
355 unsigned int *cnt)
357 const struct ebt_entry *e = par->entryinfo;
358 struct xt_match *match;
359 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
360 int ret;
362 if (left < sizeof(struct ebt_entry_match) ||
363 left - sizeof(struct ebt_entry_match) < m->match_size)
364 return -EINVAL;
366 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
367 m->u.name, 0), "ebt_%s", m->u.name);
368 if (IS_ERR(match))
369 return PTR_ERR(match);
370 if (match == NULL)
371 return -ENOENT;
372 m->u.match = match;
374 par->match = match;
375 par->matchinfo = m->data;
376 ret = xt_check_match(par, m->match_size,
377 e->ethproto, e->invflags & EBT_IPROTO);
378 if (ret < 0) {
379 module_put(match->me);
380 return ret;
383 (*cnt)++;
384 return 0;
387 static inline int
388 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
389 unsigned int *cnt)
391 const struct ebt_entry *e = par->entryinfo;
392 struct xt_target *watcher;
393 size_t left = ((char *)e + e->target_offset) - (char *)w;
394 int ret;
396 if (left < sizeof(struct ebt_entry_watcher) ||
397 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
398 return -EINVAL;
400 watcher = try_then_request_module(
401 xt_find_target(NFPROTO_BRIDGE, w->u.name, 0),
402 "ebt_%s", w->u.name);
403 if (IS_ERR(watcher))
404 return PTR_ERR(watcher);
405 if (watcher == NULL)
406 return -ENOENT;
407 w->u.watcher = watcher;
409 par->target = watcher;
410 par->targinfo = w->data;
411 ret = xt_check_target(par, w->watcher_size,
412 e->ethproto, e->invflags & EBT_IPROTO);
413 if (ret < 0) {
414 module_put(watcher->me);
415 return ret;
418 (*cnt)++;
419 return 0;
422 static int ebt_verify_pointers(const struct ebt_replace *repl,
423 struct ebt_table_info *newinfo)
425 unsigned int limit = repl->entries_size;
426 unsigned int valid_hooks = repl->valid_hooks;
427 unsigned int offset = 0;
428 int i;
430 for (i = 0; i < NF_BR_NUMHOOKS; i++)
431 newinfo->hook_entry[i] = NULL;
433 newinfo->entries_size = repl->entries_size;
434 newinfo->nentries = repl->nentries;
436 while (offset < limit) {
437 size_t left = limit - offset;
438 struct ebt_entry *e = (void *)newinfo->entries + offset;
440 if (left < sizeof(unsigned int))
441 break;
443 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
444 if ((valid_hooks & (1 << i)) == 0)
445 continue;
446 if ((char __user *)repl->hook_entry[i] ==
447 repl->entries + offset)
448 break;
451 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
452 if (e->bitmask != 0) {
453 /* we make userspace set this right,
454 so there is no misunderstanding */
455 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
456 "in distinguisher\n");
457 return -EINVAL;
459 if (i != NF_BR_NUMHOOKS)
460 newinfo->hook_entry[i] = (struct ebt_entries *)e;
461 if (left < sizeof(struct ebt_entries))
462 break;
463 offset += sizeof(struct ebt_entries);
464 } else {
465 if (left < sizeof(struct ebt_entry))
466 break;
467 if (left < e->next_offset)
468 break;
469 if (e->next_offset < sizeof(struct ebt_entry))
470 return -EINVAL;
471 offset += e->next_offset;
474 if (offset != limit) {
475 BUGPRINT("entries_size too small\n");
476 return -EINVAL;
479 /* check if all valid hooks have a chain */
480 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
481 if (!newinfo->hook_entry[i] &&
482 (valid_hooks & (1 << i))) {
483 BUGPRINT("Valid hook without chain\n");
484 return -EINVAL;
487 return 0;
491 * this one is very careful, as it is the first function
492 * to parse the userspace data
494 static inline int
495 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
496 const struct ebt_table_info *newinfo,
497 unsigned int *n, unsigned int *cnt,
498 unsigned int *totalcnt, unsigned int *udc_cnt)
500 int i;
502 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
503 if ((void *)e == (void *)newinfo->hook_entry[i])
504 break;
506 /* beginning of a new chain
507 if i == NF_BR_NUMHOOKS it must be a user defined chain */
508 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
509 /* this checks if the previous chain has as many entries
510 as it said it has */
511 if (*n != *cnt) {
512 BUGPRINT("nentries does not equal the nr of entries "
513 "in the chain\n");
514 return -EINVAL;
516 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
517 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
518 /* only RETURN from udc */
519 if (i != NF_BR_NUMHOOKS ||
520 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
521 BUGPRINT("bad policy\n");
522 return -EINVAL;
525 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
526 (*udc_cnt)++;
527 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
528 BUGPRINT("counter_offset != totalcnt");
529 return -EINVAL;
531 *n = ((struct ebt_entries *)e)->nentries;
532 *cnt = 0;
533 return 0;
535 /* a plain old entry, heh */
536 if (sizeof(struct ebt_entry) > e->watchers_offset ||
537 e->watchers_offset > e->target_offset ||
538 e->target_offset >= e->next_offset) {
539 BUGPRINT("entry offsets not in right order\n");
540 return -EINVAL;
542 /* this is not checked anywhere else */
543 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
544 BUGPRINT("target size too small\n");
545 return -EINVAL;
547 (*cnt)++;
548 (*totalcnt)++;
549 return 0;
552 struct ebt_cl_stack
554 struct ebt_chainstack cs;
555 int from;
556 unsigned int hookmask;
560 * we need these positions to check that a jump to a different part of the
561 * entries is a jump to the beginning of a new chain.
563 static inline int
564 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
565 unsigned int *n, struct ebt_cl_stack *udc)
567 int i;
569 /* we're only interested in chain starts */
570 if (e->bitmask)
571 return 0;
572 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
573 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
574 break;
576 /* only care about udc */
577 if (i != NF_BR_NUMHOOKS)
578 return 0;
580 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
581 /* these initialisations are depended on later in check_chainloops() */
582 udc[*n].cs.n = 0;
583 udc[*n].hookmask = 0;
585 (*n)++;
586 return 0;
589 static inline int
590 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
592 struct xt_mtdtor_param par;
594 if (i && (*i)-- == 0)
595 return 1;
597 par.net = net;
598 par.match = m->u.match;
599 par.matchinfo = m->data;
600 par.family = NFPROTO_BRIDGE;
601 if (par.match->destroy != NULL)
602 par.match->destroy(&par);
603 module_put(par.match->me);
604 return 0;
607 static inline int
608 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
610 struct xt_tgdtor_param par;
612 if (i && (*i)-- == 0)
613 return 1;
615 par.net = net;
616 par.target = w->u.watcher;
617 par.targinfo = w->data;
618 par.family = NFPROTO_BRIDGE;
619 if (par.target->destroy != NULL)
620 par.target->destroy(&par);
621 module_put(par.target->me);
622 return 0;
625 static inline int
626 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
628 struct xt_tgdtor_param par;
629 struct ebt_entry_target *t;
631 if (e->bitmask == 0)
632 return 0;
633 /* we're done */
634 if (cnt && (*cnt)-- == 0)
635 return 1;
636 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
637 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
638 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
640 par.net = net;
641 par.target = t->u.target;
642 par.targinfo = t->data;
643 par.family = NFPROTO_BRIDGE;
644 if (par.target->destroy != NULL)
645 par.target->destroy(&par);
646 module_put(par.target->me);
647 return 0;
650 static inline int
651 ebt_check_entry(struct ebt_entry *e, struct net *net,
652 const struct ebt_table_info *newinfo,
653 const char *name, unsigned int *cnt,
654 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
656 struct ebt_entry_target *t;
657 struct xt_target *target;
658 unsigned int i, j, hook = 0, hookmask = 0;
659 size_t gap;
660 int ret;
661 struct xt_mtchk_param mtpar;
662 struct xt_tgchk_param tgpar;
664 /* don't mess with the struct ebt_entries */
665 if (e->bitmask == 0)
666 return 0;
668 if (e->bitmask & ~EBT_F_MASK) {
669 BUGPRINT("Unknown flag for bitmask\n");
670 return -EINVAL;
672 if (e->invflags & ~EBT_INV_MASK) {
673 BUGPRINT("Unknown flag for inv bitmask\n");
674 return -EINVAL;
676 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
677 BUGPRINT("NOPROTO & 802_3 not allowed\n");
678 return -EINVAL;
680 /* what hook do we belong to? */
681 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
682 if (!newinfo->hook_entry[i])
683 continue;
684 if ((char *)newinfo->hook_entry[i] < (char *)e)
685 hook = i;
686 else
687 break;
689 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
690 a base chain */
691 if (i < NF_BR_NUMHOOKS)
692 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
693 else {
694 for (i = 0; i < udc_cnt; i++)
695 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
696 break;
697 if (i == 0)
698 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
699 else
700 hookmask = cl_s[i - 1].hookmask;
702 i = 0;
704 mtpar.net = tgpar.net = net;
705 mtpar.table = tgpar.table = name;
706 mtpar.entryinfo = tgpar.entryinfo = e;
707 mtpar.hook_mask = tgpar.hook_mask = hookmask;
708 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
709 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
710 if (ret != 0)
711 goto cleanup_matches;
712 j = 0;
713 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
714 if (ret != 0)
715 goto cleanup_watchers;
716 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
717 gap = e->next_offset - e->target_offset;
719 target = try_then_request_module(
720 xt_find_target(NFPROTO_BRIDGE, t->u.name, 0),
721 "ebt_%s", t->u.name);
722 if (IS_ERR(target)) {
723 ret = PTR_ERR(target);
724 goto cleanup_watchers;
725 } else if (target == NULL) {
726 ret = -ENOENT;
727 goto cleanup_watchers;
730 t->u.target = target;
731 if (t->u.target == &ebt_standard_target) {
732 if (gap < sizeof(struct ebt_standard_target)) {
733 BUGPRINT("Standard target size too big\n");
734 ret = -EFAULT;
735 goto cleanup_watchers;
737 if (((struct ebt_standard_target *)t)->verdict <
738 -NUM_STANDARD_TARGETS) {
739 BUGPRINT("Invalid standard target\n");
740 ret = -EFAULT;
741 goto cleanup_watchers;
743 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
744 module_put(t->u.target->me);
745 ret = -EFAULT;
746 goto cleanup_watchers;
749 tgpar.target = target;
750 tgpar.targinfo = t->data;
751 ret = xt_check_target(&tgpar, t->target_size,
752 e->ethproto, e->invflags & EBT_IPROTO);
753 if (ret < 0) {
754 module_put(target->me);
755 goto cleanup_watchers;
757 (*cnt)++;
758 return 0;
759 cleanup_watchers:
760 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
761 cleanup_matches:
762 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
763 return ret;
767 * checks for loops and sets the hook mask for udc
768 * the hook mask for udc tells us from which base chains the udc can be
769 * accessed. This mask is a parameter to the check() functions of the extensions
771 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
772 unsigned int udc_cnt, unsigned int hooknr, char *base)
774 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
775 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
776 const struct ebt_entry_target *t;
778 while (pos < nentries || chain_nr != -1) {
779 /* end of udc, go back one 'recursion' step */
780 if (pos == nentries) {
781 /* put back values of the time when this chain was called */
782 e = cl_s[chain_nr].cs.e;
783 if (cl_s[chain_nr].from != -1)
784 nentries =
785 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
786 else
787 nentries = chain->nentries;
788 pos = cl_s[chain_nr].cs.n;
789 /* make sure we won't see a loop that isn't one */
790 cl_s[chain_nr].cs.n = 0;
791 chain_nr = cl_s[chain_nr].from;
792 if (pos == nentries)
793 continue;
795 t = (struct ebt_entry_target *)
796 (((char *)e) + e->target_offset);
797 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
798 goto letscontinue;
799 if (e->target_offset + sizeof(struct ebt_standard_target) >
800 e->next_offset) {
801 BUGPRINT("Standard target size too big\n");
802 return -1;
804 verdict = ((struct ebt_standard_target *)t)->verdict;
805 if (verdict >= 0) { /* jump to another chain */
806 struct ebt_entries *hlp2 =
807 (struct ebt_entries *)(base + verdict);
808 for (i = 0; i < udc_cnt; i++)
809 if (hlp2 == cl_s[i].cs.chaininfo)
810 break;
811 /* bad destination or loop */
812 if (i == udc_cnt) {
813 BUGPRINT("bad destination\n");
814 return -1;
816 if (cl_s[i].cs.n) {
817 BUGPRINT("loop\n");
818 return -1;
820 if (cl_s[i].hookmask & (1 << hooknr))
821 goto letscontinue;
822 /* this can't be 0, so the loop test is correct */
823 cl_s[i].cs.n = pos + 1;
824 pos = 0;
825 cl_s[i].cs.e = ebt_next_entry(e);
826 e = (struct ebt_entry *)(hlp2->data);
827 nentries = hlp2->nentries;
828 cl_s[i].from = chain_nr;
829 chain_nr = i;
830 /* this udc is accessible from the base chain for hooknr */
831 cl_s[i].hookmask |= (1 << hooknr);
832 continue;
834 letscontinue:
835 e = ebt_next_entry(e);
836 pos++;
838 return 0;
841 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
842 static int translate_table(struct net *net, const char *name,
843 struct ebt_table_info *newinfo)
845 unsigned int i, j, k, udc_cnt;
846 int ret;
847 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
849 i = 0;
850 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
851 i++;
852 if (i == NF_BR_NUMHOOKS) {
853 BUGPRINT("No valid hooks specified\n");
854 return -EINVAL;
856 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
857 BUGPRINT("Chains don't start at beginning\n");
858 return -EINVAL;
860 /* make sure chains are ordered after each other in same order
861 as their corresponding hooks */
862 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
863 if (!newinfo->hook_entry[j])
864 continue;
865 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
866 BUGPRINT("Hook order must be followed\n");
867 return -EINVAL;
869 i = j;
872 /* do some early checkings and initialize some things */
873 i = 0; /* holds the expected nr. of entries for the chain */
874 j = 0; /* holds the up to now counted entries for the chain */
875 k = 0; /* holds the total nr. of entries, should equal
876 newinfo->nentries afterwards */
877 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
878 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
879 ebt_check_entry_size_and_hooks, newinfo,
880 &i, &j, &k, &udc_cnt);
882 if (ret != 0)
883 return ret;
885 if (i != j) {
886 BUGPRINT("nentries does not equal the nr of entries in the "
887 "(last) chain\n");
888 return -EINVAL;
890 if (k != newinfo->nentries) {
891 BUGPRINT("Total nentries is wrong\n");
892 return -EINVAL;
895 /* get the location of the udc, put them in an array
896 while we're at it, allocate the chainstack */
897 if (udc_cnt) {
898 /* this will get free'd in do_replace()/ebt_register_table()
899 if an error occurs */
900 newinfo->chainstack =
901 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
902 if (!newinfo->chainstack)
903 return -ENOMEM;
904 for_each_possible_cpu(i) {
905 newinfo->chainstack[i] =
906 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
907 if (!newinfo->chainstack[i]) {
908 while (i)
909 vfree(newinfo->chainstack[--i]);
910 vfree(newinfo->chainstack);
911 newinfo->chainstack = NULL;
912 return -ENOMEM;
916 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
917 if (!cl_s)
918 return -ENOMEM;
919 i = 0; /* the i'th udc */
920 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
921 ebt_get_udc_positions, newinfo, &i, cl_s);
922 /* sanity check */
923 if (i != udc_cnt) {
924 BUGPRINT("i != udc_cnt\n");
925 vfree(cl_s);
926 return -EFAULT;
930 /* Check for loops */
931 for (i = 0; i < NF_BR_NUMHOOKS; i++)
932 if (newinfo->hook_entry[i])
933 if (check_chainloops(newinfo->hook_entry[i],
934 cl_s, udc_cnt, i, newinfo->entries)) {
935 vfree(cl_s);
936 return -EINVAL;
939 /* we now know the following (along with E=mc²):
940 - the nr of entries in each chain is right
941 - the size of the allocated space is right
942 - all valid hooks have a corresponding chain
943 - there are no loops
944 - wrong data can still be on the level of a single entry
945 - could be there are jumps to places that are not the
946 beginning of a chain. This can only occur in chains that
947 are not accessible from any base chains, so we don't care. */
949 /* used to know what we need to clean up if something goes wrong */
950 i = 0;
951 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
952 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
953 if (ret != 0) {
954 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
955 ebt_cleanup_entry, net, &i);
957 vfree(cl_s);
958 return ret;
961 /* called under write_lock */
962 static void get_counters(const struct ebt_counter *oldcounters,
963 struct ebt_counter *counters, unsigned int nentries)
965 int i, cpu;
966 struct ebt_counter *counter_base;
968 /* counters of cpu 0 */
969 memcpy(counters, oldcounters,
970 sizeof(struct ebt_counter) * nentries);
972 /* add other counters to those of cpu 0 */
973 for_each_possible_cpu(cpu) {
974 if (cpu == 0)
975 continue;
976 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
977 for (i = 0; i < nentries; i++) {
978 counters[i].pcnt += counter_base[i].pcnt;
979 counters[i].bcnt += counter_base[i].bcnt;
984 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
985 struct ebt_table_info *newinfo)
987 int ret, i;
988 struct ebt_counter *counterstmp = NULL;
989 /* used to be able to unlock earlier */
990 struct ebt_table_info *table;
991 struct ebt_table *t;
993 /* the user wants counters back
994 the check on the size is done later, when we have the lock */
995 if (repl->num_counters) {
996 unsigned long size = repl->num_counters * sizeof(*counterstmp);
997 counterstmp = vmalloc(size);
998 if (!counterstmp)
999 return -ENOMEM;
1002 newinfo->chainstack = NULL;
1003 ret = ebt_verify_pointers(repl, newinfo);
1004 if (ret != 0)
1005 goto free_counterstmp;
1007 ret = translate_table(net, repl->name, newinfo);
1009 if (ret != 0)
1010 goto free_counterstmp;
1012 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1013 if (!t) {
1014 ret = -ENOENT;
1015 goto free_iterate;
1018 /* the table doesn't like it */
1019 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1020 goto free_unlock;
1022 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1023 BUGPRINT("Wrong nr. of counters requested\n");
1024 ret = -EINVAL;
1025 goto free_unlock;
1028 /* we have the mutex lock, so no danger in reading this pointer */
1029 table = t->private;
1030 /* make sure the table can only be rmmod'ed if it contains no rules */
1031 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1032 ret = -ENOENT;
1033 goto free_unlock;
1034 } else if (table->nentries && !newinfo->nentries)
1035 module_put(t->me);
1036 /* we need an atomic snapshot of the counters */
1037 write_lock_bh(&t->lock);
1038 if (repl->num_counters)
1039 get_counters(t->private->counters, counterstmp,
1040 t->private->nentries);
1042 t->private = newinfo;
1043 write_unlock_bh(&t->lock);
1044 mutex_unlock(&ebt_mutex);
1045 /* so, a user can change the chains while having messed up her counter
1046 allocation. Only reason why this is done is because this way the lock
1047 is held only once, while this doesn't bring the kernel into a
1048 dangerous state. */
1049 if (repl->num_counters &&
1050 copy_to_user(repl->counters, counterstmp,
1051 repl->num_counters * sizeof(struct ebt_counter))) {
1052 ret = -EFAULT;
1054 else
1055 ret = 0;
1057 /* decrease module count and free resources */
1058 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1059 ebt_cleanup_entry, net, NULL);
1061 vfree(table->entries);
1062 if (table->chainstack) {
1063 for_each_possible_cpu(i)
1064 vfree(table->chainstack[i]);
1065 vfree(table->chainstack);
1067 vfree(table);
1069 vfree(counterstmp);
1070 return ret;
1072 free_unlock:
1073 mutex_unlock(&ebt_mutex);
1074 free_iterate:
1075 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1076 ebt_cleanup_entry, net, NULL);
1077 free_counterstmp:
1078 vfree(counterstmp);
1079 /* can be initialized in translate_table() */
1080 if (newinfo->chainstack) {
1081 for_each_possible_cpu(i)
1082 vfree(newinfo->chainstack[i]);
1083 vfree(newinfo->chainstack);
1085 return ret;
1088 /* replace the table */
1089 static int do_replace(struct net *net, const void __user *user,
1090 unsigned int len)
1092 int ret, countersize;
1093 struct ebt_table_info *newinfo;
1094 struct ebt_replace tmp;
1096 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1097 return -EFAULT;
1099 if (len != sizeof(tmp) + tmp.entries_size) {
1100 BUGPRINT("Wrong len argument\n");
1101 return -EINVAL;
1104 if (tmp.entries_size == 0) {
1105 BUGPRINT("Entries_size never zero\n");
1106 return -EINVAL;
1108 /* overflow check */
1109 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1110 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1111 return -ENOMEM;
1112 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1113 return -ENOMEM;
1115 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1116 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1117 if (!newinfo)
1118 return -ENOMEM;
1120 if (countersize)
1121 memset(newinfo->counters, 0, countersize);
1123 newinfo->entries = vmalloc(tmp.entries_size);
1124 if (!newinfo->entries) {
1125 ret = -ENOMEM;
1126 goto free_newinfo;
1128 if (copy_from_user(
1129 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1130 BUGPRINT("Couldn't copy entries from userspace\n");
1131 ret = -EFAULT;
1132 goto free_entries;
1135 ret = do_replace_finish(net, &tmp, newinfo);
1136 if (ret == 0)
1137 return ret;
1138 free_entries:
1139 vfree(newinfo->entries);
1140 free_newinfo:
1141 vfree(newinfo);
1142 return ret;
1145 struct ebt_table *
1146 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1148 struct ebt_table_info *newinfo;
1149 struct ebt_table *t, *table;
1150 struct ebt_replace_kernel *repl;
1151 int ret, i, countersize;
1152 void *p;
1154 if (input_table == NULL || (repl = input_table->table) == NULL ||
1155 repl->entries == 0 || repl->entries_size == 0 ||
1156 repl->counters != NULL || input_table->private != NULL) {
1157 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1158 return ERR_PTR(-EINVAL);
1161 /* Don't add one table to multiple lists. */
1162 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1163 if (!table) {
1164 ret = -ENOMEM;
1165 goto out;
1168 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1169 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1170 ret = -ENOMEM;
1171 if (!newinfo)
1172 goto free_table;
1174 p = vmalloc(repl->entries_size);
1175 if (!p)
1176 goto free_newinfo;
1178 memcpy(p, repl->entries, repl->entries_size);
1179 newinfo->entries = p;
1181 newinfo->entries_size = repl->entries_size;
1182 newinfo->nentries = repl->nentries;
1184 if (countersize)
1185 memset(newinfo->counters, 0, countersize);
1187 /* fill in newinfo and parse the entries */
1188 newinfo->chainstack = NULL;
1189 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1190 if ((repl->valid_hooks & (1 << i)) == 0)
1191 newinfo->hook_entry[i] = NULL;
1192 else
1193 newinfo->hook_entry[i] = p +
1194 ((char *)repl->hook_entry[i] - repl->entries);
1196 ret = translate_table(net, repl->name, newinfo);
1197 if (ret != 0) {
1198 BUGPRINT("Translate_table failed\n");
1199 goto free_chainstack;
1202 if (table->check && table->check(newinfo, table->valid_hooks)) {
1203 BUGPRINT("The table doesn't like its own initial data, lol\n");
1204 return ERR_PTR(-EINVAL);
1207 table->private = newinfo;
1208 rwlock_init(&table->lock);
1209 ret = mutex_lock_interruptible(&ebt_mutex);
1210 if (ret != 0)
1211 goto free_chainstack;
1213 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1214 if (strcmp(t->name, table->name) == 0) {
1215 ret = -EEXIST;
1216 BUGPRINT("Table name already exists\n");
1217 goto free_unlock;
1221 /* Hold a reference count if the chains aren't empty */
1222 if (newinfo->nentries && !try_module_get(table->me)) {
1223 ret = -ENOENT;
1224 goto free_unlock;
1226 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1227 mutex_unlock(&ebt_mutex);
1228 return table;
1229 free_unlock:
1230 mutex_unlock(&ebt_mutex);
1231 free_chainstack:
1232 if (newinfo->chainstack) {
1233 for_each_possible_cpu(i)
1234 vfree(newinfo->chainstack[i]);
1235 vfree(newinfo->chainstack);
1237 vfree(newinfo->entries);
1238 free_newinfo:
1239 vfree(newinfo);
1240 free_table:
1241 kfree(table);
1242 out:
1243 return ERR_PTR(ret);
1246 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1248 int i;
1250 if (!table) {
1251 BUGPRINT("Request to unregister NULL table!!!\n");
1252 return;
1254 mutex_lock(&ebt_mutex);
1255 list_del(&table->list);
1256 mutex_unlock(&ebt_mutex);
1257 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1258 ebt_cleanup_entry, net, NULL);
1259 if (table->private->nentries)
1260 module_put(table->me);
1261 vfree(table->private->entries);
1262 if (table->private->chainstack) {
1263 for_each_possible_cpu(i)
1264 vfree(table->private->chainstack[i]);
1265 vfree(table->private->chainstack);
1267 vfree(table->private);
1268 kfree(table);
1271 /* userspace just supplied us with counters */
1272 static int do_update_counters(struct net *net, const char *name,
1273 struct ebt_counter __user *counters,
1274 unsigned int num_counters,
1275 const void __user *user, unsigned int len)
1277 int i, ret;
1278 struct ebt_counter *tmp;
1279 struct ebt_table *t;
1281 if (num_counters == 0)
1282 return -EINVAL;
1284 tmp = vmalloc(num_counters * sizeof(*tmp));
1285 if (!tmp)
1286 return -ENOMEM;
1288 t = find_table_lock(net, name, &ret, &ebt_mutex);
1289 if (!t)
1290 goto free_tmp;
1292 if (num_counters != t->private->nentries) {
1293 BUGPRINT("Wrong nr of counters\n");
1294 ret = -EINVAL;
1295 goto unlock_mutex;
1298 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1299 ret = -EFAULT;
1300 goto unlock_mutex;
1303 /* we want an atomic add of the counters */
1304 write_lock_bh(&t->lock);
1306 /* we add to the counters of the first cpu */
1307 for (i = 0; i < num_counters; i++) {
1308 t->private->counters[i].pcnt += tmp[i].pcnt;
1309 t->private->counters[i].bcnt += tmp[i].bcnt;
1312 write_unlock_bh(&t->lock);
1313 ret = 0;
1314 unlock_mutex:
1315 mutex_unlock(&ebt_mutex);
1316 free_tmp:
1317 vfree(tmp);
1318 return ret;
1321 static int update_counters(struct net *net, const void __user *user,
1322 unsigned int len)
1324 struct ebt_replace hlp;
1326 if (copy_from_user(&hlp, user, sizeof(hlp)))
1327 return -EFAULT;
1329 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1330 return -EINVAL;
1332 return do_update_counters(net, hlp.name, hlp.counters,
1333 hlp.num_counters, user, len);
1336 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1337 const char *base, char __user *ubase)
1339 char __user *hlp = ubase + ((char *)m - base);
1340 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1341 return -EFAULT;
1342 return 0;
1345 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1346 const char *base, char __user *ubase)
1348 char __user *hlp = ubase + ((char *)w - base);
1349 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1350 return -EFAULT;
1351 return 0;
1354 static inline int
1355 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1357 int ret;
1358 char __user *hlp;
1359 const struct ebt_entry_target *t;
1361 if (e->bitmask == 0)
1362 return 0;
1364 hlp = ubase + (((char *)e + e->target_offset) - base);
1365 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1367 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1368 if (ret != 0)
1369 return ret;
1370 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1371 if (ret != 0)
1372 return ret;
1373 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1374 return -EFAULT;
1375 return 0;
1378 static int copy_counters_to_user(struct ebt_table *t,
1379 const struct ebt_counter *oldcounters,
1380 void __user *user, unsigned int num_counters,
1381 unsigned int nentries)
1383 struct ebt_counter *counterstmp;
1384 int ret = 0;
1386 /* userspace might not need the counters */
1387 if (num_counters == 0)
1388 return 0;
1390 if (num_counters != nentries) {
1391 BUGPRINT("Num_counters wrong\n");
1392 return -EINVAL;
1395 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1396 if (!counterstmp)
1397 return -ENOMEM;
1399 write_lock_bh(&t->lock);
1400 get_counters(oldcounters, counterstmp, nentries);
1401 write_unlock_bh(&t->lock);
1403 if (copy_to_user(user, counterstmp,
1404 nentries * sizeof(struct ebt_counter)))
1405 ret = -EFAULT;
1406 vfree(counterstmp);
1407 return ret;
1410 /* called with ebt_mutex locked */
1411 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1412 const int *len, int cmd)
1414 struct ebt_replace tmp;
1415 const struct ebt_counter *oldcounters;
1416 unsigned int entries_size, nentries;
1417 int ret;
1418 char *entries;
1420 if (cmd == EBT_SO_GET_ENTRIES) {
1421 entries_size = t->private->entries_size;
1422 nentries = t->private->nentries;
1423 entries = t->private->entries;
1424 oldcounters = t->private->counters;
1425 } else {
1426 entries_size = t->table->entries_size;
1427 nentries = t->table->nentries;
1428 entries = t->table->entries;
1429 oldcounters = t->table->counters;
1432 if (copy_from_user(&tmp, user, sizeof(tmp)))
1433 return -EFAULT;
1435 if (*len != sizeof(struct ebt_replace) + entries_size +
1436 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1437 return -EINVAL;
1439 if (tmp.nentries != nentries) {
1440 BUGPRINT("Nentries wrong\n");
1441 return -EINVAL;
1444 if (tmp.entries_size != entries_size) {
1445 BUGPRINT("Wrong size\n");
1446 return -EINVAL;
1449 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1450 tmp.num_counters, nentries);
1451 if (ret)
1452 return ret;
1454 if (copy_to_user(tmp.entries, entries, entries_size)) {
1455 BUGPRINT("Couldn't copy entries to userspace\n");
1456 return -EFAULT;
1458 /* set the match/watcher/target names right */
1459 return EBT_ENTRY_ITERATE(entries, entries_size,
1460 ebt_make_names, entries, tmp.entries);
1463 static int do_ebt_set_ctl(struct sock *sk,
1464 int cmd, void __user *user, unsigned int len)
1466 int ret;
1468 if (!capable(CAP_NET_ADMIN))
1469 return -EPERM;
1471 switch(cmd) {
1472 case EBT_SO_SET_ENTRIES:
1473 ret = do_replace(sock_net(sk), user, len);
1474 break;
1475 case EBT_SO_SET_COUNTERS:
1476 ret = update_counters(sock_net(sk), user, len);
1477 break;
1478 default:
1479 ret = -EINVAL;
1481 return ret;
1484 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1486 int ret;
1487 struct ebt_replace tmp;
1488 struct ebt_table *t;
1490 if (!capable(CAP_NET_ADMIN))
1491 return -EPERM;
1493 if (copy_from_user(&tmp, user, sizeof(tmp)))
1494 return -EFAULT;
1496 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1497 if (!t)
1498 return ret;
1500 switch(cmd) {
1501 case EBT_SO_GET_INFO:
1502 case EBT_SO_GET_INIT_INFO:
1503 if (*len != sizeof(struct ebt_replace)){
1504 ret = -EINVAL;
1505 mutex_unlock(&ebt_mutex);
1506 break;
1508 if (cmd == EBT_SO_GET_INFO) {
1509 tmp.nentries = t->private->nentries;
1510 tmp.entries_size = t->private->entries_size;
1511 tmp.valid_hooks = t->valid_hooks;
1512 } else {
1513 tmp.nentries = t->table->nentries;
1514 tmp.entries_size = t->table->entries_size;
1515 tmp.valid_hooks = t->table->valid_hooks;
1517 mutex_unlock(&ebt_mutex);
1518 if (copy_to_user(user, &tmp, *len) != 0){
1519 BUGPRINT("c2u Didn't work\n");
1520 ret = -EFAULT;
1521 break;
1523 ret = 0;
1524 break;
1526 case EBT_SO_GET_ENTRIES:
1527 case EBT_SO_GET_INIT_ENTRIES:
1528 ret = copy_everything_to_user(t, user, len, cmd);
1529 mutex_unlock(&ebt_mutex);
1530 break;
1532 default:
1533 mutex_unlock(&ebt_mutex);
1534 ret = -EINVAL;
1537 return ret;
1540 #ifdef CONFIG_COMPAT
1541 /* 32 bit-userspace compatibility definitions. */
1542 struct compat_ebt_replace {
1543 char name[EBT_TABLE_MAXNAMELEN];
1544 compat_uint_t valid_hooks;
1545 compat_uint_t nentries;
1546 compat_uint_t entries_size;
1547 /* start of the chains */
1548 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1549 /* nr of counters userspace expects back */
1550 compat_uint_t num_counters;
1551 /* where the kernel will put the old counters. */
1552 compat_uptr_t counters;
1553 compat_uptr_t entries;
1556 /* struct ebt_entry_match, _target and _watcher have same layout */
1557 struct compat_ebt_entry_mwt {
1558 union {
1559 char name[EBT_FUNCTION_MAXNAMELEN];
1560 compat_uptr_t ptr;
1561 } u;
1562 compat_uint_t match_size;
1563 compat_uint_t data[0];
1566 /* account for possible padding between match_size and ->data */
1567 static int ebt_compat_entry_padsize(void)
1569 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1570 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1571 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1572 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
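/*
 * Worked example (assumption, not in the original file): on x86-64 the
 * native struct ebt_entry_match aligns its data[] member for 64-bit
 * users, so XT_ALIGN(sizeof(...)) comes to 40 bytes, while the 32-bit
 * compat_ebt_entry_mwt layout above stays at 36; the 4-byte difference
 * returned here is the padding that has to be accounted for in front of
 * ->data when translating a 32-bit entry.
 */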
1575 static int ebt_compat_match_offset(const struct xt_match *match,
1576 unsigned int userlen)
1579 * ebt_among needs special handling. The kernel .matchsize is
1580 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1581 * value is expected.
1582 * Example: userspace sends 4500, ebt_among.c wants 4504.
1584 if (unlikely(match->matchsize == -1))
1585 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1586 return xt_compat_match_offset(match);
1589 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1590 unsigned int *size)
1592 const struct xt_match *match = m->u.match;
1593 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1594 int off = ebt_compat_match_offset(match, m->match_size);
1595 compat_uint_t msize = m->match_size - off;
1597 BUG_ON(off >= m->match_size);
1599 if (copy_to_user(cm->u.name, match->name,
1600 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1601 return -EFAULT;
1603 if (match->compat_to_user) {
1604 if (match->compat_to_user(cm->data, m->data))
1605 return -EFAULT;
1606 } else if (copy_to_user(cm->data, m->data, msize))
1607 return -EFAULT;
1609 *size -= ebt_compat_entry_padsize() + off;
1610 *dstptr = cm->data;
1611 *dstptr += msize;
1612 return 0;
1615 static int compat_target_to_user(struct ebt_entry_target *t,
1616 void __user **dstptr,
1617 unsigned int *size)
1619 const struct xt_target *target = t->u.target;
1620 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1621 int off = xt_compat_target_offset(target);
1622 compat_uint_t tsize = t->target_size - off;
1624 BUG_ON(off >= t->target_size);
1626 if (copy_to_user(cm->u.name, target->name,
1627 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1628 return -EFAULT;
1630 if (target->compat_to_user) {
1631 if (target->compat_to_user(cm->data, t->data))
1632 return -EFAULT;
1633 } else if (copy_to_user(cm->data, t->data, tsize))
1634 return -EFAULT;
1636 *size -= ebt_compat_entry_padsize() + off;
1637 *dstptr = cm->data;
1638 *dstptr += tsize;
1639 return 0;
1642 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1643 void __user **dstptr,
1644 unsigned int *size)
1646 return compat_target_to_user((struct ebt_entry_target *)w,
1647 dstptr, size);
1650 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1651 unsigned int *size)
1653 struct ebt_entry_target *t;
1654 struct ebt_entry __user *ce;
1655 u32 watchers_offset, target_offset, next_offset;
1656 compat_uint_t origsize;
1657 int ret;
1659 if (e->bitmask == 0) {
1660 if (*size < sizeof(struct ebt_entries))
1661 return -EINVAL;
1662 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1663 return -EFAULT;
1665 *dstptr += sizeof(struct ebt_entries);
1666 *size -= sizeof(struct ebt_entries);
1667 return 0;
1670 if (*size < sizeof(*ce))
1671 return -EINVAL;
1673 ce = (struct ebt_entry __user *)*dstptr;
1674 if (copy_to_user(ce, e, sizeof(*ce)))
1675 return -EFAULT;
1677 origsize = *size;
1678 *dstptr += sizeof(*ce);
1680 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1681 if (ret)
1682 return ret;
1683 watchers_offset = e->watchers_offset - (origsize - *size);
1685 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1686 if (ret)
1687 return ret;
1688 target_offset = e->target_offset - (origsize - *size);
1690 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1692 ret = compat_target_to_user(t, dstptr, size);
1693 if (ret)
1694 return ret;
1695 next_offset = e->next_offset - (origsize - *size);
1697 if (put_user(watchers_offset, &ce->watchers_offset) ||
1698 put_user(target_offset, &ce->target_offset) ||
1699 put_user(next_offset, &ce->next_offset))
1700 return -EFAULT;
1702 *size -= sizeof(*ce);
1703 return 0;
1706 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1708 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1709 *off += ebt_compat_entry_padsize();
1710 return 0;
1713 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1715 *off += xt_compat_target_offset(w->u.watcher);
1716 *off += ebt_compat_entry_padsize();
1717 return 0;
1720 static int compat_calc_entry(const struct ebt_entry *e,
1721 const struct ebt_table_info *info,
1722 const void *base,
1723 struct compat_ebt_replace *newinfo)
1725 const struct ebt_entry_target *t;
1726 unsigned int entry_offset;
1727 int off, ret, i;
1729 if (e->bitmask == 0)
1730 return 0;
1732 off = 0;
1733 entry_offset = (void *)e - base;
1735 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1736 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1738 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1740 off += xt_compat_target_offset(t->u.target);
1741 off += ebt_compat_entry_padsize();
1743 newinfo->entries_size -= off;
1745 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1746 if (ret)
1747 return ret;
1749 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1750 const void *hookptr = info->hook_entry[i];
1751 if (info->hook_entry[i] &&
1752 (e < (struct ebt_entry *)(base - hookptr))) {
1753 newinfo->hook_entry[i] -= off;
1754 pr_debug("0x%08X -> 0x%08X\n",
1755 newinfo->hook_entry[i] + off,
1756 newinfo->hook_entry[i]);
1760 return 0;
1764 static int compat_table_info(const struct ebt_table_info *info,
1765 struct compat_ebt_replace *newinfo)
1767 unsigned int size = info->entries_size;
1768 const void *entries = info->entries;
1770 newinfo->entries_size = size;
1772 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1773 entries, newinfo);
1776 static int compat_copy_everything_to_user(struct ebt_table *t,
1777 void __user *user, int *len, int cmd)
1779 struct compat_ebt_replace repl, tmp;
1780 struct ebt_counter *oldcounters;
1781 struct ebt_table_info tinfo;
1782 int ret;
1783 void __user *pos;
1785 memset(&tinfo, 0, sizeof(tinfo));
1787 if (cmd == EBT_SO_GET_ENTRIES) {
1788 tinfo.entries_size = t->private->entries_size;
1789 tinfo.nentries = t->private->nentries;
1790 tinfo.entries = t->private->entries;
1791 oldcounters = t->private->counters;
1792 } else {
1793 tinfo.entries_size = t->table->entries_size;
1794 tinfo.nentries = t->table->nentries;
1795 tinfo.entries = t->table->entries;
1796 oldcounters = t->table->counters;
1799 if (copy_from_user(&tmp, user, sizeof(tmp)))
1800 return -EFAULT;
1802 if (tmp.nentries != tinfo.nentries ||
1803 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1804 return -EINVAL;
1806 memcpy(&repl, &tmp, sizeof(repl));
1807 if (cmd == EBT_SO_GET_ENTRIES)
1808 ret = compat_table_info(t->private, &repl);
1809 else
1810 ret = compat_table_info(&tinfo, &repl);
1811 if (ret)
1812 return ret;
1814 if (*len != sizeof(tmp) + repl.entries_size +
1815 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1816 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1817 *len, tinfo.entries_size, repl.entries_size);
1818 return -EINVAL;
1821 /* userspace might not need the counters */
1822 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1823 tmp.num_counters, tinfo.nentries);
1824 if (ret)
1825 return ret;
1827 pos = compat_ptr(tmp.entries);
1828 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1829 compat_copy_entry_to_user, &pos, &tmp.entries_size);
1832 struct ebt_entries_buf_state {
1833 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1834 u32 buf_kern_len; /* total size of kernel buffer */
1835 u32 buf_kern_offset; /* amount of data copied so far */
1836 u32 buf_user_offset; /* read position in userspace buffer */
1839 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1841 state->buf_kern_offset += sz;
1842 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1845 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1846 void *data, unsigned int sz)
1848 if (state->buf_kern_start == NULL)
1849 goto count_only;
1851 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1853 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1855 count_only:
1856 state->buf_user_offset += sz;
1857 return ebt_buf_count(state, sz);
1860 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1862 char *b = state->buf_kern_start;
1864 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1866 if (b != NULL && sz > 0)
1867 memset(b + state->buf_kern_offset, 0, sz);
1868 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1869 return ebt_buf_count(state, sz);
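/*
 * Illustrative note (assumption, not in the original file): these helpers
 * let the same translation code run twice.  On a first, sizing pass
 * buf_kern_start is NULL, ebt_buf_add() and ebt_buf_add_pad() only advance
 * the offsets, and the caller learns how large the native image must be;
 * on the second pass a real buffer is supplied and the data is copied.  A
 * hypothetical sizing pass would look roughly like:
 *
 *	struct ebt_entries_buf_state state;
 *
 *	memset(&state, 0, sizeof(state));
 *	size = compat_copy_entries(entries32, size32, &state);
 *
 * with state.buf_kern_start still NULL so nothing gets written yet.
 */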
1872 enum compat_mwt {
1873 EBT_COMPAT_MATCH,
1874 EBT_COMPAT_WATCHER,
1875 EBT_COMPAT_TARGET,
1878 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1879 enum compat_mwt compat_mwt,
1880 struct ebt_entries_buf_state *state,
1881 const unsigned char *base)
1883 char name[EBT_FUNCTION_MAXNAMELEN];
1884 struct xt_match *match;
1885 struct xt_target *wt;
1886 void *dst = NULL;
1887 int off, pad = 0, ret = 0;
1888 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1890 strlcpy(name, mwt->u.name, sizeof(name));
1892 if (state->buf_kern_start)
1893 dst = state->buf_kern_start + state->buf_kern_offset;
1895 entry_offset = (unsigned char *) mwt - base;
1896 switch (compat_mwt) {
1897 case EBT_COMPAT_MATCH:
1898 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1899 name, 0), "ebt_%s", name);
1900 if (match == NULL)
1901 return -ENOENT;
1902 if (IS_ERR(match))
1903 return PTR_ERR(match);
1905 off = ebt_compat_match_offset(match, match_size);
1906 if (dst) {
1907 if (match->compat_from_user)
1908 match->compat_from_user(dst, mwt->data);
1909 else
1910 memcpy(dst, mwt->data, match_size);
1913 size_kern = match->matchsize;
1914 if (unlikely(size_kern == -1))
1915 size_kern = match_size;
1916 module_put(match->me);
1917 break;
1918 case EBT_COMPAT_WATCHER: /* fallthrough */
1919 case EBT_COMPAT_TARGET:
1920 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1921 name, 0), "ebt_%s", name);
1922 if (wt == NULL)
1923 return -ENOENT;
1924 if (IS_ERR(wt))
1925 return PTR_ERR(wt);
1926 off = xt_compat_target_offset(wt);
1928 if (dst) {
1929 if (wt->compat_from_user)
1930 wt->compat_from_user(dst, mwt->data);
1931 else
1932 memcpy(dst, mwt->data, match_size);
1935 size_kern = wt->targetsize;
1936 module_put(wt->me);
1937 break;
1940 if (!dst) {
1941 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1942 off + ebt_compat_entry_padsize());
1943 if (ret < 0)
1944 return ret;
1947 state->buf_kern_offset += match_size + off;
1948 state->buf_user_offset += match_size;
1949 pad = XT_ALIGN(size_kern) - size_kern;
1951 if (pad > 0 && dst) {
1952 BUG_ON(state->buf_kern_len <= pad);
1953 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1954 memset(dst + size_kern, 0, pad);
1956 return off + match_size;
1960 * return size of all matches, watchers or target, including necessary
1961 * alignment and padding.
1963 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1964 unsigned int size_left, enum compat_mwt type,
1965 struct ebt_entries_buf_state *state, const void *base)
1967 int growth = 0;
1968 char *buf;
1970 if (size_left == 0)
1971 return 0;
1973 buf = (char *) match32;
1975 while (size_left >= sizeof(*match32)) {
1976 struct ebt_entry_match *match_kern;
1977 int ret;
1979 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1980 if (match_kern) {
1981 char *tmp;
1982 tmp = state->buf_kern_start + state->buf_kern_offset;
1983 match_kern = (struct ebt_entry_match *) tmp;
1985 ret = ebt_buf_add(state, buf, sizeof(*match32));
1986 if (ret < 0)
1987 return ret;
1988 size_left -= sizeof(*match32);
1990 /* add padding before match->data (if any) */
1991 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1992 if (ret < 0)
1993 return ret;
1995 if (match32->match_size > size_left)
1996 return -EINVAL;
1998 size_left -= match32->match_size;
2000 ret = compat_mtw_from_user(match32, type, state, base);
2001 if (ret < 0)
2002 return ret;
2004 BUG_ON(ret < match32->match_size);
2005 growth += ret - match32->match_size;
2006 growth += ebt_compat_entry_padsize();
2008 buf += sizeof(*match32);
2009 buf += match32->match_size;
2011 if (match_kern)
2012 match_kern->match_size = ret;
2014 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2015 match32 = (struct compat_ebt_entry_mwt *) buf;
2018 return growth;
2021 #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2022 ({ \
2023 unsigned int __i; \
2024 int __ret = 0; \
2025 struct compat_ebt_entry_mwt *__watcher; \
2027 for (__i = e->watchers_offset; \
2028 __i < (e)->target_offset; \
2029 __i += __watcher->watcher_size + \
2030 sizeof(struct compat_ebt_entry_mwt)) { \
2031 __watcher = (void *)(e) + __i; \
2032 __ret = fn(__watcher , ## args); \
2033 if (__ret != 0) \
2034 break; \
2036 if (__ret == 0) { \
2037 if (__i != (e)->target_offset) \
2038 __ret = -EINVAL; \
2040 __ret; \
2043 #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2044 ({ \
2045 unsigned int __i; \
2046 int __ret = 0; \
2047 struct compat_ebt_entry_mwt *__match; \
2049 for (__i = sizeof(struct ebt_entry); \
2050 __i < (e)->watchers_offset; \
2051 __i += __match->match_size + \
2052 sizeof(struct compat_ebt_entry_mwt)) { \
2053 __match = (void *)(e) + __i; \
2054 __ret = fn(__match , ## args); \
2055 if (__ret != 0) \
2056 break; \
2057 } \
2058 if (__ret == 0) { \
2059 if (__i != (e)->watchers_offset) \
2060 __ret = -EINVAL; \
2061 } \
2062 __ret; \
2063 })
2065 /* called for all ebt_entry structures. */
2066 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2067 unsigned int *total,
2068 struct ebt_entries_buf_state *state)
2069 {
2070 unsigned int i, j, startoff, new_offset = 0;
2071 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2072 unsigned int offsets[4];
2073 unsigned int *offsets_update = NULL;
2074 int ret;
2075 char *buf_start;
2077 if (*total < sizeof(struct ebt_entries))
2078 return -EINVAL;
2080 if (!entry->bitmask) {
2081 *total -= sizeof(struct ebt_entries);
2082 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2083 }
2084 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2085 return -EINVAL;
2087 startoff = state->buf_user_offset;
2088 /* pull in most of ebt_entry; it does not need to be changed. */
2089 ret = ebt_buf_add(state, entry,
2090 offsetof(struct ebt_entry, watchers_offset));
2091 if (ret < 0)
2092 return ret;
2094 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2095 memcpy(&offsets[1], &entry->watchers_offset,
2096 sizeof(offsets) - sizeof(offsets[0]));
2098 if (state->buf_kern_start) {
2099 buf_start = state->buf_kern_start + state->buf_kern_offset;
2100 offsets_update = (unsigned int *) buf_start;
2101 }
2102 ret = ebt_buf_add(state, &offsets[1],
2103 sizeof(offsets) - sizeof(offsets[0]));
2104 if (ret < 0)
2105 return ret;
2106 buf_start = (char *) entry;
2107 /*
2108 * 0: matches offset, always follows ebt_entry.
2109 * 1: watchers offset, from ebt_entry structure
2110 * 2: target offset, from ebt_entry structure
2111 * 3: next ebt_entry offset, from ebt_entry structure
2112 *
2113 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2114 */
2115 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2116 struct compat_ebt_entry_mwt *match32;
2117 unsigned int size;
2118 char *buf = buf_start;
2120 buf = buf_start + offsets[i];
2121 if (offsets[i] > offsets[j])
2122 return -EINVAL;
2124 match32 = (struct compat_ebt_entry_mwt *) buf;
2125 size = offsets[j] - offsets[i];
2126 ret = ebt_size_mwt(match32, size, i, state, base);
2127 if (ret < 0)
2128 return ret;
2129 new_offset += ret;
2130 if (offsets_update && new_offset) {
2131 pr_debug("ebtables: change offset %d to %d\n",
2132 offsets_update[i], offsets[j] + new_offset);
2133 offsets_update[i] = offsets[j] + new_offset;
2134 }
2135 }
2137 startoff = state->buf_user_offset - startoff;
2139 BUG_ON(*total < startoff);
2140 *total -= startoff;
2141 return 0;
2142 }
2144 /*
2145 * repl->entries_size is the size of the ebt_entry blob in userspace.
2146 * It might need more memory when copied to a 64 bit kernel in case
2147 * userspace is 32-bit. So, first task: find out how much memory is needed.
2148 *
2149 * Called before validation is performed.
2150 */
2151 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2152 struct ebt_entries_buf_state *state)
2153 {
2154 unsigned int size_remaining = size_user;
2155 int ret;
2157 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2158 &size_remaining, state);
2159 if (ret < 0)
2160 return ret;
2162 WARN_ON(size_remaining);
2163 return state->buf_kern_offset;
2164 }
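/*
 * Pull a 32-bit struct compat_ebt_replace in from userspace and widen it
 * into the native struct ebt_replace; only the members from hook_entry
 * onwards differ between the two layouts.
 */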
2167 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2168 void __user *user, unsigned int len)
2169 {
2170 struct compat_ebt_replace tmp;
2171 int i;
2173 if (len < sizeof(tmp))
2174 return -EINVAL;
2176 if (copy_from_user(&tmp, user, sizeof(tmp)))
2177 return -EFAULT;
2179 if (len != sizeof(tmp) + tmp.entries_size)
2180 return -EINVAL;
2182 if (tmp.entries_size == 0)
2183 return -EINVAL;
2185 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2186 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2187 return -ENOMEM;
2188 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2189 return -ENOMEM;
2191 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2193 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2194 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2195 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2197 repl->num_counters = tmp.num_counters;
2198 repl->counters = compat_ptr(tmp.counters);
2199 repl->entries = compat_ptr(tmp.entries);
2200 return 0;
2201 }
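/*
 * Compat EBT_SO_SET_ENTRIES: size the 32-bit entry blob, translate it to
 * native layout in a second pass, then finish via do_replace_finish()
 * exactly as the native do_replace() does.
 */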
2203 static int compat_do_replace(struct net *net, void __user *user,
2204 unsigned int len)
2205 {
2206 int ret, i, countersize, size64;
2207 struct ebt_table_info *newinfo;
2208 struct ebt_replace tmp;
2209 struct ebt_entries_buf_state state;
2210 void *entries_tmp;
2212 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2213 if (ret) {
2214 /* try real handler in case userland supplied needed padding */
2215 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2216 ret = 0;
2217 return ret;
2218 }
2220 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2221 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2222 if (!newinfo)
2223 return -ENOMEM;
2225 if (countersize)
2226 memset(newinfo->counters, 0, countersize);
2228 memset(&state, 0, sizeof(state));
2230 newinfo->entries = vmalloc(tmp.entries_size);
2231 if (!newinfo->entries) {
2232 ret = -ENOMEM;
2233 goto free_newinfo;
2234 }
2235 if (copy_from_user(
2236 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2237 ret = -EFAULT;
2238 goto free_entries;
2239 }
2241 entries_tmp = newinfo->entries;
2243 xt_compat_lock(NFPROTO_BRIDGE);
2245 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2246 if (ret < 0)
2247 goto out_unlock;
2249 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2250 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2251 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
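/* second pass: the required native size is known now, so allocate the
 * real destination buffer and let compat_copy_entries() fill it in. */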
2253 size64 = ret;
2254 newinfo->entries = vmalloc(size64);
2255 if (!newinfo->entries) {
2256 vfree(entries_tmp);
2257 ret = -ENOMEM;
2258 goto out_unlock;
2259 }
2261 memset(&state, 0, sizeof(state));
2262 state.buf_kern_start = newinfo->entries;
2263 state.buf_kern_len = size64;
2265 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2266 BUG_ON(ret < 0); /* parses same data again */
2268 vfree(entries_tmp);
2269 tmp.entries_size = size64;
2271 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2272 char __user *usrptr;
2273 if (tmp.hook_entry[i]) {
2274 unsigned int delta;
2275 usrptr = (char __user *) tmp.hook_entry[i];
2276 delta = usrptr - tmp.entries;
2277 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2278 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2279 }
2280 }
2282 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2283 xt_compat_unlock(NFPROTO_BRIDGE);
2285 ret = do_replace_finish(net, &tmp, newinfo);
2286 if (ret == 0)
2287 return ret;
2288 free_entries:
2289 vfree(newinfo->entries);
2290 free_newinfo:
2291 vfree(newinfo);
2292 return ret;
2293 out_unlock:
2294 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2295 xt_compat_unlock(NFPROTO_BRIDGE);
2296 goto free_entries;
2297 }
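/*
 * Compat EBT_SO_SET_COUNTERS: a length that does not match the compat
 * layout is assumed to mean userland already supplied native-sized
 * structures, so hand off to the regular update_counters().
 */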
2299 static int compat_update_counters(struct net *net, void __user *user,
2300 unsigned int len)
2301 {
2302 struct compat_ebt_replace hlp;
2304 if (copy_from_user(&hlp, user, sizeof(hlp)))
2305 return -EFAULT;
2307 /* try real handler in case userland supplied needed padding */
2308 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2309 return update_counters(net, user, len);
2311 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2312 hlp.num_counters, user, len);
2313 }
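/*
 * setsockopt() entry point used when a 32-bit process talks to a 64-bit
 * kernel (wired up as .compat_set below).  Illustrative userspace call,
 * not part of this file:
 *
 *	setsockopt(fd, IPPROTO_IP, EBT_SO_SET_ENTRIES, &repl,
 *		   sizeof(repl) + repl.entries_size);
 */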
2315 static int compat_do_ebt_set_ctl(struct sock *sk,
2316 int cmd, void __user *user, unsigned int len)
2317 {
2318 int ret;
2320 if (!capable(CAP_NET_ADMIN))
2321 return -EPERM;
2323 switch (cmd) {
2324 case EBT_SO_SET_ENTRIES:
2325 ret = compat_do_replace(sock_net(sk), user, len);
2326 break;
2327 case EBT_SO_SET_COUNTERS:
2328 ret = compat_update_counters(sock_net(sk), user, len);
2329 break;
2330 default:
2331 ret = -EINVAL;
2332 }
2333 return ret;
2334 }
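/*
 * getsockopt() entry point for 32-bit userland (.compat_get below).
 * Table info and entries are converted to the compat layout on the way
 * out unless the caller already used native-sized structures.
 */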
2336 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2337 void __user *user, int *len)
2338 {
2339 int ret;
2340 struct compat_ebt_replace tmp;
2341 struct ebt_table *t;
2343 if (!capable(CAP_NET_ADMIN))
2344 return -EPERM;
2346 /* try real handler in case userland supplied needed padding */
2347 if ((cmd == EBT_SO_GET_INFO ||
2348 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2349 return do_ebt_get_ctl(sk, cmd, user, len);
2351 if (copy_from_user(&tmp, user, sizeof(tmp)))
2352 return -EFAULT;
2354 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2355 if (!t)
2356 return ret;
2358 xt_compat_lock(NFPROTO_BRIDGE);
2359 switch (cmd) {
2360 case EBT_SO_GET_INFO:
2361 tmp.nentries = t->private->nentries;
2362 ret = compat_table_info(t->private, &tmp);
2363 if (ret)
2364 goto out;
2365 tmp.valid_hooks = t->valid_hooks;
2367 if (copy_to_user(user, &tmp, *len) != 0) {
2368 ret = -EFAULT;
2369 break;
2370 }
2371 ret = 0;
2372 break;
2373 case EBT_SO_GET_INIT_INFO:
2374 tmp.nentries = t->table->nentries;
2375 tmp.entries_size = t->table->entries_size;
2376 tmp.valid_hooks = t->table->valid_hooks;
2378 if (copy_to_user(user, &tmp, *len) != 0) {
2379 ret = -EFAULT;
2380 break;
2381 }
2382 ret = 0;
2383 break;
2384 case EBT_SO_GET_ENTRIES:
2385 case EBT_SO_GET_INIT_ENTRIES:
2386 /*
2387 * try real handler first in case of userland-side padding.
2388 * in case we are dealing with an 'ordinary' 32 bit binary
2389 * without 64bit compatibility padding, this will fail right
2390 * after copy_from_user when the *len argument is validated.
2391 *
2392 * the compat_ variant needs to do one pass over the kernel
2393 * data set to adjust for size differences before it can do the check.
2394 */
2395 if (copy_everything_to_user(t, user, len, cmd) == 0)
2396 ret = 0;
2397 else
2398 ret = compat_copy_everything_to_user(t, user, len, cmd);
2399 break;
2400 default:
2401 ret = -EINVAL;
2402 }
2403 out:
2404 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2405 xt_compat_unlock(NFPROTO_BRIDGE);
2406 mutex_unlock(&ebt_mutex);
2407 return ret;
2408 }
2409 #endif
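/* sockopt hooks through which the ebtables userspace tool drives this module */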
2411 static struct nf_sockopt_ops ebt_sockopts =
2412 {
2413 .pf = PF_INET,
2414 .set_optmin = EBT_BASE_CTL,
2415 .set_optmax = EBT_SO_SET_MAX + 1,
2416 .set = do_ebt_set_ctl,
2417 #ifdef CONFIG_COMPAT
2418 .compat_set = compat_do_ebt_set_ctl,
2419 #endif
2420 .get_optmin = EBT_BASE_CTL,
2421 .get_optmax = EBT_SO_GET_MAX + 1,
2422 .get = do_ebt_get_ctl,
2423 #ifdef CONFIG_COMPAT
2424 .compat_get = compat_do_ebt_get_ctl,
2425 #endif
2426 .owner = THIS_MODULE,
2427 };
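/*
 * Register the built-in "standard" verdict target and the sockopt
 * interface.  The tables themselves come from the per-table modules
 * (e.g. ebtable_filter) via ebt_register_table().
 */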
2429 static int __init ebtables_init(void)
2430 {
2431 int ret;
2433 ret = xt_register_target(&ebt_standard_target);
2434 if (ret < 0)
2435 return ret;
2436 ret = nf_register_sockopt(&ebt_sockopts);
2437 if (ret < 0) {
2438 xt_unregister_target(&ebt_standard_target);
2439 return ret;
2440 }
2442 printk(KERN_INFO "Ebtables v2.0 registered\n");
2443 return 0;
2444 }
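/* undo ebtables_init() in reverse order */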
2446 static void __exit ebtables_fini(void)
2447 {
2448 nf_unregister_sockopt(&ebt_sockopts);
2449 xt_unregister_target(&ebt_standard_target);
2450 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2451 }
2453 EXPORT_SYMBOL(ebt_register_table);
2454 EXPORT_SYMBOL(ebt_unregister_table);
2455 EXPORT_SYMBOL(ebt_do_table);
2456 module_init(ebtables_init);
2457 module_exit(ebtables_fini);
2458 MODULE_LICENSE("GPL");