AppArmor: Enable configuring and building of the AppArmor security module
[linux-2.6/libata-dev.git] / net / bridge / netfilter / ebtables.c
blob59ca00e40dec2401b483bad4a7799775766e9f0d
/*
 *  ebtables
 *
 *  Author:
 *  Bart De Schuymer		<bdschuym@pandora.be>
 *
 *  ebtables.c,v 2.0, July, 2002
 *
 *  This code is strongly inspired on the iptables code which is
 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <net/sock.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args)
35 /* #define BUGPRINT(format, args...) */
38 * Each cpu has its own set of counters, so there is no need for write_lock in
39 * the softirq
40 * For reading or updating the counters, the user context needs to
41 * get a write_lock
44 /* The size of each set of counters is altered to get cache alignment */
45 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
46 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
47 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
48 COUNTER_OFFSET(n) * cpu))
52 static DEFINE_MUTEX(ebt_mutex);
54 #ifdef CONFIG_COMPAT
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
57 int v = *(compat_int_t *)src;
59 if (v >= 0)
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66 compat_int_t cv = *(int *)src;
68 if (cv >= 0)
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
72 #endif
75 static struct xt_target ebt_standard_target = {
76 .name = "standard",
77 .revision = 0,
78 .family = NFPROTO_BRIDGE,
79 .targetsize = sizeof(int),
80 #ifdef CONFIG_COMPAT
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
84 #endif
87 static inline int
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 struct xt_action_param *par)
91 par->target = w->u.watcher;
92 par->targinfo = w->data;
93 w->u.watcher->target(skb, par);
94 /* watchers don't give a verdict */
95 return 0;
98 static inline int
99 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
100 struct xt_action_param *par)
102 par->match = m->u.match;
103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
107 static inline int
108 ebt_dev_check(const char *entry, const struct net_device *device)
110 int i = 0;
111 const char *devname;
113 if (*entry == '\0')
114 return 0;
115 if (!device)
116 return 1;
117 devname = device->name;
118 /* 1 is the wildcard token */
119 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
120 i++;
121 return (devname[i] != entry[i] && entry[i] != 1);
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125 /* process standard matches */
126 static inline int
127 ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
128 const struct net_device *in, const struct net_device *out)
130 int verdict, i;
132 if (e->bitmask & EBT_802_3) {
133 if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
134 return 1;
135 } else if (!(e->bitmask & EBT_NOPROTO) &&
136 FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
137 return 1;
139 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
140 return 1;
141 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
142 return 1;
143 if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
144 e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
145 return 1;
146 if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
147 e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
148 return 1;
150 if (e->bitmask & EBT_SOURCEMAC) {
151 verdict = 0;
152 for (i = 0; i < 6; i++)
153 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
154 e->sourcemsk[i];
155 if (FWINV2(verdict != 0, EBT_ISOURCE) )
156 return 1;
158 if (e->bitmask & EBT_DESTMAC) {
159 verdict = 0;
160 for (i = 0; i < 6; i++)
161 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
162 e->destmsk[i];
163 if (FWINV2(verdict != 0, EBT_IDEST) )
164 return 1;
166 return 0;
169 static inline __pure
170 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
172 return (void *)entry + entry->next_offset;
175 /* Do some firewalling */
176 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
177 const struct net_device *in, const struct net_device *out,
178 struct ebt_table *table)
180 int i, nentries;
181 struct ebt_entry *point;
182 struct ebt_counter *counter_base, *cb_base;
183 const struct ebt_entry_target *t;
184 int verdict, sp = 0;
185 struct ebt_chainstack *cs;
186 struct ebt_entries *chaininfo;
187 const char *base;
188 const struct ebt_table_info *private;
189 struct xt_action_param acpar;
191 acpar.family = NFPROTO_BRIDGE;
192 acpar.in = in;
193 acpar.out = out;
194 acpar.hotdrop = false;
195 acpar.hooknum = hook;
197 read_lock_bh(&table->lock);
198 private = table->private;
199 cb_base = COUNTER_BASE(private->counters, private->nentries,
200 smp_processor_id());
201 if (private->chainstack)
202 cs = private->chainstack[smp_processor_id()];
203 else
204 cs = NULL;
205 chaininfo = private->hook_entry[hook];
206 nentries = private->hook_entry[hook]->nentries;
207 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
208 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
209 /* base for chain jumps */
210 base = private->entries;
211 i = 0;
212 while (i < nentries) {
213 if (ebt_basic_match(point, eth_hdr(skb), in, out))
214 goto letscontinue;
216 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
217 goto letscontinue;
218 if (acpar.hotdrop) {
219 read_unlock_bh(&table->lock);
220 return NF_DROP;
223 /* increase counter */
224 (*(counter_base + i)).pcnt++;
225 (*(counter_base + i)).bcnt += skb->len;
227 /* these should only watch: not modify, nor tell us
228 what to do with the packet */
229 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
231 t = (struct ebt_entry_target *)
232 (((char *)point) + point->target_offset);
233 /* standard target */
234 if (!t->u.target->target)
235 verdict = ((struct ebt_standard_target *)t)->verdict;
236 else {
237 acpar.target = t->u.target;
238 acpar.targinfo = t->data;
239 verdict = t->u.target->target(skb, &acpar);
241 if (verdict == EBT_ACCEPT) {
242 read_unlock_bh(&table->lock);
243 return NF_ACCEPT;
245 if (verdict == EBT_DROP) {
246 read_unlock_bh(&table->lock);
247 return NF_DROP;
249 if (verdict == EBT_RETURN) {
250 letsreturn:
251 #ifdef CONFIG_NETFILTER_DEBUG
252 if (sp == 0) {
253 BUGPRINT("RETURN on base chain");
254 /* act like this is EBT_CONTINUE */
255 goto letscontinue;
257 #endif
258 sp--;
259 /* put all the local variables right */
260 i = cs[sp].n;
261 chaininfo = cs[sp].chaininfo;
262 nentries = chaininfo->nentries;
263 point = cs[sp].e;
264 counter_base = cb_base +
265 chaininfo->counter_offset;
266 continue;
268 if (verdict == EBT_CONTINUE)
269 goto letscontinue;
270 #ifdef CONFIG_NETFILTER_DEBUG
271 if (verdict < 0) {
272 BUGPRINT("bogus standard verdict\n");
273 read_unlock_bh(&table->lock);
274 return NF_DROP;
276 #endif
277 /* jump to a udc */
278 cs[sp].n = i + 1;
279 cs[sp].chaininfo = chaininfo;
280 cs[sp].e = ebt_next_entry(point);
281 i = 0;
282 chaininfo = (struct ebt_entries *) (base + verdict);
283 #ifdef CONFIG_NETFILTER_DEBUG
284 if (chaininfo->distinguisher) {
285 BUGPRINT("jump to non-chain\n");
286 read_unlock_bh(&table->lock);
287 return NF_DROP;
289 #endif
290 nentries = chaininfo->nentries;
291 point = (struct ebt_entry *)chaininfo->data;
292 counter_base = cb_base + chaininfo->counter_offset;
293 sp++;
294 continue;
295 letscontinue:
296 point = ebt_next_entry(point);
297 i++;
300 /* I actually like this :) */
301 if (chaininfo->policy == EBT_RETURN)
302 goto letsreturn;
303 if (chaininfo->policy == EBT_ACCEPT) {
304 read_unlock_bh(&table->lock);
305 return NF_ACCEPT;
307 read_unlock_bh(&table->lock);
308 return NF_DROP;
311 /* If it succeeds, returns element and locks mutex */
312 static inline void *
313 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
314 struct mutex *mutex)
316 struct {
317 struct list_head list;
318 char name[EBT_FUNCTION_MAXNAMELEN];
319 } *e;
321 *error = mutex_lock_interruptible(mutex);
322 if (*error != 0)
323 return NULL;
325 list_for_each_entry(e, head, list) {
326 if (strcmp(e->name, name) == 0)
327 return e;
329 *error = -ENOENT;
330 mutex_unlock(mutex);
331 return NULL;
334 static void *
335 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
336 int *error, struct mutex *mutex)
338 return try_then_request_module(
339 find_inlist_lock_noload(head, name, error, mutex),
340 "%s%s", prefix, name);
343 static inline struct ebt_table *
344 find_table_lock(struct net *net, const char *name, int *error,
345 struct mutex *mutex)
347 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
348 "ebtable_", error, mutex);
351 static inline int
352 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
353 unsigned int *cnt)
355 const struct ebt_entry *e = par->entryinfo;
356 struct xt_match *match;
357 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
358 int ret;
360 if (left < sizeof(struct ebt_entry_match) ||
361 left - sizeof(struct ebt_entry_match) < m->match_size)
362 return -EINVAL;
364 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
365 if (IS_ERR(match))
366 return PTR_ERR(match);
367 m->u.match = match;
369 par->match = match;
370 par->matchinfo = m->data;
371 ret = xt_check_match(par, m->match_size,
372 e->ethproto, e->invflags & EBT_IPROTO);
373 if (ret < 0) {
374 module_put(match->me);
375 return ret;
378 (*cnt)++;
379 return 0;
382 static inline int
383 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
384 unsigned int *cnt)
386 const struct ebt_entry *e = par->entryinfo;
387 struct xt_target *watcher;
388 size_t left = ((char *)e + e->target_offset) - (char *)w;
389 int ret;
391 if (left < sizeof(struct ebt_entry_watcher) ||
392 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
393 return -EINVAL;
395 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
396 if (IS_ERR(watcher))
397 return PTR_ERR(watcher);
398 w->u.watcher = watcher;
400 par->target = watcher;
401 par->targinfo = w->data;
402 ret = xt_check_target(par, w->watcher_size,
403 e->ethproto, e->invflags & EBT_IPROTO);
404 if (ret < 0) {
405 module_put(watcher->me);
406 return ret;
409 (*cnt)++;
410 return 0;
413 static int ebt_verify_pointers(const struct ebt_replace *repl,
414 struct ebt_table_info *newinfo)
416 unsigned int limit = repl->entries_size;
417 unsigned int valid_hooks = repl->valid_hooks;
418 unsigned int offset = 0;
419 int i;
421 for (i = 0; i < NF_BR_NUMHOOKS; i++)
422 newinfo->hook_entry[i] = NULL;
424 newinfo->entries_size = repl->entries_size;
425 newinfo->nentries = repl->nentries;
427 while (offset < limit) {
428 size_t left = limit - offset;
429 struct ebt_entry *e = (void *)newinfo->entries + offset;
431 if (left < sizeof(unsigned int))
432 break;
434 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
435 if ((valid_hooks & (1 << i)) == 0)
436 continue;
437 if ((char __user *)repl->hook_entry[i] ==
438 repl->entries + offset)
439 break;
442 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
443 if (e->bitmask != 0) {
444 /* we make userspace set this right,
445 so there is no misunderstanding */
446 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
447 "in distinguisher\n");
448 return -EINVAL;
450 if (i != NF_BR_NUMHOOKS)
451 newinfo->hook_entry[i] = (struct ebt_entries *)e;
452 if (left < sizeof(struct ebt_entries))
453 break;
454 offset += sizeof(struct ebt_entries);
455 } else {
456 if (left < sizeof(struct ebt_entry))
457 break;
458 if (left < e->next_offset)
459 break;
460 if (e->next_offset < sizeof(struct ebt_entry))
461 return -EINVAL;
462 offset += e->next_offset;
465 if (offset != limit) {
466 BUGPRINT("entries_size too small\n");
467 return -EINVAL;
470 /* check if all valid hooks have a chain */
471 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
472 if (!newinfo->hook_entry[i] &&
473 (valid_hooks & (1 << i))) {
474 BUGPRINT("Valid hook without chain\n");
475 return -EINVAL;
478 return 0;
482 * this one is very careful, as it is the first function
483 * to parse the userspace data
485 static inline int
486 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
487 const struct ebt_table_info *newinfo,
488 unsigned int *n, unsigned int *cnt,
489 unsigned int *totalcnt, unsigned int *udc_cnt)
491 int i;
493 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
494 if ((void *)e == (void *)newinfo->hook_entry[i])
495 break;
497 /* beginning of a new chain
498 if i == NF_BR_NUMHOOKS it must be a user defined chain */
499 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
500 /* this checks if the previous chain has as many entries
501 as it said it has */
502 if (*n != *cnt) {
503 BUGPRINT("nentries does not equal the nr of entries "
504 "in the chain\n");
505 return -EINVAL;
507 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
508 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
509 /* only RETURN from udc */
510 if (i != NF_BR_NUMHOOKS ||
511 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
512 BUGPRINT("bad policy\n");
513 return -EINVAL;
516 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
517 (*udc_cnt)++;
518 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
519 BUGPRINT("counter_offset != totalcnt");
520 return -EINVAL;
522 *n = ((struct ebt_entries *)e)->nentries;
523 *cnt = 0;
524 return 0;
526 /* a plain old entry, heh */
527 if (sizeof(struct ebt_entry) > e->watchers_offset ||
528 e->watchers_offset > e->target_offset ||
529 e->target_offset >= e->next_offset) {
530 BUGPRINT("entry offsets not in right order\n");
531 return -EINVAL;
533 /* this is not checked anywhere else */
534 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
535 BUGPRINT("target size too small\n");
536 return -EINVAL;
538 (*cnt)++;
539 (*totalcnt)++;
540 return 0;
543 struct ebt_cl_stack
545 struct ebt_chainstack cs;
546 int from;
547 unsigned int hookmask;
551 * we need these positions to check that the jumps to a different part of the
552 * entries is a jump to the beginning of a new chain.
554 static inline int
555 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
556 unsigned int *n, struct ebt_cl_stack *udc)
558 int i;
560 /* we're only interested in chain starts */
561 if (e->bitmask)
562 return 0;
563 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
564 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
565 break;
567 /* only care about udc */
568 if (i != NF_BR_NUMHOOKS)
569 return 0;
571 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
572 /* these initialisations are depended on later in check_chainloops() */
573 udc[*n].cs.n = 0;
574 udc[*n].hookmask = 0;
576 (*n)++;
577 return 0;
580 static inline int
581 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
583 struct xt_mtdtor_param par;
585 if (i && (*i)-- == 0)
586 return 1;
588 par.net = net;
589 par.match = m->u.match;
590 par.matchinfo = m->data;
591 par.family = NFPROTO_BRIDGE;
592 if (par.match->destroy != NULL)
593 par.match->destroy(&par);
594 module_put(par.match->me);
595 return 0;
598 static inline int
599 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
601 struct xt_tgdtor_param par;
603 if (i && (*i)-- == 0)
604 return 1;
606 par.net = net;
607 par.target = w->u.watcher;
608 par.targinfo = w->data;
609 par.family = NFPROTO_BRIDGE;
610 if (par.target->destroy != NULL)
611 par.target->destroy(&par);
612 module_put(par.target->me);
613 return 0;
616 static inline int
617 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
619 struct xt_tgdtor_param par;
620 struct ebt_entry_target *t;
622 if (e->bitmask == 0)
623 return 0;
624 /* we're done */
625 if (cnt && (*cnt)-- == 0)
626 return 1;
627 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
628 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
629 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
631 par.net = net;
632 par.target = t->u.target;
633 par.targinfo = t->data;
634 par.family = NFPROTO_BRIDGE;
635 if (par.target->destroy != NULL)
636 par.target->destroy(&par);
637 module_put(par.target->me);
638 return 0;
641 static inline int
642 ebt_check_entry(struct ebt_entry *e, struct net *net,
643 const struct ebt_table_info *newinfo,
644 const char *name, unsigned int *cnt,
645 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
647 struct ebt_entry_target *t;
648 struct xt_target *target;
649 unsigned int i, j, hook = 0, hookmask = 0;
650 size_t gap;
651 int ret;
652 struct xt_mtchk_param mtpar;
653 struct xt_tgchk_param tgpar;
655 /* don't mess with the struct ebt_entries */
656 if (e->bitmask == 0)
657 return 0;
659 if (e->bitmask & ~EBT_F_MASK) {
660 BUGPRINT("Unknown flag for bitmask\n");
661 return -EINVAL;
663 if (e->invflags & ~EBT_INV_MASK) {
664 BUGPRINT("Unknown flag for inv bitmask\n");
665 return -EINVAL;
667 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
668 BUGPRINT("NOPROTO & 802_3 not allowed\n");
669 return -EINVAL;
671 /* what hook do we belong to? */
672 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
673 if (!newinfo->hook_entry[i])
674 continue;
675 if ((char *)newinfo->hook_entry[i] < (char *)e)
676 hook = i;
677 else
678 break;
680 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
681 a base chain */
682 if (i < NF_BR_NUMHOOKS)
683 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
684 else {
685 for (i = 0; i < udc_cnt; i++)
686 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
687 break;
688 if (i == 0)
689 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
690 else
691 hookmask = cl_s[i - 1].hookmask;
693 i = 0;
695 mtpar.net = tgpar.net = net;
696 mtpar.table = tgpar.table = name;
697 mtpar.entryinfo = tgpar.entryinfo = e;
698 mtpar.hook_mask = tgpar.hook_mask = hookmask;
699 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
700 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
701 if (ret != 0)
702 goto cleanup_matches;
703 j = 0;
704 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
705 if (ret != 0)
706 goto cleanup_watchers;
707 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
708 gap = e->next_offset - e->target_offset;
710 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
711 if (IS_ERR(target)) {
712 ret = PTR_ERR(target);
713 goto cleanup_watchers;
716 t->u.target = target;
717 if (t->u.target == &ebt_standard_target) {
718 if (gap < sizeof(struct ebt_standard_target)) {
719 BUGPRINT("Standard target size too big\n");
720 ret = -EFAULT;
721 goto cleanup_watchers;
723 if (((struct ebt_standard_target *)t)->verdict <
724 -NUM_STANDARD_TARGETS) {
725 BUGPRINT("Invalid standard target\n");
726 ret = -EFAULT;
727 goto cleanup_watchers;
729 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
730 module_put(t->u.target->me);
731 ret = -EFAULT;
732 goto cleanup_watchers;
735 tgpar.target = target;
736 tgpar.targinfo = t->data;
737 ret = xt_check_target(&tgpar, t->target_size,
738 e->ethproto, e->invflags & EBT_IPROTO);
739 if (ret < 0) {
740 module_put(target->me);
741 goto cleanup_watchers;
743 (*cnt)++;
744 return 0;
745 cleanup_watchers:
746 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
747 cleanup_matches:
748 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
749 return ret;
753 * checks for loops and sets the hook mask for udc
754 * the hook mask for udc tells us from which base chains the udc can be
755 * accessed. This mask is a parameter to the check() functions of the extensions
757 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
758 unsigned int udc_cnt, unsigned int hooknr, char *base)
760 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
761 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
762 const struct ebt_entry_target *t;
764 while (pos < nentries || chain_nr != -1) {
765 /* end of udc, go back one 'recursion' step */
766 if (pos == nentries) {
767 /* put back values of the time when this chain was called */
768 e = cl_s[chain_nr].cs.e;
769 if (cl_s[chain_nr].from != -1)
770 nentries =
771 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
772 else
773 nentries = chain->nentries;
774 pos = cl_s[chain_nr].cs.n;
775 /* make sure we won't see a loop that isn't one */
776 cl_s[chain_nr].cs.n = 0;
777 chain_nr = cl_s[chain_nr].from;
778 if (pos == nentries)
779 continue;
781 t = (struct ebt_entry_target *)
782 (((char *)e) + e->target_offset);
783 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
784 goto letscontinue;
785 if (e->target_offset + sizeof(struct ebt_standard_target) >
786 e->next_offset) {
787 BUGPRINT("Standard target size too big\n");
788 return -1;
790 verdict = ((struct ebt_standard_target *)t)->verdict;
791 if (verdict >= 0) { /* jump to another chain */
792 struct ebt_entries *hlp2 =
793 (struct ebt_entries *)(base + verdict);
794 for (i = 0; i < udc_cnt; i++)
795 if (hlp2 == cl_s[i].cs.chaininfo)
796 break;
797 /* bad destination or loop */
798 if (i == udc_cnt) {
799 BUGPRINT("bad destination\n");
800 return -1;
802 if (cl_s[i].cs.n) {
803 BUGPRINT("loop\n");
804 return -1;
806 if (cl_s[i].hookmask & (1 << hooknr))
807 goto letscontinue;
808 /* this can't be 0, so the loop test is correct */
809 cl_s[i].cs.n = pos + 1;
810 pos = 0;
811 cl_s[i].cs.e = ebt_next_entry(e);
812 e = (struct ebt_entry *)(hlp2->data);
813 nentries = hlp2->nentries;
814 cl_s[i].from = chain_nr;
815 chain_nr = i;
816 /* this udc is accessible from the base chain for hooknr */
817 cl_s[i].hookmask |= (1 << hooknr);
818 continue;
820 letscontinue:
821 e = ebt_next_entry(e);
822 pos++;
824 return 0;
827 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
828 static int translate_table(struct net *net, const char *name,
829 struct ebt_table_info *newinfo)
831 unsigned int i, j, k, udc_cnt;
832 int ret;
833 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
835 i = 0;
836 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
837 i++;
838 if (i == NF_BR_NUMHOOKS) {
839 BUGPRINT("No valid hooks specified\n");
840 return -EINVAL;
842 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
843 BUGPRINT("Chains don't start at beginning\n");
844 return -EINVAL;
846 /* make sure chains are ordered after each other in same order
847 as their corresponding hooks */
848 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
849 if (!newinfo->hook_entry[j])
850 continue;
851 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
852 BUGPRINT("Hook order must be followed\n");
853 return -EINVAL;
855 i = j;
858 /* do some early checkings and initialize some things */
859 i = 0; /* holds the expected nr. of entries for the chain */
860 j = 0; /* holds the up to now counted entries for the chain */
861 k = 0; /* holds the total nr. of entries, should equal
862 newinfo->nentries afterwards */
863 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
864 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
865 ebt_check_entry_size_and_hooks, newinfo,
866 &i, &j, &k, &udc_cnt);
868 if (ret != 0)
869 return ret;
871 if (i != j) {
872 BUGPRINT("nentries does not equal the nr of entries in the "
873 "(last) chain\n");
874 return -EINVAL;
876 if (k != newinfo->nentries) {
877 BUGPRINT("Total nentries is wrong\n");
878 return -EINVAL;
881 /* get the location of the udc, put them in an array
882 while we're at it, allocate the chainstack */
883 if (udc_cnt) {
884 /* this will get free'd in do_replace()/ebt_register_table()
885 if an error occurs */
886 newinfo->chainstack =
887 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
888 if (!newinfo->chainstack)
889 return -ENOMEM;
890 for_each_possible_cpu(i) {
891 newinfo->chainstack[i] =
892 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
893 if (!newinfo->chainstack[i]) {
894 while (i)
895 vfree(newinfo->chainstack[--i]);
896 vfree(newinfo->chainstack);
897 newinfo->chainstack = NULL;
898 return -ENOMEM;
902 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
903 if (!cl_s)
904 return -ENOMEM;
905 i = 0; /* the i'th udc */
906 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
907 ebt_get_udc_positions, newinfo, &i, cl_s);
908 /* sanity check */
909 if (i != udc_cnt) {
910 BUGPRINT("i != udc_cnt\n");
911 vfree(cl_s);
912 return -EFAULT;
916 /* Check for loops */
917 for (i = 0; i < NF_BR_NUMHOOKS; i++)
918 if (newinfo->hook_entry[i])
919 if (check_chainloops(newinfo->hook_entry[i],
920 cl_s, udc_cnt, i, newinfo->entries)) {
921 vfree(cl_s);
922 return -EINVAL;
925 /* we now know the following (along with E=mc²):
926 - the nr of entries in each chain is right
927 - the size of the allocated space is right
928 - all valid hooks have a corresponding chain
929 - there are no loops
930 - wrong data can still be on the level of a single entry
931 - could be there are jumps to places that are not the
932 beginning of a chain. This can only occur in chains that
933 are not accessible from any base chains, so we don't care. */
935 /* used to know what we need to clean up if something goes wrong */
936 i = 0;
937 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
938 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
939 if (ret != 0) {
940 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
941 ebt_cleanup_entry, net, &i);
943 vfree(cl_s);
944 return ret;
947 /* called under write_lock */
948 static void get_counters(const struct ebt_counter *oldcounters,
949 struct ebt_counter *counters, unsigned int nentries)
951 int i, cpu;
952 struct ebt_counter *counter_base;
954 /* counters of cpu 0 */
955 memcpy(counters, oldcounters,
956 sizeof(struct ebt_counter) * nentries);
958 /* add other counters to those of cpu 0 */
959 for_each_possible_cpu(cpu) {
960 if (cpu == 0)
961 continue;
962 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
963 for (i = 0; i < nentries; i++) {
964 counters[i].pcnt += counter_base[i].pcnt;
965 counters[i].bcnt += counter_base[i].bcnt;
970 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
971 struct ebt_table_info *newinfo)
973 int ret, i;
974 struct ebt_counter *counterstmp = NULL;
975 /* used to be able to unlock earlier */
976 struct ebt_table_info *table;
977 struct ebt_table *t;
979 /* the user wants counters back
980 the check on the size is done later, when we have the lock */
981 if (repl->num_counters) {
982 unsigned long size = repl->num_counters * sizeof(*counterstmp);
983 counterstmp = vmalloc(size);
984 if (!counterstmp)
985 return -ENOMEM;
988 newinfo->chainstack = NULL;
989 ret = ebt_verify_pointers(repl, newinfo);
990 if (ret != 0)
991 goto free_counterstmp;
993 ret = translate_table(net, repl->name, newinfo);
995 if (ret != 0)
996 goto free_counterstmp;
998 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
999 if (!t) {
1000 ret = -ENOENT;
1001 goto free_iterate;
1004 /* the table doesn't like it */
1005 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1006 goto free_unlock;
1008 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1009 BUGPRINT("Wrong nr. of counters requested\n");
1010 ret = -EINVAL;
1011 goto free_unlock;
1014 /* we have the mutex lock, so no danger in reading this pointer */
1015 table = t->private;
1016 /* make sure the table can only be rmmod'ed if it contains no rules */
1017 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1018 ret = -ENOENT;
1019 goto free_unlock;
1020 } else if (table->nentries && !newinfo->nentries)
1021 module_put(t->me);
1022 /* we need an atomic snapshot of the counters */
1023 write_lock_bh(&t->lock);
1024 if (repl->num_counters)
1025 get_counters(t->private->counters, counterstmp,
1026 t->private->nentries);
1028 t->private = newinfo;
1029 write_unlock_bh(&t->lock);
1030 mutex_unlock(&ebt_mutex);
1031 /* so, a user can change the chains while having messed up her counter
1032 allocation. Only reason why this is done is because this way the lock
1033 is held only once, while this doesn't bring the kernel into a
1034 dangerous state. */
1035 if (repl->num_counters &&
1036 copy_to_user(repl->counters, counterstmp,
1037 repl->num_counters * sizeof(struct ebt_counter))) {
1038 ret = -EFAULT;
1040 else
1041 ret = 0;
1043 /* decrease module count and free resources */
1044 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1045 ebt_cleanup_entry, net, NULL);
1047 vfree(table->entries);
1048 if (table->chainstack) {
1049 for_each_possible_cpu(i)
1050 vfree(table->chainstack[i]);
1051 vfree(table->chainstack);
1053 vfree(table);
1055 vfree(counterstmp);
1056 return ret;
1058 free_unlock:
1059 mutex_unlock(&ebt_mutex);
1060 free_iterate:
1061 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1062 ebt_cleanup_entry, net, NULL);
1063 free_counterstmp:
1064 vfree(counterstmp);
1065 /* can be initialized in translate_table() */
1066 if (newinfo->chainstack) {
1067 for_each_possible_cpu(i)
1068 vfree(newinfo->chainstack[i]);
1069 vfree(newinfo->chainstack);
1071 return ret;
1074 /* replace the table */
1075 static int do_replace(struct net *net, const void __user *user,
1076 unsigned int len)
1078 int ret, countersize;
1079 struct ebt_table_info *newinfo;
1080 struct ebt_replace tmp;
1082 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1083 return -EFAULT;
1085 if (len != sizeof(tmp) + tmp.entries_size) {
1086 BUGPRINT("Wrong len argument\n");
1087 return -EINVAL;
1090 if (tmp.entries_size == 0) {
1091 BUGPRINT("Entries_size never zero\n");
1092 return -EINVAL;
1094 /* overflow check */
1095 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1096 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1097 return -ENOMEM;
1098 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1099 return -ENOMEM;
1101 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1102 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1103 if (!newinfo)
1104 return -ENOMEM;
1106 if (countersize)
1107 memset(newinfo->counters, 0, countersize);
1109 newinfo->entries = vmalloc(tmp.entries_size);
1110 if (!newinfo->entries) {
1111 ret = -ENOMEM;
1112 goto free_newinfo;
1114 if (copy_from_user(
1115 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1116 BUGPRINT("Couldn't copy entries from userspace\n");
1117 ret = -EFAULT;
1118 goto free_entries;
1121 ret = do_replace_finish(net, &tmp, newinfo);
1122 if (ret == 0)
1123 return ret;
1124 free_entries:
1125 vfree(newinfo->entries);
1126 free_newinfo:
1127 vfree(newinfo);
1128 return ret;
1131 struct ebt_table *
1132 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1134 struct ebt_table_info *newinfo;
1135 struct ebt_table *t, *table;
1136 struct ebt_replace_kernel *repl;
1137 int ret, i, countersize;
1138 void *p;
1140 if (input_table == NULL || (repl = input_table->table) == NULL ||
1141 repl->entries == 0 || repl->entries_size == 0 ||
1142 repl->counters != NULL || input_table->private != NULL) {
1143 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1144 return ERR_PTR(-EINVAL);
1147 /* Don't add one table to multiple lists. */
1148 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1149 if (!table) {
1150 ret = -ENOMEM;
1151 goto out;
1154 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1155 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1156 ret = -ENOMEM;
1157 if (!newinfo)
1158 goto free_table;
1160 p = vmalloc(repl->entries_size);
1161 if (!p)
1162 goto free_newinfo;
1164 memcpy(p, repl->entries, repl->entries_size);
1165 newinfo->entries = p;
1167 newinfo->entries_size = repl->entries_size;
1168 newinfo->nentries = repl->nentries;
1170 if (countersize)
1171 memset(newinfo->counters, 0, countersize);
1173 /* fill in newinfo and parse the entries */
1174 newinfo->chainstack = NULL;
1175 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1176 if ((repl->valid_hooks & (1 << i)) == 0)
1177 newinfo->hook_entry[i] = NULL;
1178 else
1179 newinfo->hook_entry[i] = p +
1180 ((char *)repl->hook_entry[i] - repl->entries);
1182 ret = translate_table(net, repl->name, newinfo);
1183 if (ret != 0) {
1184 BUGPRINT("Translate_table failed\n");
1185 goto free_chainstack;
1188 if (table->check && table->check(newinfo, table->valid_hooks)) {
1189 BUGPRINT("The table doesn't like its own initial data, lol\n");
1190 return ERR_PTR(-EINVAL);
1193 table->private = newinfo;
1194 rwlock_init(&table->lock);
1195 ret = mutex_lock_interruptible(&ebt_mutex);
1196 if (ret != 0)
1197 goto free_chainstack;
1199 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1200 if (strcmp(t->name, table->name) == 0) {
1201 ret = -EEXIST;
1202 BUGPRINT("Table name already exists\n");
1203 goto free_unlock;
1207 /* Hold a reference count if the chains aren't empty */
1208 if (newinfo->nentries && !try_module_get(table->me)) {
1209 ret = -ENOENT;
1210 goto free_unlock;
1212 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1213 mutex_unlock(&ebt_mutex);
1214 return table;
1215 free_unlock:
1216 mutex_unlock(&ebt_mutex);
1217 free_chainstack:
1218 if (newinfo->chainstack) {
1219 for_each_possible_cpu(i)
1220 vfree(newinfo->chainstack[i]);
1221 vfree(newinfo->chainstack);
1223 vfree(newinfo->entries);
1224 free_newinfo:
1225 vfree(newinfo);
1226 free_table:
1227 kfree(table);
1228 out:
1229 return ERR_PTR(ret);
1232 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1234 int i;
1236 if (!table) {
1237 BUGPRINT("Request to unregister NULL table!!!\n");
1238 return;
1240 mutex_lock(&ebt_mutex);
1241 list_del(&table->list);
1242 mutex_unlock(&ebt_mutex);
1243 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1244 ebt_cleanup_entry, net, NULL);
1245 if (table->private->nentries)
1246 module_put(table->me);
1247 vfree(table->private->entries);
1248 if (table->private->chainstack) {
1249 for_each_possible_cpu(i)
1250 vfree(table->private->chainstack[i]);
1251 vfree(table->private->chainstack);
1253 vfree(table->private);
1254 kfree(table);
1257 /* userspace just supplied us with counters */
1258 static int do_update_counters(struct net *net, const char *name,
1259 struct ebt_counter __user *counters,
1260 unsigned int num_counters,
1261 const void __user *user, unsigned int len)
1263 int i, ret;
1264 struct ebt_counter *tmp;
1265 struct ebt_table *t;
1267 if (num_counters == 0)
1268 return -EINVAL;
1270 tmp = vmalloc(num_counters * sizeof(*tmp));
1271 if (!tmp)
1272 return -ENOMEM;
1274 t = find_table_lock(net, name, &ret, &ebt_mutex);
1275 if (!t)
1276 goto free_tmp;
1278 if (num_counters != t->private->nentries) {
1279 BUGPRINT("Wrong nr of counters\n");
1280 ret = -EINVAL;
1281 goto unlock_mutex;
1284 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1285 ret = -EFAULT;
1286 goto unlock_mutex;
1289 /* we want an atomic add of the counters */
1290 write_lock_bh(&t->lock);
1292 /* we add to the counters of the first cpu */
1293 for (i = 0; i < num_counters; i++) {
1294 t->private->counters[i].pcnt += tmp[i].pcnt;
1295 t->private->counters[i].bcnt += tmp[i].bcnt;
1298 write_unlock_bh(&t->lock);
1299 ret = 0;
1300 unlock_mutex:
1301 mutex_unlock(&ebt_mutex);
1302 free_tmp:
1303 vfree(tmp);
1304 return ret;
1307 static int update_counters(struct net *net, const void __user *user,
1308 unsigned int len)
1310 struct ebt_replace hlp;
1312 if (copy_from_user(&hlp, user, sizeof(hlp)))
1313 return -EFAULT;
1315 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1316 return -EINVAL;
1318 return do_update_counters(net, hlp.name, hlp.counters,
1319 hlp.num_counters, user, len);
1322 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1323 const char *base, char __user *ubase)
1325 char __user *hlp = ubase + ((char *)m - base);
1326 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1327 return -EFAULT;
1328 return 0;
1331 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1332 const char *base, char __user *ubase)
1334 char __user *hlp = ubase + ((char *)w - base);
1335 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1336 return -EFAULT;
1337 return 0;
1340 static inline int
1341 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1343 int ret;
1344 char __user *hlp;
1345 const struct ebt_entry_target *t;
1347 if (e->bitmask == 0)
1348 return 0;
1350 hlp = ubase + (((char *)e + e->target_offset) - base);
1351 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1353 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1354 if (ret != 0)
1355 return ret;
1356 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1357 if (ret != 0)
1358 return ret;
1359 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1360 return -EFAULT;
1361 return 0;
/*
 * Snapshot the table's counters under the write lock and copy them to
 * userspace. num_counters == 0 means userspace does not want counters.
 */
static int copy_counters_to_user(struct ebt_table *t,
				 const struct ebt_counter *oldcounters,
				 void __user *user, unsigned int num_counters,
				 unsigned int nentries)
{
	struct ebt_counter *counterstmp;
	int ret = 0;

	/* userspace might not need the counters */
	if (num_counters == 0)
		return 0;

	if (num_counters != nentries) {
		BUGPRINT("Num_counters wrong\n");
		return -EINVAL;
	}

	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
	if (!counterstmp)
		return -ENOMEM;

	/* write lock: get_counters() must see a stable view of all cpus */
	write_lock_bh(&t->lock);
	get_counters(oldcounters, counterstmp, nentries);
	write_unlock_bh(&t->lock);

	/* copy outside the lock; copy_to_user may sleep */
	if (copy_to_user(user, counterstmp,
	   nentries * sizeof(struct ebt_counter)))
		ret = -EFAULT;
	vfree(counterstmp);
	return ret;
}
/* called with ebt_mutex locked */
/*
 * Handle EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES: copy counters and
 * the entry blob to userspace, then rewrite the kernel pointers in the
 * user copy into match/watcher/target names via ebt_make_names().
 * GET_ENTRIES reads the live table (t->private), GET_INIT_ENTRIES the
 * pristine template (t->table).
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
				   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* *len must be header + entries + (optionally) the counter array */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1449 static int do_ebt_set_ctl(struct sock *sk,
1450 int cmd, void __user *user, unsigned int len)
1452 int ret;
1454 if (!capable(CAP_NET_ADMIN))
1455 return -EPERM;
1457 switch(cmd) {
1458 case EBT_SO_SET_ENTRIES:
1459 ret = do_replace(sock_net(sk), user, len);
1460 break;
1461 case EBT_SO_SET_COUNTERS:
1462 ret = update_counters(sock_net(sk), user, len);
1463 break;
1464 default:
1465 ret = -EINVAL;
1467 return ret;
/*
 * getsockopt() entry point for ebtables. find_table_lock() returns with
 * ebt_mutex held on success; every branch below must drop it exactly
 * once. Requires CAP_NET_ADMIN.
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch(cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)){
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		/* INFO reads the live table, INIT_INFO the pristine template */
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		/* drop the mutex before the (possibly sleeping) user copy */
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0){
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/* copy_everything_to_user() expects the mutex to be held */
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
#ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: pointers become compat_uptr_t */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};

/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;
	compat_uint_t data[0];	/* payload follows the header */
};
1552 /* account for possible padding between match_size and ->data */
1553 static int ebt_compat_entry_padsize(void)
1555 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1556 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1557 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1558 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1561 static int ebt_compat_match_offset(const struct xt_match *match,
1562 unsigned int userlen)
1565 * ebt_among needs special handling. The kernel .matchsize is
1566 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1567 * value is expected.
1568 * Example: userspace sends 4500, ebt_among.c wants 4504.
1570 if (unlikely(match->matchsize == -1))
1571 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1572 return xt_compat_match_offset(match);
/*
 * Convert one native match to its 32-bit representation at *dstptr,
 * advancing *dstptr past the written data and shrinking *size by the
 * native-vs-compat difference.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	/* compat payload is smaller by the struct-size delta */
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		/* match provides its own 64->32 payload conversion */
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
/*
 * Convert one native target (or watcher, same layout) to its 32-bit
 * representation; mirrors compat_match_to_user().
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	/* compat layout stores the size in the shared match_size field */
	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1628 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1629 void __user **dstptr,
1630 unsigned int *size)
1632 return compat_target_to_user((struct ebt_entry_target *)w,
1633 dstptr, size);
/*
 * Convert one ebt_entry (or chain head) to the 32-bit layout. After the
 * matches/watchers/target have been converted, the entry's offset fields
 * in the user copy are patched to the (smaller) compat offsets.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				     unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	if (e->bitmask == 0) {
		/* chain head: struct ebt_entries has the same 32/64 layout */
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	/* each conversion shrinks *size; (origsize - *size) is the total
	 * shrink so far, used to rebase the entry-relative offsets */
	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1692 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1694 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1695 *off += ebt_compat_entry_padsize();
1696 return 0;
1699 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1701 *off += xt_compat_target_offset(w->u.watcher);
1702 *off += ebt_compat_entry_padsize();
1703 return 0;
/*
 * For one entry, compute how much smaller its 32-bit representation is
 * (off), record the per-entry delta with xt_compat_add_offset(), shrink
 * newinfo->entries_size accordingly and rebase any hook entry points
 * that lie beyond this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain heads have identical 32/64-bit size, nothing to adjust */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): '(base - hookptr)' looks inverted — it seems
		 * intended to compare e against hookptr's offset from base;
		 * verify against upstream before relying on this comparison */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1750 static int compat_table_info(const struct ebt_table_info *info,
1751 struct compat_ebt_replace *newinfo)
1753 unsigned int size = info->entries_size;
1754 const void *entries = info->entries;
1756 newinfo->entries_size = size;
1758 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1759 entries, newinfo);
/*
 * Compat counterpart of copy_everything_to_user(): validate the user's
 * compat_ebt_replace header, copy counters, then stream the entries out
 * in the converted 32-bit layout.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* compute the compat entries_size into repl for the *len check */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
/*
 * Scratch state for the two-pass 32->64 bit entry translation: the first
 * pass runs with buf_kern_start == NULL and only sizes the output, the
 * second pass actually writes into the kernel buffer.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1825 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1827 state->buf_kern_offset += sz;
1828 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1831 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1832 void *data, unsigned int sz)
1834 if (state->buf_kern_start == NULL)
1835 goto count_only;
1837 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1839 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1841 count_only:
1842 state->buf_user_offset += sz;
1843 return ebt_buf_count(state, sz);
1846 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1848 char *b = state->buf_kern_start;
1850 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1852 if (b != NULL && sz > 0)
1853 memset(b + state->buf_kern_offset, 0, sz);
1854 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1855 return ebt_buf_count(state, sz);
/* which kind of extension a compat_ebt_entry_mwt header describes */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
/*
 * Translate one 32-bit match/watcher/target payload into the native
 * layout (or, during the sizing pass with dst == NULL, just record the
 * size delta via xt_compat_add_offset()).
 *
 * Returns the native payload size (off + match_size) on success.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0, ret = 0;
	unsigned int size_kern, entry_offset, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	entry_offset = (unsigned char *) mwt - base;
	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		/* look up (and possibly modprobe) the match by name */
		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (match == NULL)
			return -ENOENT;
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* ebt_among registers with matchsize == -1 (variable size) */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (wt == NULL)
			return -ENOENT;
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;
	}

	if (!dst) {
		/* sizing pass: remember this entry's 32->64 growth */
		ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
					off + ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	/* zero-fill alignment padding after the converted payload */
	pad = XT_ALIGN(size_kern) - size_kern;

	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	/* walk the 32-bit extension list header by header */
	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* remember where the native header will land so its
		 * match_size can be fixed up after conversion */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		/* ret is the native payload size; growth is the extra room
		 * the 64-bit layout needs beyond the 32-bit one */
		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		if (match_kern)
			match_kern->match_size = ret;

		/* an entry has exactly one target, so nothing may follow it */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
/* iterate the compat watcher list of entry e, calling fn on each; the
 * walk must land exactly on target_offset or -EINVAL is returned */
#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...)          \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__watcher;             \
	                                                    \
	for (__i = e->watchers_offset;                      \
	     __i < (e)->target_offset;                      \
	     __i += __watcher->watcher_size +               \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__watcher = (void *)(e) + __i;              \
		__ret = fn(__watcher , ## args);            \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->target_offset)              \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
/* iterate the compat match list of entry e, calling fn on each; the
 * walk must land exactly on watchers_offset or -EINVAL is returned */
#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...)            \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__match;               \
	                                                    \
	for (__i = sizeof(struct ebt_entry);                \
	     __i < (e)->watchers_offset;                    \
	     __i += __match->match_size +                   \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__match = (void *)(e) + __i;                \
		__ret = fn(__match , ## args);              \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->watchers_offset)            \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
/* called for all ebt_entry structures. */
/*
 * Translate (or size, on the first pass) one 32-bit ebt_entry: copy the
 * invariant head, then convert matches, watchers and target in turn,
 * patching the four offset fields in the kernel copy to account for the
 * 32->64 bit growth. *total tracks the remaining user blob.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	if (!entry->bitmask) {
		/* chain head: same layout in 32 and 64 bit, copy verbatim */
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* the three offset fields land here; remember the spot so
		 * they can be rewritten with the grown offsets below */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as enum compat_mwt: 0=match, 1=watcher, 2=target */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	/* consume this entry's user-side footprint from *total */
	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
/*
 * repl->entries_size is the size of the ebt_entry blob in userspace.
 * It might need more memory when copied to a 64 bit kernel in case
 * userspace is 32-bit. So, first task: find out how much memory is needed.
 *
 * Called before validation is performed.
 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
			       struct ebt_entries_buf_state *state)
{
	unsigned int size_remaining = size_user;
	int ret;

	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
					&size_remaining, state);
	if (ret < 0)
		return ret;

	/* every byte of the user blob should have been consumed */
	WARN_ON(size_remaining);
	/* on success, the required (or written) kernel buffer size */
	return state->buf_kern_offset;
}
/*
 * Read a compat_ebt_replace from userspace and widen it into a native
 * struct ebt_replace, converting the compat_uptr_t members. Performs the
 * same sanity/overflow checks as the native do_replace().
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					     void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow checks, mirroring do_replace() */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* the leading members up to hook_entry have identical layout */
	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
/*
 * Compat table replacement: widen the header, copy in the 32-bit entry
 * blob, run the two-pass translation (size, then fill) into a native
 * blob, rebase the hook entry pointers, and finish via
 * do_replace_finish() exactly like the native path.
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	/* the xt compat offset table is global per family, serialize */
	xt_compat_lock(NFPROTO_BRIDGE);

	/* pass 1: state.buf_kern_start == NULL, only compute size64 */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* pass 2: same walk, now writing into the native buffer */
	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* shift each hook entry point by the accumulated 32->64 growth */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2285 static int compat_update_counters(struct net *net, void __user *user,
2286 unsigned int len)
2288 struct compat_ebt_replace hlp;
2290 if (copy_from_user(&hlp, user, sizeof(hlp)))
2291 return -EFAULT;
2293 /* try real handler in case userland supplied needed padding */
2294 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2295 return update_counters(net, user, len);
2297 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2298 hlp.num_counters, user, len);
2301 static int compat_do_ebt_set_ctl(struct sock *sk,
2302 int cmd, void __user *user, unsigned int len)
2304 int ret;
2306 if (!capable(CAP_NET_ADMIN))
2307 return -EPERM;
2309 switch (cmd) {
2310 case EBT_SO_SET_ENTRIES:
2311 ret = compat_do_replace(sock_net(sk), user, len);
2312 break;
2313 case EBT_SO_SET_COUNTERS:
2314 ret = compat_update_counters(sock_net(sk), user, len);
2315 break;
2316 default:
2317 ret = -EINVAL;
2319 return ret;
2322 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2323 void __user *user, int *len)
2325 int ret;
2326 struct compat_ebt_replace tmp;
2327 struct ebt_table *t;
2329 if (!capable(CAP_NET_ADMIN))
2330 return -EPERM;
2332 /* try real handler in case userland supplied needed padding */
2333 if ((cmd == EBT_SO_GET_INFO ||
2334 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2335 return do_ebt_get_ctl(sk, cmd, user, len);
2337 if (copy_from_user(&tmp, user, sizeof(tmp)))
2338 return -EFAULT;
2340 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2341 if (!t)
2342 return ret;
2344 xt_compat_lock(NFPROTO_BRIDGE);
2345 switch (cmd) {
2346 case EBT_SO_GET_INFO:
2347 tmp.nentries = t->private->nentries;
2348 ret = compat_table_info(t->private, &tmp);
2349 if (ret)
2350 goto out;
2351 tmp.valid_hooks = t->valid_hooks;
2353 if (copy_to_user(user, &tmp, *len) != 0) {
2354 ret = -EFAULT;
2355 break;
2357 ret = 0;
2358 break;
2359 case EBT_SO_GET_INIT_INFO:
2360 tmp.nentries = t->table->nentries;
2361 tmp.entries_size = t->table->entries_size;
2362 tmp.valid_hooks = t->table->valid_hooks;
2364 if (copy_to_user(user, &tmp, *len) != 0) {
2365 ret = -EFAULT;
2366 break;
2368 ret = 0;
2369 break;
2370 case EBT_SO_GET_ENTRIES:
2371 case EBT_SO_GET_INIT_ENTRIES:
2373 * try real handler first in case of userland-side padding.
2374 * in case we are dealing with an 'ordinary' 32 bit binary
2375 * without 64bit compatibility padding, this will fail right
2376 * after copy_from_user when the *len argument is validated.
2378 * the compat_ variant needs to do one pass over the kernel
2379 * data set to adjust for size differences before it the check.
2381 if (copy_everything_to_user(t, user, len, cmd) == 0)
2382 ret = 0;
2383 else
2384 ret = compat_copy_everything_to_user(t, user, len, cmd);
2385 break;
2386 default:
2387 ret = -EINVAL;
2389 out:
2390 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2391 xt_compat_unlock(NFPROTO_BRIDGE);
2392 mutex_unlock(&ebt_mutex);
2393 return ret;
2395 #endif
2397 static struct nf_sockopt_ops ebt_sockopts =
2399 .pf = PF_INET,
2400 .set_optmin = EBT_BASE_CTL,
2401 .set_optmax = EBT_SO_SET_MAX + 1,
2402 .set = do_ebt_set_ctl,
2403 #ifdef CONFIG_COMPAT
2404 .compat_set = compat_do_ebt_set_ctl,
2405 #endif
2406 .get_optmin = EBT_BASE_CTL,
2407 .get_optmax = EBT_SO_GET_MAX + 1,
2408 .get = do_ebt_get_ctl,
2409 #ifdef CONFIG_COMPAT
2410 .compat_get = compat_do_ebt_get_ctl,
2411 #endif
2412 .owner = THIS_MODULE,
2415 static int __init ebtables_init(void)
2417 int ret;
2419 ret = xt_register_target(&ebt_standard_target);
2420 if (ret < 0)
2421 return ret;
2422 ret = nf_register_sockopt(&ebt_sockopts);
2423 if (ret < 0) {
2424 xt_unregister_target(&ebt_standard_target);
2425 return ret;
2428 printk(KERN_INFO "Ebtables v2.0 registered\n");
2429 return 0;
2432 static void __exit ebtables_fini(void)
2434 nf_unregister_sockopt(&ebt_sockopts);
2435 xt_unregister_target(&ebt_standard_target);
2436 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
/* Symbols used by the per-table modules (ebtable_filter, ebtable_nat,
 * ebtable_broute) to register their tables and run packets through them.
 */
2439 EXPORT_SYMBOL(ebt_register_table);
2440 EXPORT_SYMBOL(ebt_unregister_table);
2441 EXPORT_SYMBOL(ebt_do_table);
/* Module entry/exit hooks and license declaration. */
2442 module_init(ebtables_init);
2443 module_exit(ebtables_fini);
2444 MODULE_LICENSE("GPL");