/*
 * Packet matching code for ARP packets.
 *
 * Based heavily, if not almost entirely, upon ip_tables.c framework.
 *
 * Some ARP specific bits are:
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 */
12 #include <linux/kernel.h>
13 #include <linux/skbuff.h>
14 #include <linux/netdevice.h>
15 #include <linux/capability.h>
16 #include <linux/if_arp.h>
17 #include <linux/kmod.h>
18 #include <linux/vmalloc.h>
19 #include <linux/proc_fs.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/mutex.h>
23 #include <linux/err.h>
24 #include <net/compat.h>
26 #include <asm/uaccess.h>
28 #include <linux/netfilter/x_tables.h>
29 #include <linux/netfilter_arp/arp_tables.h>
31 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
33 MODULE_DESCRIPTION("arptables core");
35 /*#define DEBUG_ARP_TABLES*/
36 /*#define DEBUG_ARP_TABLES_USER*/
38 #ifdef DEBUG_ARP_TABLES
39 #define dprintf(format, args...) printk(format , ## args)
41 #define dprintf(format, args...)
44 #ifdef DEBUG_ARP_TABLES_USER
45 #define duprintf(format, args...) printk(format , ## args)
47 #define duprintf(format, args...)
50 #ifdef CONFIG_NETFILTER_DEBUG
51 #define ARP_NF_ASSERT(x) \
54 printk("ARP_NF_ASSERT: %s:%s:%u\n", \
55 __func__, __FILE__, __LINE__); \
58 #define ARP_NF_ASSERT(x)
61 static inline int arp_devaddr_compare(const struct arpt_devaddr_info
*ap
,
62 const char *hdr_addr
, int len
)
66 if (len
> ARPT_DEV_ADDR_LEN_MAX
)
67 len
= ARPT_DEV_ADDR_LEN_MAX
;
70 for (i
= 0; i
< len
; i
++)
71 ret
|= (hdr_addr
[i
] ^ ap
->addr
[i
]) & ap
->mask
[i
];
76 /* Returns whether packet matches rule or not. */
77 static inline int arp_packet_match(const struct arphdr
*arphdr
,
78 struct net_device
*dev
,
81 const struct arpt_arp
*arpinfo
)
83 const char *arpptr
= (char *)(arphdr
+ 1);
84 const char *src_devaddr
, *tgt_devaddr
;
85 __be32 src_ipaddr
, tgt_ipaddr
;
88 #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
90 if (FWINV((arphdr
->ar_op
& arpinfo
->arpop_mask
) != arpinfo
->arpop
,
92 dprintf("ARP operation field mismatch.\n");
93 dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
94 arphdr
->ar_op
, arpinfo
->arpop
, arpinfo
->arpop_mask
);
98 if (FWINV((arphdr
->ar_hrd
& arpinfo
->arhrd_mask
) != arpinfo
->arhrd
,
100 dprintf("ARP hardware address format mismatch.\n");
101 dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
102 arphdr
->ar_hrd
, arpinfo
->arhrd
, arpinfo
->arhrd_mask
);
106 if (FWINV((arphdr
->ar_pro
& arpinfo
->arpro_mask
) != arpinfo
->arpro
,
108 dprintf("ARP protocol address format mismatch.\n");
109 dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
110 arphdr
->ar_pro
, arpinfo
->arpro
, arpinfo
->arpro_mask
);
114 if (FWINV((arphdr
->ar_hln
& arpinfo
->arhln_mask
) != arpinfo
->arhln
,
116 dprintf("ARP hardware address length mismatch.\n");
117 dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
118 arphdr
->ar_hln
, arpinfo
->arhln
, arpinfo
->arhln_mask
);
122 src_devaddr
= arpptr
;
123 arpptr
+= dev
->addr_len
;
124 memcpy(&src_ipaddr
, arpptr
, sizeof(u32
));
125 arpptr
+= sizeof(u32
);
126 tgt_devaddr
= arpptr
;
127 arpptr
+= dev
->addr_len
;
128 memcpy(&tgt_ipaddr
, arpptr
, sizeof(u32
));
130 if (FWINV(arp_devaddr_compare(&arpinfo
->src_devaddr
, src_devaddr
, dev
->addr_len
),
131 ARPT_INV_SRCDEVADDR
) ||
132 FWINV(arp_devaddr_compare(&arpinfo
->tgt_devaddr
, tgt_devaddr
, dev
->addr_len
),
133 ARPT_INV_TGTDEVADDR
)) {
134 dprintf("Source or target device address mismatch.\n");
139 if (FWINV((src_ipaddr
& arpinfo
->smsk
.s_addr
) != arpinfo
->src
.s_addr
,
141 FWINV(((tgt_ipaddr
& arpinfo
->tmsk
.s_addr
) != arpinfo
->tgt
.s_addr
),
143 dprintf("Source or target IP address mismatch.\n");
145 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
147 NIPQUAD(arpinfo
->smsk
.s_addr
),
148 NIPQUAD(arpinfo
->src
.s_addr
),
149 arpinfo
->invflags
& ARPT_INV_SRCIP
? " (INV)" : "");
150 dprintf("TGT: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
152 NIPQUAD(arpinfo
->tmsk
.s_addr
),
153 NIPQUAD(arpinfo
->tgt
.s_addr
),
154 arpinfo
->invflags
& ARPT_INV_TGTIP
? " (INV)" : "");
158 /* Look for ifname matches. */
159 for (i
= 0, ret
= 0; i
< IFNAMSIZ
; i
++) {
160 ret
|= (indev
[i
] ^ arpinfo
->iniface
[i
])
161 & arpinfo
->iniface_mask
[i
];
164 if (FWINV(ret
!= 0, ARPT_INV_VIA_IN
)) {
165 dprintf("VIA in mismatch (%s vs %s).%s\n",
166 indev
, arpinfo
->iniface
,
167 arpinfo
->invflags
&ARPT_INV_VIA_IN
?" (INV)":"");
171 for (i
= 0, ret
= 0; i
< IFNAMSIZ
; i
++) {
172 ret
|= (outdev
[i
] ^ arpinfo
->outiface
[i
])
173 & arpinfo
->outiface_mask
[i
];
176 if (FWINV(ret
!= 0, ARPT_INV_VIA_OUT
)) {
177 dprintf("VIA out mismatch (%s vs %s).%s\n",
178 outdev
, arpinfo
->outiface
,
179 arpinfo
->invflags
&ARPT_INV_VIA_OUT
?" (INV)":"");
187 static inline int arp_checkentry(const struct arpt_arp
*arp
)
189 if (arp
->flags
& ~ARPT_F_MASK
) {
190 duprintf("Unknown flag bits set: %08X\n",
191 arp
->flags
& ~ARPT_F_MASK
);
194 if (arp
->invflags
& ~ARPT_INV_MASK
) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 arp
->invflags
& ~ARPT_INV_MASK
);
203 static unsigned int arpt_error(struct sk_buff
*skb
,
204 const struct net_device
*in
,
205 const struct net_device
*out
,
206 unsigned int hooknum
,
207 const struct xt_target
*target
,
208 const void *targinfo
)
211 printk("arp_tables: error: '%s'\n", (char *)targinfo
);
/* Return the rule entry located @offset bytes into the table blob. */
static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}
221 unsigned int arpt_do_table(struct sk_buff
*skb
,
223 const struct net_device
*in
,
224 const struct net_device
*out
,
225 struct xt_table
*table
)
227 static const char nulldevname
[IFNAMSIZ
];
228 unsigned int verdict
= NF_DROP
;
229 const struct arphdr
*arp
;
230 bool hotdrop
= false;
231 struct arpt_entry
*e
, *back
;
232 const char *indev
, *outdev
;
234 const struct xt_table_info
*private;
236 if (!pskb_may_pull(skb
, arp_hdr_len(skb
->dev
)))
239 indev
= in
? in
->name
: nulldevname
;
240 outdev
= out
? out
->name
: nulldevname
;
242 read_lock_bh(&table
->lock
);
243 private = table
->private;
244 table_base
= (void *)private->entries
[smp_processor_id()];
245 e
= get_entry(table_base
, private->hook_entry
[hook
]);
246 back
= get_entry(table_base
, private->underflow
[hook
]);
250 if (arp_packet_match(arp
, skb
->dev
, indev
, outdev
, &e
->arp
)) {
251 struct arpt_entry_target
*t
;
254 hdr_len
= sizeof(*arp
) + (2 * sizeof(struct in_addr
)) +
255 (2 * skb
->dev
->addr_len
);
256 ADD_COUNTER(e
->counters
, hdr_len
, 1);
258 t
= arpt_get_target(e
);
260 /* Standard target? */
261 if (!t
->u
.kernel
.target
->target
) {
264 v
= ((struct arpt_standard_target
*)t
)->verdict
;
266 /* Pop from stack? */
267 if (v
!= ARPT_RETURN
) {
268 verdict
= (unsigned)(-v
) - 1;
272 back
= get_entry(table_base
,
277 != (void *)e
+ e
->next_offset
) {
278 /* Save old back ptr in next entry */
279 struct arpt_entry
*next
280 = (void *)e
+ e
->next_offset
;
282 (void *)back
- table_base
;
284 /* set back pointer to next entry */
288 e
= get_entry(table_base
, v
);
290 /* Targets which reenter must return
293 verdict
= t
->u
.kernel
.target
->target(skb
,
299 /* Target might have changed stuff. */
302 if (verdict
== ARPT_CONTINUE
)
303 e
= (void *)e
+ e
->next_offset
;
309 e
= (void *)e
+ e
->next_offset
;
312 read_unlock_bh(&table
->lock
);
320 /* All zeroes == unconditional rule. */
321 static inline int unconditional(const struct arpt_arp
*arp
)
325 for (i
= 0; i
< sizeof(*arp
)/sizeof(__u32
); i
++)
326 if (((__u32
*)arp
)[i
])
332 /* Figures out from what hook each rule can be called: returns 0 if
333 * there are loops. Puts hook bitmask in comefrom.
335 static int mark_source_chains(struct xt_table_info
*newinfo
,
336 unsigned int valid_hooks
, void *entry0
)
340 /* No recursion; use packet counter to save back ptrs (reset
341 * to 0 as we leave), and comefrom to save source hook bitmask.
343 for (hook
= 0; hook
< NF_ARP_NUMHOOKS
; hook
++) {
344 unsigned int pos
= newinfo
->hook_entry
[hook
];
346 = (struct arpt_entry
*)(entry0
+ pos
);
348 if (!(valid_hooks
& (1 << hook
)))
351 /* Set initial back pointer. */
352 e
->counters
.pcnt
= pos
;
355 const struct arpt_standard_target
*t
356 = (void *)arpt_get_target(e
);
357 int visited
= e
->comefrom
& (1 << hook
);
359 if (e
->comefrom
& (1 << NF_ARP_NUMHOOKS
)) {
360 printk("arptables: loop hook %u pos %u %08X.\n",
361 hook
, pos
, e
->comefrom
);
365 |= ((1 << hook
) | (1 << NF_ARP_NUMHOOKS
));
367 /* Unconditional return/END. */
368 if ((e
->target_offset
== sizeof(struct arpt_entry
)
369 && (strcmp(t
->target
.u
.user
.name
,
370 ARPT_STANDARD_TARGET
) == 0)
372 && unconditional(&e
->arp
)) || visited
) {
373 unsigned int oldpos
, size
;
375 if (t
->verdict
< -NF_MAX_VERDICT
- 1) {
376 duprintf("mark_source_chains: bad "
377 "negative verdict (%i)\n",
382 /* Return: backtrack through the last
386 e
->comefrom
^= (1<<NF_ARP_NUMHOOKS
);
388 pos
= e
->counters
.pcnt
;
389 e
->counters
.pcnt
= 0;
391 /* We're at the start. */
395 e
= (struct arpt_entry
*)
397 } while (oldpos
== pos
+ e
->next_offset
);
400 size
= e
->next_offset
;
401 e
= (struct arpt_entry
*)
402 (entry0
+ pos
+ size
);
403 e
->counters
.pcnt
= pos
;
406 int newpos
= t
->verdict
;
408 if (strcmp(t
->target
.u
.user
.name
,
409 ARPT_STANDARD_TARGET
) == 0
411 if (newpos
> newinfo
->size
-
412 sizeof(struct arpt_entry
)) {
413 duprintf("mark_source_chains: "
414 "bad verdict (%i)\n",
419 /* This a jump; chase it. */
420 duprintf("Jump rule %u -> %u\n",
423 /* ... this is a fallthru */
424 newpos
= pos
+ e
->next_offset
;
426 e
= (struct arpt_entry
*)
428 e
->counters
.pcnt
= pos
;
433 duprintf("Finished chain %u\n", hook
);
438 static inline int check_entry(struct arpt_entry
*e
, const char *name
)
440 const struct arpt_entry_target
*t
;
442 if (!arp_checkentry(&e
->arp
)) {
443 duprintf("arp_tables: arp check failed %p %s.\n", e
, name
);
447 if (e
->target_offset
+ sizeof(struct arpt_entry_target
) > e
->next_offset
)
450 t
= arpt_get_target(e
);
451 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
457 static inline int check_target(struct arpt_entry
*e
, const char *name
)
459 struct arpt_entry_target
*t
;
460 struct xt_target
*target
;
463 t
= arpt_get_target(e
);
464 target
= t
->u
.kernel
.target
;
466 ret
= xt_check_target(target
, NF_ARP
, t
->u
.target_size
- sizeof(*t
),
467 name
, e
->comefrom
, 0, 0);
468 if (!ret
&& t
->u
.kernel
.target
->checkentry
469 && !t
->u
.kernel
.target
->checkentry(name
, e
, target
, t
->data
,
471 duprintf("arp_tables: check failed for `%s'.\n",
472 t
->u
.kernel
.target
->name
);
479 find_check_entry(struct arpt_entry
*e
, const char *name
, unsigned int size
,
482 struct arpt_entry_target
*t
;
483 struct xt_target
*target
;
486 ret
= check_entry(e
, name
);
490 t
= arpt_get_target(e
);
491 target
= try_then_request_module(xt_find_target(NF_ARP
, t
->u
.user
.name
,
493 "arpt_%s", t
->u
.user
.name
);
494 if (IS_ERR(target
) || !target
) {
495 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
496 ret
= target
? PTR_ERR(target
) : -ENOENT
;
499 t
->u
.kernel
.target
= target
;
501 ret
= check_target(e
, name
);
508 module_put(t
->u
.kernel
.target
->me
);
513 static inline int check_entry_size_and_hooks(struct arpt_entry
*e
,
514 struct xt_table_info
*newinfo
,
516 unsigned char *limit
,
517 const unsigned int *hook_entries
,
518 const unsigned int *underflows
,
523 if ((unsigned long)e
% __alignof__(struct arpt_entry
) != 0
524 || (unsigned char *)e
+ sizeof(struct arpt_entry
) >= limit
) {
525 duprintf("Bad offset %p\n", e
);
530 < sizeof(struct arpt_entry
) + sizeof(struct arpt_entry_target
)) {
531 duprintf("checking: element %p size %u\n",
536 /* Check hooks & underflows */
537 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
538 if ((unsigned char *)e
- base
== hook_entries
[h
])
539 newinfo
->hook_entry
[h
] = hook_entries
[h
];
540 if ((unsigned char *)e
- base
== underflows
[h
])
541 newinfo
->underflow
[h
] = underflows
[h
];
544 /* FIXME: underflows must be unconditional, standard verdicts
545 < 0 (not ARPT_RETURN). --RR */
547 /* Clear counters and comefrom */
548 e
->counters
= ((struct xt_counters
) { 0, 0 });
555 static inline int cleanup_entry(struct arpt_entry
*e
, unsigned int *i
)
557 struct arpt_entry_target
*t
;
559 if (i
&& (*i
)-- == 0)
562 t
= arpt_get_target(e
);
563 if (t
->u
.kernel
.target
->destroy
)
564 t
->u
.kernel
.target
->destroy(t
->u
.kernel
.target
, t
->data
);
565 module_put(t
->u
.kernel
.target
->me
);
569 /* Checks and translates the user-supplied table segment (held in
572 static int translate_table(const char *name
,
573 unsigned int valid_hooks
,
574 struct xt_table_info
*newinfo
,
578 const unsigned int *hook_entries
,
579 const unsigned int *underflows
)
584 newinfo
->size
= size
;
585 newinfo
->number
= number
;
587 /* Init all hooks to impossible value. */
588 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
589 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
590 newinfo
->underflow
[i
] = 0xFFFFFFFF;
593 duprintf("translate_table: size %u\n", newinfo
->size
);
596 /* Walk through entries, checking offsets. */
597 ret
= ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
598 check_entry_size_and_hooks
,
602 hook_entries
, underflows
, &i
);
603 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret
);
608 duprintf("translate_table: %u not %u entries\n",
613 /* Check hooks all assigned */
614 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
615 /* Only hooks which are valid */
616 if (!(valid_hooks
& (1 << i
)))
618 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
619 duprintf("Invalid hook entry %u %u\n",
623 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
624 duprintf("Invalid underflow %u %u\n",
630 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
)) {
631 duprintf("Looping hook\n");
635 /* Finally, each sanity check must pass */
637 ret
= ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
638 find_check_entry
, name
, size
, &i
);
641 ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
646 /* And one copy for every other CPU */
647 for_each_possible_cpu(i
) {
648 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
649 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
656 static inline int add_entry_to_counter(const struct arpt_entry
*e
,
657 struct xt_counters total
[],
660 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
666 static inline int set_entry_to_counter(const struct arpt_entry
*e
,
667 struct xt_counters total
[],
670 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
676 static void get_counters(const struct xt_table_info
*t
,
677 struct xt_counters counters
[])
683 /* Instead of clearing (by a previous call to memset())
684 * the counters and using adds, we set the counters
685 * with data used by 'current' CPU
686 * We dont care about preemption here.
688 curcpu
= raw_smp_processor_id();
691 ARPT_ENTRY_ITERATE(t
->entries
[curcpu
],
693 set_entry_to_counter
,
697 for_each_possible_cpu(cpu
) {
701 ARPT_ENTRY_ITERATE(t
->entries
[cpu
],
703 add_entry_to_counter
,
709 static inline struct xt_counters
*alloc_counters(struct xt_table
*table
)
711 unsigned int countersize
;
712 struct xt_counters
*counters
;
713 const struct xt_table_info
*private = table
->private;
715 /* We need atomic snapshot of counters: rest doesn't change
716 * (other than comefrom, which userspace doesn't care
719 countersize
= sizeof(struct xt_counters
) * private->number
;
720 counters
= vmalloc_node(countersize
, numa_node_id());
722 if (counters
== NULL
)
723 return ERR_PTR(-ENOMEM
);
725 /* First, sum counters... */
726 write_lock_bh(&table
->lock
);
727 get_counters(private, counters
);
728 write_unlock_bh(&table
->lock
);
733 static int copy_entries_to_user(unsigned int total_size
,
734 struct xt_table
*table
,
735 void __user
*userptr
)
737 unsigned int off
, num
;
738 struct arpt_entry
*e
;
739 struct xt_counters
*counters
;
740 struct xt_table_info
*private = table
->private;
744 counters
= alloc_counters(table
);
745 if (IS_ERR(counters
))
746 return PTR_ERR(counters
);
748 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
749 /* ... then copy entire thing ... */
750 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
755 /* FIXME: use iterator macros --RR */
756 /* ... then go back and fix counters and names */
757 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
758 struct arpt_entry_target
*t
;
760 e
= (struct arpt_entry
*)(loc_cpu_entry
+ off
);
761 if (copy_to_user(userptr
+ off
762 + offsetof(struct arpt_entry
, counters
),
764 sizeof(counters
[num
])) != 0) {
769 t
= arpt_get_target(e
);
770 if (copy_to_user(userptr
+ off
+ e
->target_offset
771 + offsetof(struct arpt_entry_target
,
773 t
->u
.kernel
.target
->name
,
774 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
786 static void compat_standard_from_user(void *dst
, void *src
)
788 int v
= *(compat_int_t
*)src
;
791 v
+= xt_compat_calc_jump(NF_ARP
, v
);
792 memcpy(dst
, &v
, sizeof(v
));
795 static int compat_standard_to_user(void __user
*dst
, void *src
)
797 compat_int_t cv
= *(int *)src
;
800 cv
-= xt_compat_calc_jump(NF_ARP
, cv
);
801 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
804 static int compat_calc_entry(struct arpt_entry
*e
,
805 const struct xt_table_info
*info
,
806 void *base
, struct xt_table_info
*newinfo
)
808 struct arpt_entry_target
*t
;
809 unsigned int entry_offset
;
812 off
= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
813 entry_offset
= (void *)e
- base
;
815 t
= arpt_get_target(e
);
816 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
817 newinfo
->size
-= off
;
818 ret
= xt_compat_add_offset(NF_ARP
, entry_offset
, off
);
822 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
823 if (info
->hook_entry
[i
] &&
824 (e
< (struct arpt_entry
*)(base
+ info
->hook_entry
[i
])))
825 newinfo
->hook_entry
[i
] -= off
;
826 if (info
->underflow
[i
] &&
827 (e
< (struct arpt_entry
*)(base
+ info
->underflow
[i
])))
828 newinfo
->underflow
[i
] -= off
;
833 static int compat_table_info(const struct xt_table_info
*info
,
834 struct xt_table_info
*newinfo
)
838 if (!newinfo
|| !info
)
841 /* we dont care about newinfo->entries[] */
842 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
843 newinfo
->initial_entries
= 0;
844 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
845 return ARPT_ENTRY_ITERATE(loc_cpu_entry
, info
->size
,
846 compat_calc_entry
, info
, loc_cpu_entry
,
851 static int get_info(struct net
*net
, void __user
*user
, int *len
, int compat
)
853 char name
[ARPT_TABLE_MAXNAMELEN
];
857 if (*len
!= sizeof(struct arpt_getinfo
)) {
858 duprintf("length %u != %Zu\n", *len
,
859 sizeof(struct arpt_getinfo
));
863 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
866 name
[ARPT_TABLE_MAXNAMELEN
-1] = '\0';
869 xt_compat_lock(NF_ARP
);
871 t
= try_then_request_module(xt_find_table_lock(net
, NF_ARP
, name
),
872 "arptable_%s", name
);
873 if (t
&& !IS_ERR(t
)) {
874 struct arpt_getinfo info
;
875 const struct xt_table_info
*private = t
->private;
879 struct xt_table_info tmp
;
880 ret
= compat_table_info(private, &tmp
);
881 xt_compat_flush_offsets(NF_ARP
);
885 info
.valid_hooks
= t
->valid_hooks
;
886 memcpy(info
.hook_entry
, private->hook_entry
,
887 sizeof(info
.hook_entry
));
888 memcpy(info
.underflow
, private->underflow
,
889 sizeof(info
.underflow
));
890 info
.num_entries
= private->number
;
891 info
.size
= private->size
;
892 strcpy(info
.name
, name
);
894 if (copy_to_user(user
, &info
, *len
) != 0)
901 ret
= t
? PTR_ERR(t
) : -ENOENT
;
904 xt_compat_unlock(NF_ARP
);
909 static int get_entries(struct net
*net
, struct arpt_get_entries __user
*uptr
,
913 struct arpt_get_entries get
;
916 if (*len
< sizeof(get
)) {
917 duprintf("get_entries: %u < %Zu\n", *len
, sizeof(get
));
920 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
922 if (*len
!= sizeof(struct arpt_get_entries
) + get
.size
) {
923 duprintf("get_entries: %u != %Zu\n", *len
,
924 sizeof(struct arpt_get_entries
) + get
.size
);
928 t
= xt_find_table_lock(net
, NF_ARP
, get
.name
);
929 if (t
&& !IS_ERR(t
)) {
930 const struct xt_table_info
*private = t
->private;
932 duprintf("t->private->number = %u\n",
934 if (get
.size
== private->size
)
935 ret
= copy_entries_to_user(private->size
,
936 t
, uptr
->entrytable
);
938 duprintf("get_entries: I've got %u not %u!\n",
939 private->size
, get
.size
);
945 ret
= t
? PTR_ERR(t
) : -ENOENT
;
950 static int __do_replace(struct net
*net
, const char *name
,
951 unsigned int valid_hooks
,
952 struct xt_table_info
*newinfo
,
953 unsigned int num_counters
,
954 void __user
*counters_ptr
)
958 struct xt_table_info
*oldinfo
;
959 struct xt_counters
*counters
;
960 void *loc_cpu_old_entry
;
963 counters
= vmalloc_node(num_counters
* sizeof(struct xt_counters
),
970 t
= try_then_request_module(xt_find_table_lock(net
, NF_ARP
, name
),
971 "arptable_%s", name
);
972 if (!t
|| IS_ERR(t
)) {
973 ret
= t
? PTR_ERR(t
) : -ENOENT
;
974 goto free_newinfo_counters_untrans
;
978 if (valid_hooks
!= t
->valid_hooks
) {
979 duprintf("Valid hook crap: %08X vs %08X\n",
980 valid_hooks
, t
->valid_hooks
);
985 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
989 /* Update module usage count based on number of rules */
990 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
991 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
992 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
993 (newinfo
->number
<= oldinfo
->initial_entries
))
995 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
996 (newinfo
->number
<= oldinfo
->initial_entries
))
999 /* Get the old counters. */
1000 get_counters(oldinfo
, counters
);
1001 /* Decrease module usage counts and free resource */
1002 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1003 ARPT_ENTRY_ITERATE(loc_cpu_old_entry
, oldinfo
->size
, cleanup_entry
,
1006 xt_free_table_info(oldinfo
);
1007 if (copy_to_user(counters_ptr
, counters
,
1008 sizeof(struct xt_counters
) * num_counters
) != 0)
1017 free_newinfo_counters_untrans
:
1023 static int do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1026 struct arpt_replace tmp
;
1027 struct xt_table_info
*newinfo
;
1028 void *loc_cpu_entry
;
1030 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1033 /* overflow check */
1034 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1037 newinfo
= xt_alloc_table_info(tmp
.size
);
1041 /* choose the copy that is on our node/cpu */
1042 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1043 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1049 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
1050 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1051 tmp
.hook_entry
, tmp
.underflow
);
1055 duprintf("arp_tables: Translated table\n");
1057 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1058 tmp
.num_counters
, tmp
.counters
);
1060 goto free_newinfo_untrans
;
1063 free_newinfo_untrans
:
1064 ARPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1066 xt_free_table_info(newinfo
);
1070 /* We're lazy, and add to the first CPU; overflow works its fey magic
1071 * and everything is OK.
1073 static inline int add_counter_to_entry(struct arpt_entry
*e
,
1074 const struct xt_counters addme
[],
1078 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1084 static int do_add_counters(struct net
*net
, void __user
*user
, unsigned int len
,
1088 struct xt_counters_info tmp
;
1089 struct xt_counters
*paddc
;
1090 unsigned int num_counters
;
1095 const struct xt_table_info
*private;
1097 void *loc_cpu_entry
;
1098 #ifdef CONFIG_COMPAT
1099 struct compat_xt_counters_info compat_tmp
;
1103 size
= sizeof(struct compat_xt_counters_info
);
1108 size
= sizeof(struct xt_counters_info
);
1111 if (copy_from_user(ptmp
, user
, size
) != 0)
1114 #ifdef CONFIG_COMPAT
1116 num_counters
= compat_tmp
.num_counters
;
1117 name
= compat_tmp
.name
;
1121 num_counters
= tmp
.num_counters
;
1125 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1128 paddc
= vmalloc_node(len
- size
, numa_node_id());
1132 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1137 t
= xt_find_table_lock(net
, NF_ARP
, name
);
1138 if (!t
|| IS_ERR(t
)) {
1139 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1143 write_lock_bh(&t
->lock
);
1144 private = t
->private;
1145 if (private->number
!= num_counters
) {
1147 goto unlock_up_free
;
1151 /* Choose the copy that is on our node */
1152 loc_cpu_entry
= private->entries
[smp_processor_id()];
1153 ARPT_ENTRY_ITERATE(loc_cpu_entry
,
1155 add_counter_to_entry
,
1159 write_unlock_bh(&t
->lock
);
1168 #ifdef CONFIG_COMPAT
1170 compat_release_entry(struct compat_arpt_entry
*e
, unsigned int *i
)
1172 struct arpt_entry_target
*t
;
1174 if (i
&& (*i
)-- == 0)
1177 t
= compat_arpt_get_target(e
);
1178 module_put(t
->u
.kernel
.target
->me
);
1183 check_compat_entry_size_and_hooks(struct compat_arpt_entry
*e
,
1184 struct xt_table_info
*newinfo
,
1186 unsigned char *base
,
1187 unsigned char *limit
,
1188 unsigned int *hook_entries
,
1189 unsigned int *underflows
,
1193 struct arpt_entry_target
*t
;
1194 struct xt_target
*target
;
1195 unsigned int entry_offset
;
1198 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1199 if ((unsigned long)e
% __alignof__(struct compat_arpt_entry
) != 0
1200 || (unsigned char *)e
+ sizeof(struct compat_arpt_entry
) >= limit
) {
1201 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1205 if (e
->next_offset
< sizeof(struct compat_arpt_entry
) +
1206 sizeof(struct compat_xt_entry_target
)) {
1207 duprintf("checking: element %p size %u\n",
1212 /* For purposes of check_entry casting the compat entry is fine */
1213 ret
= check_entry((struct arpt_entry
*)e
, name
);
1217 off
= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1218 entry_offset
= (void *)e
- (void *)base
;
1220 t
= compat_arpt_get_target(e
);
1221 target
= try_then_request_module(xt_find_target(NF_ARP
,
1223 t
->u
.user
.revision
),
1224 "arpt_%s", t
->u
.user
.name
);
1225 if (IS_ERR(target
) || !target
) {
1226 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1228 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1231 t
->u
.kernel
.target
= target
;
1233 off
+= xt_compat_target_offset(target
);
1235 ret
= xt_compat_add_offset(NF_ARP
, entry_offset
, off
);
1237 goto release_target
;
1239 /* Check hooks & underflows */
1240 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
1241 if ((unsigned char *)e
- base
== hook_entries
[h
])
1242 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1243 if ((unsigned char *)e
- base
== underflows
[h
])
1244 newinfo
->underflow
[h
] = underflows
[h
];
1247 /* Clear counters and comefrom */
1248 memset(&e
->counters
, 0, sizeof(e
->counters
));
1255 module_put(t
->u
.kernel
.target
->me
);
1261 compat_copy_entry_from_user(struct compat_arpt_entry
*e
, void **dstptr
,
1262 unsigned int *size
, const char *name
,
1263 struct xt_table_info
*newinfo
, unsigned char *base
)
1265 struct arpt_entry_target
*t
;
1266 struct xt_target
*target
;
1267 struct arpt_entry
*de
;
1268 unsigned int origsize
;
1273 de
= (struct arpt_entry
*)*dstptr
;
1274 memcpy(de
, e
, sizeof(struct arpt_entry
));
1275 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1277 *dstptr
+= sizeof(struct arpt_entry
);
1278 *size
+= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1280 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1281 t
= compat_arpt_get_target(e
);
1282 target
= t
->u
.kernel
.target
;
1283 xt_compat_target_from_user(t
, dstptr
, size
);
1285 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1286 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
1287 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1288 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1289 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1290 newinfo
->underflow
[h
] -= origsize
- *size
;
1295 static inline int compat_check_entry(struct arpt_entry
*e
, const char *name
,
1300 ret
= check_target(e
, name
);
1308 static int translate_compat_table(const char *name
,
1309 unsigned int valid_hooks
,
1310 struct xt_table_info
**pinfo
,
1312 unsigned int total_size
,
1313 unsigned int number
,
1314 unsigned int *hook_entries
,
1315 unsigned int *underflows
)
1318 struct xt_table_info
*newinfo
, *info
;
1319 void *pos
, *entry0
, *entry1
;
1326 info
->number
= number
;
1328 /* Init all hooks to impossible value. */
1329 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1330 info
->hook_entry
[i
] = 0xFFFFFFFF;
1331 info
->underflow
[i
] = 0xFFFFFFFF;
1334 duprintf("translate_compat_table: size %u\n", info
->size
);
1336 xt_compat_lock(NF_ARP
);
1337 /* Walk through entries, checking offsets. */
1338 ret
= COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
,
1339 check_compat_entry_size_and_hooks
,
1340 info
, &size
, entry0
,
1341 entry0
+ total_size
,
1342 hook_entries
, underflows
, &j
, name
);
1348 duprintf("translate_compat_table: %u not %u entries\n",
1353 /* Check hooks all assigned */
1354 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1355 /* Only hooks which are valid */
1356 if (!(valid_hooks
& (1 << i
)))
1358 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1359 duprintf("Invalid hook entry %u %u\n",
1360 i
, hook_entries
[i
]);
1363 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1364 duprintf("Invalid underflow %u %u\n",
1371 newinfo
= xt_alloc_table_info(size
);
1375 newinfo
->number
= number
;
1376 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1377 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1378 newinfo
->underflow
[i
] = info
->underflow
[i
];
1380 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1383 ret
= COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
,
1384 compat_copy_entry_from_user
,
1385 &pos
, &size
, name
, newinfo
, entry1
);
1386 xt_compat_flush_offsets(NF_ARP
);
1387 xt_compat_unlock(NF_ARP
);
1392 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1396 ret
= ARPT_ENTRY_ITERATE(entry1
, newinfo
->size
, compat_check_entry
,
1400 COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0
, newinfo
->size
, i
,
1401 compat_release_entry
, &j
);
1402 ARPT_ENTRY_ITERATE(entry1
, newinfo
->size
, cleanup_entry
, &i
);
1403 xt_free_table_info(newinfo
);
1407 /* And one copy for every other CPU */
1408 for_each_possible_cpu(i
)
1409 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1410 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1414 xt_free_table_info(info
);
1418 xt_free_table_info(newinfo
);
1420 COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
, compat_release_entry
, &j
);
1423 xt_compat_flush_offsets(NF_ARP
);
1424 xt_compat_unlock(NF_ARP
);
1428 struct compat_arpt_replace
{
1429 char name
[ARPT_TABLE_MAXNAMELEN
];
1433 u32 hook_entry
[NF_ARP_NUMHOOKS
];
1434 u32 underflow
[NF_ARP_NUMHOOKS
];
1436 compat_uptr_t counters
;
1437 struct compat_arpt_entry entries
[0];
1440 static int compat_do_replace(struct net
*net
, void __user
*user
,
1444 struct compat_arpt_replace tmp
;
1445 struct xt_table_info
*newinfo
;
1446 void *loc_cpu_entry
;
1448 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1451 /* overflow check */
1452 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1454 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1457 newinfo
= xt_alloc_table_info(tmp
.size
);
1461 /* choose the copy that is on our node/cpu */
1462 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1463 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
), tmp
.size
) != 0) {
1468 ret
= translate_compat_table(tmp
.name
, tmp
.valid_hooks
,
1469 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1470 tmp
.num_entries
, tmp
.hook_entry
,
1475 duprintf("compat_do_replace: Translated table\n");
1477 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1478 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1480 goto free_newinfo_untrans
;
1483 free_newinfo_untrans
:
1484 ARPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1486 xt_free_table_info(newinfo
);
1490 static int compat_do_arpt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1495 if (!capable(CAP_NET_ADMIN
))
1499 case ARPT_SO_SET_REPLACE
:
1500 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1503 case ARPT_SO_SET_ADD_COUNTERS
:
1504 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1508 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd
);
1515 static int compat_copy_entry_to_user(struct arpt_entry
*e
, void __user
**dstptr
,
1516 compat_uint_t
*size
,
1517 struct xt_counters
*counters
,
1520 struct arpt_entry_target
*t
;
1521 struct compat_arpt_entry __user
*ce
;
1522 u_int16_t target_offset
, next_offset
;
1523 compat_uint_t origsize
;
1528 ce
= (struct compat_arpt_entry __user
*)*dstptr
;
1529 if (copy_to_user(ce
, e
, sizeof(struct arpt_entry
)))
1532 if (copy_to_user(&ce
->counters
, &counters
[*i
], sizeof(counters
[*i
])))
1535 *dstptr
+= sizeof(struct compat_arpt_entry
);
1536 *size
-= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1538 target_offset
= e
->target_offset
- (origsize
- *size
);
1540 t
= arpt_get_target(e
);
1541 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1545 next_offset
= e
->next_offset
- (origsize
- *size
);
1546 if (put_user(target_offset
, &ce
->target_offset
))
1548 if (put_user(next_offset
, &ce
->next_offset
))
1557 static int compat_copy_entries_to_user(unsigned int total_size
,
1558 struct xt_table
*table
,
1559 void __user
*userptr
)
1561 struct xt_counters
*counters
;
1562 const struct xt_table_info
*private = table
->private;
1566 void *loc_cpu_entry
;
1569 counters
= alloc_counters(table
);
1570 if (IS_ERR(counters
))
1571 return PTR_ERR(counters
);
1573 /* choose the copy on our node/cpu */
1574 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1577 ret
= ARPT_ENTRY_ITERATE(loc_cpu_entry
, total_size
,
1578 compat_copy_entry_to_user
,
1579 &pos
, &size
, counters
, &i
);
1584 struct compat_arpt_get_entries
{
1585 char name
[ARPT_TABLE_MAXNAMELEN
];
1587 struct compat_arpt_entry entrytable
[0];
1590 static int compat_get_entries(struct net
*net
,
1591 struct compat_arpt_get_entries __user
*uptr
,
1595 struct compat_arpt_get_entries get
;
1598 if (*len
< sizeof(get
)) {
1599 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1602 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1604 if (*len
!= sizeof(struct compat_arpt_get_entries
) + get
.size
) {
1605 duprintf("compat_get_entries: %u != %zu\n",
1606 *len
, sizeof(get
) + get
.size
);
1610 xt_compat_lock(NF_ARP
);
1611 t
= xt_find_table_lock(net
, NF_ARP
, get
.name
);
1612 if (t
&& !IS_ERR(t
)) {
1613 const struct xt_table_info
*private = t
->private;
1614 struct xt_table_info info
;
1616 duprintf("t->private->number = %u\n", private->number
);
1617 ret
= compat_table_info(private, &info
);
1618 if (!ret
&& get
.size
== info
.size
) {
1619 ret
= compat_copy_entries_to_user(private->size
,
1620 t
, uptr
->entrytable
);
1622 duprintf("compat_get_entries: I've got %u not %u!\n",
1623 private->size
, get
.size
);
1626 xt_compat_flush_offsets(NF_ARP
);
1630 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1632 xt_compat_unlock(NF_ARP
);
1636 static int do_arpt_get_ctl(struct sock
*, int, void __user
*, int *);
1638 static int compat_do_arpt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1643 if (!capable(CAP_NET_ADMIN
))
1647 case ARPT_SO_GET_INFO
:
1648 ret
= get_info(sock_net(sk
), user
, len
, 1);
1650 case ARPT_SO_GET_ENTRIES
:
1651 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1654 ret
= do_arpt_get_ctl(sk
, cmd
, user
, len
);
1660 static int do_arpt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
1664 if (!capable(CAP_NET_ADMIN
))
1668 case ARPT_SO_SET_REPLACE
:
1669 ret
= do_replace(sock_net(sk
), user
, len
);
1672 case ARPT_SO_SET_ADD_COUNTERS
:
1673 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
1677 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd
);
1684 static int do_arpt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1688 if (!capable(CAP_NET_ADMIN
))
1692 case ARPT_SO_GET_INFO
:
1693 ret
= get_info(sock_net(sk
), user
, len
, 0);
1696 case ARPT_SO_GET_ENTRIES
:
1697 ret
= get_entries(sock_net(sk
), user
, len
);
1700 case ARPT_SO_GET_REVISION_TARGET
: {
1701 struct xt_get_revision rev
;
1703 if (*len
!= sizeof(rev
)) {
1707 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
1712 try_then_request_module(xt_find_revision(NF_ARP
, rev
.name
,
1713 rev
.revision
, 1, &ret
),
1714 "arpt_%s", rev
.name
);
1719 duprintf("do_arpt_get_ctl: unknown request %i\n", cmd
);
1726 struct xt_table
*arpt_register_table(struct net
*net
, struct xt_table
*table
,
1727 const struct arpt_replace
*repl
)
1730 struct xt_table_info
*newinfo
;
1731 struct xt_table_info bootstrap
1732 = { 0, 0, 0, { 0 }, { 0 }, { } };
1733 void *loc_cpu_entry
;
1734 struct xt_table
*new_table
;
1736 newinfo
= xt_alloc_table_info(repl
->size
);
1742 /* choose the copy on our node/cpu */
1743 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1744 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
1746 ret
= translate_table(table
->name
, table
->valid_hooks
,
1747 newinfo
, loc_cpu_entry
, repl
->size
,
1752 duprintf("arpt_register_table: translate table gives %d\n", ret
);
1756 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
1757 if (IS_ERR(new_table
)) {
1758 ret
= PTR_ERR(new_table
);
1764 xt_free_table_info(newinfo
);
1766 return ERR_PTR(ret
);
1769 void arpt_unregister_table(struct xt_table
*table
)
1771 struct xt_table_info
*private;
1772 void *loc_cpu_entry
;
1773 struct module
*table_owner
= table
->me
;
1775 private = xt_unregister_table(table
);
1777 /* Decrease module usage counts and free resources */
1778 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1779 ARPT_ENTRY_ITERATE(loc_cpu_entry
, private->size
,
1780 cleanup_entry
, NULL
);
1781 if (private->number
> private->initial_entries
)
1782 module_put(table_owner
);
1783 xt_free_table_info(private);
1786 /* The built-in targets: standard (NULL) and error. */
1787 static struct xt_target arpt_standard_target __read_mostly
= {
1788 .name
= ARPT_STANDARD_TARGET
,
1789 .targetsize
= sizeof(int),
1791 #ifdef CONFIG_COMPAT
1792 .compatsize
= sizeof(compat_int_t
),
1793 .compat_from_user
= compat_standard_from_user
,
1794 .compat_to_user
= compat_standard_to_user
,
1798 static struct xt_target arpt_error_target __read_mostly
= {
1799 .name
= ARPT_ERROR_TARGET
,
1800 .target
= arpt_error
,
1801 .targetsize
= ARPT_FUNCTION_MAXNAMELEN
,
1805 static struct nf_sockopt_ops arpt_sockopts
= {
1807 .set_optmin
= ARPT_BASE_CTL
,
1808 .set_optmax
= ARPT_SO_SET_MAX
+1,
1809 .set
= do_arpt_set_ctl
,
1810 #ifdef CONFIG_COMPAT
1811 .compat_set
= compat_do_arpt_set_ctl
,
1813 .get_optmin
= ARPT_BASE_CTL
,
1814 .get_optmax
= ARPT_SO_GET_MAX
+1,
1815 .get
= do_arpt_get_ctl
,
1816 #ifdef CONFIG_COMPAT
1817 .compat_get
= compat_do_arpt_get_ctl
,
1819 .owner
= THIS_MODULE
,
1822 static int __net_init
arp_tables_net_init(struct net
*net
)
1824 return xt_proto_init(net
, NF_ARP
);
1827 static void __net_exit
arp_tables_net_exit(struct net
*net
)
1829 xt_proto_fini(net
, NF_ARP
);
1832 static struct pernet_operations arp_tables_net_ops
= {
1833 .init
= arp_tables_net_init
,
1834 .exit
= arp_tables_net_exit
,
1837 static int __init
arp_tables_init(void)
1841 ret
= register_pernet_subsys(&arp_tables_net_ops
);
1845 /* Noone else will be downing sem now, so we won't sleep */
1846 ret
= xt_register_target(&arpt_standard_target
);
1849 ret
= xt_register_target(&arpt_error_target
);
1853 /* Register setsockopt */
1854 ret
= nf_register_sockopt(&arpt_sockopts
);
1858 printk(KERN_INFO
"arp_tables: (C) 2002 David S. Miller\n");
1862 xt_unregister_target(&arpt_error_target
);
1864 xt_unregister_target(&arpt_standard_target
);
1866 unregister_pernet_subsys(&arp_tables_net_ops
);
1871 static void __exit
arp_tables_fini(void)
1873 nf_unregister_sockopt(&arpt_sockopts
);
1874 xt_unregister_target(&arpt_error_target
);
1875 xt_unregister_target(&arpt_standard_target
);
1876 unregister_pernet_subsys(&arp_tables_net_ops
);
/* Entry points used by the per-table modules (e.g. arptable_filter). */
EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);