/*
 *	Generic address resolution entity
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
static const struct file_operations neigh_stat_seq_fops;
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans of and updates to hash buckets MUST be done under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     That would result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state. However, the same lock is also used to protect other
   entry fields.

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow there is dev->hard_header.
   dev->hard_header is assumed to be simple and to make no callbacks
   into the neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting the
   list of neighbour tables. This list is used only in process context.
 */
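/*
 * A minimal illustrative sketch (not part of the original file) of the
 * pattern the rules above imply: neigh_lookup() takes tbl->lock internally
 * and returns with a reference held, so any slow work happens only after
 * the table lock has been dropped.  do_slow_work() is a hypothetical helper.
 */
#if 0
static void example_use(struct neigh_table *tbl, const void *key,
			struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, key, dev);

	if (!n)
		return;
	do_slow_work(n);	/* hypothetical; runs without tbl->lock held */
	neigh_release(n);
}
#endif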
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * This is a random value uniformly distributed in the interval
 * (1/2)*base ... (3/2)*base. It corresponds to the default IPv6 settings
 * and is not overridable, because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
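/*
 * Worked example (illustrative, not in the original source): with
 * base = 30 * HZ the value above is uniform in [15 * HZ, 45 * HZ),
 * i.e. (1/2)*base ... (3/2)*base as described in the comment.
 */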
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}
static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    (net == n->dev->nd_net)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
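	/*
	 * Illustrative sketch (not in the original source): the fold above,
	 * written as a hypothetical standalone helper, maps the last four
	 * key bytes to a bucket index in 0..PNEIGH_HASHMASK:
	 *
	 *	static u32 pneigh_hash_example(const void *pkey, int key_len)
	 *	{
	 *		u32 h = *(u32 *)(pkey + key_len - 4);
	 *
	 *		h ^= h >> 16;
	 *		h ^= h >> 8;
	 *		h ^= h >> 4;
	 *		return h & PNEIGH_HASHMASK;
	 *	}
	 */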
	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->net == net) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->net = hold_net(net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    (n->net == net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(n->net);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */
	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very thin place.  report_unreachable is a very
		   complicated routine; in particular, it can hit the same
		   neighbour entry!

		   So we try to be accurate here and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache *, const struct net_device *, const unsigned char *)
		= neigh->dev->header_ops->cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if one is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows the existing lladdr to be overridden
				if the new one differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if the new one differs.
				It also allows the current state to be
				retained if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows the existing NTF_ROUTER
				flag to be overridden.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   The caller MUST hold a reference count on the entry.
 */
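/*
 * Illustrative example (not part of the original source): a protocol that
 * has just validated a link-layer address for an entry it holds a reference
 * on might call, roughly,
 *
 *	neigh_update(neigh, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE);
 *
 * the exact state and flag combination is protocol specific (see the ARP
 * and NDISC callers).
 */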
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare the new lladdr with the cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address is not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */
		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
1095 static void neigh_hh_init(struct neighbour
*n
, struct dst_entry
*dst
,
1098 struct hh_cache
*hh
;
1099 struct net_device
*dev
= dst
->dev
;
1101 for (hh
= n
->hh
; hh
; hh
= hh
->hh_next
)
1102 if (hh
->hh_type
== protocol
)
1105 if (!hh
&& (hh
= kzalloc(sizeof(*hh
), GFP_ATOMIC
)) != NULL
) {
1106 seqlock_init(&hh
->hh_lock
);
1107 hh
->hh_type
= protocol
;
1108 atomic_set(&hh
->hh_refcnt
, 0);
1111 if (dev
->header_ops
->cache(n
, hh
)) {
1115 atomic_inc(&hh
->hh_refcnt
);
1116 hh
->hh_next
= n
->hh
;
1118 if (n
->nud_state
& NUD_CONNECTED
)
1119 hh
->hh_output
= n
->ops
->hh_output
;
1121 hh
->hh_output
= n
->ops
->output
;
1125 atomic_inc(&hh
->hh_refcnt
);
1130 /* This function can be used in contexts, where only old dev_queue_xmit
1131 worked, f.e. if you want to override normal output path (eql, shaper),
1132 but resolution is not made yet.
1135 int neigh_compat_output(struct sk_buff
*skb
)
1137 struct net_device
*dev
= skb
->dev
;
1139 __skb_pull(skb
, skb_network_offset(skb
));
1141 if (dev_hard_header(skb
, dev
, ntohs(skb
->protocol
), NULL
, NULL
,
1143 dev
->header_ops
->rebuild(skb
))
1146 return dev_queue_xmit(skb
);
1149 /* Slow and careful. */
1151 int neigh_resolve_output(struct sk_buff
*skb
)
1153 struct dst_entry
*dst
= skb
->dst
;
1154 struct neighbour
*neigh
;
1157 if (!dst
|| !(neigh
= dst
->neighbour
))
1160 __skb_pull(skb
, skb_network_offset(skb
));
1162 if (!neigh_event_send(neigh
, skb
)) {
1164 struct net_device
*dev
= neigh
->dev
;
1165 if (dev
->header_ops
->cache
&& !dst
->hh
) {
1166 write_lock_bh(&neigh
->lock
);
1168 neigh_hh_init(neigh
, dst
, dst
->ops
->protocol
);
1169 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
1170 neigh
->ha
, NULL
, skb
->len
);
1171 write_unlock_bh(&neigh
->lock
);
1173 read_lock_bh(&neigh
->lock
);
1174 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
1175 neigh
->ha
, NULL
, skb
->len
);
1176 read_unlock_bh(&neigh
->lock
);
1179 rc
= neigh
->ops
->queue_xmit(skb
);
1186 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1187 dst
, dst
? dst
->neighbour
: NULL
);
1194 /* As fast as possible without hh cache */
1196 int neigh_connected_output(struct sk_buff
*skb
)
1199 struct dst_entry
*dst
= skb
->dst
;
1200 struct neighbour
*neigh
= dst
->neighbour
;
1201 struct net_device
*dev
= neigh
->dev
;
1203 __skb_pull(skb
, skb_network_offset(skb
));
1205 read_lock_bh(&neigh
->lock
);
1206 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
1207 neigh
->ha
, NULL
, skb
->len
);
1208 read_unlock_bh(&neigh
->lock
);
1210 err
= neigh
->ops
->queue_xmit(skb
);
1218 static void neigh_proxy_process(unsigned long arg
)
1220 struct neigh_table
*tbl
= (struct neigh_table
*)arg
;
1221 long sched_next
= 0;
1222 unsigned long now
= jiffies
;
1223 struct sk_buff
*skb
;
1225 spin_lock(&tbl
->proxy_queue
.lock
);
1227 skb
= tbl
->proxy_queue
.next
;
1229 while (skb
!= (struct sk_buff
*)&tbl
->proxy_queue
) {
1230 struct sk_buff
*back
= skb
;
1231 long tdif
= NEIGH_CB(back
)->sched_next
- now
;
1235 struct net_device
*dev
= back
->dev
;
1236 __skb_unlink(back
, &tbl
->proxy_queue
);
1237 if (tbl
->proxy_redo
&& netif_running(dev
))
1238 tbl
->proxy_redo(back
);
1243 } else if (!sched_next
|| tdif
< sched_next
)
1246 del_timer(&tbl
->proxy_timer
);
1248 mod_timer(&tbl
->proxy_timer
, jiffies
+ sched_next
);
1249 spin_unlock(&tbl
->proxy_queue
.lock
);
1252 void pneigh_enqueue(struct neigh_table
*tbl
, struct neigh_parms
*p
,
1253 struct sk_buff
*skb
)
1255 unsigned long now
= jiffies
;
1256 unsigned long sched_next
= now
+ (net_random() % p
->proxy_delay
);
1258 if (tbl
->proxy_queue
.qlen
> p
->proxy_qlen
) {
1263 NEIGH_CB(skb
)->sched_next
= sched_next
;
1264 NEIGH_CB(skb
)->flags
|= LOCALLY_ENQUEUED
;
1266 spin_lock(&tbl
->proxy_queue
.lock
);
1267 if (del_timer(&tbl
->proxy_timer
)) {
1268 if (time_before(tbl
->proxy_timer
.expires
, sched_next
))
1269 sched_next
= tbl
->proxy_timer
.expires
;
1271 dst_release(skb
->dst
);
1274 __skb_queue_tail(&tbl
->proxy_queue
, skb
);
1275 mod_timer(&tbl
->proxy_timer
, sched_next
);
1276 spin_unlock(&tbl
->proxy_queue
.lock
);
1279 static inline struct neigh_parms
*lookup_neigh_params(struct neigh_table
*tbl
,
1280 struct net
*net
, int ifindex
)
1282 struct neigh_parms
*p
;
1284 for (p
= &tbl
->parms
; p
; p
= p
->next
) {
1287 if ((p
->dev
&& p
->dev
->ifindex
== ifindex
) ||
1288 (!p
->dev
&& !ifindex
))
1295 struct neigh_parms
*neigh_parms_alloc(struct net_device
*dev
,
1296 struct neigh_table
*tbl
)
1298 struct neigh_parms
*p
, *ref
;
1302 ref
= lookup_neigh_params(tbl
, net
, 0);
1306 p
= kmemdup(ref
, sizeof(*p
), GFP_KERNEL
);
1309 atomic_set(&p
->refcnt
, 1);
1310 INIT_RCU_HEAD(&p
->rcu_head
);
1312 neigh_rand_reach_time(p
->base_reachable_time
);
1314 if (dev
->neigh_setup
&& dev
->neigh_setup(dev
, p
)) {
1321 p
->net
= hold_net(net
);
1322 p
->sysctl_table
= NULL
;
1323 write_lock_bh(&tbl
->lock
);
1324 p
->next
= tbl
->parms
.next
;
1325 tbl
->parms
.next
= p
;
1326 write_unlock_bh(&tbl
->lock
);
1331 static void neigh_rcu_free_parms(struct rcu_head
*head
)
1333 struct neigh_parms
*parms
=
1334 container_of(head
, struct neigh_parms
, rcu_head
);
1336 neigh_parms_put(parms
);
1339 void neigh_parms_release(struct neigh_table
*tbl
, struct neigh_parms
*parms
)
1341 struct neigh_parms
**p
;
1343 if (!parms
|| parms
== &tbl
->parms
)
1345 write_lock_bh(&tbl
->lock
);
1346 for (p
= &tbl
->parms
.next
; *p
; p
= &(*p
)->next
) {
1350 write_unlock_bh(&tbl
->lock
);
1352 dev_put(parms
->dev
);
1353 call_rcu(&parms
->rcu_head
, neigh_rcu_free_parms
);
1357 write_unlock_bh(&tbl
->lock
);
1358 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1361 static void neigh_parms_destroy(struct neigh_parms
*parms
)
1363 release_net(parms
->net
);
1367 static struct lock_class_key neigh_table_proxy_queue_class
;
1369 void neigh_table_init_no_netlink(struct neigh_table
*tbl
)
1371 unsigned long now
= jiffies
;
1372 unsigned long phsize
;
1374 tbl
->parms
.net
= &init_net
;
1375 atomic_set(&tbl
->parms
.refcnt
, 1);
1376 INIT_RCU_HEAD(&tbl
->parms
.rcu_head
);
1377 tbl
->parms
.reachable_time
=
1378 neigh_rand_reach_time(tbl
->parms
.base_reachable_time
);
1380 if (!tbl
->kmem_cachep
)
1382 kmem_cache_create(tbl
->id
, tbl
->entry_size
, 0,
1383 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
1385 tbl
->stats
= alloc_percpu(struct neigh_statistics
);
1387 panic("cannot create neighbour cache statistics");
1389 #ifdef CONFIG_PROC_FS
1390 tbl
->pde
= create_proc_entry(tbl
->id
, 0, init_net
.proc_net_stat
);
1392 panic("cannot create neighbour proc dir entry");
1393 tbl
->pde
->proc_fops
= &neigh_stat_seq_fops
;
1394 tbl
->pde
->data
= tbl
;
1398 tbl
->hash_buckets
= neigh_hash_alloc(tbl
->hash_mask
+ 1);
1400 phsize
= (PNEIGH_HASHMASK
+ 1) * sizeof(struct pneigh_entry
*);
1401 tbl
->phash_buckets
= kzalloc(phsize
, GFP_KERNEL
);
1403 if (!tbl
->hash_buckets
|| !tbl
->phash_buckets
)
1404 panic("cannot allocate neighbour cache hashes");
1406 get_random_bytes(&tbl
->hash_rnd
, sizeof(tbl
->hash_rnd
));
1408 rwlock_init(&tbl
->lock
);
1409 setup_timer(&tbl
->gc_timer
, neigh_periodic_timer
, (unsigned long)tbl
);
1410 tbl
->gc_timer
.expires
= now
+ 1;
1411 add_timer(&tbl
->gc_timer
);
1413 setup_timer(&tbl
->proxy_timer
, neigh_proxy_process
, (unsigned long)tbl
);
1414 skb_queue_head_init_class(&tbl
->proxy_queue
,
1415 &neigh_table_proxy_queue_class
);
1417 tbl
->last_flush
= now
;
1418 tbl
->last_rand
= now
+ tbl
->parms
.reachable_time
* 20;
1421 void neigh_table_init(struct neigh_table
*tbl
)
1423 struct neigh_table
*tmp
;
1425 neigh_table_init_no_netlink(tbl
);
1426 write_lock(&neigh_tbl_lock
);
1427 for (tmp
= neigh_tables
; tmp
; tmp
= tmp
->next
) {
1428 if (tmp
->family
== tbl
->family
)
1431 tbl
->next
= neigh_tables
;
1433 write_unlock(&neigh_tbl_lock
);
1435 if (unlikely(tmp
)) {
1436 printk(KERN_ERR
"NEIGH: Registering multiple tables for "
1437 "family %d\n", tbl
->family
);
1442 int neigh_table_clear(struct neigh_table
*tbl
)
1444 struct neigh_table
**tp
;
1446 /* It is not clean... Fix it to unload IPv6 module safely */
1447 del_timer_sync(&tbl
->gc_timer
);
1448 del_timer_sync(&tbl
->proxy_timer
);
1449 pneigh_queue_purge(&tbl
->proxy_queue
);
1450 neigh_ifdown(tbl
, NULL
);
1451 if (atomic_read(&tbl
->entries
))
1452 printk(KERN_CRIT
"neighbour leakage\n");
1453 write_lock(&neigh_tbl_lock
);
1454 for (tp
= &neigh_tables
; *tp
; tp
= &(*tp
)->next
) {
1460 write_unlock(&neigh_tbl_lock
);
1462 neigh_hash_free(tbl
->hash_buckets
, tbl
->hash_mask
+ 1);
1463 tbl
->hash_buckets
= NULL
;
1465 kfree(tbl
->phash_buckets
);
1466 tbl
->phash_buckets
= NULL
;
1468 remove_proc_entry(tbl
->id
, init_net
.proc_net_stat
);
1470 free_percpu(tbl
->stats
);
1473 kmem_cache_destroy(tbl
->kmem_cachep
);
1474 tbl
->kmem_cachep
= NULL
;
1479 static int neigh_delete(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1481 struct net
*net
= skb
->sk
->sk_net
;
1483 struct nlattr
*dst_attr
;
1484 struct neigh_table
*tbl
;
1485 struct net_device
*dev
= NULL
;
1488 if (nlmsg_len(nlh
) < sizeof(*ndm
))
1491 dst_attr
= nlmsg_find_attr(nlh
, sizeof(*ndm
), NDA_DST
);
1492 if (dst_attr
== NULL
)
1495 ndm
= nlmsg_data(nlh
);
1496 if (ndm
->ndm_ifindex
) {
1497 dev
= dev_get_by_index(net
, ndm
->ndm_ifindex
);
1504 read_lock(&neigh_tbl_lock
);
1505 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1506 struct neighbour
*neigh
;
1508 if (tbl
->family
!= ndm
->ndm_family
)
1510 read_unlock(&neigh_tbl_lock
);
1512 if (nla_len(dst_attr
) < tbl
->key_len
)
1515 if (ndm
->ndm_flags
& NTF_PROXY
) {
1516 err
= pneigh_delete(tbl
, net
, nla_data(dst_attr
), dev
);
1523 neigh
= neigh_lookup(tbl
, nla_data(dst_attr
), dev
);
1524 if (neigh
== NULL
) {
1529 err
= neigh_update(neigh
, NULL
, NUD_FAILED
,
1530 NEIGH_UPDATE_F_OVERRIDE
|
1531 NEIGH_UPDATE_F_ADMIN
);
1532 neigh_release(neigh
);
1535 read_unlock(&neigh_tbl_lock
);
1536 err
= -EAFNOSUPPORT
;
1545 static int neigh_add(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1547 struct net
*net
= skb
->sk
->sk_net
;
1549 struct nlattr
*tb
[NDA_MAX
+1];
1550 struct neigh_table
*tbl
;
1551 struct net_device
*dev
= NULL
;
1554 err
= nlmsg_parse(nlh
, sizeof(*ndm
), tb
, NDA_MAX
, NULL
);
1559 if (tb
[NDA_DST
] == NULL
)
1562 ndm
= nlmsg_data(nlh
);
1563 if (ndm
->ndm_ifindex
) {
1564 dev
= dev_get_by_index(net
, ndm
->ndm_ifindex
);
1570 if (tb
[NDA_LLADDR
] && nla_len(tb
[NDA_LLADDR
]) < dev
->addr_len
)
1574 read_lock(&neigh_tbl_lock
);
1575 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1576 int flags
= NEIGH_UPDATE_F_ADMIN
| NEIGH_UPDATE_F_OVERRIDE
;
1577 struct neighbour
*neigh
;
1580 if (tbl
->family
!= ndm
->ndm_family
)
1582 read_unlock(&neigh_tbl_lock
);
1584 if (nla_len(tb
[NDA_DST
]) < tbl
->key_len
)
1586 dst
= nla_data(tb
[NDA_DST
]);
1587 lladdr
= tb
[NDA_LLADDR
] ? nla_data(tb
[NDA_LLADDR
]) : NULL
;
1589 if (ndm
->ndm_flags
& NTF_PROXY
) {
1590 struct pneigh_entry
*pn
;
1593 pn
= pneigh_lookup(tbl
, net
, dst
, dev
, 1);
1595 pn
->flags
= ndm
->ndm_flags
;
1604 neigh
= neigh_lookup(tbl
, dst
, dev
);
1605 if (neigh
== NULL
) {
1606 if (!(nlh
->nlmsg_flags
& NLM_F_CREATE
)) {
1611 neigh
= __neigh_lookup_errno(tbl
, dst
, dev
);
1612 if (IS_ERR(neigh
)) {
1613 err
= PTR_ERR(neigh
);
1617 if (nlh
->nlmsg_flags
& NLM_F_EXCL
) {
1619 neigh_release(neigh
);
1623 if (!(nlh
->nlmsg_flags
& NLM_F_REPLACE
))
1624 flags
&= ~NEIGH_UPDATE_F_OVERRIDE
;
1627 err
= neigh_update(neigh
, lladdr
, ndm
->ndm_state
, flags
);
1628 neigh_release(neigh
);
1632 read_unlock(&neigh_tbl_lock
);
1633 err
= -EAFNOSUPPORT
;
1642 static int neightbl_fill_parms(struct sk_buff
*skb
, struct neigh_parms
*parms
)
1644 struct nlattr
*nest
;
1646 nest
= nla_nest_start(skb
, NDTA_PARMS
);
1651 NLA_PUT_U32(skb
, NDTPA_IFINDEX
, parms
->dev
->ifindex
);
1653 NLA_PUT_U32(skb
, NDTPA_REFCNT
, atomic_read(&parms
->refcnt
));
1654 NLA_PUT_U32(skb
, NDTPA_QUEUE_LEN
, parms
->queue_len
);
1655 NLA_PUT_U32(skb
, NDTPA_PROXY_QLEN
, parms
->proxy_qlen
);
1656 NLA_PUT_U32(skb
, NDTPA_APP_PROBES
, parms
->app_probes
);
1657 NLA_PUT_U32(skb
, NDTPA_UCAST_PROBES
, parms
->ucast_probes
);
1658 NLA_PUT_U32(skb
, NDTPA_MCAST_PROBES
, parms
->mcast_probes
);
1659 NLA_PUT_MSECS(skb
, NDTPA_REACHABLE_TIME
, parms
->reachable_time
);
1660 NLA_PUT_MSECS(skb
, NDTPA_BASE_REACHABLE_TIME
,
1661 parms
->base_reachable_time
);
1662 NLA_PUT_MSECS(skb
, NDTPA_GC_STALETIME
, parms
->gc_staletime
);
1663 NLA_PUT_MSECS(skb
, NDTPA_DELAY_PROBE_TIME
, parms
->delay_probe_time
);
1664 NLA_PUT_MSECS(skb
, NDTPA_RETRANS_TIME
, parms
->retrans_time
);
1665 NLA_PUT_MSECS(skb
, NDTPA_ANYCAST_DELAY
, parms
->anycast_delay
);
1666 NLA_PUT_MSECS(skb
, NDTPA_PROXY_DELAY
, parms
->proxy_delay
);
1667 NLA_PUT_MSECS(skb
, NDTPA_LOCKTIME
, parms
->locktime
);
1669 return nla_nest_end(skb
, nest
);
1672 return nla_nest_cancel(skb
, nest
);
1675 static int neightbl_fill_info(struct sk_buff
*skb
, struct neigh_table
*tbl
,
1676 u32 pid
, u32 seq
, int type
, int flags
)
1678 struct nlmsghdr
*nlh
;
1679 struct ndtmsg
*ndtmsg
;
1681 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
1685 ndtmsg
= nlmsg_data(nlh
);
1687 read_lock_bh(&tbl
->lock
);
1688 ndtmsg
->ndtm_family
= tbl
->family
;
1689 ndtmsg
->ndtm_pad1
= 0;
1690 ndtmsg
->ndtm_pad2
= 0;
1692 NLA_PUT_STRING(skb
, NDTA_NAME
, tbl
->id
);
1693 NLA_PUT_MSECS(skb
, NDTA_GC_INTERVAL
, tbl
->gc_interval
);
1694 NLA_PUT_U32(skb
, NDTA_THRESH1
, tbl
->gc_thresh1
);
1695 NLA_PUT_U32(skb
, NDTA_THRESH2
, tbl
->gc_thresh2
);
1696 NLA_PUT_U32(skb
, NDTA_THRESH3
, tbl
->gc_thresh3
);
1699 unsigned long now
= jiffies
;
1700 unsigned int flush_delta
= now
- tbl
->last_flush
;
1701 unsigned int rand_delta
= now
- tbl
->last_rand
;
1703 struct ndt_config ndc
= {
1704 .ndtc_key_len
= tbl
->key_len
,
1705 .ndtc_entry_size
= tbl
->entry_size
,
1706 .ndtc_entries
= atomic_read(&tbl
->entries
),
1707 .ndtc_last_flush
= jiffies_to_msecs(flush_delta
),
1708 .ndtc_last_rand
= jiffies_to_msecs(rand_delta
),
1709 .ndtc_hash_rnd
= tbl
->hash_rnd
,
1710 .ndtc_hash_mask
= tbl
->hash_mask
,
1711 .ndtc_hash_chain_gc
= tbl
->hash_chain_gc
,
1712 .ndtc_proxy_qlen
= tbl
->proxy_queue
.qlen
,
1715 NLA_PUT(skb
, NDTA_CONFIG
, sizeof(ndc
), &ndc
);
1720 struct ndt_stats ndst
;
1722 memset(&ndst
, 0, sizeof(ndst
));
1724 for_each_possible_cpu(cpu
) {
1725 struct neigh_statistics
*st
;
1727 st
= per_cpu_ptr(tbl
->stats
, cpu
);
1728 ndst
.ndts_allocs
+= st
->allocs
;
1729 ndst
.ndts_destroys
+= st
->destroys
;
1730 ndst
.ndts_hash_grows
+= st
->hash_grows
;
1731 ndst
.ndts_res_failed
+= st
->res_failed
;
1732 ndst
.ndts_lookups
+= st
->lookups
;
1733 ndst
.ndts_hits
+= st
->hits
;
1734 ndst
.ndts_rcv_probes_mcast
+= st
->rcv_probes_mcast
;
1735 ndst
.ndts_rcv_probes_ucast
+= st
->rcv_probes_ucast
;
1736 ndst
.ndts_periodic_gc_runs
+= st
->periodic_gc_runs
;
1737 ndst
.ndts_forced_gc_runs
+= st
->forced_gc_runs
;
1740 NLA_PUT(skb
, NDTA_STATS
, sizeof(ndst
), &ndst
);
1743 BUG_ON(tbl
->parms
.dev
);
1744 if (neightbl_fill_parms(skb
, &tbl
->parms
) < 0)
1745 goto nla_put_failure
;
1747 read_unlock_bh(&tbl
->lock
);
1748 return nlmsg_end(skb
, nlh
);
1751 read_unlock_bh(&tbl
->lock
);
1752 nlmsg_cancel(skb
, nlh
);
1756 static int neightbl_fill_param_info(struct sk_buff
*skb
,
1757 struct neigh_table
*tbl
,
1758 struct neigh_parms
*parms
,
1759 u32 pid
, u32 seq
, int type
,
1762 struct ndtmsg
*ndtmsg
;
1763 struct nlmsghdr
*nlh
;
1765 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
1769 ndtmsg
= nlmsg_data(nlh
);
1771 read_lock_bh(&tbl
->lock
);
1772 ndtmsg
->ndtm_family
= tbl
->family
;
1773 ndtmsg
->ndtm_pad1
= 0;
1774 ndtmsg
->ndtm_pad2
= 0;
1776 if (nla_put_string(skb
, NDTA_NAME
, tbl
->id
) < 0 ||
1777 neightbl_fill_parms(skb
, parms
) < 0)
1780 read_unlock_bh(&tbl
->lock
);
1781 return nlmsg_end(skb
, nlh
);
1783 read_unlock_bh(&tbl
->lock
);
1784 nlmsg_cancel(skb
, nlh
);
1788 static const struct nla_policy nl_neightbl_policy
[NDTA_MAX
+1] = {
1789 [NDTA_NAME
] = { .type
= NLA_STRING
},
1790 [NDTA_THRESH1
] = { .type
= NLA_U32
},
1791 [NDTA_THRESH2
] = { .type
= NLA_U32
},
1792 [NDTA_THRESH3
] = { .type
= NLA_U32
},
1793 [NDTA_GC_INTERVAL
] = { .type
= NLA_U64
},
1794 [NDTA_PARMS
] = { .type
= NLA_NESTED
},
1797 static const struct nla_policy nl_ntbl_parm_policy
[NDTPA_MAX
+1] = {
1798 [NDTPA_IFINDEX
] = { .type
= NLA_U32
},
1799 [NDTPA_QUEUE_LEN
] = { .type
= NLA_U32
},
1800 [NDTPA_PROXY_QLEN
] = { .type
= NLA_U32
},
1801 [NDTPA_APP_PROBES
] = { .type
= NLA_U32
},
1802 [NDTPA_UCAST_PROBES
] = { .type
= NLA_U32
},
1803 [NDTPA_MCAST_PROBES
] = { .type
= NLA_U32
},
1804 [NDTPA_BASE_REACHABLE_TIME
] = { .type
= NLA_U64
},
1805 [NDTPA_GC_STALETIME
] = { .type
= NLA_U64
},
1806 [NDTPA_DELAY_PROBE_TIME
] = { .type
= NLA_U64
},
1807 [NDTPA_RETRANS_TIME
] = { .type
= NLA_U64
},
1808 [NDTPA_ANYCAST_DELAY
] = { .type
= NLA_U64
},
1809 [NDTPA_PROXY_DELAY
] = { .type
= NLA_U64
},
1810 [NDTPA_LOCKTIME
] = { .type
= NLA_U64
},
1813 static int neightbl_set(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1815 struct net
*net
= skb
->sk
->sk_net
;
1816 struct neigh_table
*tbl
;
1817 struct ndtmsg
*ndtmsg
;
1818 struct nlattr
*tb
[NDTA_MAX
+1];
1821 err
= nlmsg_parse(nlh
, sizeof(*ndtmsg
), tb
, NDTA_MAX
,
1822 nl_neightbl_policy
);
1826 if (tb
[NDTA_NAME
] == NULL
) {
1831 ndtmsg
= nlmsg_data(nlh
);
1832 read_lock(&neigh_tbl_lock
);
1833 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1834 if (ndtmsg
->ndtm_family
&& tbl
->family
!= ndtmsg
->ndtm_family
)
1837 if (nla_strcmp(tb
[NDTA_NAME
], tbl
->id
) == 0)
1847 * We acquire tbl->lock to be nice to the periodic timers and
1848 * make sure they always see a consistent set of values.
1850 write_lock_bh(&tbl
->lock
);
1852 if (tb
[NDTA_PARMS
]) {
1853 struct nlattr
*tbp
[NDTPA_MAX
+1];
1854 struct neigh_parms
*p
;
1857 err
= nla_parse_nested(tbp
, NDTPA_MAX
, tb
[NDTA_PARMS
],
1858 nl_ntbl_parm_policy
);
1860 goto errout_tbl_lock
;
1862 if (tbp
[NDTPA_IFINDEX
])
1863 ifindex
= nla_get_u32(tbp
[NDTPA_IFINDEX
]);
1865 p
= lookup_neigh_params(tbl
, net
, ifindex
);
1868 goto errout_tbl_lock
;
1871 for (i
= 1; i
<= NDTPA_MAX
; i
++) {
1876 case NDTPA_QUEUE_LEN
:
1877 p
->queue_len
= nla_get_u32(tbp
[i
]);
1879 case NDTPA_PROXY_QLEN
:
1880 p
->proxy_qlen
= nla_get_u32(tbp
[i
]);
1882 case NDTPA_APP_PROBES
:
1883 p
->app_probes
= nla_get_u32(tbp
[i
]);
1885 case NDTPA_UCAST_PROBES
:
1886 p
->ucast_probes
= nla_get_u32(tbp
[i
]);
1888 case NDTPA_MCAST_PROBES
:
1889 p
->mcast_probes
= nla_get_u32(tbp
[i
]);
1891 case NDTPA_BASE_REACHABLE_TIME
:
1892 p
->base_reachable_time
= nla_get_msecs(tbp
[i
]);
1894 case NDTPA_GC_STALETIME
:
1895 p
->gc_staletime
= nla_get_msecs(tbp
[i
]);
1897 case NDTPA_DELAY_PROBE_TIME
:
1898 p
->delay_probe_time
= nla_get_msecs(tbp
[i
]);
1900 case NDTPA_RETRANS_TIME
:
1901 p
->retrans_time
= nla_get_msecs(tbp
[i
]);
1903 case NDTPA_ANYCAST_DELAY
:
1904 p
->anycast_delay
= nla_get_msecs(tbp
[i
]);
1906 case NDTPA_PROXY_DELAY
:
1907 p
->proxy_delay
= nla_get_msecs(tbp
[i
]);
1909 case NDTPA_LOCKTIME
:
1910 p
->locktime
= nla_get_msecs(tbp
[i
]);
1916 if (tb
[NDTA_THRESH1
])
1917 tbl
->gc_thresh1
= nla_get_u32(tb
[NDTA_THRESH1
]);
1919 if (tb
[NDTA_THRESH2
])
1920 tbl
->gc_thresh2
= nla_get_u32(tb
[NDTA_THRESH2
]);
1922 if (tb
[NDTA_THRESH3
])
1923 tbl
->gc_thresh3
= nla_get_u32(tb
[NDTA_THRESH3
]);
1925 if (tb
[NDTA_GC_INTERVAL
])
1926 tbl
->gc_interval
= nla_get_msecs(tb
[NDTA_GC_INTERVAL
]);
1931 write_unlock_bh(&tbl
->lock
);
1933 read_unlock(&neigh_tbl_lock
);
1938 static int neightbl_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1940 struct net
*net
= skb
->sk
->sk_net
;
1941 int family
, tidx
, nidx
= 0;
1942 int tbl_skip
= cb
->args
[0];
1943 int neigh_skip
= cb
->args
[1];
1944 struct neigh_table
*tbl
;
1946 family
= ((struct rtgenmsg
*) nlmsg_data(cb
->nlh
))->rtgen_family
;
1948 read_lock(&neigh_tbl_lock
);
1949 for (tbl
= neigh_tables
, tidx
= 0; tbl
; tbl
= tbl
->next
, tidx
++) {
1950 struct neigh_parms
*p
;
1952 if (tidx
< tbl_skip
|| (family
&& tbl
->family
!= family
))
1955 if (neightbl_fill_info(skb
, tbl
, NETLINK_CB(cb
->skb
).pid
,
1956 cb
->nlh
->nlmsg_seq
, RTM_NEWNEIGHTBL
,
1960 for (nidx
= 0, p
= tbl
->parms
.next
; p
; p
= p
->next
) {
1964 if (nidx
++ < neigh_skip
)
1967 if (neightbl_fill_param_info(skb
, tbl
, p
,
1968 NETLINK_CB(cb
->skb
).pid
,
1978 read_unlock(&neigh_tbl_lock
);
1985 static int neigh_fill_info(struct sk_buff
*skb
, struct neighbour
*neigh
,
1986 u32 pid
, u32 seq
, int type
, unsigned int flags
)
1988 unsigned long now
= jiffies
;
1989 struct nda_cacheinfo ci
;
1990 struct nlmsghdr
*nlh
;
1993 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndm
), flags
);
1997 ndm
= nlmsg_data(nlh
);
1998 ndm
->ndm_family
= neigh
->ops
->family
;
2001 ndm
->ndm_flags
= neigh
->flags
;
2002 ndm
->ndm_type
= neigh
->type
;
2003 ndm
->ndm_ifindex
= neigh
->dev
->ifindex
;
2005 NLA_PUT(skb
, NDA_DST
, neigh
->tbl
->key_len
, neigh
->primary_key
);
2007 read_lock_bh(&neigh
->lock
);
2008 ndm
->ndm_state
= neigh
->nud_state
;
2009 if ((neigh
->nud_state
& NUD_VALID
) &&
2010 nla_put(skb
, NDA_LLADDR
, neigh
->dev
->addr_len
, neigh
->ha
) < 0) {
2011 read_unlock_bh(&neigh
->lock
);
2012 goto nla_put_failure
;
2015 ci
.ndm_used
= now
- neigh
->used
;
2016 ci
.ndm_confirmed
= now
- neigh
->confirmed
;
2017 ci
.ndm_updated
= now
- neigh
->updated
;
2018 ci
.ndm_refcnt
= atomic_read(&neigh
->refcnt
) - 1;
2019 read_unlock_bh(&neigh
->lock
);
2021 NLA_PUT_U32(skb
, NDA_PROBES
, atomic_read(&neigh
->probes
));
2022 NLA_PUT(skb
, NDA_CACHEINFO
, sizeof(ci
), &ci
);
2024 return nlmsg_end(skb
, nlh
);
2027 nlmsg_cancel(skb
, nlh
);
2031 static void neigh_update_notify(struct neighbour
*neigh
)
2033 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE
, neigh
);
2034 __neigh_notify(neigh
, RTM_NEWNEIGH
, 0);
2037 static int neigh_dump_table(struct neigh_table
*tbl
, struct sk_buff
*skb
,
2038 struct netlink_callback
*cb
)
2040 struct net
* net
= skb
->sk
->sk_net
;
2041 struct neighbour
*n
;
2042 int rc
, h
, s_h
= cb
->args
[1];
2043 int idx
, s_idx
= idx
= cb
->args
[2];
2045 read_lock_bh(&tbl
->lock
);
2046 for (h
= 0; h
<= tbl
->hash_mask
; h
++) {
2051 for (n
= tbl
->hash_buckets
[h
], idx
= 0; n
; n
= n
->next
) {
2053 if (n
->dev
->nd_net
!= net
)
2058 if (neigh_fill_info(skb
, n
, NETLINK_CB(cb
->skb
).pid
,
2061 NLM_F_MULTI
) <= 0) {
2062 read_unlock_bh(&tbl
->lock
);
2068 read_unlock_bh(&tbl
->lock
);
2076 static int neigh_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2078 struct neigh_table
*tbl
;
2081 read_lock(&neigh_tbl_lock
);
2082 family
= ((struct rtgenmsg
*) nlmsg_data(cb
->nlh
))->rtgen_family
;
2085 for (tbl
= neigh_tables
, t
= 0; tbl
; tbl
= tbl
->next
, t
++) {
2086 if (t
< s_t
|| (family
&& tbl
->family
!= family
))
2089 memset(&cb
->args
[1], 0, sizeof(cb
->args
) -
2090 sizeof(cb
->args
[0]));
2091 if (neigh_dump_table(tbl
, skb
, cb
) < 0)
2094 read_unlock(&neigh_tbl_lock
);
2100 void neigh_for_each(struct neigh_table
*tbl
, void (*cb
)(struct neighbour
*, void *), void *cookie
)
2104 read_lock_bh(&tbl
->lock
);
2105 for (chain
= 0; chain
<= tbl
->hash_mask
; chain
++) {
2106 struct neighbour
*n
;
2108 for (n
= tbl
->hash_buckets
[chain
]; n
; n
= n
->next
)
2111 read_unlock_bh(&tbl
->lock
);
2113 EXPORT_SYMBOL(neigh_for_each
);
2115 /* The tbl->lock must be held as a writer and BH disabled. */
2116 void __neigh_for_each_release(struct neigh_table
*tbl
,
2117 int (*cb
)(struct neighbour
*))
2121 for (chain
= 0; chain
<= tbl
->hash_mask
; chain
++) {
2122 struct neighbour
*n
, **np
;
2124 np
= &tbl
->hash_buckets
[chain
];
2125 while ((n
= *np
) != NULL
) {
2128 write_lock(&n
->lock
);
2135 write_unlock(&n
->lock
);
2137 neigh_cleanup_and_release(n
);
2141 EXPORT_SYMBOL(__neigh_for_each_release
);
2143 #ifdef CONFIG_PROC_FS
2145 static struct neighbour
*neigh_get_first(struct seq_file
*seq
)
2147 struct neigh_seq_state
*state
= seq
->private;
2148 struct net
*net
= state
->p
.net
;
2149 struct neigh_table
*tbl
= state
->tbl
;
2150 struct neighbour
*n
= NULL
;
2151 int bucket
= state
->bucket
;
2153 state
->flags
&= ~NEIGH_SEQ_IS_PNEIGH
;
2154 for (bucket
= 0; bucket
<= tbl
->hash_mask
; bucket
++) {
2155 n
= tbl
->hash_buckets
[bucket
];
2158 if (n
->dev
->nd_net
!= net
)
2160 if (state
->neigh_sub_iter
) {
2164 v
= state
->neigh_sub_iter(state
, n
, &fakep
);
2168 if (!(state
->flags
& NEIGH_SEQ_SKIP_NOARP
))
2170 if (n
->nud_state
& ~NUD_NOARP
)
2179 state
->bucket
= bucket
;
2184 static struct neighbour
*neigh_get_next(struct seq_file
*seq
,
2185 struct neighbour
*n
,
2188 struct neigh_seq_state
*state
= seq
->private;
2189 struct net
*net
= state
->p
.net
;
2190 struct neigh_table
*tbl
= state
->tbl
;
2192 if (state
->neigh_sub_iter
) {
2193 void *v
= state
->neigh_sub_iter(state
, n
, pos
);
2201 if (n
->dev
->nd_net
!= net
)
2203 if (state
->neigh_sub_iter
) {
2204 void *v
= state
->neigh_sub_iter(state
, n
, pos
);
2209 if (!(state
->flags
& NEIGH_SEQ_SKIP_NOARP
))
2212 if (n
->nud_state
& ~NUD_NOARP
)
2221 if (++state
->bucket
> tbl
->hash_mask
)
2224 n
= tbl
->hash_buckets
[state
->bucket
];
2232 static struct neighbour
*neigh_get_idx(struct seq_file
*seq
, loff_t
*pos
)
2234 struct neighbour
*n
= neigh_get_first(seq
);
2238 n
= neigh_get_next(seq
, n
, pos
);
2243 return *pos
? NULL
: n
;
2246 static struct pneigh_entry
*pneigh_get_first(struct seq_file
*seq
)
2248 struct neigh_seq_state
*state
= seq
->private;
2249 struct net
* net
= state
->p
.net
;
2250 struct neigh_table
*tbl
= state
->tbl
;
2251 struct pneigh_entry
*pn
= NULL
;
2252 int bucket
= state
->bucket
;
2254 state
->flags
|= NEIGH_SEQ_IS_PNEIGH
;
2255 for (bucket
= 0; bucket
<= PNEIGH_HASHMASK
; bucket
++) {
2256 pn
= tbl
->phash_buckets
[bucket
];
2257 while (pn
&& (pn
->net
!= net
))
2262 state
->bucket
= bucket
;
2267 static struct pneigh_entry
*pneigh_get_next(struct seq_file
*seq
,
2268 struct pneigh_entry
*pn
,
2271 struct neigh_seq_state
*state
= seq
->private;
2272 struct net
* net
= state
->p
.net
;
2273 struct neigh_table
*tbl
= state
->tbl
;
2277 if (++state
->bucket
> PNEIGH_HASHMASK
)
2279 pn
= tbl
->phash_buckets
[state
->bucket
];
2280 while (pn
&& (pn
->net
!= net
))
2292 static struct pneigh_entry
*pneigh_get_idx(struct seq_file
*seq
, loff_t
*pos
)
2294 struct pneigh_entry
*pn
= pneigh_get_first(seq
);
2298 pn
= pneigh_get_next(seq
, pn
, pos
);
2303 return *pos
? NULL
: pn
;
2306 static void *neigh_get_idx_any(struct seq_file
*seq
, loff_t
*pos
)
2308 struct neigh_seq_state
*state
= seq
->private;
2311 rc
= neigh_get_idx(seq
, pos
);
2312 if (!rc
&& !(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
))
2313 rc
= pneigh_get_idx(seq
, pos
);
2318 void *neigh_seq_start(struct seq_file
*seq
, loff_t
*pos
, struct neigh_table
*tbl
, unsigned int neigh_seq_flags
)
2319 __acquires(tbl
->lock
)
2321 struct neigh_seq_state
*state
= seq
->private;
2322 loff_t pos_minus_one
;
2326 state
->flags
= (neigh_seq_flags
& ~NEIGH_SEQ_IS_PNEIGH
);
2328 read_lock_bh(&tbl
->lock
);
2330 pos_minus_one
= *pos
- 1;
2331 return *pos
? neigh_get_idx_any(seq
, &pos_minus_one
) : SEQ_START_TOKEN
;
2333 EXPORT_SYMBOL(neigh_seq_start
);
2335 void *neigh_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2337 struct neigh_seq_state
*state
;
2340 if (v
== SEQ_START_TOKEN
) {
2341 rc
= neigh_get_idx(seq
, pos
);
2345 state
= seq
->private;
2346 if (!(state
->flags
& NEIGH_SEQ_IS_PNEIGH
)) {
2347 rc
= neigh_get_next(seq
, v
, NULL
);
2350 if (!(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
))
2351 rc
= pneigh_get_first(seq
);
2353 BUG_ON(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
);
2354 rc
= pneigh_get_next(seq
, v
, NULL
);
2360 EXPORT_SYMBOL(neigh_seq_next
);
2362 void neigh_seq_stop(struct seq_file
*seq
, void *v
)
2363 __releases(tbl
->lock
)
2365 struct neigh_seq_state
*state
= seq
->private;
2366 struct neigh_table
*tbl
= state
->tbl
;
2368 read_unlock_bh(&tbl
->lock
);
2370 EXPORT_SYMBOL(neigh_seq_stop
);
2372 /* statistics via seq_file */
2374 static void *neigh_stat_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2376 struct proc_dir_entry
*pde
= seq
->private;
2377 struct neigh_table
*tbl
= pde
->data
;
2381 return SEQ_START_TOKEN
;
2383 for (cpu
= *pos
-1; cpu
< NR_CPUS
; ++cpu
) {
2384 if (!cpu_possible(cpu
))
2387 return per_cpu_ptr(tbl
->stats
, cpu
);
2392 static void *neigh_stat_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2394 struct proc_dir_entry
*pde
= seq
->private;
2395 struct neigh_table
*tbl
= pde
->data
;
2398 for (cpu
= *pos
; cpu
< NR_CPUS
; ++cpu
) {
2399 if (!cpu_possible(cpu
))
2402 return per_cpu_ptr(tbl
->stats
, cpu
);
2407 static void neigh_stat_seq_stop(struct seq_file
*seq
, void *v
)
2412 static int neigh_stat_seq_show(struct seq_file
*seq
, void *v
)
2414 struct proc_dir_entry
*pde
= seq
->private;
2415 struct neigh_table
*tbl
= pde
->data
;
2416 struct neigh_statistics
*st
= v
;
2418 if (v
== SEQ_START_TOKEN
) {
2419 seq_printf(seq
, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2423 seq_printf(seq
, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2424 "%08lx %08lx %08lx %08lx\n",
2425 atomic_read(&tbl
->entries
),
2436 st
->rcv_probes_mcast
,
2437 st
->rcv_probes_ucast
,
2439 st
->periodic_gc_runs
,
2446 static const struct seq_operations neigh_stat_seq_ops
= {
2447 .start
= neigh_stat_seq_start
,
2448 .next
= neigh_stat_seq_next
,
2449 .stop
= neigh_stat_seq_stop
,
2450 .show
= neigh_stat_seq_show
,
2453 static int neigh_stat_seq_open(struct inode
*inode
, struct file
*file
)
2455 int ret
= seq_open(file
, &neigh_stat_seq_ops
);
2458 struct seq_file
*sf
= file
->private_data
;
2459 sf
->private = PDE(inode
);
2464 static const struct file_operations neigh_stat_seq_fops
= {
2465 .owner
= THIS_MODULE
,
2466 .open
= neigh_stat_seq_open
,
2468 .llseek
= seq_lseek
,
2469 .release
= seq_release
,
2472 #endif /* CONFIG_PROC_FS */
2474 static inline size_t neigh_nlmsg_size(void)
2476 return NLMSG_ALIGN(sizeof(struct ndmsg
))
2477 + nla_total_size(MAX_ADDR_LEN
) /* NDA_DST */
2478 + nla_total_size(MAX_ADDR_LEN
) /* NDA_LLADDR */
2479 + nla_total_size(sizeof(struct nda_cacheinfo
))
2480 + nla_total_size(4); /* NDA_PROBES */
2483 static void __neigh_notify(struct neighbour
*n
, int type
, int flags
)
2485 struct net
*net
= n
->dev
->nd_net
;
2486 struct sk_buff
*skb
;
2489 skb
= nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC
);
2493 err
= neigh_fill_info(skb
, n
, 0, 0, type
, flags
);
2495 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2496 WARN_ON(err
== -EMSGSIZE
);
2500 err
= rtnl_notify(skb
, net
, 0, RTNLGRP_NEIGH
, NULL
, GFP_ATOMIC
);
2503 rtnl_set_sk_err(net
, RTNLGRP_NEIGH
, err
);
2507 void neigh_app_ns(struct neighbour
*n
)
2509 __neigh_notify(n
, RTM_GETNEIGH
, NLM_F_REQUEST
);
2511 #endif /* CONFIG_ARPD */
2513 #ifdef CONFIG_SYSCTL
2515 static struct neigh_sysctl_table
{
2516 struct ctl_table_header
*sysctl_header
;
2517 struct ctl_table neigh_vars
[__NET_NEIGH_MAX
];
2519 } neigh_sysctl_template __read_mostly
= {
2522 .ctl_name
= NET_NEIGH_MCAST_SOLICIT
,
2523 .procname
= "mcast_solicit",
2524 .maxlen
= sizeof(int),
2526 .proc_handler
= &proc_dointvec
,
2529 .ctl_name
= NET_NEIGH_UCAST_SOLICIT
,
2530 .procname
= "ucast_solicit",
2531 .maxlen
= sizeof(int),
2533 .proc_handler
= &proc_dointvec
,
2536 .ctl_name
= NET_NEIGH_APP_SOLICIT
,
2537 .procname
= "app_solicit",
2538 .maxlen
= sizeof(int),
2540 .proc_handler
= &proc_dointvec
,
2543 .procname
= "retrans_time",
2544 .maxlen
= sizeof(int),
2546 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2549 .ctl_name
= NET_NEIGH_REACHABLE_TIME
,
2550 .procname
= "base_reachable_time",
2551 .maxlen
= sizeof(int),
2553 .proc_handler
= &proc_dointvec_jiffies
,
2554 .strategy
= &sysctl_jiffies
,
2557 .ctl_name
= NET_NEIGH_DELAY_PROBE_TIME
,
2558 .procname
= "delay_first_probe_time",
2559 .maxlen
= sizeof(int),
2561 .proc_handler
= &proc_dointvec_jiffies
,
2562 .strategy
= &sysctl_jiffies
,
2565 .ctl_name
= NET_NEIGH_GC_STALE_TIME
,
2566 .procname
= "gc_stale_time",
2567 .maxlen
= sizeof(int),
2569 .proc_handler
= &proc_dointvec_jiffies
,
2570 .strategy
= &sysctl_jiffies
,
2573 .ctl_name
= NET_NEIGH_UNRES_QLEN
,
2574 .procname
= "unres_qlen",
2575 .maxlen
= sizeof(int),
2577 .proc_handler
= &proc_dointvec
,
2580 .ctl_name
= NET_NEIGH_PROXY_QLEN
,
2581 .procname
= "proxy_qlen",
2582 .maxlen
= sizeof(int),
2584 .proc_handler
= &proc_dointvec
,
2587 .procname
= "anycast_delay",
2588 .maxlen
= sizeof(int),
2590 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2593 .procname
= "proxy_delay",
2594 .maxlen
= sizeof(int),
2596 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2599 .procname
= "locktime",
2600 .maxlen
= sizeof(int),
2602 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2605 .ctl_name
= NET_NEIGH_RETRANS_TIME_MS
,
2606 .procname
= "retrans_time_ms",
2607 .maxlen
= sizeof(int),
2609 .proc_handler
= &proc_dointvec_ms_jiffies
,
2610 .strategy
= &sysctl_ms_jiffies
,
2613 .ctl_name
= NET_NEIGH_REACHABLE_TIME_MS
,
2614 .procname
= "base_reachable_time_ms",
2615 .maxlen
= sizeof(int),
2617 .proc_handler
= &proc_dointvec_ms_jiffies
,
2618 .strategy
= &sysctl_ms_jiffies
,
2621 .ctl_name
= NET_NEIGH_GC_INTERVAL
,
2622 .procname
= "gc_interval",
2623 .maxlen
= sizeof(int),
2625 .proc_handler
= &proc_dointvec_jiffies
,
2626 .strategy
= &sysctl_jiffies
,
2629 .ctl_name
= NET_NEIGH_GC_THRESH1
,
2630 .procname
= "gc_thresh1",
2631 .maxlen
= sizeof(int),
2633 .proc_handler
= &proc_dointvec
,
2636 .ctl_name
= NET_NEIGH_GC_THRESH2
,
2637 .procname
= "gc_thresh2",
2638 .maxlen
= sizeof(int),
2640 .proc_handler
= &proc_dointvec
,
2643 .ctl_name
= NET_NEIGH_GC_THRESH3
,
2644 .procname
= "gc_thresh3",
2645 .maxlen
= sizeof(int),
2647 .proc_handler
= &proc_dointvec
,
2653 int neigh_sysctl_register(struct net_device
*dev
, struct neigh_parms
*p
,
2654 int p_id
, int pdev_id
, char *p_name
,
2655 proc_handler
*handler
, ctl_handler
*strategy
)
2657 struct neigh_sysctl_table
*t
;
2658 const char *dev_name_source
= NULL
;
2660 #define NEIGH_CTL_PATH_ROOT 0
2661 #define NEIGH_CTL_PATH_PROTO 1
2662 #define NEIGH_CTL_PATH_NEIGH 2
2663 #define NEIGH_CTL_PATH_DEV 3
2665 struct ctl_path neigh_path
[] = {
2666 { .procname
= "net", .ctl_name
= CTL_NET
, },
2667 { .procname
= "proto", .ctl_name
= 0, },
2668 { .procname
= "neigh", .ctl_name
= 0, },
2669 { .procname
= "default", .ctl_name
= NET_PROTO_CONF_DEFAULT
, },
2673 t
= kmemdup(&neigh_sysctl_template
, sizeof(*t
), GFP_KERNEL
);
2677 t
->neigh_vars
[0].data
= &p
->mcast_probes
;
2678 t
->neigh_vars
[1].data
= &p
->ucast_probes
;
2679 t
->neigh_vars
[2].data
= &p
->app_probes
;
2680 t
->neigh_vars
[3].data
= &p
->retrans_time
;
2681 t
->neigh_vars
[4].data
= &p
->base_reachable_time
;
2682 t
->neigh_vars
[5].data
= &p
->delay_probe_time
;
2683 t
->neigh_vars
[6].data
= &p
->gc_staletime
;
2684 t
->neigh_vars
[7].data
= &p
->queue_len
;
2685 t
->neigh_vars
[8].data
= &p
->proxy_qlen
;
2686 t
->neigh_vars
[9].data
= &p
->anycast_delay
;
2687 t
->neigh_vars
[10].data
= &p
->proxy_delay
;
2688 t
->neigh_vars
[11].data
= &p
->locktime
;
2689 t
->neigh_vars
[12].data
= &p
->retrans_time
;
2690 t
->neigh_vars
[13].data
= &p
->base_reachable_time
;
2693 dev_name_source
= dev
->name
;
2694 neigh_path
[NEIGH_CTL_PATH_DEV
].ctl_name
= dev
->ifindex
;
2695 /* Terminate the table early */
2696 memset(&t
->neigh_vars
[14], 0, sizeof(t
->neigh_vars
[14]));
2698 dev_name_source
= neigh_path
[NEIGH_CTL_PATH_DEV
].procname
;
2699 t
->neigh_vars
[14].data
= (int *)(p
+ 1);
2700 t
->neigh_vars
[15].data
= (int *)(p
+ 1) + 1;
2701 t
->neigh_vars
[16].data
= (int *)(p
+ 1) + 2;
2702 t
->neigh_vars
[17].data
= (int *)(p
+ 1) + 3;
2706 if (handler
|| strategy
) {
2708 t
->neigh_vars
[3].proc_handler
= handler
;
2709 t
->neigh_vars
[3].strategy
= strategy
;
2710 t
->neigh_vars
[3].extra1
= dev
;
2712 t
->neigh_vars
[3].ctl_name
= CTL_UNNUMBERED
;
2714 t
->neigh_vars
[4].proc_handler
= handler
;
2715 t
->neigh_vars
[4].strategy
= strategy
;
2716 t
->neigh_vars
[4].extra1
= dev
;
2718 t
->neigh_vars
[4].ctl_name
= CTL_UNNUMBERED
;
2719 /* RetransTime (in milliseconds)*/
2720 t
->neigh_vars
[12].proc_handler
= handler
;
2721 t
->neigh_vars
[12].strategy
= strategy
;
2722 t
->neigh_vars
[12].extra1
= dev
;
2724 t
->neigh_vars
[12].ctl_name
= CTL_UNNUMBERED
;
2725 /* ReachableTime (in milliseconds) */
2726 t
->neigh_vars
[13].proc_handler
= handler
;
2727 t
->neigh_vars
[13].strategy
= strategy
;
2728 t
->neigh_vars
[13].extra1
= dev
;
2730 t
->neigh_vars
[13].ctl_name
= CTL_UNNUMBERED
;
2733 t
->dev_name
= kstrdup(dev_name_source
, GFP_KERNEL
);
2737 neigh_path
[NEIGH_CTL_PATH_DEV
].procname
= t
->dev_name
;
2738 neigh_path
[NEIGH_CTL_PATH_NEIGH
].ctl_name
= pdev_id
;
2739 neigh_path
[NEIGH_CTL_PATH_PROTO
].procname
= p_name
;
2740 neigh_path
[NEIGH_CTL_PATH_PROTO
].ctl_name
= p_id
;
2742 t
->sysctl_header
= register_sysctl_paths(neigh_path
, t
->neigh_vars
);
2743 if (!t
->sysctl_header
)
2746 p
->sysctl_table
= t
;
2757 void neigh_sysctl_unregister(struct neigh_parms
*p
)
2759 if (p
->sysctl_table
) {
2760 struct neigh_sysctl_table
*t
= p
->sysctl_table
;
2761 p
->sysctl_table
= NULL
;
2762 unregister_sysctl_table(t
->sysctl_header
);
2768 #endif /* CONFIG_SYSCTL */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

	return 0;
}

subsys_initcall(neigh_init);
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);

EXPORT_SYMBOL(neigh_app_ns);

#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif