/*
 *      Generic address resolution entity
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
 *      Harald Welte            Add neighbour cache statistics like rtstat
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/times.h>
#include <net/neighbour.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK         0xF

static void neigh_timer_handler(unsigned long arg);
static void neigh_app_notify(struct neighbour *n);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
static struct file_operations neigh_stat_seq_fops;
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be made under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context,
 */
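/*
 * Illustrative sketch (added commentary, not part of the original file)
 * of the pattern the rules above imply, assuming the usual
 * neigh_hold()/neigh_release() refcount helpers: pin the entry under the
 * table lock, drop the lock, then do the slow work.
 *
 *      write_lock_bh(&tbl->lock);
 *      n = find_entry_in_bucket(tbl, key);     -- hypothetical helper
 *      if (n)
 *              neigh_hold(n);
 *      write_unlock_bh(&tbl->lock);
 *      if (n) {
 *              nontrivial_action(n);           -- callbacks allowed here
 *              neigh_release(n);
 *      }
 */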
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
        kfree_skb(skb);
        return -ENETDOWN;
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
        return (base ? (net_random() % base) + (base >> 1) : 0);
}
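/*
 * Worked example (added, not from the original file): with base = 30 * HZ,
 * net_random() % base is uniform over [0, 30s) and base >> 1 adds a fixed
 * 15s, so the result is uniform over [15s, 45s) -- i.e. exactly the
 * (1/2)*base ... (3/2)*base interval the comment above describes.
 */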
static int neigh_forced_gc(struct neigh_table *tbl)
{
        int shrunk = 0;
        int i;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);
        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[i];
                while ((n = *np) != NULL) {
                        /* Neighbour record may be discarded if:
                         * - nobody refers to it.
                         * - it is not permanent
                         */
                        write_lock(&n->lock);
                        if (atomic_read(&n->refcnt) == 1 &&
                            !(n->nud_state & NUD_PERMANENT)) {
                                *np     = n->next;
                                n->dead = 1;
                                shrunk  = 1;
                                write_unlock(&n->lock);
                                neigh_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);
                        np = &n->next;
                }
        }

        tbl->last_flush = jiffies;

        write_unlock_bh(&tbl->lock);

        return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
        if ((n->nud_state & NUD_IN_TIMER) &&
            del_timer(&n->timer)) {
                neigh_release(n);
                return 1;
        }
        return 0;
}
static void pneigh_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(list)) != NULL) {
                dev_put(skb->dev);
                kfree_skb(skb);
        }
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
        int i;

        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np = &tbl->hash_buckets[i];

                while ((n = *np) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        *np = n->next;
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        n->dead = 1;

                        if (atomic_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation.
                                   We must destroy the neighbour entry,
                                   but someone still uses it.

                                   The destroy will be delayed until
                                   the last user releases us, but
                                   we must kill timers etc. and move
                                   it to a safe state.
                                 */
                                skb_queue_purge(&n->arp_queue);
                                n->output = neigh_blackhole;
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                NEIGH_PRINTK2("neigh %p is stray.\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_release(n);
                }
        }
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        write_unlock_bh(&tbl->lock);
}
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        pneigh_ifdown(tbl, dev);
        write_unlock_bh(&tbl->lock);

        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
        int entries;

        entries = atomic_inc_return(&tbl->entries) - 1;
        if (entries >= tbl->gc_thresh3 ||
            (entries >= tbl->gc_thresh2 &&
             time_after(now, tbl->last_flush + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) &&
                    entries >= tbl->gc_thresh3)
                        goto out_entries;
        }

        n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
        if (!n)
                goto out_entries;

        memset(n, 0, tbl->entry_size);

        skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        n->updated        = n->used = now;
        n->nud_state      = NUD_NONE;
        n->output         = neigh_blackhole;
        n->parms          = neigh_parms_clone(&tbl->parms);
        init_timer(&n->timer);
        n->timer.function = neigh_timer_handler;
        n->timer.data     = (unsigned long)n;

        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->dead           = 1;
        atomic_set(&n->refcnt, 1);
out:
        return n;
out_entries:
        atomic_dec(&tbl->entries);
        goto out;
}
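/*
 * Added note: allocation applies backpressure in two steps -- above
 * gc_thresh2 a forced GC runs if the last flush was more than 5 seconds
 * ago, and above gc_thresh3 the allocation fails outright unless the
 * forced GC actually freed something.
 */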
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
        unsigned long size = entries * sizeof(struct neighbour *);
        struct neighbour **ret;

        if (size <= PAGE_SIZE) {
                ret = kzalloc(size, GFP_ATOMIC);
        } else {
                ret = (struct neighbour **)
                      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
        }
        return ret;
}
static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
        unsigned long size = entries * sizeof(struct neighbour *);

        if (size <= PAGE_SIZE)
                kfree(hash);
        else
                free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
        struct neighbour **new_hash, **old_hash;
        unsigned int i, new_hash_mask, old_entries;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        BUG_ON(new_entries & (new_entries - 1));
        new_hash = neigh_hash_alloc(new_entries);
        if (!new_hash)
                return;

        old_entries = tbl->hash_mask + 1;
        new_hash_mask = new_entries - 1;
        old_hash = tbl->hash_buckets;

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
        for (i = 0; i < old_entries; i++) {
                struct neighbour *n, *next;

                for (n = old_hash[i]; n; n = next) {
                        unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

                        hash_val &= new_hash_mask;
                        next = n->next;

                        n->next = new_hash[hash_val];
                        new_hash[hash_val] = n;
                }
        }
        tbl->hash_buckets = new_hash;
        tbl->hash_mask = new_hash_mask;

        neigh_hash_free(old_hash, old_entries);
}
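/*
 * Added note: the BUG_ON above enforces a power-of-two table size, so
 * masking with new_hash_mask is equivalent to a modulo.  hash_rnd is also
 * re-randomized on every grow, which reseeds the hash function and makes
 * bucket placement hard to predict from outside.
 */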
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
                if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
                if (!memcmp(n->primary_key, pkey, key_len)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;
        struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
                goto out;
        }

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        dev_hold(dev);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

        write_lock_bh(&tbl->lock);

        if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
                neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

        hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }

        for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
                if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
                        neigh_hold(n1);
                        rc = n1;
                        goto out_tbl_unlock;
                }
        }

        n->next = tbl->hash_buckets[hash_val];
        tbl->hash_buckets[hash_val] = n;
        n->dead = 0;
        neigh_hold(n);
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK2("neigh %p is created.\n", n);
        rc = n;
out:
        return rc;
out_tbl_unlock:
        write_unlock_bh(&tbl->lock);
out_neigh_release:
        neigh_release(n);
        goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
                                    struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        int key_len = tbl->key_len;
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;
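        /*
         * Added note: the proxy table is a small fixed 16-bucket hash, so
         * the last four bytes of the key are folded down by repeated
         * xor-shifts until only the low 4 bits (PNEIGH_HASHMASK) remain.
         */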
        read_lock_bh(&tbl->lock);

        for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
                if (!memcmp(n->key, pkey, key_len) &&
                    (n->dev == dev || !n->dev)) {
                        read_unlock_bh(&tbl->lock);
                        goto out;
                }
        }
        read_unlock_bh(&tbl->lock);
        n = NULL;
        if (!creat)
                goto out;

        n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;

        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        if (dev)
                dev_hold(dev);

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                if (dev)
                        dev_put(dev);
                kfree(n);
                n = NULL;
                goto out;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
out:
        return n;
}
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
                  struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        int key_len = tbl->key_len;
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
             np = &n->next) {
                if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
                        *np = n->next;
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        if (n->dev)
                                dev_put(n->dev);
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        u32 h;

        for (h = 0; h <= PNEIGH_HASHMASK; h++) {
                np = &tbl->phash_buckets[h];
                while ((n = *np) != NULL) {
                        if (!dev || n->dev == dev) {
                                *np = n->next;
                                if (tbl->pdestructor)
                                        tbl->pdestructor(n);
                                if (n->dev)
                                        dev_put(n->dev);
                                kfree(n);
                                continue;
                        }
                        np = &n->next;
                }
        }
        return -ENOENT;
}
/*
 *      neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        if (!neigh->dead) {
                printk(KERN_WARNING
                       "Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        if (neigh_del_timer(neigh))
                printk(KERN_WARNING "Impossible event.\n");

        while ((hh = neigh->hh) != NULL) {
                neigh->hh = hh->hh_next;
                hh->hh_next = NULL;
                write_lock_bh(&hh->hh_lock);
                hh->hh_output = neigh_blackhole;
                write_unlock_bh(&hh->hh_lock);
                if (atomic_dec_and_test(&hh->hh_refcnt))
                        kfree(hh);
        }

        if (neigh->parms->neigh_destructor)
                (neigh->parms->neigh_destructor)(neigh);

        skb_queue_purge(&neigh->arp_queue);

        dev_put(neigh->dev);
        neigh_parms_put(neigh->parms);

        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

        atomic_dec(&neigh->tbl->entries);
        kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

        neigh->output = neigh->ops->output;

        for (hh = neigh->hh; hh; hh = hh->hh_next)
                hh->hh_output = neigh->ops->output;
}
/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

        neigh->output = neigh->ops->connected_output;

        for (hh = neigh->hh; hh; hh = hh->hh_next)
                hh->hh_output = neigh->ops->hh_output;
}
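/*
 * Added note: "connected" and "suspect" differ only in which function
 * pointers are wired up.  A connected entry transmits through the cached
 * header path (hh_output / connected_output) with no validity checks; a
 * suspect entry is routed back through ops->output, which re-resolves
 * the address before transmitting.
 */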
static void neigh_periodic_timer(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        struct neighbour *n, **np;
        unsigned long expire, now = jiffies;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock(&tbl->lock);

        /*
         *      periodically recompute ReachableTime from random function
         */

        if (time_after(now, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
                tbl->last_rand = now;
                for (p = &tbl->parms; p; p = p->next)
                        p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
        }

        np = &tbl->hash_buckets[tbl->hash_chain_gc];
        tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

        while ((n = *np) != NULL) {
                unsigned int state;

                write_lock(&n->lock);

                state = n->nud_state;
                if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
                        write_unlock(&n->lock);
                        goto next_elt;
                }

                if (time_before(n->used, n->confirmed))
                        n->used = n->confirmed;

                if (atomic_read(&n->refcnt) == 1 &&
                    (state == NUD_FAILED ||
                     time_after(now, n->used + n->parms->gc_staletime))) {
                        *np = n->next;
                        n->dead = 1;
                        write_unlock(&n->lock);
                        neigh_release(n);
                        continue;
                }
                write_unlock(&n->lock);

next_elt:
                np = &n->next;
        }

        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
         * base_reachable_time.
         */
        expire = tbl->parms.base_reachable_time >> 1;
        expire /= (tbl->hash_mask + 1);
        if (!expire)
                expire = 1;

        mod_timer(&tbl->gc_timer, now + expire);

        write_unlock(&tbl->lock);
}
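/*
 * Worked example (added, not from the original file): with
 * base_reachable_time = 30s and a 128-bucket table, expire =
 * (30s / 2) / 128, roughly 117ms, so one bucket is scanned per timer
 * firing and a full sweep of the table takes about 15 seconds.
 */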
static __inline__ int neigh_max_probes(struct neighbour *n)
{
        struct neigh_parms *p = n->parms;
        return (n->nud_state & NUD_PROBE ?
                p->ucast_probes :
                p->ucast_probes + p->app_probes + p->mcast_probes);
}
static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
        if (unlikely(mod_timer(&n->timer, when))) {
                printk("NEIGH: BUG, double timer add, state is %x\n",
                       n->nud_state);
                dump_stack();
        }
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
        unsigned long now, next;
        struct neighbour *neigh = (struct neighbour *)arg;
        unsigned state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;
        now = jiffies;
        next = now + HZ;

        if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
                printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
                goto out;
        }

        if (state & NUD_REACHABLE) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->reachable_time)) {
                        NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else if (time_before_eq(now,
                                          neigh->used + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                        neigh->nud_state = NUD_DELAY;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        next = now + neigh->parms->delay_probe_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
                        neigh->nud_state = NUD_STALE;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                }
        } else if (state & NUD_DELAY) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
                        neigh->nud_state = NUD_REACHABLE;
                        neigh->updated = jiffies;
                        neigh_connect(neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
                        neigh->nud_state = NUD_PROBE;
                        neigh->updated = jiffies;
                        atomic_set(&neigh->probes, 0);
                        next = now + neigh->parms->retrans_time;
                }
        } else {
                /* NUD_PROBE|NUD_INCOMPLETE */
                next = now + neigh->parms->retrans_time;
        }

        if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
            atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                struct sk_buff *skb;

                neigh->nud_state = NUD_FAILED;
                neigh->updated = jiffies;
                notify = 1;
                NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
                NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

                /* This is a very delicate place. error_report is a very
                   complicated routine.  In particular, it can hit the same
                   neighbour entry!

                   So we try to be careful and avoid a dead loop. --ANK
                 */
                while (neigh->nud_state == NUD_FAILED &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        write_unlock(&neigh->lock);
                        neigh->ops->error_report(neigh, skb);
                        write_lock(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }

        if (neigh->nud_state & NUD_IN_TIMER) {
                if (time_before(next, jiffies + HZ/2))
                        next = jiffies + HZ/2;
                if (!mod_timer(&neigh->timer, next))
                        neigh_hold(neigh);
        }
        if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
                struct sk_buff *skb = skb_peek(&neigh->arp_queue);
                /* keep skb alive even if arp_queue overflows */
                if (skb)
                        skb_get(skb);
                write_unlock(&neigh->lock);
                neigh->ops->solicit(neigh, skb);
                atomic_inc(&neigh->probes);
                if (skb)
                        kfree_skb(skb);
        } else {
out:
                write_unlock(&neigh->lock);
        }

#ifdef CONFIG_ARPD
        if (notify && neigh->parms->app_probes)
                neigh_app_notify(neigh);
#endif
        neigh_release(neigh);
}
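/*
 * Added summary of the transitions driven by this timer:
 *   REACHABLE -> (confirmation too old)   -> DELAY or STALE
 *   DELAY     -> (confirmed recently)     -> REACHABLE, else -> PROBE
 *   PROBE / INCOMPLETE -> retransmit until neigh_max_probes(), then FAILED
 * Entering FAILED flushes arp_queue through ops->error_report().
 */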
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
        int rc;
        unsigned long now;

        write_lock_bh(&neigh->lock);

        rc = 0;
        if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
                goto out_unlock_bh;

        now = jiffies;

        if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
                if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
                        atomic_set(&neigh->probes, neigh->parms->ucast_probes);
                        neigh->nud_state     = NUD_INCOMPLETE;
                        neigh->updated = jiffies;
                        neigh_hold(neigh);
                        neigh_add_timer(neigh, now + 1);
                } else {
                        neigh->nud_state = NUD_FAILED;
                        neigh->updated = jiffies;
                        write_unlock_bh(&neigh->lock);

                        if (skb)
                                kfree_skb(skb);
                        return 1;
                }
        } else if (neigh->nud_state & NUD_STALE) {
                NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                neigh_hold(neigh);
                neigh->nud_state = NUD_DELAY;
                neigh->updated = jiffies;
                neigh_add_timer(neigh,
                                jiffies + neigh->parms->delay_probe_time);
        }

        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
                        if (skb_queue_len(&neigh->arp_queue) >=
                            neigh->parms->queue_len) {
                                struct sk_buff *buff;
                                buff = neigh->arp_queue.next;
                                __skb_unlink(buff, &neigh->arp_queue);
                                kfree_skb(buff);
                        }
                        __skb_queue_tail(&neigh->arp_queue, skb);
                }
                rc = 1;
        }
out_unlock_bh:
        write_unlock_bh(&neigh->lock);
        return rc;
}
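/*
 * Added note: while resolution is pending the entry buffers at most
 * parms->queue_len packets; when the queue is full the oldest skb
 * (arp_queue.next) is dropped to make room, so a resolution stall costs
 * the head of the queue rather than the newest traffic.
 */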
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
        struct hh_cache *hh;
        void (*update)(struct hh_cache *, struct net_device *, unsigned char *) =
                neigh->dev->header_cache_update;

        if (update) {
                for (hh = neigh->hh; hh; hh = hh->hh_next) {
                        write_lock_bh(&hh->hh_lock);
                        update(hh, neigh->dev, neigh->ha);
                        write_unlock_bh(&hh->hh_lock);
                }
        }
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
        NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
                                if it is different.
        NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
                                lladdr instead of overriding it
                                if it is different.
                                It also allows retaining the current state
                                if lladdr is unchanged.
        NEIGH_UPDATE_F_ADMIN    means that the change is administrative.
        NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
                                NTF_ROUTER flag.
        NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
                                a router.

   Caller MUST hold a reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                 u32 flags)
{
        u8 old;
        int err;
        int notify = 0;
        struct net_device *dev;
        int update_isrouter = 0;

        write_lock_bh(&neigh->lock);

        dev    = neigh->dev;
        old    = neigh->nud_state;
        err    = -EPERM;

        if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
            (old & (NUD_NOARP | NUD_PERMANENT)))
                goto out;

        if (!(new & NUD_VALID)) {
                neigh_del_timer(neigh);
                if (old & NUD_CONNECTED)
                        neigh_suspect(neigh);
                neigh->nud_state = new;
                err = 0;
                notify = old & NUD_VALID;
                goto out;
        }

        /* Compare new lladdr with cached one */
        if (!dev->addr_len) {
                /* First case: device needs no address. */
                lladdr = neigh->ha;
        } else if (lladdr) {
                /* The second case: if something is already cached
                   and a new address is proposed:
                   - compare new & old
                   - if they are different, check override flag
                 */
                if ((old & NUD_VALID) &&
                    !memcmp(lladdr, neigh->ha, dev->addr_len))
                        lladdr = neigh->ha;
        } else {
                /* No address is supplied; if we know something,
                   use it, otherwise discard the request.
                 */
                err = -EINVAL;
                if (!(old & NUD_VALID))
                        goto out;
                lladdr = neigh->ha;
        }

        if (new & NUD_CONNECTED)
                neigh->confirmed = jiffies;
        neigh->updated = jiffies;

        /* If the entry was valid and the address has not changed,
           do not change the entry state if the new one is STALE.
         */
        err = 0;
        update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
        if (old & NUD_VALID) {
                if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
                        update_isrouter = 0;
                        if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                            (old & NUD_CONNECTED)) {
                                lladdr = neigh->ha;
                                new = NUD_STALE;
                        } else
                                goto out;
                } else {
                        if (lladdr == neigh->ha && new == NUD_STALE &&
                            ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
                             (old & NUD_CONNECTED)))
                                new = old;
                }
        }

        if (new != old) {
                neigh_del_timer(neigh);
                if (new & NUD_IN_TIMER) {
                        neigh_hold(neigh);
                        neigh_add_timer(neigh, (jiffies +
                                                ((new & NUD_REACHABLE) ?
                                                 neigh->parms->reachable_time :
                                                 0)));
                }
                neigh->nud_state = new;
        }

        if (lladdr != neigh->ha) {
                memcpy(&neigh->ha, lladdr, dev->addr_len);
                neigh_update_hhs(neigh);
                if (!(new & NUD_CONNECTED))
                        neigh->confirmed = jiffies -
                                      (neigh->parms->base_reachable_time << 1);
                notify = 1;
        }
        if (new == old)
                goto out;
        if (new & NUD_CONNECTED)
                neigh_connect(neigh);
        else
                neigh_suspect(neigh);
        if (!(old & NUD_VALID)) {
                struct sk_buff *skb;

                /* Again: avoid dead loop if something went wrong */

                while (neigh->nud_state & NUD_VALID &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        struct neighbour *n1 = neigh;
                        write_unlock_bh(&neigh->lock);
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
                        if (skb->dst && skb->dst->neighbour)
                                n1 = skb->dst->neighbour;
                        n1->output(n1, skb);
                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }
out:
        if (update_isrouter) {
                neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
                        (neigh->flags | NTF_ROUTER) :
                        (neigh->flags & ~NTF_ROUTER);
        }
        write_unlock_bh(&neigh->lock);

#ifdef CONFIG_ARPD
        if (notify && neigh->parms->app_probes)
                neigh_app_notify(neigh);
#endif
        return err;
}
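/*
 * Illustrative call (added, not from the original file): a protocol that
 * receives a confirmed advertisement for 'neigh' carrying hardware
 * address 'lladdr' might feed it back into the cache with:
 *
 *      neigh_update(neigh, lladdr, NUD_REACHABLE,
 *                   NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE);
 *
 * Compare neigh_event_ns() below, which records an unconfirmed source
 * address as NUD_STALE with NEIGH_UPDATE_F_OVERRIDE only.
 */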
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                 u8 *lladdr, void *saddr,
                                 struct net_device *dev)
{
        struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
                                                 lladdr || !dev->addr_len);
        if (neigh)
                neigh_update(neigh, lladdr, NUD_STALE,
                             NEIGH_UPDATE_F_OVERRIDE);
        return neigh;
}
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
                          u16 protocol)
{
        struct hh_cache *hh;
        struct net_device *dev = dst->dev;

        for (hh = n->hh; hh; hh = hh->hh_next)
                if (hh->hh_type == protocol)
                        break;

        if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
                rwlock_init(&hh->hh_lock);
                hh->hh_type = protocol;
                atomic_set(&hh->hh_refcnt, 0);
                hh->hh_next = NULL;
                if (dev->hard_header_cache(n, hh)) {
                        kfree(hh);
                        hh = NULL;
                } else {
                        atomic_inc(&hh->hh_refcnt);
                        hh->hh_next = n->hh;
                        n->hh       = hh;
                        if (n->nud_state & NUD_CONNECTED)
                                hh->hh_output = n->ops->hh_output;
                        else
                                hh->hh_output = n->ops->output;
                }
        }
        if (hh) {
                atomic_inc(&hh->hh_refcnt);
                dst->hh = hh;
        }
}
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;

        __skb_pull(skb, skb->nh.raw - skb->data);

        if (dev->hard_header &&
            dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
                             skb->len) < 0 &&
            dev->rebuild_header(skb))
                return 0;

        return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct neighbour *neigh;
        int rc = 0;

        if (!dst || !(neigh = dst->neighbour))
                goto discard;

        __skb_pull(skb, skb->nh.raw - skb->data);

        if (!neigh_event_send(neigh, skb)) {
                int err;
                struct net_device *dev = neigh->dev;
                if (dev->hard_header_cache && !dst->hh) {
                        write_lock_bh(&neigh->lock);
                        if (!dst->hh)
                                neigh_hh_init(neigh, dst, dst->ops->protocol);
                        err = dev->hard_header(skb, dev, ntohs(skb->protocol),
                                               neigh->ha, NULL, skb->len);
                        write_unlock_bh(&neigh->lock);
                } else {
                        read_lock_bh(&neigh->lock);
                        err = dev->hard_header(skb, dev, ntohs(skb->protocol),
                                               neigh->ha, NULL, skb->len);
                        read_unlock_bh(&neigh->lock);
                }
                if (err >= 0)
                        rc = neigh->ops->queue_xmit(skb);
                else
                        goto out_kfree_skb;
        }
out:
        return rc;
discard:
        NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
                      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
        goto out;
}
/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
        int err;
        struct dst_entry *dst = skb->dst;
        struct neighbour *neigh = dst->neighbour;
        struct net_device *dev = neigh->dev;

        __skb_pull(skb, skb->nh.raw - skb->data);

        read_lock_bh(&neigh->lock);
        err = dev->hard_header(skb, dev, ntohs(skb->protocol),
                               neigh->ha, NULL, skb->len);
        read_unlock_bh(&neigh->lock);
        if (err >= 0)
                err = neigh->ops->queue_xmit(skb);
        else {
                err = -EINVAL;
                kfree_skb(skb);
        }
        return err;
}
static void neigh_proxy_process(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb;

        spin_lock(&tbl->proxy_queue.lock);

        skb = tbl->proxy_queue.next;

        while (skb != (struct sk_buff *)&tbl->proxy_queue) {
                struct sk_buff *back = skb;
                long tdif = NEIGH_CB(back)->sched_next - now;

                skb = skb->next;
                if (tdif <= 0) {
                        struct net_device *dev = back->dev;
                        __skb_unlink(back, &tbl->proxy_queue);
                        if (tbl->proxy_redo && netif_running(dev))
                                tbl->proxy_redo(back);
                        else
                                kfree_skb(back);

                        dev_put(dev);
                } else if (!sched_next || tdif < sched_next)
                        sched_next = tdif;
        }
        del_timer(&tbl->proxy_timer);
        if (sched_next)
                mod_timer(&tbl->proxy_timer, jiffies + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
        unsigned long now = jiffies;
        unsigned long sched_next = now + (net_random() % p->proxy_delay);

        if (tbl->proxy_queue.qlen > p->proxy_qlen) {
                kfree_skb(skb);
                return;
        }

        NEIGH_CB(skb)->sched_next = sched_next;
        NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

        spin_lock(&tbl->proxy_queue.lock);
        if (del_timer(&tbl->proxy_timer)) {
                if (time_before(tbl->proxy_timer.expires, sched_next))
                        sched_next = tbl->proxy_timer.expires;
        }
        dst_release(skb->dst);
        skb->dst = NULL;
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
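/*
 * Added note: proxied requests are deliberately answered after a random
 * delay of up to proxy_delay jiffies; if the proxy timer is already due
 * to fire earlier, the earlier expiry is kept so pending entries are not
 * postponed by a new arrival.
 */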
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                      struct neigh_table *tbl)
{
        struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (p) {
                memcpy(p, &tbl->parms, sizeof(*p));
                p->tbl            = tbl;
                atomic_set(&p->refcnt, 1);
                INIT_RCU_HEAD(&p->rcu_head);
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
                if (dev) {
                        if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
                                kfree(p);
                                return NULL;
                        }

                        dev_hold(dev);
                        p->dev = dev;
                }
                p->sysctl_table = NULL;
                write_lock_bh(&tbl->lock);
                p->next         = tbl->parms.next;
                tbl->parms.next = p;
                write_unlock_bh(&tbl->lock);
        }
        return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
        struct neigh_parms *parms =
                container_of(head, struct neigh_parms, rcu_head);

        neigh_parms_put(parms);
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
        struct neigh_parms **p;

        if (!parms || parms == &tbl->parms)
                return;
        write_lock_bh(&tbl->lock);
        for (p = &tbl->parms.next; *p; p = &(*p)->next) {
                if (*p == parms) {
                        *p = parms->next;
                        parms->dead = 1;
                        write_unlock_bh(&tbl->lock);
                        if (parms->dev)
                                dev_put(parms->dev);
                        call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
                        return;
                }
        }
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
        kfree(parms);
}
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
        unsigned long now = jiffies;
        unsigned long phsize;

        atomic_set(&tbl->parms.refcnt, 1);
        INIT_RCU_HEAD(&tbl->parms.rcu_head);
        tbl->parms.reachable_time =
                        neigh_rand_reach_time(tbl->parms.base_reachable_time);

        if (!tbl->kmem_cachep)
                tbl->kmem_cachep = kmem_cache_create(tbl->id,
                                                     tbl->entry_size,
                                                     0, SLAB_HWCACHE_ALIGN,
                                                     NULL, NULL);

        if (!tbl->kmem_cachep)
                panic("cannot create neighbour cache");

        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
                panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
        tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
        if (!tbl->pde)
                panic("cannot create neighbour proc dir entry");
        tbl->pde->proc_fops = &neigh_stat_seq_fops;
        tbl->pde->data = tbl;
#endif

        tbl->hash_mask = 1;
        tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

        if (!tbl->hash_buckets || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        rwlock_init(&tbl->lock);
        init_timer(&tbl->gc_timer);
        tbl->gc_timer.data     = (unsigned long)tbl;
        tbl->gc_timer.function = neigh_periodic_timer;
        tbl->gc_timer.expires  = now + 1;
        add_timer(&tbl->gc_timer);

        init_timer(&tbl->proxy_timer);
        tbl->proxy_timer.data     = (unsigned long)tbl;
        tbl->proxy_timer.function = neigh_proxy_process;
        skb_queue_head_init(&tbl->proxy_queue);

        tbl->last_flush = now;
        tbl->last_rand  = now + tbl->parms.reachable_time * 20;
}
void neigh_table_init(struct neigh_table *tbl)
{
        struct neigh_table *tmp;

        neigh_table_init_no_netlink(tbl);
        write_lock(&neigh_tbl_lock);
        for (tmp = neigh_tables; tmp; tmp = tmp->next) {
                if (tmp->family == tbl->family)
                        break;
        }
        tbl->next       = neigh_tables;
        neigh_tables    = tbl;
        write_unlock(&neigh_tbl_lock);

        if (unlikely(tmp)) {
                printk(KERN_ERR "NEIGH: Registering multiple tables for "
                       "family %d\n", tbl->family);
                dump_stack();
        }
}
int neigh_table_clear(struct neigh_table *tbl)
{
        struct neigh_table **tp;

        /* It is not clean... Fix it to unload the IPv6 module safely */
        del_timer_sync(&tbl->gc_timer);
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                printk(KERN_CRIT "neighbour leakage\n");
        write_lock(&neigh_tbl_lock);
        for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
                if (*tp == tbl) {
                        *tp = tbl->next;
                        break;
                }
        }
        write_unlock(&neigh_tbl_lock);

        neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
        tbl->hash_buckets = NULL;

        kfree(tbl->phash_buckets);
        tbl->phash_buckets = NULL;

        return 0;
}
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct ndmsg *ndm = NLMSG_DATA(nlh);
        struct rtattr **nda = arg;
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        int err = -ENODEV;

        if (ndm->ndm_ifindex &&
            (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
                goto out;

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables; tbl; tbl = tbl->next) {
                struct rtattr *dst_attr = nda[NDA_DST - 1];
                struct neighbour *n;

                if (tbl->family != ndm->ndm_family)
                        continue;
                read_unlock(&neigh_tbl_lock);

                err = -EINVAL;
                if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
                        goto out_dev_put;

                if (ndm->ndm_flags & NTF_PROXY) {
                        err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
                        goto out_dev_put;
                }

                if (!dev)
                        goto out;

                n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
                if (n) {
                        err = neigh_update(n, NULL, NUD_FAILED,
                                           NEIGH_UPDATE_F_OVERRIDE |
                                           NEIGH_UPDATE_F_ADMIN);
                        neigh_release(n);
                }
                goto out_dev_put;
        }
        read_unlock(&neigh_tbl_lock);
        err = -EADDRNOTAVAIL;
out_dev_put:
        if (dev)
                dev_put(dev);
out:
        return err;
}
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct ndmsg *ndm = NLMSG_DATA(nlh);
        struct rtattr **nda = arg;
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        int err = -ENODEV;

        if (ndm->ndm_ifindex &&
            (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
                goto out;

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables; tbl; tbl = tbl->next) {
                struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
                struct rtattr *dst_attr = nda[NDA_DST - 1];
                int override = 1;
                struct neighbour *n;

                if (tbl->family != ndm->ndm_family)
                        continue;
                read_unlock(&neigh_tbl_lock);

                err = -EINVAL;
                if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
                        goto out_dev_put;

                if (ndm->ndm_flags & NTF_PROXY) {
                        err = -ENOBUFS;
                        if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
                                err = 0;
                        goto out_dev_put;
                }

                if (!dev)
                        goto out;
                if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
                        goto out_dev_put;

                n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
                if (n) {
                        if (nlh->nlmsg_flags & NLM_F_EXCL) {
                                err = -EEXIST;
                                neigh_release(n);
                                goto out_dev_put;
                        }

                        override = nlh->nlmsg_flags & NLM_F_REPLACE;
                } else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
                        err = -ENOENT;
                        goto out_dev_put;
                } else {
                        n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
                        if (IS_ERR(n)) {
                                err = PTR_ERR(n);
                                goto out_dev_put;
                        }
                }

                err = neigh_update(n,
                                   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
                                   ndm->ndm_state,
                                   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
                                   NEIGH_UPDATE_F_ADMIN);

                neigh_release(n);
                goto out_dev_put;
        }

        read_unlock(&neigh_tbl_lock);
        err = -EADDRNOTAVAIL;
out_dev_put:
        if (dev)
                dev_put(dev);
out:
        return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
        struct rtattr *nest = NULL;

        nest = RTA_NEST(skb, NDTA_PARMS);

        if (parms->dev)
                RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

        RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
        RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
        RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
        RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
        RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
        RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
        RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
        RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
                      parms->base_reachable_time);
        RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
        RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
        RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
        RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
        RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
        RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

        return RTA_NEST_END(skb, nest);

rtattr_failure:
        return RTA_NEST_CANCEL(skb, nest);
}
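/*
 * Added note: the RTA_PUT*() family used above are macros that jump to
 * the local rtattr_failure label when the skb runs out of tailroom,
 * which is why every fill routine here ends with that label and a
 * cancel/trim path instead of checking each return value.
 */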
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
                              struct netlink_callback *cb)
{
        struct nlmsghdr *nlh;
        struct ndtmsg *ndtmsg;

        nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
                               NLM_F_MULTI);

        ndtmsg = NLMSG_DATA(nlh);

        read_lock_bh(&tbl->lock);
        ndtmsg->ndtm_family = tbl->family;
        ndtmsg->ndtm_pad1   = 0;
        ndtmsg->ndtm_pad2   = 0;

        RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
        RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
        RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
        RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
        RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

        {
                unsigned long now = jiffies;
                unsigned int flush_delta = now - tbl->last_flush;
                unsigned int rand_delta = now - tbl->last_rand;

                struct ndt_config ndc = {
                        .ndtc_key_len           = tbl->key_len,
                        .ndtc_entry_size        = tbl->entry_size,
                        .ndtc_entries           = atomic_read(&tbl->entries),
                        .ndtc_last_flush        = jiffies_to_msecs(flush_delta),
                        .ndtc_last_rand         = jiffies_to_msecs(rand_delta),
                        .ndtc_hash_rnd          = tbl->hash_rnd,
                        .ndtc_hash_mask         = tbl->hash_mask,
                        .ndtc_hash_chain_gc     = tbl->hash_chain_gc,
                        .ndtc_proxy_qlen        = tbl->proxy_queue.qlen,
                };

                RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
        }

        {
                int cpu;
                struct ndt_stats ndst;

                memset(&ndst, 0, sizeof(ndst));

                for_each_possible_cpu(cpu) {
                        struct neigh_statistics *st;

                        st = per_cpu_ptr(tbl->stats, cpu);
                        ndst.ndts_allocs                += st->allocs;
                        ndst.ndts_destroys              += st->destroys;
                        ndst.ndts_hash_grows            += st->hash_grows;
                        ndst.ndts_res_failed            += st->res_failed;
                        ndst.ndts_lookups               += st->lookups;
                        ndst.ndts_hits                  += st->hits;
                        ndst.ndts_rcv_probes_mcast      += st->rcv_probes_mcast;
                        ndst.ndts_rcv_probes_ucast      += st->rcv_probes_ucast;
                        ndst.ndts_periodic_gc_runs      += st->periodic_gc_runs;
                        ndst.ndts_forced_gc_runs        += st->forced_gc_runs;
                }

                RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
        }

        BUG_ON(tbl->parms.dev);
        if (neightbl_fill_parms(skb, &tbl->parms) < 0)
                goto rtattr_failure;

        read_unlock_bh(&tbl->lock);
        return NLMSG_END(skb, nlh);

rtattr_failure:
        read_unlock_bh(&tbl->lock);
        return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
        return -1;
}

static int neightbl_fill_param_info(struct neigh_table *tbl,
                                    struct neigh_parms *parms,
                                    struct sk_buff *skb,
                                    struct netlink_callback *cb)
{
        struct ndtmsg *ndtmsg;
        struct nlmsghdr *nlh;

        nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
                               NLM_F_MULTI);

        ndtmsg = NLMSG_DATA(nlh);

        read_lock_bh(&tbl->lock);
        ndtmsg->ndtm_family = tbl->family;
        ndtmsg->ndtm_pad1   = 0;
        ndtmsg->ndtm_pad2   = 0;
        RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

        if (neightbl_fill_parms(skb, parms) < 0)
                goto rtattr_failure;

        read_unlock_bh(&tbl->lock);
        return NLMSG_END(skb, nlh);

rtattr_failure:
        read_unlock_bh(&tbl->lock);
        return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
        return -1;
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
                                                      int ifindex)
{
        struct neigh_parms *p;

        for (p = &tbl->parms; p; p = p->next)
                if ((p->dev && p->dev->ifindex == ifindex) ||
                    (!p->dev && !ifindex))
                        return p;

        return NULL;
}
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct neigh_table *tbl;
        struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
        struct rtattr **tb = arg;
        int err = -EINVAL;

        if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
                return -EINVAL;

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables; tbl; tbl = tbl->next) {
                if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
                        continue;

                if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
                        break;
        }

        if (tbl == NULL) {
                err = -ENOENT;
                goto errout;
        }

        /*
         * We acquire tbl->lock to be nice to the periodic timers and
         * make sure they always see a consistent set of values.
         */
        write_lock_bh(&tbl->lock);

        if (tb[NDTA_THRESH1 - 1])
                tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

        if (tb[NDTA_THRESH2 - 1])
                tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

        if (tb[NDTA_THRESH3 - 1])
                tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

        if (tb[NDTA_GC_INTERVAL - 1])
                tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

        if (tb[NDTA_PARMS - 1]) {
                struct rtattr *tbp[NDTPA_MAX];
                struct neigh_parms *p;
                u32 ifindex = 0;

                if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
                        goto rtattr_failure;

                if (tbp[NDTPA_IFINDEX - 1])
                        ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

                p = lookup_neigh_params(tbl, ifindex);
                if (p == NULL) {
                        err = -ENOENT;
                        goto rtattr_failure;
                }

                if (tbp[NDTPA_QUEUE_LEN - 1])
                        p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

                if (tbp[NDTPA_PROXY_QLEN - 1])
                        p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

                if (tbp[NDTPA_APP_PROBES - 1])
                        p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

                if (tbp[NDTPA_UCAST_PROBES - 1])
                        p->ucast_probes =
                                RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

                if (tbp[NDTPA_MCAST_PROBES - 1])
                        p->mcast_probes =
                                RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

                if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
                        p->base_reachable_time =
                                RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

                if (tbp[NDTPA_GC_STALETIME - 1])
                        p->gc_staletime =
                                RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

                if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
                        p->delay_probe_time =
                                RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

                if (tbp[NDTPA_RETRANS_TIME - 1])
                        p->retrans_time =
                                RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

                if (tbp[NDTPA_ANYCAST_DELAY - 1])
                        p->anycast_delay =
                                RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

                if (tbp[NDTPA_PROXY_DELAY - 1])
                        p->proxy_delay =
                                RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

                if (tbp[NDTPA_LOCKTIME - 1])
                        p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
        }

        err = 0;

rtattr_failure:
        write_unlock_bh(&tbl->lock);
errout:
        read_unlock(&neigh_tbl_lock);
        return err;
}
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
        int idx, family;
        int s_idx = cb->args[0];
        struct neigh_table *tbl;

        family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
                struct neigh_parms *p;

                if (idx < s_idx || (family && tbl->family != family))
                        continue;

                if (neightbl_fill_info(tbl, skb, cb) <= 0)
                        break;

                for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
                        if (idx < s_idx)
                                continue;

                        if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
                                goto out;
                }
        }
out:
        read_unlock(&neigh_tbl_lock);
        cb->args[0] = idx;

        return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
                           u32 pid, u32 seq, int event, unsigned int flags)
{
        unsigned long now = jiffies;
        unsigned char *b = skb->tail;
        struct nda_cacheinfo ci;
        int locked = 0;
        u32 probes;
        struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
                                         sizeof(struct ndmsg), flags);
        struct ndmsg *ndm = NLMSG_DATA(nlh);

        ndm->ndm_family  = n->ops->family;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = n->flags;
        ndm->ndm_type    = n->type;
        ndm->ndm_ifindex = n->dev->ifindex;
        RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
        read_lock_bh(&n->lock);
        locked           = 1;
        ndm->ndm_state   = n->nud_state;
        if (n->nud_state & NUD_VALID)
                RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
        ci.ndm_used      = now - n->used;
        ci.ndm_confirmed = now - n->confirmed;
        ci.ndm_updated   = now - n->updated;
        ci.ndm_refcnt    = atomic_read(&n->refcnt) - 1;
        probes = atomic_read(&n->probes);
        read_unlock_bh(&n->lock);
        locked           = 0;
        RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
        RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
        nlh->nlmsg_len   = skb->tail - b;
        return skb->len;

nlmsg_failure:
rtattr_failure:
        if (locked)
                read_unlock_bh(&n->lock);
        skb_trim(skb, b - skb->data);
        return -1;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                            struct netlink_callback *cb)
{
        struct neighbour *n;
        int rc, h, s_h = cb->args[1];
        int idx, s_idx = idx = cb->args[2];

        for (h = 0; h <= tbl->hash_mask; h++) {
                if (h < s_h)
                        continue;
                if (h > s_h)
                        s_idx = 0;
                read_lock_bh(&tbl->lock);
                for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
                        if (idx < s_idx)
                                continue;
                        if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
                                            NLM_F_MULTI) <= 0) {
                                read_unlock_bh(&tbl->lock);
                                rc = -1;
                                goto out;
                        }
                }
                read_unlock_bh(&tbl->lock);
        }
        rc = skb->len;
out:
        cb->args[1] = h;
        cb->args[2] = idx;
        return rc;
}
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct neigh_table *tbl;
        int t, family, s_t;

        read_lock(&neigh_tbl_lock);
        family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
        s_t = cb->args[0];

        for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
                if (t < s_t || (family && tbl->family != family))
                        continue;
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args) -
                                                sizeof(cb->args[0]));
                if (neigh_dump_table(tbl, skb, cb) < 0)
                        break;
        }
        read_unlock(&neigh_tbl_lock);

        cb->args[0] = t;
        return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
        int chain;

        read_lock_bh(&tbl->lock);
        for (chain = 0; chain <= tbl->hash_mask; chain++) {
                struct neighbour *n;

                for (n = tbl->hash_buckets[chain]; n; n = n->next)
                        cb(n, cookie);
        }
        read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
                              int (*cb)(struct neighbour *))
{
        int chain;

        for (chain = 0; chain <= tbl->hash_mask; chain++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[chain];
                while ((n = *np) != NULL) {
                        int release;

                        write_lock(&n->lock);
                        release = cb(n);
                        if (release) {
                                *np = n->next;
                                n->dead = 1;
                        } else
                                np = &n->next;
                        write_unlock(&n->lock);
                        if (release)
                                neigh_release(n);
                }
        }
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
        struct neigh_seq_state *state = seq->private;
        struct neigh_table *tbl = state->tbl;
        struct neighbour *n = NULL;
        int bucket = state->bucket;

        state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
        for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
                n = tbl->hash_buckets[bucket];

                while (n) {
                        if (state->neigh_sub_iter) {
                                loff_t fakep = 0;
                                void *v;

                                v = state->neigh_sub_iter(state, n, &fakep);
                                if (!v)
                                        goto next;
                        }
                        if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
                                break;
                        if (n->nud_state & ~NUD_NOARP)
                                break;
                next:
                        n = n->next;
                }

                if (n)
                        break;
        }
        state->bucket = bucket;

        return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
                                        struct neighbour *n,
                                        loff_t *pos)
{
        struct neigh_seq_state *state = seq->private;
        struct neigh_table *tbl = state->tbl;

        if (state->neigh_sub_iter) {
                void *v = state->neigh_sub_iter(state, n, pos);
                if (v)
                        return n;
        }
        n = n->next;

        while (1) {
                while (n) {
                        if (state->neigh_sub_iter) {
                                void *v = state->neigh_sub_iter(state, n, pos);
                                if (v)
                                        return n;
                                goto next;
                        }
                        if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
                                break;

                        if (n->nud_state & ~NUD_NOARP)
                                break;
                next:
                        n = n->next;
                }

                if (n)
                        break;

                if (++state->bucket > tbl->hash_mask)
                        break;

                n = tbl->hash_buckets[state->bucket];
        }

        if (n && pos)
                --(*pos);
        return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
        struct neighbour *n = neigh_get_first(seq);

        if (n) {
                while (*pos) {
                        n = neigh_get_next(seq, n, pos);
                        if (!n)
                                break;
                }
        }
        return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
        struct neigh_seq_state *state = seq->private;
        struct neigh_table *tbl = state->tbl;
        struct pneigh_entry *pn = NULL;
        int bucket = state->bucket;

        state->flags |= NEIGH_SEQ_IS_PNEIGH;
        for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
                pn = tbl->phash_buckets[bucket];
                if (pn)
                        break;
        }
        state->bucket = bucket;

        return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
                                            struct pneigh_entry *pn,
                                            loff_t *pos)
{
        struct neigh_seq_state *state = seq->private;
        struct neigh_table *tbl = state->tbl;

        pn = pn->next;
        while (!pn) {
                if (++state->bucket > PNEIGH_HASHMASK)
                        break;
                pn = tbl->phash_buckets[state->bucket];
        }

        if (pn && pos)
                --(*pos);

        return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
        struct pneigh_entry *pn = pneigh_get_first(seq);

        if (pn) {
                while (*pos) {
                        pn = pneigh_get_next(seq, pn, pos);
                        if (!pn)
                                break;
                }
        }
        return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
        struct neigh_seq_state *state = seq->private;
        void *rc;

        rc = neigh_get_idx(seq, pos);
        if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
                rc = pneigh_get_idx(seq, pos);

        return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
        struct neigh_seq_state *state = seq->private;
        loff_t pos_minus_one;

        state->tbl = tbl;
        state->bucket = 0;
        state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

        read_lock_bh(&tbl->lock);

        pos_minus_one = *pos - 1;
        return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct neigh_seq_state *state;
        void *rc;

        if (v == SEQ_START_TOKEN) {
                rc = neigh_get_idx(seq, pos);
                goto out;
        }

        state = seq->private;
        if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
                rc = neigh_get_next(seq, v, NULL);
                if (rc)
                        goto out;
                if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
                        rc = pneigh_get_first(seq);
        } else {
                BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
                rc = pneigh_get_next(seq, v, NULL);
        }
out:
        ++(*pos);
        return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
{
        struct neigh_seq_state *state = seq->private;
        struct neigh_table *tbl = state->tbl;

        read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct proc_dir_entry *pde = seq->private;
        struct neigh_table *tbl = pde->data;
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos - 1; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu + 1;
                return per_cpu_ptr(tbl->stats, cpu);
        }
        return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct proc_dir_entry *pde = seq->private;
        struct neigh_table *tbl = pde->data;
        int cpu;

        for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu + 1;
                return per_cpu_ptr(tbl->stats, cpu);
        }
        return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
        struct proc_dir_entry *pde = seq->private;
        struct neigh_table *tbl = pde->data;
        struct neigh_statistics *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
                return 0;
        }

        seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
                        "%08lx %08lx %08lx %08lx\n",
                   atomic_read(&tbl->entries),
                   st->allocs,
                   st->destroys,
                   st->hash_grows,
                   st->lookups,
                   st->hits,
                   st->res_failed,
                   st->rcv_probes_mcast,
                   st->rcv_probes_ucast,
                   st->periodic_gc_runs,
                   st->forced_gc_runs
                   );

        return 0;
}

static struct seq_operations neigh_stat_seq_ops = {
        .start  = neigh_stat_seq_start,
        .next   = neigh_stat_seq_next,
        .stop   = neigh_stat_seq_stop,
        .show   = neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &neigh_stat_seq_ops);

        if (!ret) {
                struct seq_file *sf = file->private_data;
                sf->private = PDE(inode);
        }
        return ret;
}

static struct file_operations neigh_stat_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = neigh_stat_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
        struct nlmsghdr *nlh;
        int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
        struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

        if (!skb)
                return;

        if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
                kfree_skb(skb);
                return;
        }
        nlh                       = (struct nlmsghdr *)skb->data;
        nlh->nlmsg_flags          = NLM_F_REQUEST;
        NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
        netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
        struct nlmsghdr *nlh;
        int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
        struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

        if (!skb)
                return;

        if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
                kfree_skb(skb);
                return;
        }
        nlh                       = (struct nlmsghdr *)skb->data;
        NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
        netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
        struct ctl_table_header *sysctl_header;
        ctl_table               neigh_vars[__NET_NEIGH_MAX];
        ctl_table               neigh_dev[2];
        ctl_table               neigh_neigh_dir[2];
        ctl_table               neigh_proto_dir[2];
        ctl_table               neigh_root_dir[2];
} neigh_sysctl_template = {
        .neigh_vars = {
                {
                        .ctl_name       = NET_NEIGH_MCAST_SOLICIT,
                        .procname       = "mcast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_UCAST_SOLICIT,
                        .procname       = "ucast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_APP_SOLICIT,
                        .procname       = "app_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_RETRANS_TIME,
                        .procname       = "retrans_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_userhz_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_REACHABLE_TIME,
                        .procname       = "base_reachable_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_jiffies,
                        .strategy       = &sysctl_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_DELAY_PROBE_TIME,
                        .procname       = "delay_first_probe_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_jiffies,
                        .strategy       = &sysctl_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_STALE_TIME,
                        .procname       = "gc_stale_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_jiffies,
                        .strategy       = &sysctl_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_UNRES_QLEN,
                        .procname       = "unres_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_PROXY_QLEN,
                        .procname       = "proxy_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_ANYCAST_DELAY,
                        .procname       = "anycast_delay",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_userhz_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_PROXY_DELAY,
                        .procname       = "proxy_delay",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_userhz_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_LOCKTIME,
                        .procname       = "locktime",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_userhz_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_INTERVAL,
                        .procname       = "gc_interval",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_jiffies,
                        .strategy       = &sysctl_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_THRESH1,
                        .procname       = "gc_thresh1",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_THRESH2,
                        .procname       = "gc_thresh2",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_THRESH3,
                        .procname       = "gc_thresh3",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_RETRANS_TIME_MS,
                        .procname       = "retrans_time_ms",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_ms_jiffies,
                        .strategy       = &sysctl_ms_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_REACHABLE_TIME_MS,
                        .procname       = "base_reachable_time_ms",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_ms_jiffies,
                        .strategy       = &sysctl_ms_jiffies,
                },
        },
        .neigh_dev = {
                {
                        .ctl_name       = NET_PROTO_CONF_DEFAULT,
                        .procname       = "default",
                        .mode           = 0555,
                },
        },
        .neigh_neigh_dir = {
                {
                        .procname       = "neigh",
                        .mode           = 0555,
                },
        },
        .neigh_proto_dir = {
                {
                        .mode           = 0555,
                },
        },
        .neigh_root_dir = {
                {
                        .ctl_name       = CTL_NET,
                        .procname       = "net",
                        .mode           = 0555,
                },
        },
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                          int p_id, int pdev_id, char *p_name,
                          proc_handler *handler, ctl_handler *strategy)
{
        struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
        const char *dev_name_source = NULL;
        char *dev_name = NULL;
        int err = 0;

        if (!t)
                return -ENOBUFS;
        memcpy(t, &neigh_sysctl_template, sizeof(*t));
        t->neigh_vars[0].data  = &p->mcast_probes;
        t->neigh_vars[1].data  = &p->ucast_probes;
        t->neigh_vars[2].data  = &p->app_probes;
        t->neigh_vars[3].data  = &p->retrans_time;
        t->neigh_vars[4].data  = &p->base_reachable_time;
        t->neigh_vars[5].data  = &p->delay_probe_time;
        t->neigh_vars[6].data  = &p->gc_staletime;
        t->neigh_vars[7].data  = &p->queue_len;
        t->neigh_vars[8].data  = &p->proxy_qlen;
        t->neigh_vars[9].data  = &p->anycast_delay;
        t->neigh_vars[10].data = &p->proxy_delay;
        t->neigh_vars[11].data = &p->locktime;

        if (dev) {
                dev_name_source = dev->name;
                t->neigh_dev[0].ctl_name = dev->ifindex;
                t->neigh_vars[12].procname = NULL;
                t->neigh_vars[13].procname = NULL;
                t->neigh_vars[14].procname = NULL;
                t->neigh_vars[15].procname = NULL;
        } else {
                dev_name_source = t->neigh_dev[0].procname;
                t->neigh_vars[12].data = (int *)(p + 1);
                t->neigh_vars[13].data = (int *)(p + 1) + 1;
                t->neigh_vars[14].data = (int *)(p + 1) + 2;
                t->neigh_vars[15].data = (int *)(p + 1) + 3;
        }

        t->neigh_vars[16].data = &p->retrans_time;
        t->neigh_vars[17].data = &p->base_reachable_time;

        if (handler || strategy) {
                /* RetransTime */
                t->neigh_vars[3].proc_handler = handler;
                t->neigh_vars[3].strategy = strategy;
                t->neigh_vars[3].extra1 = dev;
                /* ReachableTime */
                t->neigh_vars[4].proc_handler = handler;
                t->neigh_vars[4].strategy = strategy;
                t->neigh_vars[4].extra1 = dev;
                /* RetransTime (in milliseconds) */
                t->neigh_vars[16].proc_handler = handler;
                t->neigh_vars[16].strategy = strategy;
                t->neigh_vars[16].extra1 = dev;
                /* ReachableTime (in milliseconds) */
                t->neigh_vars[17].proc_handler = handler;
                t->neigh_vars[17].strategy = strategy;
                t->neigh_vars[17].extra1 = dev;
        }

        dev_name = kstrdup(dev_name_source, GFP_KERNEL);
        if (!dev_name) {
                err = -ENOBUFS;
                goto free;
        }

        t->neigh_dev[0].procname = dev_name;

        t->neigh_neigh_dir[0].ctl_name = pdev_id;

        t->neigh_proto_dir[0].procname = p_name;
        t->neigh_proto_dir[0].ctl_name = p_id;

        t->neigh_dev[0].child          = t->neigh_vars;
        t->neigh_neigh_dir[0].child    = t->neigh_dev;
        t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
        t->neigh_root_dir[0].child     = t->neigh_proto_dir;

        t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
        if (!t->sysctl_header) {
                err = -ENOBUFS;
                goto free_procname;
        }
        p->sysctl_table = t;
        return 0;

free_procname:
        kfree(dev_name);
free:
        kfree(t);

        return err;
}

void neigh_sysctl_unregister(struct neigh_parms *p)
{
        if (p->sysctl_table) {
                struct neigh_sysctl_table *t = p->sysctl_table;
                p->sysctl_table = NULL;
                unregister_sysctl_table(t->sysctl_header);
                kfree(t->neigh_dev[0].procname);
                kfree(t);
        }
}

#endif  /* CONFIG_SYSCTL */
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neightbl_dump_info);
EXPORT_SYMBOL(neightbl_set);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif