1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *	modify it under the terms of the GNU General Public License
10  *	as published by the Free Software Foundation; either version
11  *	2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
39 #define NEIGH_DEBUG 1
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
47 #if NEIGH_DEBUG >= 1
48 #undef NEIGH_PRINTK1
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
50 #endif
51 #if NEIGH_DEBUG >= 2
52 #undef NEIGH_PRINTK2
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
54 #endif
56 #define PNEIGH_HASHMASK 0xF
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
63 static struct neigh_table *neigh_tables;
64 #ifdef CONFIG_PROC_FS
65 static const struct file_operations neigh_stat_seq_fops;
66 #endif
68 /*
69    Neighbour hash table buckets are protected with rwlock tbl->lock.
70 
71    - All scans/updates of hash buckets MUST be made under this lock.
72    - NOTHING clever should be done under this lock: no callbacks
73      into protocol backends, no attempts to send anything to the network.
74      Doing so results in deadlocks if the backend/driver wants to use
75      the neighbour cache.
76    - If an entry requires some non-trivial action, increase
77      its reference count and release the table lock.
78 
79    Neighbour entries are protected:
80    - by reference count.
81    - by rwlock neigh->lock.
82 
83    The reference count prevents destruction.
84 
85    neigh->lock mainly serializes the ll address data and its validity state.
86    However, the same lock is also used to protect other entry fields:
87     - the timer
88     - the resolution queue
89 
90    Again, nothing clever shall be done under neigh->lock;
91    the most complicated procedure we allow is dev->hard_header.
92    dev->hard_header is assumed to be simplistic and to make
93    no callbacks into the neighbour tables.
94 
95    The last lock is neigh_tbl_lock.  It is a pure SMP lock protecting
96    the list of neighbour tables.  This list is used only in process context.
97  */
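/* Editorial sketch (not part of the original file): the scan pattern the
 * rules above imply.  All names except the hypothetical needs_work() and
 * do_slow_work() helpers are real symbols from this file.
 *
 *	write_lock_bh(&tbl->lock);
 *	for (i = 0; i <= tbl->hash_mask; i++)
 *		for (n = tbl->hash_buckets[i]; n; n = n->next)
 *			if (needs_work(n)) {
 *				neigh_hold(n);			\* pin the entry *\
 *				write_unlock_bh(&tbl->lock);
 *				do_slow_work(n);		\* callbacks are safe here *\
 *				neigh_release(n);
 *				write_lock_bh(&tbl->lock);	\* chain may have changed; rescan *\
 *			}
 *	write_unlock_bh(&tbl->lock);
 */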
99 static DEFINE_RWLOCK(neigh_tbl_lock);
101 static int neigh_blackhole(struct sk_buff *skb)
102 {
103 	kfree_skb(skb);
104 	return -ENETDOWN;
105 }
107 static void neigh_cleanup_and_release(struct neighbour *neigh)
108 {
109 	if (neigh->parms->neigh_cleanup)
110 		neigh->parms->neigh_cleanup(neigh);
111 
112 	__neigh_notify(neigh, RTM_DELNEIGH, 0);
113 	neigh_release(neigh);
114 }
116 /*
117  * It is a random distribution in the interval (1/2)*base...(3/2)*base.
118  * It corresponds to the default IPv6 settings and is not overridable,
119  * because it is a really reasonable choice.
120  */
121 
122 unsigned long neigh_rand_reach_time(unsigned long base)
123 {
124 	return (base ? (net_random() % base) + (base >> 1) : 0);
125 }
126 EXPORT_SYMBOL(neigh_rand_reach_time);
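/* Worked example (editorial): with the IPv6 default base_reachable_time of
 * 30 * HZ, neigh_rand_reach_time() returns a value uniformly distributed in
 * [15 * HZ, 45 * HZ), i.e. a ReachableTime between 15 and 45 seconds:
 *
 *	p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
 */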
129 static int neigh_forced_gc(struct neigh_table *tbl)
131 int shrunk = 0;
132 int i;
134 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
136 write_lock_bh(&tbl->lock);
137 for (i = 0; i <= tbl->hash_mask; i++) {
138 struct neighbour *n, **np;
140 np = &tbl->hash_buckets[i];
141 while ((n = *np) != NULL) {
142 			/* Neighbour record may be discarded if:
143 			 * - nobody refers to it.
144 			 * - it is not permanent
145 			 */
146 write_lock(&n->lock);
147 if (atomic_read(&n->refcnt) == 1 &&
148 !(n->nud_state & NUD_PERMANENT)) {
149 *np = n->next;
150 n->dead = 1;
151 shrunk = 1;
152 write_unlock(&n->lock);
153 neigh_cleanup_and_release(n);
154 continue;
156 write_unlock(&n->lock);
157 np = &n->next;
161 tbl->last_flush = jiffies;
163 write_unlock_bh(&tbl->lock);
165 return shrunk;
168 static void neigh_add_timer(struct neighbour *n, unsigned long when)
169 {
170 	neigh_hold(n);
171 	if (unlikely(mod_timer(&n->timer, when))) {
172 		printk("NEIGH: BUG, double timer add, state is %x\n",
173 		       n->nud_state);
174 		dump_stack();
175 	}
176 }
178 static int neigh_del_timer(struct neighbour *n)
179 {
180 	if ((n->nud_state & NUD_IN_TIMER) &&
181 	    del_timer(&n->timer)) {
182 		neigh_release(n);
183 		return 1;
184 	}
185 	return 0;
186 }
188 static void pneigh_queue_purge(struct sk_buff_head *list)
189 {
190 	struct sk_buff *skb;
191 
192 	while ((skb = skb_dequeue(list)) != NULL) {
193 		dev_put(skb->dev);
194 		kfree_skb(skb);
195 	}
196 }
198 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
200 int i;
202 for (i = 0; i <= tbl->hash_mask; i++) {
203 struct neighbour *n, **np = &tbl->hash_buckets[i];
205 while ((n = *np) != NULL) {
206 if (dev && n->dev != dev) {
207 np = &n->next;
208 continue;
210 *np = n->next;
211 write_lock(&n->lock);
212 neigh_del_timer(n);
213 n->dead = 1;
215 if (atomic_read(&n->refcnt) != 1) {
216 			/* The most unpleasant situation:
217 			   we must destroy the neighbour entry,
218 			   but someone still uses it.
219 
220 			   The destroy will be delayed until
221 			   the last user releases us, but
222 			   we must kill timers etc. and move
223 			   it to a safe state.
224 			 */
225 skb_queue_purge(&n->arp_queue);
226 n->output = neigh_blackhole;
227 if (n->nud_state & NUD_VALID)
228 n->nud_state = NUD_NOARP;
229 else
230 n->nud_state = NUD_NONE;
231 NEIGH_PRINTK2("neigh %p is stray.\n", n);
233 write_unlock(&n->lock);
234 neigh_cleanup_and_release(n);
239 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
240 {
241 	write_lock_bh(&tbl->lock);
242 	neigh_flush_dev(tbl, dev);
243 	write_unlock_bh(&tbl->lock);
244 }
245 EXPORT_SYMBOL(neigh_changeaddr);
247 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
248 {
249 	write_lock_bh(&tbl->lock);
250 	neigh_flush_dev(tbl, dev);
251 	pneigh_ifdown(tbl, dev);
252 	write_unlock_bh(&tbl->lock);
253 
254 	del_timer_sync(&tbl->proxy_timer);
255 	pneigh_queue_purge(&tbl->proxy_queue);
256 	return 0;
257 }
258 EXPORT_SYMBOL(neigh_ifdown);
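/* Usage sketch (editorial): protocols typically call these from a netdevice
 * notifier, as ARP does for its table (arp_tbl lives in net/ipv4/arp.c):
 *
 *	case NETDEV_CHANGEADDR:
 *		neigh_changeaddr(&arp_tbl, dev);
 *		break;
 *	case NETDEV_DOWN:
 *		neigh_ifdown(&arp_tbl, dev);
 *		break;
 */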
260 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
262 struct neighbour *n = NULL;
263 unsigned long now = jiffies;
264 int entries;
266 entries = atomic_inc_return(&tbl->entries) - 1;
267 if (entries >= tbl->gc_thresh3 ||
268 (entries >= tbl->gc_thresh2 &&
269 time_after(now, tbl->last_flush + 5 * HZ))) {
270 if (!neigh_forced_gc(tbl) &&
271 entries >= tbl->gc_thresh3)
272 goto out_entries;
275 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
276 if (!n)
277 goto out_entries;
279 skb_queue_head_init(&n->arp_queue);
280 rwlock_init(&n->lock);
281 n->updated = n->used = now;
282 n->nud_state = NUD_NONE;
283 n->output = neigh_blackhole;
284 n->parms = neigh_parms_clone(&tbl->parms);
285 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
287 NEIGH_CACHE_STAT_INC(tbl, allocs);
288 n->tbl = tbl;
289 atomic_set(&n->refcnt, 1);
290 n->dead = 1;
291 out:
292 return n;
294 out_entries:
295 atomic_dec(&tbl->entries);
296 goto out;
299 static struct neighbour **neigh_hash_alloc(unsigned int entries)
300 {
301 	unsigned long size = entries * sizeof(struct neighbour *);
302 	struct neighbour **ret;
303 
304 	if (size <= PAGE_SIZE) {
305 		ret = kzalloc(size, GFP_ATOMIC);
306 	} else {
307 		ret = (struct neighbour **)
308 		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
309 	}
310 	return ret;
311 }
313 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
314 {
315 	unsigned long size = entries * sizeof(struct neighbour *);
316 
317 	if (size <= PAGE_SIZE)
318 		kfree(hash);
319 	else
320 		free_pages((unsigned long)hash, get_order(size));
321 }
323 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
325 struct neighbour **new_hash, **old_hash;
326 unsigned int i, new_hash_mask, old_entries;
328 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
330 BUG_ON(!is_power_of_2(new_entries));
331 new_hash = neigh_hash_alloc(new_entries);
332 if (!new_hash)
333 return;
335 old_entries = tbl->hash_mask + 1;
336 new_hash_mask = new_entries - 1;
337 old_hash = tbl->hash_buckets;
339 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
340 for (i = 0; i < old_entries; i++) {
341 struct neighbour *n, *next;
343 for (n = old_hash[i]; n; n = next) {
344 unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
346 hash_val &= new_hash_mask;
347 next = n->next;
349 n->next = new_hash[hash_val];
350 new_hash[hash_val] = n;
353 tbl->hash_buckets = new_hash;
354 tbl->hash_mask = new_hash_mask;
356 neigh_hash_free(old_hash, old_entries);
359 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
360 struct net_device *dev)
362 struct neighbour *n;
363 int key_len = tbl->key_len;
364 u32 hash_val;
366 NEIGH_CACHE_STAT_INC(tbl, lookups);
368 read_lock_bh(&tbl->lock);
369 hash_val = tbl->hash(pkey, dev);
370 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
371 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
372 neigh_hold(n);
373 NEIGH_CACHE_STAT_INC(tbl, hits);
374 break;
377 read_unlock_bh(&tbl->lock);
378 return n;
380 EXPORT_SYMBOL(neigh_lookup);
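/* Usage sketch (editorial): neigh_lookup() returns the entry with a
 * reference held, so a matching neigh_release() is mandatory.  The IPv4
 * key and arp_tbl are illustrative:
 *
 *	__be32 ip = ...;
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);
 *	if (n) {
 *		\* read n->ha under read_lock_bh(&n->lock) if needed *\
 *		neigh_release(n);
 *	}
 */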
382 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
383 const void *pkey)
385 struct neighbour *n;
386 int key_len = tbl->key_len;
387 u32 hash_val;
389 NEIGH_CACHE_STAT_INC(tbl, lookups);
391 read_lock_bh(&tbl->lock);
392 hash_val = tbl->hash(pkey, NULL);
393 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
394 if (!memcmp(n->primary_key, pkey, key_len) &&
395 net_eq(dev_net(n->dev), net)) {
396 neigh_hold(n);
397 NEIGH_CACHE_STAT_INC(tbl, hits);
398 break;
401 read_unlock_bh(&tbl->lock);
402 return n;
404 EXPORT_SYMBOL(neigh_lookup_nodev);
406 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
407 struct net_device *dev)
409 u32 hash_val;
410 int key_len = tbl->key_len;
411 int error;
412 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
414 if (!n) {
415 rc = ERR_PTR(-ENOBUFS);
416 goto out;
419 memcpy(n->primary_key, pkey, key_len);
420 n->dev = dev;
421 dev_hold(dev);
423 /* Protocol specific setup. */
424 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
425 rc = ERR_PTR(error);
426 goto out_neigh_release;
429 /* Device specific setup. */
430 if (n->parms->neigh_setup &&
431 (error = n->parms->neigh_setup(n)) < 0) {
432 rc = ERR_PTR(error);
433 goto out_neigh_release;
436 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
438 write_lock_bh(&tbl->lock);
440 if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
441 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
443 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
445 if (n->parms->dead) {
446 rc = ERR_PTR(-EINVAL);
447 goto out_tbl_unlock;
450 for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
451 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
452 neigh_hold(n1);
453 rc = n1;
454 goto out_tbl_unlock;
458 n->next = tbl->hash_buckets[hash_val];
459 tbl->hash_buckets[hash_val] = n;
460 n->dead = 0;
461 neigh_hold(n);
462 write_unlock_bh(&tbl->lock);
463 NEIGH_PRINTK2("neigh %p is created.\n", n);
464 rc = n;
465 out:
466 return rc;
467 out_tbl_unlock:
468 write_unlock_bh(&tbl->lock);
469 out_neigh_release:
470 neigh_release(n);
471 goto out;
473 EXPORT_SYMBOL(neigh_create);
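/* Usage sketch (editorial): unlike neigh_lookup(), neigh_create() never
 * returns NULL; errors are encoded with ERR_PTR(), so test with IS_ERR():
 *
 *	struct neighbour *n = neigh_create(&arp_tbl, &ip, dev);
 *	if (IS_ERR(n))
 *		return PTR_ERR(n);
 *	\* n is referenced; neigh_release(n) when done *\
 */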
475 static u32 pneigh_hash(const void *pkey, int key_len)
476 {
477 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
478 	hash_val ^= (hash_val >> 16);
479 	hash_val ^= hash_val >> 8;
480 	hash_val ^= hash_val >> 4;
481 	hash_val &= PNEIGH_HASHMASK;
482 	return hash_val;
483 }
485 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
486 struct net *net,
487 const void *pkey,
488 int key_len,
489 struct net_device *dev)
491 while (n) {
492 if (!memcmp(n->key, pkey, key_len) &&
493 net_eq(pneigh_net(n), net) &&
494 (n->dev == dev || !n->dev))
495 return n;
496 n = n->next;
498 return NULL;
501 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
502 struct net *net, const void *pkey, struct net_device *dev)
504 int key_len = tbl->key_len;
505 u32 hash_val = pneigh_hash(pkey, key_len);
507 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
508 net, pkey, key_len, dev);
510 EXPORT_SYMBOL_GPL(__pneigh_lookup);
512 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
513 struct net *net, const void *pkey,
514 struct net_device *dev, int creat)
516 struct pneigh_entry *n;
517 int key_len = tbl->key_len;
518 u32 hash_val = pneigh_hash(pkey, key_len);
520 read_lock_bh(&tbl->lock);
521 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
522 net, pkey, key_len, dev);
523 read_unlock_bh(&tbl->lock);
525 if (n || !creat)
526 goto out;
528 ASSERT_RTNL();
530 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
531 if (!n)
532 goto out;
534 write_pnet(&n->net, hold_net(net));
535 memcpy(n->key, pkey, key_len);
536 n->dev = dev;
537 if (dev)
538 dev_hold(dev);
540 if (tbl->pconstructor && tbl->pconstructor(n)) {
541 if (dev)
542 dev_put(dev);
543 release_net(net);
544 kfree(n);
545 n = NULL;
546 goto out;
549 write_lock_bh(&tbl->lock);
550 n->next = tbl->phash_buckets[hash_val];
551 tbl->phash_buckets[hash_val] = n;
552 write_unlock_bh(&tbl->lock);
553 out:
554 return n;
556 EXPORT_SYMBOL(pneigh_lookup);
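/* Usage sketch (editorial): with creat == 0 this is a pure lookup; with
 * creat != 0 it may sleep (GFP_KERNEL) and must run under the RTNL, as the
 * ASSERT_RTNL() above enforces:
 *
 *	ASSERT_RTNL();
 *	pn = pneigh_lookup(&arp_tbl, net, &ip, dev, 1);
 *	if (!pn)
 *		return -ENOBUFS;
 */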
559 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
560 struct net_device *dev)
562 struct pneigh_entry *n, **np;
563 int key_len = tbl->key_len;
564 u32 hash_val = pneigh_hash(pkey, key_len);
566 write_lock_bh(&tbl->lock);
567 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
568 np = &n->next) {
569 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
570 net_eq(pneigh_net(n), net)) {
571 *np = n->next;
572 write_unlock_bh(&tbl->lock);
573 if (tbl->pdestructor)
574 tbl->pdestructor(n);
575 if (n->dev)
576 dev_put(n->dev);
577 release_net(pneigh_net(n));
578 kfree(n);
579 return 0;
582 write_unlock_bh(&tbl->lock);
583 return -ENOENT;
586 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
588 struct pneigh_entry *n, **np;
589 u32 h;
591 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
592 np = &tbl->phash_buckets[h];
593 while ((n = *np) != NULL) {
594 if (!dev || n->dev == dev) {
595 *np = n->next;
596 if (tbl->pdestructor)
597 tbl->pdestructor(n);
598 if (n->dev)
599 dev_put(n->dev);
600 release_net(pneigh_net(n));
601 kfree(n);
602 continue;
604 np = &n->next;
607 return -ENOENT;
610 static void neigh_parms_destroy(struct neigh_parms *parms);
612 static inline void neigh_parms_put(struct neigh_parms *parms)
613 {
614 	if (atomic_dec_and_test(&parms->refcnt))
615 		neigh_parms_destroy(parms);
616 }
617 
618 /*
619  *	neighbour must already be out of the table;
620  */
622 void neigh_destroy(struct neighbour *neigh)
624 struct hh_cache *hh;
626 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
628 if (!neigh->dead) {
629 printk(KERN_WARNING
630 "Destroying alive neighbour %p\n", neigh);
631 dump_stack();
632 return;
635 if (neigh_del_timer(neigh))
636 printk(KERN_WARNING "Impossible event.\n");
638 while ((hh = neigh->hh) != NULL) {
639 neigh->hh = hh->hh_next;
640 hh->hh_next = NULL;
642 write_seqlock_bh(&hh->hh_lock);
643 hh->hh_output = neigh_blackhole;
644 write_sequnlock_bh(&hh->hh_lock);
645 if (atomic_dec_and_test(&hh->hh_refcnt))
646 kfree(hh);
649 skb_queue_purge(&neigh->arp_queue);
651 dev_put(neigh->dev);
652 neigh_parms_put(neigh->parms);
654 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
656 atomic_dec(&neigh->tbl->entries);
657 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
659 EXPORT_SYMBOL(neigh_destroy);
661 /* Neighbour state is suspicious;
662    disable fast path.
663 
664    Called with neigh write-locked.
665  */
666 static void neigh_suspect(struct neighbour *neigh)
668 struct hh_cache *hh;
670 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
672 neigh->output = neigh->ops->output;
674 for (hh = neigh->hh; hh; hh = hh->hh_next)
675 hh->hh_output = neigh->ops->output;
678 /* Neighbour state is OK;
679    enable fast path.
680 
681    Called with neigh write-locked.
682  */
683 static void neigh_connect(struct neighbour *neigh)
685 struct hh_cache *hh;
687 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
689 neigh->output = neigh->ops->connected_output;
691 for (hh = neigh->hh; hh; hh = hh->hh_next)
692 hh->hh_output = neigh->ops->hh_output;
695 static void neigh_periodic_timer(unsigned long arg)
697 struct neigh_table *tbl = (struct neigh_table *)arg;
698 struct neighbour *n, **np;
699 unsigned long expire, now = jiffies;
701 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
703 write_lock(&tbl->lock);
705 	/*
706 	 *	periodically recompute ReachableTime from random function
707 	 */
709 if (time_after(now, tbl->last_rand + 300 * HZ)) {
710 struct neigh_parms *p;
711 tbl->last_rand = now;
712 for (p = &tbl->parms; p; p = p->next)
713 p->reachable_time =
714 neigh_rand_reach_time(p->base_reachable_time);
717 np = &tbl->hash_buckets[tbl->hash_chain_gc];
718 tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
720 while ((n = *np) != NULL) {
721 unsigned int state;
723 write_lock(&n->lock);
725 state = n->nud_state;
726 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
727 write_unlock(&n->lock);
728 goto next_elt;
731 if (time_before(n->used, n->confirmed))
732 n->used = n->confirmed;
734 if (atomic_read(&n->refcnt) == 1 &&
735 (state == NUD_FAILED ||
736 time_after(now, n->used + n->parms->gc_staletime))) {
737 *np = n->next;
738 n->dead = 1;
739 write_unlock(&n->lock);
740 neigh_cleanup_and_release(n);
741 continue;
743 write_unlock(&n->lock);
745 next_elt:
746 np = &n->next;
749 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
750 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
751 	 * base_reachable_time.
752 	 */
753 expire = tbl->parms.base_reachable_time >> 1;
754 expire /= (tbl->hash_mask + 1);
755 if (!expire)
756 expire = 1;
758 	if (expire > HZ)
759 mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
760 else
761 mod_timer(&tbl->gc_timer, now + expire);
763 write_unlock(&tbl->lock);
766 static __inline__ int neigh_max_probes(struct neighbour *n)
767 {
768 	struct neigh_parms *p = n->parms;
769 	return (n->nud_state & NUD_PROBE ?
770 		p->ucast_probes :
771 		p->ucast_probes + p->app_probes + p->mcast_probes);
772 }
774 /* Called when a timer expires for a neighbour entry. */
776 static void neigh_timer_handler(unsigned long arg)
778 unsigned long now, next;
779 struct neighbour *neigh = (struct neighbour *)arg;
780 unsigned state;
781 int notify = 0;
783 write_lock(&neigh->lock);
785 state = neigh->nud_state;
786 now = jiffies;
787 next = now + HZ;
789 if (!(state & NUD_IN_TIMER)) {
790 #ifndef CONFIG_SMP
791 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
792 #endif
793 goto out;
796 if (state & NUD_REACHABLE) {
797 if (time_before_eq(now,
798 neigh->confirmed + neigh->parms->reachable_time)) {
799 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
800 next = neigh->confirmed + neigh->parms->reachable_time;
801 } else if (time_before_eq(now,
802 neigh->used + neigh->parms->delay_probe_time)) {
803 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
804 neigh->nud_state = NUD_DELAY;
805 neigh->updated = jiffies;
806 neigh_suspect(neigh);
807 next = now + neigh->parms->delay_probe_time;
808 } else {
809 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
810 neigh->nud_state = NUD_STALE;
811 neigh->updated = jiffies;
812 neigh_suspect(neigh);
813 notify = 1;
815 } else if (state & NUD_DELAY) {
816 if (time_before_eq(now,
817 neigh->confirmed + neigh->parms->delay_probe_time)) {
818 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
819 neigh->nud_state = NUD_REACHABLE;
820 neigh->updated = jiffies;
821 neigh_connect(neigh);
822 notify = 1;
823 next = neigh->confirmed + neigh->parms->reachable_time;
824 } else {
825 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
826 neigh->nud_state = NUD_PROBE;
827 neigh->updated = jiffies;
828 atomic_set(&neigh->probes, 0);
829 next = now + neigh->parms->retrans_time;
831 } else {
832 /* NUD_PROBE|NUD_INCOMPLETE */
833 next = now + neigh->parms->retrans_time;
836 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
837 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
838 struct sk_buff *skb;
840 neigh->nud_state = NUD_FAILED;
841 neigh->updated = jiffies;
842 notify = 1;
843 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
844 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
846 		/* This is a very delicate spot: report_unreachable is a very
847 		   complicated routine.  In particular, it can hit this same
848 		   neighbour entry!
849 		   So we try to be careful and avoid a dead loop. --ANK
850 		 */
851 while (neigh->nud_state == NUD_FAILED &&
852 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
853 write_unlock(&neigh->lock);
854 neigh->ops->error_report(neigh, skb);
855 write_lock(&neigh->lock);
857 skb_queue_purge(&neigh->arp_queue);
860 if (neigh->nud_state & NUD_IN_TIMER) {
861 if (time_before(next, jiffies + HZ/2))
862 next = jiffies + HZ/2;
863 if (!mod_timer(&neigh->timer, next))
864 neigh_hold(neigh);
866 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
867 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
868 /* keep skb alive even if arp_queue overflows */
869 if (skb)
870 skb = skb_copy(skb, GFP_ATOMIC);
871 write_unlock(&neigh->lock);
872 neigh->ops->solicit(neigh, skb);
873 atomic_inc(&neigh->probes);
874 kfree_skb(skb);
875 } else {
876 out:
877 write_unlock(&neigh->lock);
880 if (notify)
881 neigh_update_notify(neigh);
883 neigh_release(neigh);
886 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
888 int rc;
889 unsigned long now;
891 write_lock_bh(&neigh->lock);
893 rc = 0;
894 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
895 goto out_unlock_bh;
897 now = jiffies;
899 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
900 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
901 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
902 neigh->nud_state = NUD_INCOMPLETE;
903 neigh->updated = jiffies;
904 neigh_add_timer(neigh, now + 1);
905 } else {
906 neigh->nud_state = NUD_FAILED;
907 neigh->updated = jiffies;
908 write_unlock_bh(&neigh->lock);
910 kfree_skb(skb);
911 return 1;
913 } else if (neigh->nud_state & NUD_STALE) {
914 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
915 neigh->nud_state = NUD_DELAY;
916 neigh->updated = jiffies;
917 neigh_add_timer(neigh,
918 jiffies + neigh->parms->delay_probe_time);
921 if (neigh->nud_state == NUD_INCOMPLETE) {
922 if (skb) {
923 if (skb_queue_len(&neigh->arp_queue) >=
924 neigh->parms->queue_len) {
925 struct sk_buff *buff;
926 buff = __skb_dequeue(&neigh->arp_queue);
927 kfree_skb(buff);
928 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
930 __skb_queue_tail(&neigh->arp_queue, skb);
932 rc = 1;
934 out_unlock_bh:
935 write_unlock_bh(&neigh->lock);
936 return rc;
938 EXPORT_SYMBOL(__neigh_event_send);
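/* Usage sketch (editorial): callers normally use the neigh_event_send()
 * wrapper from <net/neighbour.h>, which short-circuits when the entry is
 * already usable.  A return of 0 means "transmit now"; nonzero means the
 * skb was queued (or dropped) pending resolution:
 *
 *	if (!neigh_event_send(neigh, skb)) {
 *		\* build the hardware header and transmit *\
 *	}
 */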
940 static void neigh_update_hhs(struct neighbour *neigh)
942 struct hh_cache *hh;
943 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
944 = neigh->dev->header_ops->cache_update;
946 if (update) {
947 for (hh = neigh->hh; hh; hh = hh->hh_next) {
948 write_seqlock_bh(&hh->hh_lock);
949 update(hh, neigh->dev, neigh->ha);
950 write_sequnlock_bh(&hh->hh_lock);
957 /* Generic update routine.
958    -- lladdr is the new lladdr, or NULL if not supplied.
959    -- new    is the new state.
960    -- flags
961 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
962 				if it is different.
963 	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
964 				lladdr suspect instead of overriding it,
965 				if it is different.
966 				It also allows retaining the current state
967 				if the lladdr is unchanged.
968 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
969 
970 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
971 				NTF_ROUTER flag.
972 	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
973 				to be a router.
974 
975    Caller MUST hold a reference count on the entry.
976  */
978 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
979 u32 flags)
981 u8 old;
982 int err;
983 int notify = 0;
984 struct net_device *dev;
985 int update_isrouter = 0;
987 write_lock_bh(&neigh->lock);
989 dev = neigh->dev;
990 old = neigh->nud_state;
991 err = -EPERM;
993 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
994 (old & (NUD_NOARP | NUD_PERMANENT)))
995 goto out;
997 if (!(new & NUD_VALID)) {
998 neigh_del_timer(neigh);
999 if (old & NUD_CONNECTED)
1000 neigh_suspect(neigh);
1001 neigh->nud_state = new;
1002 err = 0;
1003 notify = old & NUD_VALID;
1004 goto out;
1007 /* Compare new lladdr with cached one */
1008 if (!dev->addr_len) {
1009 /* First case: device needs no address. */
1010 lladdr = neigh->ha;
1011 } else if (lladdr) {
1012 		/* The second case: if something is already cached
1013 		   and a new address is proposed:
1014 		   - compare new & old
1015 		   - if they are different, check the override flag
1016 		 */
1017 if ((old & NUD_VALID) &&
1018 !memcmp(lladdr, neigh->ha, dev->addr_len))
1019 lladdr = neigh->ha;
1020 } else {
1021 		/* No address is supplied; if we know something,
1022 		   use it, otherwise discard the request.
1023 		 */
1024 err = -EINVAL;
1025 if (!(old & NUD_VALID))
1026 goto out;
1027 lladdr = neigh->ha;
1030 if (new & NUD_CONNECTED)
1031 neigh->confirmed = jiffies;
1032 neigh->updated = jiffies;
1034 	/* If the entry was valid and the address has not changed,
1035 	   do not change the entry state if the new one is STALE.
1036 	 */
1037 err = 0;
1038 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1039 if (old & NUD_VALID) {
1040 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1041 update_isrouter = 0;
1042 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1043 (old & NUD_CONNECTED)) {
1044 lladdr = neigh->ha;
1045 new = NUD_STALE;
1046 } else
1047 goto out;
1048 } else {
1049 		if (lladdr == neigh->ha && new == NUD_STALE &&
1050 		    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1051 		     (old & NUD_CONNECTED))
1052 		    )
1053 			new = old;
1057 if (new != old) {
1058 neigh_del_timer(neigh);
1059 if (new & NUD_IN_TIMER)
1060 neigh_add_timer(neigh, (jiffies +
1061 ((new & NUD_REACHABLE) ?
1062 neigh->parms->reachable_time :
1063 0)));
1064 neigh->nud_state = new;
1067 if (lladdr != neigh->ha) {
1068 memcpy(&neigh->ha, lladdr, dev->addr_len);
1069 neigh_update_hhs(neigh);
1070 if (!(new & NUD_CONNECTED))
1071 neigh->confirmed = jiffies -
1072 (neigh->parms->base_reachable_time << 1);
1073 notify = 1;
1075 if (new == old)
1076 goto out;
1077 if (new & NUD_CONNECTED)
1078 neigh_connect(neigh);
1079 else
1080 neigh_suspect(neigh);
1081 if (!(old & NUD_VALID)) {
1082 struct sk_buff *skb;
1084 /* Again: avoid dead loop if something went wrong */
1086 while (neigh->nud_state & NUD_VALID &&
1087 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1088 struct neighbour *n1 = neigh;
1089 write_unlock_bh(&neigh->lock);
1090 /* On shaper/eql skb->dst->neighbour != neigh :( */
1091 if (skb_dst(skb) && skb_dst(skb)->neighbour)
1092 n1 = skb_dst(skb)->neighbour;
1093 n1->output(skb);
1094 write_lock_bh(&neigh->lock);
1096 skb_queue_purge(&neigh->arp_queue);
1098 out:
1099 if (update_isrouter) {
1100 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1101 (neigh->flags | NTF_ROUTER) :
1102 (neigh->flags & ~NTF_ROUTER);
1104 write_unlock_bh(&neigh->lock);
1106 if (notify)
1107 neigh_update_notify(neigh);
1109 return err;
1111 EXPORT_SYMBOL(neigh_update);
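/* Usage sketch (editorial): an administrative update, as performed by
 * neigh_delete() below, forces an entry to NUD_FAILED regardless of its
 * current state:
 *
 *	err = neigh_update(neigh, NULL, NUD_FAILED,
 *			   NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 */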
1113 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1114 u8 *lladdr, void *saddr,
1115 struct net_device *dev)
1117 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1118 lladdr || !dev->addr_len);
1119 if (neigh)
1120 neigh_update(neigh, lladdr, NUD_STALE,
1121 NEIGH_UPDATE_F_OVERRIDE);
1122 return neigh;
1124 EXPORT_SYMBOL(neigh_event_ns);
1126 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1127 __be16 protocol)
1129 struct hh_cache *hh;
1130 struct net_device *dev = dst->dev;
1132 for (hh = n->hh; hh; hh = hh->hh_next)
1133 if (hh->hh_type == protocol)
1134 break;
1136 if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1137 seqlock_init(&hh->hh_lock);
1138 hh->hh_type = protocol;
1139 atomic_set(&hh->hh_refcnt, 0);
1140 hh->hh_next = NULL;
1142 if (dev->header_ops->cache(n, hh)) {
1143 kfree(hh);
1144 hh = NULL;
1145 } else {
1146 atomic_inc(&hh->hh_refcnt);
1147 hh->hh_next = n->hh;
1148 n->hh = hh;
1149 if (n->nud_state & NUD_CONNECTED)
1150 hh->hh_output = n->ops->hh_output;
1151 else
1152 hh->hh_output = n->ops->output;
1155 if (hh) {
1156 atomic_inc(&hh->hh_refcnt);
1157 dst->hh = hh;
1161 /* This function can be used in contexts where only the old dev_queue_xmit
1162    worked, e.g. if you want to override the normal output path (eql, shaper),
1163    but resolution has not been made yet.
1164  */
1166 int neigh_compat_output(struct sk_buff *skb)
1168 struct net_device *dev = skb->dev;
1170 __skb_pull(skb, skb_network_offset(skb));
1172 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1173 skb->len) < 0 &&
1174 dev->header_ops->rebuild(skb))
1175 return 0;
1177 return dev_queue_xmit(skb);
1179 EXPORT_SYMBOL(neigh_compat_output);
1181 /* Slow and careful. */
1183 int neigh_resolve_output(struct sk_buff *skb)
1185 struct dst_entry *dst = skb_dst(skb);
1186 struct neighbour *neigh;
1187 int rc = 0;
1189 if (!dst || !(neigh = dst->neighbour))
1190 goto discard;
1192 __skb_pull(skb, skb_network_offset(skb));
1194 if (!neigh_event_send(neigh, skb)) {
1195 int err;
1196 struct net_device *dev = neigh->dev;
1197 if (dev->header_ops->cache && !dst->hh) {
1198 write_lock_bh(&neigh->lock);
1199 if (!dst->hh)
1200 neigh_hh_init(neigh, dst, dst->ops->protocol);
1201 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1202 neigh->ha, NULL, skb->len);
1203 write_unlock_bh(&neigh->lock);
1204 } else {
1205 read_lock_bh(&neigh->lock);
1206 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1207 neigh->ha, NULL, skb->len);
1208 read_unlock_bh(&neigh->lock);
1210 if (err >= 0)
1211 rc = neigh->ops->queue_xmit(skb);
1212 else
1213 goto out_kfree_skb;
1215 out:
1216 return rc;
1217 discard:
1218 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1219 dst, dst ? dst->neighbour : NULL);
1220 out_kfree_skb:
1221 rc = -EINVAL;
1222 kfree_skb(skb);
1223 goto out;
1225 EXPORT_SYMBOL(neigh_resolve_output);
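/* Editorial sketch: protocols plug these output routines into their
 * struct neigh_ops; the field values below mirror an ARP-style backend
 * and are illustrative (solicit/error_report omitted for brevity):
 *
 *	static const struct neigh_ops arp_like_ops = {
 *		.family           = AF_INET,
 *		.output           = neigh_resolve_output,
 *		.connected_output = neigh_connected_output,
 *		.hh_output        = dev_queue_xmit,
 *		.queue_xmit       = dev_queue_xmit,
 *	};
 */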
1227 /* As fast as possible without hh cache */
1229 int neigh_connected_output(struct sk_buff *skb)
1231 int err;
1232 struct dst_entry *dst = skb_dst(skb);
1233 struct neighbour *neigh = dst->neighbour;
1234 struct net_device *dev = neigh->dev;
1236 __skb_pull(skb, skb_network_offset(skb));
1238 read_lock_bh(&neigh->lock);
1239 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1240 neigh->ha, NULL, skb->len);
1241 read_unlock_bh(&neigh->lock);
1242 if (err >= 0)
1243 err = neigh->ops->queue_xmit(skb);
1244 else {
1245 err = -EINVAL;
1246 kfree_skb(skb);
1248 return err;
1250 EXPORT_SYMBOL(neigh_connected_output);
1252 static void neigh_proxy_process(unsigned long arg)
1254 struct neigh_table *tbl = (struct neigh_table *)arg;
1255 long sched_next = 0;
1256 unsigned long now = jiffies;
1257 struct sk_buff *skb, *n;
1259 spin_lock(&tbl->proxy_queue.lock);
1261 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1262 long tdif = NEIGH_CB(skb)->sched_next - now;
1264 if (tdif <= 0) {
1265 struct net_device *dev = skb->dev;
1266 __skb_unlink(skb, &tbl->proxy_queue);
1267 if (tbl->proxy_redo && netif_running(dev))
1268 tbl->proxy_redo(skb);
1269 else
1270 kfree_skb(skb);
1272 dev_put(dev);
1273 } else if (!sched_next || tdif < sched_next)
1274 sched_next = tdif;
1276 del_timer(&tbl->proxy_timer);
1277 if (sched_next)
1278 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1279 spin_unlock(&tbl->proxy_queue.lock);
1282 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1283 struct sk_buff *skb)
1285 unsigned long now = jiffies;
1286 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1288 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1289 kfree_skb(skb);
1290 return;
1293 NEIGH_CB(skb)->sched_next = sched_next;
1294 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1296 spin_lock(&tbl->proxy_queue.lock);
1297 if (del_timer(&tbl->proxy_timer)) {
1298 if (time_before(tbl->proxy_timer.expires, sched_next))
1299 sched_next = tbl->proxy_timer.expires;
1301 skb_dst_drop(skb);
1302 dev_hold(skb->dev);
1303 __skb_queue_tail(&tbl->proxy_queue, skb);
1304 mod_timer(&tbl->proxy_timer, sched_next);
1305 spin_unlock(&tbl->proxy_queue.lock);
1307 EXPORT_SYMBOL(pneigh_enqueue);
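/* Usage sketch (editorial): ARP uses this to delay proxy replies by a
 * random 0..proxy_delay interval; neigh_proxy_process() above later feeds
 * the queued skbs back through tbl->proxy_redo.  Roughly:
 *
 *	if (pneigh_lookup(&arp_tbl, net, &tip, dev, 0) &&
 *	    parms->proxy_delay)
 *		pneigh_enqueue(&arp_tbl, parms, skb);
 */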
1309 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1310 struct net *net, int ifindex)
1312 struct neigh_parms *p;
1314 for (p = &tbl->parms; p; p = p->next) {
1315 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1316 (!p->dev && !ifindex))
1317 return p;
1320 return NULL;
1323 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1324 struct neigh_table *tbl)
1326 struct neigh_parms *p, *ref;
1327 struct net *net = dev_net(dev);
1328 const struct net_device_ops *ops = dev->netdev_ops;
1330 ref = lookup_neigh_params(tbl, net, 0);
1331 if (!ref)
1332 return NULL;
1334 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1335 if (p) {
1336 p->tbl = tbl;
1337 atomic_set(&p->refcnt, 1);
1338 p->reachable_time =
1339 neigh_rand_reach_time(p->base_reachable_time);
1341 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1342 kfree(p);
1343 return NULL;
1346 dev_hold(dev);
1347 p->dev = dev;
1348 write_pnet(&p->net, hold_net(net));
1349 p->sysctl_table = NULL;
1350 write_lock_bh(&tbl->lock);
1351 p->next = tbl->parms.next;
1352 tbl->parms.next = p;
1353 write_unlock_bh(&tbl->lock);
1355 return p;
1357 EXPORT_SYMBOL(neigh_parms_alloc);
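/* Usage sketch (editorial): per-device parms are cloned from the table
 * defaults when a protocol attaches to a device, and released when it
 * detaches:
 *
 *	p = neigh_parms_alloc(dev, &arp_tbl);
 *	if (!p)
 *		return -ENOBUFS;
 *	...
 *	neigh_parms_release(&arp_tbl, p);
 */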
1359 static void neigh_rcu_free_parms(struct rcu_head *head)
1361 struct neigh_parms *parms =
1362 container_of(head, struct neigh_parms, rcu_head);
1364 neigh_parms_put(parms);
1367 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1369 struct neigh_parms **p;
1371 if (!parms || parms == &tbl->parms)
1372 return;
1373 write_lock_bh(&tbl->lock);
1374 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1375 if (*p == parms) {
1376 *p = parms->next;
1377 parms->dead = 1;
1378 write_unlock_bh(&tbl->lock);
1379 if (parms->dev)
1380 dev_put(parms->dev);
1381 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1382 return;
1385 write_unlock_bh(&tbl->lock);
1386 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1388 EXPORT_SYMBOL(neigh_parms_release);
1390 static void neigh_parms_destroy(struct neigh_parms *parms)
1392 release_net(neigh_parms_net(parms));
1393 kfree(parms);
1396 static struct lock_class_key neigh_table_proxy_queue_class;
1398 void neigh_table_init_no_netlink(struct neigh_table *tbl)
1400 unsigned long now = jiffies;
1401 unsigned long phsize;
1403 write_pnet(&tbl->parms.net, &init_net);
1404 atomic_set(&tbl->parms.refcnt, 1);
1405 tbl->parms.reachable_time =
1406 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1408 if (!tbl->kmem_cachep)
1409 tbl->kmem_cachep =
1410 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1411 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1412 NULL);
1413 tbl->stats = alloc_percpu(struct neigh_statistics);
1414 if (!tbl->stats)
1415 panic("cannot create neighbour cache statistics");
1417 #ifdef CONFIG_PROC_FS
1418 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1419 &neigh_stat_seq_fops, tbl))
1420 panic("cannot create neighbour proc dir entry");
1421 #endif
1423 tbl->hash_mask = 1;
1424 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1426 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1427 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1429 if (!tbl->hash_buckets || !tbl->phash_buckets)
1430 panic("cannot allocate neighbour cache hashes");
1432 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1434 rwlock_init(&tbl->lock);
1435 setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
1436 tbl->gc_timer.expires = now + 1;
1437 add_timer(&tbl->gc_timer);
1439 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1440 skb_queue_head_init_class(&tbl->proxy_queue,
1441 &neigh_table_proxy_queue_class);
1443 tbl->last_flush = now;
1444 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1446 EXPORT_SYMBOL(neigh_table_init_no_netlink);
1448 void neigh_table_init(struct neigh_table *tbl)
1450 struct neigh_table *tmp;
1452 neigh_table_init_no_netlink(tbl);
1453 write_lock(&neigh_tbl_lock);
1454 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1455 if (tmp->family == tbl->family)
1456 break;
1458 tbl->next = neigh_tables;
1459 neigh_tables = tbl;
1460 write_unlock(&neigh_tbl_lock);
1462 if (unlikely(tmp)) {
1463 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1464 "family %d\n", tbl->family);
1465 dump_stack();
1468 EXPORT_SYMBOL(neigh_table_init);
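/* Usage sketch (editorial): a protocol registers its table once at init
 * time, after filling in the static fields (id, family, entry_size,
 * key_len, hash, constructor, default parms, ...):
 *
 *	neigh_table_init(&arp_tbl);
 *
 * and tears it down with neigh_table_clear() on unload.
 */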
1470 int neigh_table_clear(struct neigh_table *tbl)
1472 struct neigh_table **tp;
1474 /* It is not clean... Fix it to unload IPv6 module safely */
1475 del_timer_sync(&tbl->gc_timer);
1476 del_timer_sync(&tbl->proxy_timer);
1477 pneigh_queue_purge(&tbl->proxy_queue);
1478 neigh_ifdown(tbl, NULL);
1479 if (atomic_read(&tbl->entries))
1480 printk(KERN_CRIT "neighbour leakage\n");
1481 write_lock(&neigh_tbl_lock);
1482 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1483 if (*tp == tbl) {
1484 *tp = tbl->next;
1485 break;
1488 write_unlock(&neigh_tbl_lock);
1490 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1491 tbl->hash_buckets = NULL;
1493 kfree(tbl->phash_buckets);
1494 tbl->phash_buckets = NULL;
1496 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1498 free_percpu(tbl->stats);
1499 tbl->stats = NULL;
1501 kmem_cache_destroy(tbl->kmem_cachep);
1502 tbl->kmem_cachep = NULL;
1504 return 0;
1506 EXPORT_SYMBOL(neigh_table_clear);
1508 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1510 struct net *net = sock_net(skb->sk);
1511 struct ndmsg *ndm;
1512 struct nlattr *dst_attr;
1513 struct neigh_table *tbl;
1514 struct net_device *dev = NULL;
1515 int err = -EINVAL;
1517 if (nlmsg_len(nlh) < sizeof(*ndm))
1518 goto out;
1520 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1521 if (dst_attr == NULL)
1522 goto out;
1524 ndm = nlmsg_data(nlh);
1525 if (ndm->ndm_ifindex) {
1526 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1527 if (dev == NULL) {
1528 err = -ENODEV;
1529 goto out;
1533 read_lock(&neigh_tbl_lock);
1534 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1535 struct neighbour *neigh;
1537 if (tbl->family != ndm->ndm_family)
1538 continue;
1539 read_unlock(&neigh_tbl_lock);
1541 if (nla_len(dst_attr) < tbl->key_len)
1542 goto out_dev_put;
1544 if (ndm->ndm_flags & NTF_PROXY) {
1545 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1546 goto out_dev_put;
1549 if (dev == NULL)
1550 goto out_dev_put;
1552 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1553 if (neigh == NULL) {
1554 err = -ENOENT;
1555 goto out_dev_put;
1558 err = neigh_update(neigh, NULL, NUD_FAILED,
1559 NEIGH_UPDATE_F_OVERRIDE |
1560 NEIGH_UPDATE_F_ADMIN);
1561 neigh_release(neigh);
1562 goto out_dev_put;
1564 read_unlock(&neigh_tbl_lock);
1565 err = -EAFNOSUPPORT;
1567 out_dev_put:
1568 if (dev)
1569 dev_put(dev);
1570 out:
1571 return err;
1574 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1576 struct net *net = sock_net(skb->sk);
1577 struct ndmsg *ndm;
1578 struct nlattr *tb[NDA_MAX+1];
1579 struct neigh_table *tbl;
1580 struct net_device *dev = NULL;
1581 int err;
1583 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1584 if (err < 0)
1585 goto out;
1587 err = -EINVAL;
1588 if (tb[NDA_DST] == NULL)
1589 goto out;
1591 ndm = nlmsg_data(nlh);
1592 if (ndm->ndm_ifindex) {
1593 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1594 if (dev == NULL) {
1595 err = -ENODEV;
1596 goto out;
1599 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1600 goto out_dev_put;
1603 read_lock(&neigh_tbl_lock);
1604 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1605 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1606 struct neighbour *neigh;
1607 void *dst, *lladdr;
1609 if (tbl->family != ndm->ndm_family)
1610 continue;
1611 read_unlock(&neigh_tbl_lock);
1613 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1614 goto out_dev_put;
1615 dst = nla_data(tb[NDA_DST]);
1616 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1618 if (ndm->ndm_flags & NTF_PROXY) {
1619 struct pneigh_entry *pn;
1621 err = -ENOBUFS;
1622 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1623 if (pn) {
1624 pn->flags = ndm->ndm_flags;
1625 err = 0;
1627 goto out_dev_put;
1630 if (dev == NULL)
1631 goto out_dev_put;
1633 neigh = neigh_lookup(tbl, dst, dev);
1634 if (neigh == NULL) {
1635 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1636 err = -ENOENT;
1637 goto out_dev_put;
1640 neigh = __neigh_lookup_errno(tbl, dst, dev);
1641 if (IS_ERR(neigh)) {
1642 err = PTR_ERR(neigh);
1643 goto out_dev_put;
1645 } else {
1646 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1647 err = -EEXIST;
1648 neigh_release(neigh);
1649 goto out_dev_put;
1652 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1653 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1656 if (ndm->ndm_flags & NTF_USE) {
1657 neigh_event_send(neigh, NULL);
1658 err = 0;
1659 } else
1660 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1661 neigh_release(neigh);
1662 goto out_dev_put;
1665 read_unlock(&neigh_tbl_lock);
1666 err = -EAFNOSUPPORT;
1668 out_dev_put:
1669 if (dev)
1670 dev_put(dev);
1671 out:
1672 return err;
1675 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1677 struct nlattr *nest;
1679 nest = nla_nest_start(skb, NDTA_PARMS);
1680 if (nest == NULL)
1681 return -ENOBUFS;
1683 if (parms->dev)
1684 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1686 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1687 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1688 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1689 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1690 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1691 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1692 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1693 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1694 parms->base_reachable_time);
1695 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1696 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1697 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1698 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1699 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1700 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1702 return nla_nest_end(skb, nest);
1704 nla_put_failure:
1705 nla_nest_cancel(skb, nest);
1706 return -EMSGSIZE;
1709 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1710 u32 pid, u32 seq, int type, int flags)
1712 struct nlmsghdr *nlh;
1713 struct ndtmsg *ndtmsg;
1715 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1716 if (nlh == NULL)
1717 return -EMSGSIZE;
1719 ndtmsg = nlmsg_data(nlh);
1721 read_lock_bh(&tbl->lock);
1722 ndtmsg->ndtm_family = tbl->family;
1723 ndtmsg->ndtm_pad1 = 0;
1724 ndtmsg->ndtm_pad2 = 0;
1726 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1727 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1728 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1729 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1730 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1733 unsigned long now = jiffies;
1734 unsigned int flush_delta = now - tbl->last_flush;
1735 unsigned int rand_delta = now - tbl->last_rand;
1737 struct ndt_config ndc = {
1738 .ndtc_key_len = tbl->key_len,
1739 .ndtc_entry_size = tbl->entry_size,
1740 .ndtc_entries = atomic_read(&tbl->entries),
1741 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1742 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1743 .ndtc_hash_rnd = tbl->hash_rnd,
1744 .ndtc_hash_mask = tbl->hash_mask,
1745 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1746 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1749 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1753 int cpu;
1754 struct ndt_stats ndst;
1756 memset(&ndst, 0, sizeof(ndst));
1758 for_each_possible_cpu(cpu) {
1759 struct neigh_statistics *st;
1761 st = per_cpu_ptr(tbl->stats, cpu);
1762 ndst.ndts_allocs += st->allocs;
1763 ndst.ndts_destroys += st->destroys;
1764 ndst.ndts_hash_grows += st->hash_grows;
1765 ndst.ndts_res_failed += st->res_failed;
1766 ndst.ndts_lookups += st->lookups;
1767 ndst.ndts_hits += st->hits;
1768 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1769 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1770 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1771 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1774 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1777 BUG_ON(tbl->parms.dev);
1778 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1779 goto nla_put_failure;
1781 read_unlock_bh(&tbl->lock);
1782 return nlmsg_end(skb, nlh);
1784 nla_put_failure:
1785 read_unlock_bh(&tbl->lock);
1786 nlmsg_cancel(skb, nlh);
1787 return -EMSGSIZE;
1790 static int neightbl_fill_param_info(struct sk_buff *skb,
1791 struct neigh_table *tbl,
1792 struct neigh_parms *parms,
1793 u32 pid, u32 seq, int type,
1794 unsigned int flags)
1796 struct ndtmsg *ndtmsg;
1797 struct nlmsghdr *nlh;
1799 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1800 if (nlh == NULL)
1801 return -EMSGSIZE;
1803 ndtmsg = nlmsg_data(nlh);
1805 read_lock_bh(&tbl->lock);
1806 ndtmsg->ndtm_family = tbl->family;
1807 ndtmsg->ndtm_pad1 = 0;
1808 ndtmsg->ndtm_pad2 = 0;
1810 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1811 neightbl_fill_parms(skb, parms) < 0)
1812 goto errout;
1814 read_unlock_bh(&tbl->lock);
1815 return nlmsg_end(skb, nlh);
1816 errout:
1817 read_unlock_bh(&tbl->lock);
1818 nlmsg_cancel(skb, nlh);
1819 return -EMSGSIZE;
1822 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1823 [NDTA_NAME] = { .type = NLA_STRING },
1824 [NDTA_THRESH1] = { .type = NLA_U32 },
1825 [NDTA_THRESH2] = { .type = NLA_U32 },
1826 [NDTA_THRESH3] = { .type = NLA_U32 },
1827 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1828 [NDTA_PARMS] = { .type = NLA_NESTED },
1831 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1832 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1833 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1834 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1835 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1836 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1837 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1838 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1839 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1840 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1841 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1842 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1843 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1844 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1847 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1849 struct net *net = sock_net(skb->sk);
1850 struct neigh_table *tbl;
1851 struct ndtmsg *ndtmsg;
1852 struct nlattr *tb[NDTA_MAX+1];
1853 int err;
1855 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1856 nl_neightbl_policy);
1857 if (err < 0)
1858 goto errout;
1860 if (tb[NDTA_NAME] == NULL) {
1861 err = -EINVAL;
1862 goto errout;
1865 ndtmsg = nlmsg_data(nlh);
1866 read_lock(&neigh_tbl_lock);
1867 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1868 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1869 continue;
1871 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1872 break;
1875 if (tbl == NULL) {
1876 err = -ENOENT;
1877 goto errout_locked;
1880 	/*
1881 	 * We acquire tbl->lock to be nice to the periodic timers and
1882 	 * make sure they always see a consistent set of values.
1883 	 */
1884 write_lock_bh(&tbl->lock);
1886 if (tb[NDTA_PARMS]) {
1887 struct nlattr *tbp[NDTPA_MAX+1];
1888 struct neigh_parms *p;
1889 int i, ifindex = 0;
1891 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1892 nl_ntbl_parm_policy);
1893 if (err < 0)
1894 goto errout_tbl_lock;
1896 if (tbp[NDTPA_IFINDEX])
1897 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1899 p = lookup_neigh_params(tbl, net, ifindex);
1900 if (p == NULL) {
1901 err = -ENOENT;
1902 goto errout_tbl_lock;
1905 for (i = 1; i <= NDTPA_MAX; i++) {
1906 if (tbp[i] == NULL)
1907 continue;
1909 switch (i) {
1910 case NDTPA_QUEUE_LEN:
1911 p->queue_len = nla_get_u32(tbp[i]);
1912 break;
1913 case NDTPA_PROXY_QLEN:
1914 p->proxy_qlen = nla_get_u32(tbp[i]);
1915 break;
1916 case NDTPA_APP_PROBES:
1917 p->app_probes = nla_get_u32(tbp[i]);
1918 break;
1919 case NDTPA_UCAST_PROBES:
1920 p->ucast_probes = nla_get_u32(tbp[i]);
1921 break;
1922 case NDTPA_MCAST_PROBES:
1923 p->mcast_probes = nla_get_u32(tbp[i]);
1924 break;
1925 case NDTPA_BASE_REACHABLE_TIME:
1926 p->base_reachable_time = nla_get_msecs(tbp[i]);
1927 break;
1928 case NDTPA_GC_STALETIME:
1929 p->gc_staletime = nla_get_msecs(tbp[i]);
1930 break;
1931 case NDTPA_DELAY_PROBE_TIME:
1932 p->delay_probe_time = nla_get_msecs(tbp[i]);
1933 break;
1934 case NDTPA_RETRANS_TIME:
1935 p->retrans_time = nla_get_msecs(tbp[i]);
1936 break;
1937 case NDTPA_ANYCAST_DELAY:
1938 p->anycast_delay = nla_get_msecs(tbp[i]);
1939 break;
1940 case NDTPA_PROXY_DELAY:
1941 p->proxy_delay = nla_get_msecs(tbp[i]);
1942 break;
1943 case NDTPA_LOCKTIME:
1944 p->locktime = nla_get_msecs(tbp[i]);
1945 break;
1950 if (tb[NDTA_THRESH1])
1951 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1953 if (tb[NDTA_THRESH2])
1954 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1956 if (tb[NDTA_THRESH3])
1957 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1959 if (tb[NDTA_GC_INTERVAL])
1960 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1962 err = 0;
1964 errout_tbl_lock:
1965 write_unlock_bh(&tbl->lock);
1966 errout_locked:
1967 read_unlock(&neigh_tbl_lock);
1968 errout:
1969 return err;
1972 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1974 struct net *net = sock_net(skb->sk);
1975 int family, tidx, nidx = 0;
1976 int tbl_skip = cb->args[0];
1977 int neigh_skip = cb->args[1];
1978 struct neigh_table *tbl;
1980 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1982 read_lock(&neigh_tbl_lock);
1983 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1984 struct neigh_parms *p;
1986 if (tidx < tbl_skip || (family && tbl->family != family))
1987 continue;
1989 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1990 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1991 NLM_F_MULTI) <= 0)
1992 break;
1994 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
1995 if (!net_eq(neigh_parms_net(p), net))
1996 continue;
1998 if (nidx < neigh_skip)
1999 goto next;
2001 if (neightbl_fill_param_info(skb, tbl, p,
2002 NETLINK_CB(cb->skb).pid,
2003 cb->nlh->nlmsg_seq,
2004 RTM_NEWNEIGHTBL,
2005 NLM_F_MULTI) <= 0)
2006 goto out;
2007 next:
2008 nidx++;
2011 neigh_skip = 0;
2013 out:
2014 read_unlock(&neigh_tbl_lock);
2015 cb->args[0] = tidx;
2016 cb->args[1] = nidx;
2018 return skb->len;
2021 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2022 u32 pid, u32 seq, int type, unsigned int flags)
2024 unsigned long now = jiffies;
2025 struct nda_cacheinfo ci;
2026 struct nlmsghdr *nlh;
2027 struct ndmsg *ndm;
2029 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2030 if (nlh == NULL)
2031 return -EMSGSIZE;
2033 ndm = nlmsg_data(nlh);
2034 ndm->ndm_family = neigh->ops->family;
2035 ndm->ndm_pad1 = 0;
2036 ndm->ndm_pad2 = 0;
2037 ndm->ndm_flags = neigh->flags;
2038 ndm->ndm_type = neigh->type;
2039 ndm->ndm_ifindex = neigh->dev->ifindex;
2041 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2043 read_lock_bh(&neigh->lock);
2044 ndm->ndm_state = neigh->nud_state;
2045 if ((neigh->nud_state & NUD_VALID) &&
2046 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2047 read_unlock_bh(&neigh->lock);
2048 goto nla_put_failure;
2051 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2052 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2053 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2054 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2055 read_unlock_bh(&neigh->lock);
2057 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2058 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2060 return nlmsg_end(skb, nlh);
2062 nla_put_failure:
2063 nlmsg_cancel(skb, nlh);
2064 return -EMSGSIZE;
2067 static void neigh_update_notify(struct neighbour *neigh)
2069 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2070 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2073 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2074 struct netlink_callback *cb)
2076 struct net * net = sock_net(skb->sk);
2077 struct neighbour *n;
2078 int rc, h, s_h = cb->args[1];
2079 int idx, s_idx = idx = cb->args[2];
2081 read_lock_bh(&tbl->lock);
2082 for (h = 0; h <= tbl->hash_mask; h++) {
2083 if (h < s_h)
2084 continue;
2085 if (h > s_h)
2086 s_idx = 0;
2087 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2088 if (dev_net(n->dev) != net)
2089 continue;
2090 if (idx < s_idx)
2091 goto next;
2092 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2093 cb->nlh->nlmsg_seq,
2094 RTM_NEWNEIGH,
2095 NLM_F_MULTI) <= 0) {
2096 read_unlock_bh(&tbl->lock);
2097 rc = -1;
2098 goto out;
2100 next:
2101 idx++;
2104 read_unlock_bh(&tbl->lock);
2105 rc = skb->len;
2106 out:
2107 cb->args[1] = h;
2108 cb->args[2] = idx;
2109 return rc;
2112 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2114 struct neigh_table *tbl;
2115 int t, family, s_t;
2117 read_lock(&neigh_tbl_lock);
2118 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2119 s_t = cb->args[0];
2121 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2122 if (t < s_t || (family && tbl->family != family))
2123 continue;
2124 if (t > s_t)
2125 memset(&cb->args[1], 0, sizeof(cb->args) -
2126 sizeof(cb->args[0]));
2127 if (neigh_dump_table(tbl, skb, cb) < 0)
2128 break;
2130 read_unlock(&neigh_tbl_lock);
2132 cb->args[0] = t;
2133 return skb->len;
2136 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2138 int chain;
2140 read_lock_bh(&tbl->lock);
2141 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2142 struct neighbour *n;
2144 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2145 cb(n, cookie);
2147 read_unlock_bh(&tbl->lock);
2149 EXPORT_SYMBOL(neigh_for_each);
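/* Usage sketch (editorial): the callback runs with tbl->lock held for
 * reading and BH disabled, so it must not sleep.  A hypothetical counter:
 *
 *	static void count_neigh(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_neigh, &count);
 */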
2151 /* The tbl->lock must be held as a writer and BH disabled. */
2152 void __neigh_for_each_release(struct neigh_table *tbl,
2153 int (*cb)(struct neighbour *))
2155 int chain;
2157 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2158 struct neighbour *n, **np;
2160 np = &tbl->hash_buckets[chain];
2161 while ((n = *np) != NULL) {
2162 int release;
2164 write_lock(&n->lock);
2165 release = cb(n);
2166 if (release) {
2167 *np = n->next;
2168 n->dead = 1;
2169 } else
2170 np = &n->next;
2171 write_unlock(&n->lock);
2172 if (release)
2173 neigh_cleanup_and_release(n);
2177 EXPORT_SYMBOL(__neigh_for_each_release);
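/* Usage sketch (editorial): drop every entry the callback approves of;
 * per the comment above, the caller must hold tbl->lock as a writer with
 * BH disabled:
 *
 *	static int release_all(struct neighbour *n)
 *	{
 *		return 1;
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, release_all);
 *	write_unlock_bh(&tbl->lock);
 */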
2179 #ifdef CONFIG_PROC_FS
2181 static struct neighbour *neigh_get_first(struct seq_file *seq)
2183 struct neigh_seq_state *state = seq->private;
2184 struct net *net = seq_file_net(seq);
2185 struct neigh_table *tbl = state->tbl;
2186 struct neighbour *n = NULL;
2187 int bucket = state->bucket;
2189 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2190 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2191 n = tbl->hash_buckets[bucket];
2193 while (n) {
2194 if (!net_eq(dev_net(n->dev), net))
2195 goto next;
2196 if (state->neigh_sub_iter) {
2197 loff_t fakep = 0;
2198 void *v;
2200 v = state->neigh_sub_iter(state, n, &fakep);
2201 if (!v)
2202 goto next;
2204 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2205 break;
2206 if (n->nud_state & ~NUD_NOARP)
2207 break;
2208 next:
2209 n = n->next;
2212 if (n)
2213 break;
2215 state->bucket = bucket;
2217 return n;
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
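/*
 * Seek to position *pos by walking forward from the first entry,
 * counting *pos down as entries are consumed.  A non-zero remainder on
 * return means the sequence was shorter than the requested position.
 */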
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
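/*
 * The pneigh_* walkers below mirror the neighbour walkers for the
 * proxy-entry hash (tbl->phash_buckets, PNEIGH_HASHMASK + 1 buckets),
 * again filtering entries by network namespace.
 */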
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}
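/*
 * Standard seq_file start/next/stop operations, exported for protocol
 * /proc files (e.g. /proc/net/arp).  ->start takes tbl->lock for
 * reading with BHs disabled and returns SEQ_START_TOKEN at position 0
 * so the caller's ->show can emit a header line first.
 */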
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
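/*
 * Position 0 is reserved for the header token; from then on the
 * sequence position encodes "cpu + 1", so ->next resumes the scan of
 * possible CPUs at *pos and returns that CPU's per-cpu counters.
 */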
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards
		   );

	return 0;
}
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */
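/*
 * Worst-case length of one RTM_NEWNEIGH message: the ndmsg header plus
 * the NDA_DST and NDA_LLADDR attributes (each bounded by MAX_ADDR_LEN),
 * NDA_CACHEINFO and the 32-bit NDA_PROBES.  __neigh_notify() sizes its
 * allocation with this, so neigh_fill_info() must never need more.
 */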
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[__NET_NEIGH_MAX];
	char *dev_name;
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
			.strategy	= sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
			.strategy	= sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
			.strategy	= sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		{
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		{
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_ms_jiffies,
			.strategy	= sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_ms_jiffies,
			.strategy	= sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
			.strategy	= sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{},
	},
};
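/*
 * Clone the template above and point each neigh_vars[] slot, by index,
 * at the matching field of the caller's neigh_parms.  Slots 14-17
 * (gc_interval, gc_thresh1..3) exist only in the "default" table: they
 * rely on those fields being laid out directly after the embedded
 * default parms in struct neigh_table, hence the (int *)(p + 1)
 * arithmetic; per-device registrations truncate the array at slot 14.
 */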
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t;
	const char *dev_name_source = NULL;

#define NEIGH_CTL_PATH_ROOT	0
#define NEIGH_CTL_PATH_PROTO	1
#define NEIGH_CTL_PATH_NEIGH	2
#define NEIGH_CTL_PATH_DEV	3

	struct ctl_path neigh_path[] = {
		{ .procname = "net",	 .ctl_name = CTL_NET, },
		{ .procname = "proto",	 .ctl_name = 0, },
		{ .procname = "neigh",	 .ctl_name = 0, },
		{ .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
		{ },
	};

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;
	t->neigh_vars[12].data = &p->retrans_time;
	t->neigh_vars[13].data = &p->base_reachable_time;

	if (dev) {
		dev_name_source = dev->name;
		neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
		/* Terminate the table early */
		memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
	} else {
		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
		t->neigh_vars[14].data = (int *)(p + 1);
		t->neigh_vars[15].data = (int *)(p + 1) + 1;
		t->neigh_vars[16].data = (int *)(p + 1) + 2;
		t->neigh_vars[17].data = (int *)(p + 1) + 3;
	}

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		if (!strategy)
			t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		if (!strategy)
			t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[12].proc_handler = handler;
		t->neigh_vars[12].strategy = strategy;
		t->neigh_vars[12].extra1 = dev;
		if (!strategy)
			t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[13].proc_handler = handler;
		t->neigh_vars[13].strategy = strategy;
		t->neigh_vars[13].extra1 = dev;
		if (!strategy)
			t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
	}

	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!t->dev_name)
		goto free;

	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
	neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
	neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;

	t->sysctl_header =
		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free_procname;

	p->sysctl_table = t;
	return 0;

free_procname:
	kfree(t->dev_name);
free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->dev_name);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */
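/*
 * Wire the PF_UNSPEC neighbour messages into rtnetlink: doit handlers
 * for adding/deleting entries and setting table parameters, dumpit
 * handlers for RTM_GETNEIGH and RTM_GETNEIGHTBL.  Registered as a
 * subsys initcall so the handlers exist before protocol initcalls run.
 */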
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

	return 0;
}

subsys_initcall(neigh_init);