net/core/neighbour.c
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while (0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock; the most
   complicated procedure we allow is dev->hard_header. dev->hard_header
   is supposed to be simplistic and must not make callbacks to
   neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */

static DEFINE_RWLOCK(neigh_tbl_lock);
static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (net_random() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
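
/* Forced garbage collection: called from neigh_alloc() when the table is
 * over gc_thresh2/gc_thresh3.  Walks every bucket under tbl->lock and
 * unlinks entries that are neither referenced nor permanent.  Returns 1
 * if at least one entry was freed.
 */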
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i <= nht->hash_mask; i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}
static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i <= nht->hash_mask; i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				rcu_dereference_protected(n->next,
					lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
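
/* Allocate a bucket array of @entries slots (callers keep this a power
 * of two).  Arrays that fit in a page come from kzalloc(), larger ones
 * straight from the page allocator; hash_rnd is re-seeded each time to
 * perturb the hash function.
 */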
static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
{
	size_t size = entries * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour **buckets;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	rcu_assign_pointer(ret->hash_buckets, buckets);
	ret->hash_mask = entries - 1;
	get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
	return ret;
}
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
	struct neighbour **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_entries)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_entries);
	if (!new_nht)
		return old_nht;

	for (i = 0; i <= old_nht->hash_mask; i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash &= new_nht->hash_mask;
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
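
/* RCU lookup: runs under rcu_read_lock_bh() and takes a reference with
 * atomic_inc_not_zero(), so an entry whose refcount already dropped to
 * zero (racing with neigh_destroy()) is reported as not found rather
 * than returned half-dead.
 */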
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask;

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (nht->hash_mask + 1))
		nht = neigh_hash_grow(tbl, (nht->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(neigh_create);
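
/* Proxy (destination-based) entries live in a separate fixed-size table
 * of PNEIGH_HASHMASK + 1 buckets, hashed on the last four bytes of the
 * protocol key.
 */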
static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, hold_net(net));
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(pneigh_net(n));
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(pneigh_net(n));
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
static void neigh_destroy_rcu(struct rcu_head *head)
{
	struct neighbour *neigh = container_of(head, struct neighbour, rcu);

	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	call_rcu(&neigh->rcu, neigh_destroy_rcu);
}
EXPORT_SYMBOL(neigh_destroy);
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}
/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
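
/* Periodic garbage collection, run from a deferrable workqueue.
 * Recomputes reachable_time from the random distribution every 300
 * seconds and reaps unreferenced entries that are FAILED or older than
 * gc_staletime; reschedules itself every base_reachable_time/2 ticks.
 */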
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	for (i = 0; i <= nht->hash_mask; i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
	}
	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	schedule_delayed_work(&tbl->gc_work,
			      tbl->parms.base_reachable_time >> 1);
	write_unlock_bh(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE) ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes;
}
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
	neigh->updated = jiffies;

	/* This is a very thin place. error_report is a very complicated
	   routine. In particular, it can hit the same neighbour entry!

	   So we try to be accurate and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	skb_queue_purge(&neigh->arp_queue);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb = skb_copy(skb, GFP_ATOMIC);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
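
/* Slow path of neigh_event_send(): starts resolution on an entry that
 * is not yet CONNECTED/DELAY/PROBE.  Returns 0 if the caller may
 * transmit immediately, 1 if the skb was queued on arp_queue pending
 * resolution (or dropped because resolution cannot even be attempted).
 */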
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = __skb_dequeue(&neigh->arp_queue);
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
EXPORT_SYMBOL(__neigh_event_send);
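
/* Propagate a new link-layer address into every cached hardware header
 * of this neighbour, under each header's hh_lock seqlock.
 */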
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is new lladdr or NULL, if it is not supplied.
   -- new    is new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows to retain current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb_dst(skb) && skb_dst(skb)->neighbour)
				n1 = skb_dst(skb)->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
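
/* Attach a cached hardware header for @protocol to the dst entry,
 * creating one via dev->header_ops->cache() if the neighbour does not
 * already have a matching hh_cache.
 */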
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
/* This function can be used in contexts where only the old dev_queue_xmit
 * worked, e.g. if you want to override the normal output path (eql, shaper),
 * but resolution is not yet made.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache &&
		    !dst->hh &&
		    !(dst->flags & DST_NOCACHE)) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);
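
/* Timer handler draining tbl->proxy_queue: replay skbs whose artificial
 * proxy delay (set by pneigh_enqueue) has expired, and re-arm the timer
 * for the earliest remaining one.
 */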
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;
			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(skb);
			else
				kfree_skb(skb);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
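
/* Find the per-device neigh_parms for @ifindex in @net; ifindex 0
 * selects the table's default parameters.
 */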
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex))
			return p;
	}

	return NULL;
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	ref = lookup_neigh_parms(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}

		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, hold_net(net));
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	tbl->nht = neigh_hash_alloc(8);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	rwlock_init(&tbl->lock);
	INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
	schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
EXPORT_SYMBOL(neigh_table_init_no_netlink);
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
EXPORT_SYMBOL(neigh_table_init);
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work(&tbl->gc_work);
	flush_scheduled_work();
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	kmem_cache_destroy(tbl->kmem_cachep);
	tbl->kmem_cachep = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
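
/* rtnetlink handlers: RTM_DELNEIGH below, RTM_NEWNEIGH in neigh_add(),
 * and table tuning via neightbl_set() further down.
 */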
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
			goto out;
		}

		if (dev == NULL)
			goto out;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out:
	return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, net, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out;
		}

		if (dev == NULL)
			goto out;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		if (ndm->ndm_flags & NTF_USE) {
			neigh_event_send(neigh, NULL);
			err = 0;
		} else
			err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;
out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
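
/* Fill an RTM_NEWNEIGHTBL message with the table's thresholds, hash
 * configuration, and per-CPU statistics summed over all possible CPUs.
 */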
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd;
		ndc.ndtc_hash_mask = nht->hash_mask;
		rcu_read_unlock_bh();

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				p->queue_len = nla_get_u32(tbp[i]);
				break;
			case NDTPA_PROXY_QLEN:
				p->proxy_qlen = nla_get_u32(tbp[i]);
				break;
			case NDTPA_APP_PROBES:
				p->app_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_UCAST_PROBES:
				p->ucast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_MCAST_PROBES:
				p->mcast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				p->base_reachable_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_GC_STALETIME:
				p->gc_staletime = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_DELAY_PROBE_TIME:
				p->delay_probe_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_RETRANS_TIME:
				p->retrans_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_ANYCAST_DELAY:
				p->anycast_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_PROXY_DELAY:
				p->proxy_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_LOCKTIME:
				p->locktime = nla_get_msecs(tbp[i]);
				break;
			}
		}
	}

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout_locked:
	read_unlock(&neigh_tbl_lock);
errout:
	return err;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
		struct neigh_parms *p;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) <= 0)
			break;

		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if ((neigh->nud_state & NUD_VALID) &&
	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
		read_unlock_bh(&neigh->lock);
		goto nla_put_failure;
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
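
/* Dump one table for RTM_GETNEIGH; cb->args[1]/args[2] carry the bucket
 * and in-bucket index so a multi-part dump can resume where it left off.
 */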
2156 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2157 struct netlink_callback *cb)
2159 struct net *net = sock_net(skb->sk);
2160 struct neighbour *n;
2161 int rc, h, s_h = cb->args[1];
2162 int idx, s_idx = idx = cb->args[2];
2163 struct neigh_hash_table *nht;
2165 rcu_read_lock_bh();
2166 nht = rcu_dereference_bh(tbl->nht);
2168 for (h = 0; h <= nht->hash_mask; h++) {
2169 if (h < s_h)
2170 continue;
2171 if (h > s_h)
2172 s_idx = 0;
2173 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2174 n != NULL;
2175 n = rcu_dereference_bh(n->next)) {
2176 if (!net_eq(dev_net(n->dev), net))
2177 continue;
2178 if (idx < s_idx)
2179 goto next;
2180 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2181 cb->nlh->nlmsg_seq,
2182 RTM_NEWNEIGH,
2183 NLM_F_MULTI) <= 0) {
2184 rc = -1;
2185 goto out;
2187 next:
2188 idx++;
2191 rc = skb->len;
2192 out:
2193 rcu_read_unlock_bh();
2194 cb->args[1] = h;
2195 cb->args[2] = idx;
2196 return rc;
2197 }
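/*
 * RTM_GETNEIGH dump handler: walk every registered table, optionally
 * filtered by address family, resuming at the table saved in
 * cb->args[0].
 */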
2199 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2200 {
2201 struct neigh_table *tbl;
2202 int t, family, s_t;
2204 read_lock(&neigh_tbl_lock);
2205 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2206 s_t = cb->args[0];
2208 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2209 if (t < s_t || (family && tbl->family != family))
2210 continue;
2211 if (t > s_t)
2212 memset(&cb->args[1], 0, sizeof(cb->args) -
2213 sizeof(cb->args[0]));
2214 if (neigh_dump_table(tbl, skb, cb) < 0)
2215 break;
2216 }
2217 read_unlock(&neigh_tbl_lock);
2219 cb->args[0] = t;
2220 return skb->len;
2221 }
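/*
 * Run a callback for every entry in a table. The RCU read side plus
 * tbl->lock keeps the hash table from being resized underneath us.
 * Usage sketch (hypothetical caller; count_entry and the choice of
 * arp_tbl are illustrative only):
 *
 *	static void count_entry(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_entry, &count);
 */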
2223 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2224 {
2225 int chain;
2226 struct neigh_hash_table *nht;
2228 rcu_read_lock_bh();
2229 nht = rcu_dereference_bh(tbl->nht);
2231 read_lock(&tbl->lock); /* avoid resizes */
2232 for (chain = 0; chain <= nht->hash_mask; chain++) {
2233 struct neighbour *n;
2235 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2236 n != NULL;
2237 n = rcu_dereference_bh(n->next))
2238 cb(n, cookie);
2239 }
2240 read_unlock(&tbl->lock);
2241 rcu_read_unlock_bh();
2242 }
2243 EXPORT_SYMBOL(neigh_for_each);
2245 /* The tbl->lock must be held as a writer and BH disabled. */
2246 void __neigh_for_each_release(struct neigh_table *tbl,
2247 int (*cb)(struct neighbour *))
2248 {
2249 int chain;
2250 struct neigh_hash_table *nht;
2252 nht = rcu_dereference_protected(tbl->nht,
2253 lockdep_is_held(&tbl->lock));
2254 for (chain = 0; chain <= nht->hash_mask; chain++) {
2255 struct neighbour *n;
2256 struct neighbour __rcu **np;
2258 np = &nht->hash_buckets[chain];
2259 while ((n = rcu_dereference_protected(*np,
2260 lockdep_is_held(&tbl->lock))) != NULL) {
2261 int release;
2263 write_lock(&n->lock);
2264 release = cb(n);
2265 if (release) {
2266 rcu_assign_pointer(*np,
2267 rcu_dereference_protected(n->next,
2268 lockdep_is_held(&tbl->lock)));
2269 n->dead = 1;
2270 } else
2271 np = &n->next;
2272 write_unlock(&n->lock);
2273 if (release)
2274 neigh_cleanup_and_release(n);
2275 }
2276 }
2277 }
2278 EXPORT_SYMBOL(__neigh_for_each_release);
2280 #ifdef CONFIG_PROC_FS
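/*
 * seq_file iteration helpers. A neigh_seq_state first walks the
 * neighbour hash table and then, unless NEIGH_SEQ_NEIGH_ONLY is set,
 * the proxy (pneigh) buckets. The optional neigh_sub_iter hook lets a
 * protocol expand a single entry into several output records.
 */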
2282 static struct neighbour *neigh_get_first(struct seq_file *seq)
2283 {
2284 struct neigh_seq_state *state = seq->private;
2285 struct net *net = seq_file_net(seq);
2286 struct neigh_hash_table *nht = state->nht;
2287 struct neighbour *n = NULL;
2288 int bucket = state->bucket;
2290 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2291 for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
2292 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2294 while (n) {
2295 if (!net_eq(dev_net(n->dev), net))
2296 goto next;
2297 if (state->neigh_sub_iter) {
2298 loff_t fakep = 0;
2299 void *v;
2301 v = state->neigh_sub_iter(state, n, &fakep);
2302 if (!v)
2303 goto next;
2304 }
2305 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2306 break;
2307 if (n->nud_state & ~NUD_NOARP)
2308 break;
2309 next:
2310 n = rcu_dereference_bh(n->next);
2311 }
2313 if (n)
2314 break;
2315 }
2316 state->bucket = bucket;
2318 return n;
2319 }
2321 static struct neighbour *neigh_get_next(struct seq_file *seq,
2322 struct neighbour *n,
2323 loff_t *pos)
2324 {
2325 struct neigh_seq_state *state = seq->private;
2326 struct net *net = seq_file_net(seq);
2327 struct neigh_hash_table *nht = state->nht;
2329 if (state->neigh_sub_iter) {
2330 void *v = state->neigh_sub_iter(state, n, pos);
2331 if (v)
2332 return n;
2333 }
2334 n = rcu_dereference_bh(n->next);
2336 while (1) {
2337 while (n) {
2338 if (!net_eq(dev_net(n->dev), net))
2339 goto next;
2340 if (state->neigh_sub_iter) {
2341 void *v = state->neigh_sub_iter(state, n, pos);
2342 if (v)
2343 return n;
2344 goto next;
2345 }
2346 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2347 break;
2349 if (n->nud_state & ~NUD_NOARP)
2350 break;
2351 next:
2352 n = rcu_dereference_bh(n->next);
2353 }
2355 if (n)
2356 break;
2358 if (++state->bucket > nht->hash_mask)
2359 break;
2361 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2362 }
2364 if (n && pos)
2365 --(*pos);
2366 return n;
2367 }
2369 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2370 {
2371 struct neighbour *n = neigh_get_first(seq);
2373 if (n) {
2374 --(*pos);
2375 while (*pos) {
2376 n = neigh_get_next(seq, n, pos);
2377 if (!n)
2378 break;
2379 }
2380 }
2381 return *pos ? NULL : n;
2382 }
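/*
 * The pneigh_* helpers mirror the neighbour iterators above for proxy
 * entries, which live in tbl->phash_buckets indexed by
 * PNEIGH_HASHMASK.
 */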
2384 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2385 {
2386 struct neigh_seq_state *state = seq->private;
2387 struct net *net = seq_file_net(seq);
2388 struct neigh_table *tbl = state->tbl;
2389 struct pneigh_entry *pn = NULL;
2390 int bucket = state->bucket;
2392 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2393 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2394 pn = tbl->phash_buckets[bucket];
2395 while (pn && !net_eq(pneigh_net(pn), net))
2396 pn = pn->next;
2397 if (pn)
2398 break;
2399 }
2400 state->bucket = bucket;
2402 return pn;
2403 }
2405 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2406 struct pneigh_entry *pn,
2407 loff_t *pos)
2408 {
2409 struct neigh_seq_state *state = seq->private;
2410 struct net *net = seq_file_net(seq);
2411 struct neigh_table *tbl = state->tbl;
2413 pn = pn->next;
2414 while (!pn) {
2415 if (++state->bucket > PNEIGH_HASHMASK)
2416 break;
2417 pn = tbl->phash_buckets[state->bucket];
2418 while (pn && !net_eq(pneigh_net(pn), net))
2419 pn = pn->next;
2420 if (pn)
2421 break;
2422 }
2424 if (pn && pos)
2425 --(*pos);
2427 return pn;
2428 }
2430 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2431 {
2432 struct pneigh_entry *pn = pneigh_get_first(seq);
2434 if (pn) {
2435 --(*pos);
2436 while (*pos) {
2437 pn = pneigh_get_next(seq, pn, pos);
2438 if (!pn)
2439 break;
2440 }
2441 }
2442 return *pos ? NULL : pn;
2443 }
2445 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2446 {
2447 struct neigh_seq_state *state = seq->private;
2448 void *rc;
2449 loff_t idxpos = *pos;
2451 rc = neigh_get_idx(seq, &idxpos);
2452 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2453 rc = pneigh_get_idx(seq, &idxpos);
2455 return rc;
2456 }
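/*
 * Protocol glue is expected to look roughly like this (foo_tbl and the
 * flag choice are illustrative, not a real in-tree user):
 *
 *	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &foo_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * The rcu_read_lock_bh() taken here is dropped in neigh_seq_stop().
 */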
2458 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2459 __acquires(rcu_bh)
2460 {
2461 struct neigh_seq_state *state = seq->private;
2463 state->tbl = tbl;
2464 state->bucket = 0;
2465 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2467 rcu_read_lock_bh();
2468 state->nht = rcu_dereference_bh(tbl->nht);
2470 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2471 }
2472 EXPORT_SYMBOL(neigh_seq_start);
2474 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2475 {
2476 struct neigh_seq_state *state;
2477 void *rc;
2479 if (v == SEQ_START_TOKEN) {
2480 rc = neigh_get_first(seq);
2481 goto out;
2482 }
2484 state = seq->private;
2485 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2486 rc = neigh_get_next(seq, v, NULL);
2487 if (rc)
2488 goto out;
2489 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2490 rc = pneigh_get_first(seq);
2491 } else {
2492 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2493 rc = pneigh_get_next(seq, v, NULL);
2494 }
2495 out:
2496 ++(*pos);
2497 return rc;
2498 }
2499 EXPORT_SYMBOL(neigh_seq_next);
2501 void neigh_seq_stop(struct seq_file *seq, void *v)
2502 __releases(rcu_bh)
2503 {
2504 rcu_read_unlock_bh();
2505 }
2506 EXPORT_SYMBOL(neigh_seq_stop);
2508 /* statistics via seq_file */
2510 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2511 {
2512 struct neigh_table *tbl = seq->private;
2513 int cpu;
2515 if (*pos == 0)
2516 return SEQ_START_TOKEN;
2518 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2519 if (!cpu_possible(cpu))
2520 continue;
2521 *pos = cpu+1;
2522 return per_cpu_ptr(tbl->stats, cpu);
2523 }
2524 return NULL;
2525 }
2527 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2528 {
2529 struct neigh_table *tbl = seq->private;
2530 int cpu;
2532 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2533 if (!cpu_possible(cpu))
2534 continue;
2535 *pos = cpu+1;
2536 return per_cpu_ptr(tbl->stats, cpu);
2537 }
2538 return NULL;
2539 }
2541 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2542 {
2544 }
2546 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2547 {
2548 struct neigh_table *tbl = seq->private;
2549 struct neigh_statistics *st = v;
2551 if (v == SEQ_START_TOKEN) {
2552 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2553 return 0;
2554 }
2556 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2557 "%08lx %08lx %08lx %08lx %08lx\n",
2558 atomic_read(&tbl->entries),
2560 st->allocs,
2561 st->destroys,
2562 st->hash_grows,
2564 st->lookups,
2565 st->hits,
2567 st->res_failed,
2569 st->rcv_probes_mcast,
2570 st->rcv_probes_ucast,
2572 st->periodic_gc_runs,
2573 st->forced_gc_runs,
2574 st->unres_discards
2575 );
2577 return 0;
2578 }
2580 static const struct seq_operations neigh_stat_seq_ops = {
2581 .start = neigh_stat_seq_start,
2582 .next = neigh_stat_seq_next,
2583 .stop = neigh_stat_seq_stop,
2584 .show = neigh_stat_seq_show,
2585 };
2587 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2588 {
2589 int ret = seq_open(file, &neigh_stat_seq_ops);
2591 if (!ret) {
2592 struct seq_file *sf = file->private_data;
2593 sf->private = PDE(inode)->data;
2594 }
2595 return ret;
2596 }
2598 static const struct file_operations neigh_stat_seq_fops = {
2599 .owner = THIS_MODULE,
2600 .open = neigh_stat_seq_open,
2601 .read = seq_read,
2602 .llseek = seq_lseek,
2603 .release = seq_release,
2604 };
2606 #endif /* CONFIG_PROC_FS */
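/*
 * Worst-case size of one neighbour notification: the ndmsg header plus
 * the four attributes emitted by neigh_fill_info(), with MAX_ADDR_LEN
 * bounding both address attributes.
 */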
2608 static inline size_t neigh_nlmsg_size(void)
2609 {
2610 return NLMSG_ALIGN(sizeof(struct ndmsg))
2611 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2612 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2613 + nla_total_size(sizeof(struct nda_cacheinfo))
2614 + nla_total_size(4); /* NDA_PROBES */
2615 }
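/*
 * Broadcast one neighbour event to RTNLGRP_NEIGH listeners; allocation
 * or fill failures are reported through rtnl_set_sk_err().
 */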
2617 static void __neigh_notify(struct neighbour *n, int type, int flags)
2618 {
2619 struct net *net = dev_net(n->dev);
2620 struct sk_buff *skb;
2621 int err = -ENOBUFS;
2623 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2624 if (skb == NULL)
2625 goto errout;
2627 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2628 if (err < 0) {
2629 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2630 WARN_ON(err == -EMSGSIZE);
2631 kfree_skb(skb);
2632 goto errout;
2633 }
2634 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2635 return;
2636 errout:
2637 if (err < 0)
2638 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2639 }
2641 #ifdef CONFIG_ARPD
2642 void neigh_app_ns(struct neighbour *n)
2643 {
2644 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2645 }
2646 EXPORT_SYMBOL(neigh_app_ns);
2647 #endif /* CONFIG_ARPD */
2649 #ifdef CONFIG_SYSCTL
2651 #define NEIGH_VARS_MAX 19
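/*
 * Template duplicated (kmemdup) by neigh_sysctl_register() for each
 * parms instance: slots 0-13 are per-parms knobs, 14-17 the per-table
 * gc settings, and the table is terminated early for per-device
 * registrations, which must not expose the gc entries.
 */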
2653 static struct neigh_sysctl_table {
2654 struct ctl_table_header *sysctl_header;
2655 struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2656 char *dev_name;
2657 } neigh_sysctl_template __read_mostly = {
2658 .neigh_vars = {
2659 {
2660 .procname = "mcast_solicit",
2661 .maxlen = sizeof(int),
2662 .mode = 0644,
2663 .proc_handler = proc_dointvec,
2664 },
2665 {
2666 .procname = "ucast_solicit",
2667 .maxlen = sizeof(int),
2668 .mode = 0644,
2669 .proc_handler = proc_dointvec,
2670 },
2671 {
2672 .procname = "app_solicit",
2673 .maxlen = sizeof(int),
2674 .mode = 0644,
2675 .proc_handler = proc_dointvec,
2676 },
2677 {
2678 .procname = "retrans_time",
2679 .maxlen = sizeof(int),
2680 .mode = 0644,
2681 .proc_handler = proc_dointvec_userhz_jiffies,
2682 },
2683 {
2684 .procname = "base_reachable_time",
2685 .maxlen = sizeof(int),
2686 .mode = 0644,
2687 .proc_handler = proc_dointvec_jiffies,
2688 },
2689 {
2690 .procname = "delay_first_probe_time",
2691 .maxlen = sizeof(int),
2692 .mode = 0644,
2693 .proc_handler = proc_dointvec_jiffies,
2694 },
2695 {
2696 .procname = "gc_stale_time",
2697 .maxlen = sizeof(int),
2698 .mode = 0644,
2699 .proc_handler = proc_dointvec_jiffies,
2700 },
2701 {
2702 .procname = "unres_qlen",
2703 .maxlen = sizeof(int),
2704 .mode = 0644,
2705 .proc_handler = proc_dointvec,
2706 },
2707 {
2708 .procname = "proxy_qlen",
2709 .maxlen = sizeof(int),
2710 .mode = 0644,
2711 .proc_handler = proc_dointvec,
2712 },
2713 {
2714 .procname = "anycast_delay",
2715 .maxlen = sizeof(int),
2716 .mode = 0644,
2717 .proc_handler = proc_dointvec_userhz_jiffies,
2718 },
2719 {
2720 .procname = "proxy_delay",
2721 .maxlen = sizeof(int),
2722 .mode = 0644,
2723 .proc_handler = proc_dointvec_userhz_jiffies,
2724 },
2725 {
2726 .procname = "locktime",
2727 .maxlen = sizeof(int),
2728 .mode = 0644,
2729 .proc_handler = proc_dointvec_userhz_jiffies,
2730 },
2731 {
2732 .procname = "retrans_time_ms",
2733 .maxlen = sizeof(int),
2734 .mode = 0644,
2735 .proc_handler = proc_dointvec_ms_jiffies,
2736 },
2737 {
2738 .procname = "base_reachable_time_ms",
2739 .maxlen = sizeof(int),
2740 .mode = 0644,
2741 .proc_handler = proc_dointvec_ms_jiffies,
2742 },
2743 {
2744 .procname = "gc_interval",
2745 .maxlen = sizeof(int),
2746 .mode = 0644,
2747 .proc_handler = proc_dointvec_jiffies,
2748 },
2749 {
2750 .procname = "gc_thresh1",
2751 .maxlen = sizeof(int),
2752 .mode = 0644,
2753 .proc_handler = proc_dointvec,
2754 },
2755 {
2756 .procname = "gc_thresh2",
2757 .maxlen = sizeof(int),
2758 .mode = 0644,
2759 .proc_handler = proc_dointvec,
2760 },
2761 {
2762 .procname = "gc_thresh3",
2763 .maxlen = sizeof(int),
2764 .mode = 0644,
2765 .proc_handler = proc_dointvec,
2766 },
2767 {},
2768 },
2769 };
2771 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2772 char *p_name, proc_handler *handler)
2773 {
2774 struct neigh_sysctl_table *t;
2775 const char *dev_name_source = NULL;
2777 #define NEIGH_CTL_PATH_ROOT 0
2778 #define NEIGH_CTL_PATH_PROTO 1
2779 #define NEIGH_CTL_PATH_NEIGH 2
2780 #define NEIGH_CTL_PATH_DEV 3
2782 struct ctl_path neigh_path[] = {
2783 { .procname = "net", },
2784 { .procname = "proto", },
2785 { .procname = "neigh", },
2786 { .procname = "default", },
2787 { },
2788 };
2790 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2791 if (!t)
2792 goto err;
2794 t->neigh_vars[0].data = &p->mcast_probes;
2795 t->neigh_vars[1].data = &p->ucast_probes;
2796 t->neigh_vars[2].data = &p->app_probes;
2797 t->neigh_vars[3].data = &p->retrans_time;
2798 t->neigh_vars[4].data = &p->base_reachable_time;
2799 t->neigh_vars[5].data = &p->delay_probe_time;
2800 t->neigh_vars[6].data = &p->gc_staletime;
2801 t->neigh_vars[7].data = &p->queue_len;
2802 t->neigh_vars[8].data = &p->proxy_qlen;
2803 t->neigh_vars[9].data = &p->anycast_delay;
2804 t->neigh_vars[10].data = &p->proxy_delay;
2805 t->neigh_vars[11].data = &p->locktime;
2806 t->neigh_vars[12].data = &p->retrans_time;
2807 t->neigh_vars[13].data = &p->base_reachable_time;
2809 if (dev) {
2810 dev_name_source = dev->name;
2811 /* Terminate the table early */
2812 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2813 } else {
2814 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2815 t->neigh_vars[14].data = (int *)(p + 1);
2816 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2817 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2818 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2819 }
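/*
 * For the default parms the gc_* sysctls point just past the parms
 * block; this relies on struct neigh_table laying out gc_interval and
 * gc_thresh1-3 immediately after tbl->parms.
 */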
2822 if (handler) {
2823 /* RetransTime */
2824 t->neigh_vars[3].proc_handler = handler;
2825 t->neigh_vars[3].extra1 = dev;
2826 /* ReachableTime */
2827 t->neigh_vars[4].proc_handler = handler;
2828 t->neigh_vars[4].extra1 = dev;
2829 /* RetransTime (in milliseconds) */
2830 t->neigh_vars[12].proc_handler = handler;
2831 t->neigh_vars[12].extra1 = dev;
2832 /* ReachableTime (in milliseconds) */
2833 t->neigh_vars[13].proc_handler = handler;
2834 t->neigh_vars[13].extra1 = dev;
2835 }
2837 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2838 if (!t->dev_name)
2839 goto free;
2841 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2842 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2844 t->sysctl_header =
2845 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2846 if (!t->sysctl_header)
2847 goto free_procname;
2849 p->sysctl_table = t;
2850 return 0;
2852 free_procname:
2853 kfree(t->dev_name);
2854 free:
2855 kfree(t);
2856 err:
2857 return -ENOBUFS;
2858 }
2859 EXPORT_SYMBOL(neigh_sysctl_register);
2861 void neigh_sysctl_unregister(struct neigh_parms *p)
2862 {
2863 if (p->sysctl_table) {
2864 struct neigh_sysctl_table *t = p->sysctl_table;
2865 p->sysctl_table = NULL;
2866 unregister_sysctl_table(t->sysctl_header);
2867 kfree(t->dev_name);
2868 kfree(t);
2869 }
2870 }
2871 EXPORT_SYMBOL(neigh_sysctl_unregister);
2873 #endif /* CONFIG_SYSCTL */
2875 static int __init neigh_init(void)
2876 {
2877 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2878 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2879 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2881 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2882 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2884 return 0;
2885 }
2887 subsys_initcall(neigh_init);