/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static int neigh_glbl_allocs;
static struct neigh_table *neigh_tables;
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if a backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity
   state.  However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock; the most
   complicated procedure we allow is dev->hard_header.  It is assumed
   that dev->hard_header is simplistic and does not make callbacks
   to neighbour tables.

   The last lock is neigh_tbl_lock.  It is a pure SMP lock protecting
   the list of neighbour tables.  The list is used only in process
   context, so plain read_lock()/write_lock() without bh disabling
   is sufficient.
 */
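/*
 * Illustrative sketch (not part of the original file, compiled out):
 * the usual way to honour the rules above is to let the lookup take
 * tbl->lock and a reference internally, and to do any heavy lifting
 * only after the table lock has been dropped.  "do_something" is a
 * hypothetical caller-supplied action.
 */
#if 0
static void neigh_locking_example(struct neigh_table *tbl, const void *pkey,
				  struct net_device *dev,
				  void (*do_something)(struct neighbour *))
{
	/* neigh_lookup() scans the hash bucket under tbl->lock and
	 * returns the entry with its refcount raised, dropping the
	 * lock before returning. */
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n) {
		do_something(n);	/* no table lock held here */
		neigh_release(n);	/* drop the pinning reference */
	}
}
#endif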
static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (net_random() % base) + (base >> 1);
}
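/*
 * Example: with the IPv6 default base_reachable_time of 30 seconds,
 * the value returned above is uniformly distributed over 15..45
 * seconds, e.g. as used when a parms block is set up:
 *
 *	p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
 */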
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	for (i = 0; i <= NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		write_lock_bh(&tbl->lock);
		while ((n = *np) != NULL) {
			/* A neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 * - (NEW and probably wrong)
			 *   INCOMPLETE entries are kept at least for
			 *   n->parms->retrans_time; otherwise we could
			 *   flood the network with resolution requests.
			 *   It is not clear which is worse: table
			 *   overflow or flooding.
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT) &&
			    (n->nud_state != NUD_INCOMPLETE ||
			     jiffies - n->used > n->parms->retrans_time)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
		write_unlock_bh(&tbl->lock);
	}

	tbl->last_flush = jiffies;
	return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	write_lock_bh(&tbl->lock);

	for (i = 0; i <= NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				 * We must destroy the neighbour entry,
				 * but someone still uses it.
				 *
				 * The destroy will be delayed until
				 * the last user releases us, but
				 * we must kill timers etc. and move
				 * it to a safe state.
				 */
				n->parms = &tbl->parms;
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}

	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;

	if (tbl->entries > tbl->gc_thresh3 ||
	    (tbl->entries > tbl->gc_thresh2 &&
	     now - tbl->last_flush > 5 * HZ)) {
		if (!neigh_forced_gc(tbl) &&
		    tbl->entries > tbl->gc_thresh3)
			goto out;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	n->lock		  = RW_LOCK_UNLOCKED;
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = &tbl->parms;
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;
	tbl->stats.allocs++;
	neigh_glbl_allocs++;
	tbl->entries++;
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;
}
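/*
 * Summary of the gc_thresh checks in neigh_alloc() above: below
 * gc_thresh2, entries are never forcibly reclaimed here; between
 * gc_thresh2 and gc_thresh3, a forced gc runs at most once every
 * 5 seconds; above gc_thresh3, every allocation tries a forced gc
 * first and fails with NULL if nothing could be reclaimed.
 * (gc_thresh1 is not consulted in this function.)
 */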
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	hash_val = tbl->hash(pkey, dev);

	write_lock_bh(&tbl->lock);
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			write_unlock_bh(&tbl->lock);
			rc = n1;
			goto out_neigh_release;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_neigh_release:
	neigh_release(n);
	goto out;
}
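/*
 * Typical backend usage (sketch, compiled out): resolve-or-create an
 * entry for a next hop.  The __neigh_lookup() helper in
 * <net/neighbour.h> wraps this same lookup-then-create sequence.
 */
#if 0
static struct neighbour *example_lookup_or_create(struct neigh_table *tbl,
						  const void *pkey,
						  struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n)
		n = neigh_create(tbl, pkey, dev);	/* may be ERR_PTR() */
	return n;
}
#endif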
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			write_lock_bh(&tbl->lock);
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			kfree(n);
			return 0;
		}
	}
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
/*
 *	Destroy a neighbour entry; the neighbour must already be
 *	out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->ops && neigh->ops->destructor)
		(neigh->ops->destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	neigh_glbl_allocs--;
	neigh->tbl->entries--;
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable the fast path.

   Called with the neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}
/* Neighbour state is OK;
   enable the fast path.

   Called with the neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
/*
   Transitions NUD_STALE <-> NUD_REACHABLE do not occur
   while the fast path is built: we have no timers associated with
   these states, and no time to check the state when sending.
   neigh_periodic_timer periodically checks neigh->confirmed
   and moves NUD_REACHABLE -> NUD_STALE.

   If a routine wants to know the TRUE entry state, it calls
   neigh_sync before checking the state.

   Called with the neigh write-locked.
 */

static void neigh_sync(struct neighbour *n)
{
	unsigned long now = jiffies;
	u8 state = n->nud_state;

	if (state & (NUD_NOARP | NUD_PERMANENT))
		return;
	if (state & NUD_REACHABLE) {
		if (now - n->confirmed > n->parms->reachable_time) {
			n->nud_state = NUD_STALE;
			neigh_suspect(n);
		}
	} else if (state & NUD_VALID) {
		if (now - n->confirmed < n->parms->reachable_time) {
			neigh_del_timer(n);
			n->nud_state = NUD_REACHABLE;
			neigh_connect(n);
		}
	}
}
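/*
 * Rough map of the NUD transitions driven by neigh_sync(), the timer
 * handler and __neigh_event_send() below:
 *
 *	NONE -> INCOMPLETE              first packet, solicit started
 *	INCOMPLETE -> REACHABLE/FAILED  reply received / probes exhausted
 *	REACHABLE <-> STALE             confirmation aging (see above)
 *	STALE -> DELAY -> PROBE         traffic hits a stale entry
 *	PROBE -> REACHABLE/FAILED       unicast probes confirm or fail
 */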
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	unsigned long now = jiffies;
	int i;

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */
	if (now - tbl->last_rand > 300 * HZ) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	for (i = 0; i <= NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			unsigned state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if ((long)(n->used - n->confirmed) < 0)
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     now - n->used > n->parms->gc_staletime)) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}

			if (n->nud_state & NUD_REACHABLE &&
			    now - n->confirmed > n->parms->reachable_time) {
				n->nud_state = NUD_STALE;
				neigh_suspect(n);
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
	}

	mod_timer(&tbl->gc_timer, now + tbl->gc_interval);
	write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now = jiffies;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if ((state & NUD_VALID) &&
	    now - neigh->confirmed < neigh->parms->reachable_time) {
		neigh->nud_state = NUD_REACHABLE;
		NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
		neigh_connect(neigh);
		goto out;
	}
	if (state == NUD_DELAY) {
		NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
		neigh->nud_state = NUD_PROBE;
		atomic_set(&neigh->probes, 0);
	}

	if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh->tbl->stats.res_failed++;
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a delicate spot: the error_report callback is
		   a very complicated routine.  In particular, it can hit
		   the same neighbour entry!  So we try to be careful and
		   avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
		goto out;
	}

	neigh->timer.expires = now + neigh->parms->retrans_time;
	add_timer(&neigh->timer);
	write_unlock(&neigh->lock);

	neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
	atomic_inc(&neigh->probes);
	return;

out:
	write_unlock(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh_hold(neigh);
			neigh->timer.expires = jiffies +
					       neigh->parms->retrans_time;
			add_timer(&neigh->timer);
			write_unlock_bh(&neigh->lock);
			neigh->ops->solicit(neigh, skb);
			atomic_inc(&neigh->probes);
			write_lock_bh(&neigh->lock);
		} else {
			neigh->nud_state = NUD_FAILED;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	} else if (neigh->nud_state == NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
		add_timer(&neigh->timer);
		rc = 0;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
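/*
 * Note on the return value of __neigh_event_send() above: 0 means the
 * cached link-layer address may be used and the caller can transmit
 * immediately (possibly with a delayed probe armed); 1 means the skb
 * was queued awaiting resolution, or dropped because the entry failed.
 */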
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- override == 1 allows overriding an existing lladdr if it differs.
   -- arp == 0 means that the change is administrative.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 int override, int arp)
{
	u8 old;
	int err;
#ifdef CONFIG_ARPD
	int notify = 0;
#endif
	struct net_device *dev;

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	if (arp && (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
#ifdef CONFIG_ARPD
		notify = old & NUD_VALID;
#endif
		goto out;
	}

	/* Compare the new lladdr with the cached one. */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if (old & NUD_VALID) {
			if (!memcmp(lladdr, neigh->ha, dev->addr_len))
				lladdr = neigh->ha;
			else if (!override)
				goto out;
		}
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	neigh_sync(neigh);
	old = neigh->nud_state;
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new state is STALE.
	 */
	err = 0;
	if ((old & NUD_VALID) && lladdr == neigh->ha &&
	    (new == old || (new == NUD_STALE && (old & NUD_CONNECTED))))
		goto out;

	neigh_del_timer(neigh);
	neigh->nud_state = new;
	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong. */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
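/*
 * Example calls (sketch): a protocol confirming an entry from a
 * received ARP/NDISC reply would do something like
 *
 *	neigh_update(neigh, lladdr, NUD_REACHABLE, 0, 1);
 *
 * while an administrative replace via netlink (see neigh_add() below)
 * ends up as
 *
 *	neigh_update(neigh, lladdr, ndm->ndm_state, override, 0);
 */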
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
	return neigh;
}
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		hh->hh_lock = RW_LOCK_UNLOCKED;
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
/* This function can be used in contexts where only the old
   dev_queue_xmit worked, e.g. if you want to override the normal
   output path (eql, shaper), but resolution has not been done yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
/* As fast as possible without hh cache. */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = back->stamp.tv_usec - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	long sched_next = net_random() % p->proxy_delay;

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}
	skb->stamp.tv_sec  = 0;
	skb->stamp.tv_usec = now + sched_next;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		long tval = tbl->proxy_timer.expires - now;
		if (tval < sched_next)
			sched_next = tval;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, now + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
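/*
 * Note: skb->stamp is reused above as a due time: tv_usec holds
 * "now + random delay" in jiffies, which neigh_proxy_process()
 * compares against jiffies when the proxy timer fires.
 */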
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl = tbl;
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev && dev->neigh_setup && dev->neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			write_unlock_bh(&tbl->lock);
			kfree(parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;

	tbl->parms.reachable_time =
			neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     (tbl->entry_size +
						      15) & ~15,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);
	tbl->lock = RW_LOCK_UNLOCKED;
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + tbl->gc_interval +
				 tbl->parms.reachable_time;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
	write_lock(&neigh_tbl_lock);
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);
}
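/*
 * Sketch of protocol usage (modelled loosely on ARP; the names here
 * are illustrative): a backend fills in a static neigh_table with its
 * hash/constructor hooks and default parms, then registers it:
 *
 *	static struct neigh_table my_tbl = {
 *		.family		= AF_INET,
 *		.entry_size	= sizeof(struct neighbour) + 4,
 *		.key_len	= 4,
 *		.hash		= my_hash,
 *		.constructor	= my_constructor,
 *		...
 *	};
 *
 *	neigh_table_init(&my_tbl);
 *
 * and tears it down with neigh_table_clear(&my_tbl) on unload.
 */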
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (tbl->entries)
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);
	return 0;
}
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!nda[NDA_DST - 1] ||
		    nda[NDA_DST - 1]->rta_len != RTA_LENGTH(tbl->key_len))
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl,
					    RTA_DATA(nda[NDA_DST - 1]), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST - 1]), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!nda[NDA_DST - 1] ||
		    nda[NDA_DST - 1]->rta_len != RTA_LENGTH(tbl->key_len))
			goto out_dev_put;
		if (ndm->ndm_flags & NTF_PROXY) {
			err = -ENOBUFS;
			if (pneigh_lookup(tbl,
					  RTA_DATA(nda[NDA_DST - 1]), dev, 1))
				err = 0;
			goto out_dev_put;
		}
		err = -EINVAL;
		if (!dev)
			goto out;
		if (nda[NDA_LLADDR - 1] &&
		    nda[NDA_LLADDR - 1]->rta_len != RTA_LENGTH(dev->addr_len))
			goto out_dev_put;
		err = 0;
		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST - 1]), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL)
				err = -EEXIST;
			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE))
			err = -ENOENT;
		else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST - 1]),
						 dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				n = NULL;
			}
		}
		if (!err) {
			err = neigh_update(n, nda[NDA_LLADDR - 1] ?
						RTA_DATA(nda[NDA_LLADDR - 1]) :
						NULL,
					   ndm->ndm_state,
					   override, 0);
		}
		if (n)
			neigh_release(n);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;
	struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, event,
					 sizeof(struct ndmsg));
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family	 = n->ops->family;
	ndm->ndm_flags	 = n->flags;
	ndm->ndm_type	 = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);
	locked		 = 1;
	ndm->ndm_state	 = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used	 = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated	 = now - n->updated;
	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
	read_unlock_bh(&n->lock);
	locked		 = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	nlh->nlmsg_len	 = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= NEIGH_HASHMASK; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			   = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags	   = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh			   = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[17];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;
	if (dev) {
		t->neigh_dev[0].procname = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
	} else {
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}
	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child	    = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child  = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		kfree(t);
		return -ENOBUFS;
	}
	p->sysctl_table = t;
	return 0;
}
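/*
 * Example (sketch, modelled on IPv4 ARP): a protocol registers its
 * per-device neighbour tunables with something like
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH, "ipv4");
 *
 * which makes them appear under /proc/sys/net/ipv4/neigh/<dev>/.
 */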
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}

#endif /* CONFIG_SYSCTL */