/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static int neigh_glbl_allocs;
static struct neigh_table *neigh_tables;

#if defined(__i386__) && defined(CONFIG_SMP)
#define ASSERT_WL(n) if ((int)((n)->lock.lock) > 0) { printk("WL assertion failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
#else
#define ASSERT_WL(n) do { } while(0)
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is also used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be made under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */

static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;

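/*
 * A minimal sketch of the locking rules above, never compiled; the
 * function name example_read_state is hypothetical. The pattern: find an
 * entry under tbl->lock, pin it with a reference, drop the table lock,
 * and only then touch per-entry state under neigh->lock.
 */
#if 0
static u8 example_read_state(struct neigh_table *tbl, struct neighbour *n)
{
	u8 state;

	/* Bucket scans happen under tbl->lock; nothing clever here. */
	read_lock_bh(&tbl->lock);
	neigh_hold(n);			/* pin the entry before unlocking */
	read_unlock_bh(&tbl->lock);

	/* Validity state is guarded by the per-entry neigh->lock. */
	read_lock_bh(&n->lock);
	state = n->nud_state;
	read_unlock_bh(&n->lock);

	neigh_release(n);		/* may free the entry if we were last */
	return state;
}
#endif
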
static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

/*
 * Returns a random value distributed in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (net_random() % base) + (base>>1);
}

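/* Worked example: with base = 30*HZ, net_random() % base falls in
 * [0, 30*HZ) and base>>1 shifts the interval up by 15*HZ, so the result
 * is uniform over [15*HZ, 45*HZ), i.e. (1/2)*base to (3/2)*base.
 */
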
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	for (i=0; i<=NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		write_lock_bh(&tbl->lock);
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			   - nobody refers to it.
			   - it is not permanent
			   - (NEW and probably wrong)
			     INCOMPLETE entries are kept at least for
			     n->parms->retrans_time, otherwise we could
			     flood the network with resolution requests.
			     It is not clear what is better: table overflow
			     or flooding.
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state&NUD_PERMANENT) &&
			    (n->nud_state != NUD_INCOMPLETE ||
			     jiffies - n->used > n->parms->retrans_time)) {
				*np = n->next;
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
		write_unlock_bh(&tbl->lock);
	}

	tbl->last_flush = jiffies;
	return shrunk;
}

static int neigh_del_timer(struct neighbour *n)
{
	if (n->nud_state & NUD_IN_TIMER) {
		if (del_timer(&n->timer)) {
			neigh_release(n);
			return 1;
		}
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	write_lock_bh(&tbl->lock);

	for (i=0; i<=NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				n->parms = &tbl->parms;
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state&NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}

	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n;
	unsigned long now = jiffies;

	if (tbl->entries > tbl->gc_thresh3 ||
	    (tbl->entries > tbl->gc_thresh2 &&
	     now - tbl->last_flush > 5*HZ)) {
		if (neigh_forced_gc(tbl) == 0 &&
		    tbl->entries > tbl->gc_thresh3)
			return NULL;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (n == NULL)
		return NULL;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	n->lock = RW_LOCK_UNLOCKED;
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->parms = &tbl->parms;
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data = (unsigned long)n;
	tbl->stats.allocs++;
	neigh_glbl_allocs++;
	tbl->entries++;
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
	return n;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	u32 hash_val;
	int key_len = tbl->key_len;

	hash_val = tbl->hash(pkey, dev);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev &&
		    memcmp(n->primary_key, pkey, key_len) == 0) {
			neigh_hold(n);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey,
				struct net_device *dev)
{
	struct neighbour *n, *n1;
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;

	n = neigh_alloc(tbl);
	if (n == NULL)
		return ERR_PTR(-ENOBUFS);

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		neigh_release(n);
		return ERR_PTR(error);
	}

	/* Device specific setup. */
	if (n->parms && n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		neigh_release(n);
		return ERR_PTR(error);
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time<<1);

	hash_val = tbl->hash(pkey, dev);

	write_lock_bh(&tbl->lock);
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev &&
		    memcmp(n1->primary_key, pkey, key_len) == 0) {
			neigh_hold(n1);
			write_unlock_bh(&tbl->lock);
			neigh_release(n);
			return n1;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	return n;
}

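/*
 * A hedged usage sketch, never compiled: callers typically combine
 * neigh_lookup() and neigh_create() into one lookup-or-create step, as
 * the __neigh_lookup() helper used by neigh_event_ns() below does. The
 * duplicate scan at the end of neigh_create() is what makes the two-step
 * race safe: if another CPU inserted the same key first, the fresh entry
 * is released and the existing one returned.
 */
#if 0
static struct neighbour *example_lookup_or_create(struct neigh_table *tbl,
						  const void *pkey,
						  struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n == NULL)
		n = neigh_create(tbl, pkey, dev);	/* may return ERR_PTR() */
	return n;
}
#endif
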
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	u32 hash_val;
	int key_len = tbl->key_len;

	hash_val = *(u32*)(pkey + key_len - 4);
	hash_val ^= (hash_val>>16);
	hash_val ^= hash_val>>8;
	hash_val ^= hash_val>>4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (memcmp(n->key, pkey, key_len) == 0 &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			return n;
		}
	}
	read_unlock_bh(&tbl->lock);
	if (!creat)
		return NULL;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (n == NULL)
		return NULL;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		kfree(n);
		return NULL;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
	return n;
}

int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 hash_val;
	int key_len = tbl->key_len;

	hash_val = *(u32*)(pkey + key_len - 4);
	hash_val ^= (hash_val>>16);
	hash_val ^= hash_val>>8;
	hash_val ^= hash_val>>4;
	hash_val &= PNEIGH_HASHMASK;

	for (np = &tbl->phash_buckets[hash_val]; (n=*np) != NULL; np = &n->next) {
		if (memcmp(n->key, pkey, key_len) == 0 && n->dev == dev) {
			write_lock_bh(&tbl->lock);
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			kfree(n);
			return 0;
		}
	}
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h=0; h<=PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n=*np) != NULL) {
			if (n->dev == dev || dev == NULL) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	if (!neigh->dead) {
		printk("Destroying alive neighbour %p from %08lx\n", neigh,
		       *(((unsigned long*)&neigh)-1));
		return;
	}

	if (neigh_del_timer(neigh))
		printk("Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->ops && neigh->ops->destructor)
		(neigh->ops->destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	neigh_glbl_allocs--;
	neigh->tbl->entries--;
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	ASSERT_WL(neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	ASSERT_WL(neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

/*
   Transitions NUD_STALE <-> NUD_REACHABLE do not occur
   when the fast path is built: we have no timers associated with
   these states, and we do not have time to check state when sending.
   neigh_periodic_timer periodically checks the neigh->confirmed
   time and moves NUD_REACHABLE -> NUD_STALE.

   If a routine wants to know the TRUE entry state, it calls
   neigh_sync before checking state.

   Called with write_locked neigh.
 */

static void neigh_sync(struct neighbour *n)
{
	unsigned long now = jiffies;
	u8 state = n->nud_state;

	ASSERT_WL(n);
	if (state&(NUD_NOARP|NUD_PERMANENT))
		return;
	if (state&NUD_REACHABLE) {
		if (now - n->confirmed > n->parms->reachable_time) {
			n->nud_state = NUD_STALE;
			neigh_suspect(n);
		}
	} else if (state&NUD_VALID) {
		if (now - n->confirmed < n->parms->reachable_time) {
			neigh_del_timer(n);
			n->nud_state = NUD_REACHABLE;
			neigh_connect(n);
		}
	}
}

static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table*)arg;
	unsigned long now = jiffies;
	int i;

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (now - tbl->last_rand > 300*HZ) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p=&tbl->parms; p; p = p->next)
			p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
	}

	for (i=0; i <= NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			unsigned state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state&(NUD_PERMANENT|NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if ((long)(n->used - n->confirmed) < 0)
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED || now - n->used > n->parms->gc_staletime)) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}

			if (n->nud_state&NUD_REACHABLE &&
			    now - n->confirmed > n->parms->reachable_time) {
				n->nud_state = NUD_STALE;
				neigh_suspect(n);
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
	}

	mod_timer(&tbl->gc_timer, now + tbl->gc_interval);
	write_unlock(&tbl->lock);
}

#ifdef CONFIG_SMP
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table*)arg;

	tasklet_schedule(&tbl->gc_task);
}
#endif

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return p->ucast_probes + p->app_probes + p->mcast_probes;
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now = jiffies;
	struct neighbour *neigh = (struct neighbour*)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;

	if (!(state&NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk("neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if ((state&NUD_VALID) &&
	    now - neigh->confirmed < neigh->parms->reachable_time) {
		neigh->nud_state = NUD_REACHABLE;
		NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
		neigh_connect(neigh);
		goto out;
	}
	if (state == NUD_DELAY) {
		NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
		neigh->nud_state = NUD_PROBE;
		atomic_set(&neigh->probes, 0);
	}

	if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh->tbl->stats.res_failed++;
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very thin place. error_report is a very
		   complicated routine; particularly, it can hit the same
		   neighbour entry! So we try to be accurate here and
		   avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
		goto out;
	}

	neigh->timer.expires = now + neigh->parms->retrans_time;
	add_timer(&neigh->timer);
	write_unlock(&neigh->lock);

	neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
	atomic_inc(&neigh->probes);
	return;

out:
	write_unlock(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	write_lock_bh(&neigh->lock);
	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
		if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
			if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
				atomic_set(&neigh->probes, neigh->parms->ucast_probes);
				neigh->nud_state = NUD_INCOMPLETE;
				neigh_hold(neigh);
				neigh->timer.expires = jiffies + neigh->parms->retrans_time;
				add_timer(&neigh->timer);
				write_unlock_bh(&neigh->lock);
				neigh->ops->solicit(neigh, skb);
				atomic_inc(&neigh->probes);
				write_lock_bh(&neigh->lock);
			} else {
				neigh->nud_state = NUD_FAILED;
				write_unlock_bh(&neigh->lock);

				if (skb)
					kfree_skb(skb);
				return 1;
			}
		}
		if (neigh->nud_state == NUD_INCOMPLETE) {
			if (skb) {
				if (skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len) {
					struct sk_buff *buff;
					buff = neigh->arp_queue.prev;
					__skb_unlink(buff, &neigh->arp_queue);
					kfree_skb(buff);
				}
				__skb_queue_head(&neigh->arp_queue, skb);
			}
			write_unlock_bh(&neigh->lock);
			return 1;
		}
		if (neigh->nud_state == NUD_STALE) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh_hold(neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
			add_timer(&neigh->timer);
		}
	}
	write_unlock_bh(&neigh->lock);
	return 0;
}

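/*
 * A hedged sketch of the caller's contract, never compiled. The
 * neigh_event_send() wrapper (presumably a fast-path inline around
 * __neigh_event_send() in the header) returns 0 when the entry is usable
 * now, and nonzero when the skb was queued or dropped pending
 * resolution, as neigh_resolve_output() below relies on.
 */
#if 0
static int example_xmit(struct neighbour *neigh, struct sk_buff *skb)
{
	if (neigh_event_send(neigh, skb) != 0)
		return 0;	/* skb consumed by the resolution queue */

	/* State permits transmission: build the link-layer header from
	   neigh->ha under the read lock, as neigh_connected_output()
	   below does, then hand the skb to the device. */
	return neigh->ops->queue_xmit(skb);
}
#endif
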
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char*) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh=neigh->hh; hh; hh=hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new is the new state.
   -- override==1 allows overriding an existing lladdr, if it differs.
   -- arp==0 means that the change is administrative.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, int override, int arp)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev = neigh->dev;

	write_lock_bh(&neigh->lock);
	old = neigh->nud_state;

	err = -EPERM;
	if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
		goto out;

	if (!(new&NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old&NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old&NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (dev->addr_len == 0) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if (old&NUD_VALID) {
			if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
				lladdr = neigh->ha;
			else if (!override)
				goto out;
		}
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old&NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	neigh_sync(neigh);
	old = neigh->nud_state;
	if (new&NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address is not changed,
	   do not change the entry state, if the new one is STALE.
	 */
	err = 0;
	if (old&NUD_VALID) {
		if (lladdr == neigh->ha)
			if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED)))
				goto out;
	}
	neigh_del_timer(neigh);
	neigh->nud_state = new;
	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new&NUD_CONNECTED))
			neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new&NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old&NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state&NUD_VALID &&
		       (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}

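/*
 * A hedged sketch, never compiled: a protocol that receives a positive
 * confirmation frame would feed it back with a call like the one below
 * (the function name example_confirm is hypothetical). Per the contract
 * above, override=0 refuses to replace a differing cached lladdr, and
 * arp=1 marks the change as learned from the network, so NOARP and
 * PERMANENT entries reject it with -EPERM.
 */
#if 0
static void example_confirm(struct neighbour *n, const u8 *lladdr)
{
	neigh_update(n, lladdr, NUD_REACHABLE, 0, 1);
}
#endif
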
struct neighbour * neigh_event_ns(struct neigh_table *tbl,
				  u8 *lladdr, void *saddr,
				  struct net_device *dev)
{
	struct neighbour *neigh;

	neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
	return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, u16 protocol)
{
	struct hh_cache *hh = NULL;
	struct net_device *dev = dst->dev;

	for (hh=n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		hh->hh_lock = RW_LOCK_UNLOCKED;
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh = hh;
			if (n->nud_state&NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution is not made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (neigh_event_send(neigh, skb) == 0) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && dst->hh == NULL) {
			write_lock_bh(&neigh->lock);
			if (dst->hh == NULL)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			return neigh->ops->queue_xmit(skb);
		kfree_skb(skb);
		return -EINVAL;
	}
	return 0;

discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst, dst ? dst->neighbour : NULL);
	kfree_skb(skb);
	return -EINVAL;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		return neigh->ops->queue_xmit(skb);
	kfree_skb(skb);
	return -EINVAL;
}

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff*)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = back->stamp.tv_usec - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	long sched_next = net_random()%p->proxy_delay;

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}
	skb->stamp.tv_sec = 0;
	skb->stamp.tv_usec = now + sched_next;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		long tval = tbl->proxy_timer.expires - now;
		if (tval < sched_next)
			sched_next = tval;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, now + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl)
{
	struct neigh_parms *p;
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));
		p->tbl = tbl;
		p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
		if (dev && dev->neigh_setup) {
			if (dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}
		}
		write_lock_bh(&tbl->lock);
		p->next = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (parms == NULL || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			write_unlock_bh(&tbl->lock);
#ifdef CONFIG_SYSCTL
			neigh_sysctl_unregister(parms);
#endif
			kfree(parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;

	tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (tbl->kmem_cachep == NULL)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     (tbl->entry_size+15)&~15,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

#ifdef CONFIG_SMP
	tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer), (unsigned long)tbl);
#endif
	init_timer(&tbl->gc_timer);
	tbl->lock = RW_LOCK_UNLOCKED;
	tbl->gc_timer.data = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires = now + tbl->gc_interval + tbl->parms.reachable_time;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time*20;
	write_lock(&neigh_tbl_lock);
	tbl->next = neigh_tables;
	neigh_tables = tbl;
	write_unlock(&neigh_tbl_lock);
}

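/*
 * A hedged sketch, never compiled, of how a protocol brings up its
 * table. The example_tbl name and the field values are illustrative
 * assumptions; the fields themselves (id, family, entry_size, key_len,
 * hash, constructor, gc thresholds) are the ones this file reads, and
 * they must be filled in before neigh_table_init() is called, as the
 * ARP and NDISC tables do.
 */
#if 0
static struct neigh_table example_tbl;

static void example_init(void)
{
	example_tbl.family = AF_INET;
	example_tbl.entry_size = sizeof(struct neighbour) + 4;	/* key appended */
	example_tbl.key_len = 4;
	example_tbl.id = "example_cache";
	/* hash, constructor, parms and the gc_thresh* limits are
	   protocol specific and must also be set before this call. */
	neigh_table_init(&example_tbl);
}
#endif
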
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	tasklet_kill(&tbl->gc_task);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (tbl->entries)
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);
#ifdef CONFIG_SYSCTL
	neigh_sysctl_unregister(&tbl->parms);
#endif
	return 0;
}

#ifdef CONFIG_RTNETLINK

int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = 0;

	if (ndm->ndm_ifindex) {
		if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
			return -ENODEV;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl=neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (nda[NDA_DST-1] == NULL ||
		    nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
			goto out;

		if (ndm->ndm_flags&NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
			goto out;
		}

		if (dev == NULL)
			return -EINVAL;

		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
			neigh_release(n);
		}
out:
		if (dev)
			dev_put(dev);
		return err;
	}
	read_unlock(&neigh_tbl_lock);

	if (dev)
		dev_put(dev);

	return -EADDRNOTAVAIL;
}

int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;

	if (ndm->ndm_ifindex) {
		if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
			return -ENODEV;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl=neigh_tables; tbl; tbl = tbl->next) {
		int err = 0;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (nda[NDA_DST-1] == NULL ||
		    nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
			goto out;
		if (ndm->ndm_flags&NTF_PROXY) {
			err = -ENOBUFS;
			if (pneigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 1))
				err = 0;
			goto out;
		}
		if (dev == NULL)
			return -EINVAL;
		err = -EINVAL;
		if (nda[NDA_LLADDR-1] != NULL &&
		    nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len))
			goto out;
		err = 0;
		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
		if (n) {
			if (nlh->nlmsg_flags&NLM_F_EXCL)
				err = -EEXIST;
		} else if (!(nlh->nlmsg_flags&NLM_F_CREATE))
			err = -ENOENT;
		else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				n = NULL;
			}
		}
		if (err == 0) {
			err = neigh_update(n, nda[NDA_LLADDR-1] ? RTA_DATA(nda[NDA_LLADDR-1]) : NULL,
					   ndm->ndm_state,
					   nlh->nlmsg_flags&NLM_F_REPLACE, 0);
		}
		if (n)
			neigh_release(n);
out:
		if (dev)
			dev_put(dev);
		return err;
	}
	read_unlock(&neigh_tbl_lock);

	if (dev)
		dev_put(dev);
	return -EADDRNOTAVAIL;
}

static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event)
{
	unsigned long now = jiffies;
	struct ndmsg *ndm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;

	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ndm));
	ndm = NLMSG_DATA(nlh);
	ndm->ndm_family = n->ops->family;
	ndm->ndm_flags = n->flags;
	ndm->ndm_type = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);
	locked = 1;
	ndm->ndm_state = n->nud_state;
	if (n->nud_state&NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated = now - n->updated;
	ci.ndm_refcnt = atomic_read(&n->refcnt) - 1;
	read_unlock_bh(&n->lock);
	locked = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neighbour *n;
	int h, s_h;
	int idx, s_idx;

	s_h = cb->args[1];
	s_idx = idx = cb->args[2];
	for (h=0; h <= NEIGH_HASHMASK; h++) {
		if (h < s_h) continue;
		if (h > s_h)
			s_idx = 0;
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n;
		     n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 0) {
				read_unlock_bh(&tbl->lock);
				cb->args[1] = h;
				cb->args[2] = idx;
				return -1;
			}
		}
		read_unlock_bh(&tbl->lock);
	}

	cb->args[1] = h;
	cb->args[2] = idx;
	return skb->len;
}

int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int t;
	int s_t;
	struct neigh_table *tbl;
	int family = ((struct rtgenmsg*)NLMSG_DATA(cb->nlh))->rtgen_family;

	s_t = cb->args[0];

	read_lock(&neigh_tbl_lock);
	for (tbl=neigh_tables, t=0; tbl; tbl = tbl->next, t++) {
		if (t < s_t) continue;
		if (family && tbl->family != family)
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;

	return skb->len;
}

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg)+256);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh = (struct nlmsghdr*)skb->data;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg)+256);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh = (struct nlmsghdr*)skb->data;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */

#endif /* CONFIG_RTNETLINK */

#ifdef CONFIG_SYSCTL

struct neigh_sysctl_table
{
	struct ctl_table_header *sysctl_header;
	ctl_table neigh_vars[17];
	ctl_table neigh_dev[2];
	ctl_table neigh_neigh_dir[2];
	ctl_table neigh_proto_dir[2];
	ctl_table neigh_root_dir[2];
} neigh_sysctl_template = {
	NULL,
	{{NET_NEIGH_MCAST_SOLICIT, "mcast_solicit",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_UCAST_SOLICIT, "ucast_solicit",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_APP_SOLICIT, "app_solicit",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_RETRANS_TIME, "retrans_time",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_REACHABLE_TIME, "base_reachable_time",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec_jiffies},
	 {NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec_jiffies},
	 {NET_NEIGH_GC_STALE_TIME, "gc_stale_time",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec_jiffies},
	 {NET_NEIGH_UNRES_QLEN, "unres_qlen",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_PROXY_QLEN, "proxy_qlen",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_ANYCAST_DELAY, "anycast_delay",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_PROXY_DELAY, "proxy_delay",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_LOCKTIME, "locktime",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_GC_INTERVAL, "gc_interval",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec_jiffies},
	 {NET_NEIGH_GC_THRESH1, "gc_thresh1",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_GC_THRESH2, "gc_thresh2",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {NET_NEIGH_GC_THRESH3, "gc_thresh3",
	  NULL, sizeof(int), 0644, NULL,
	  &proc_dointvec},
	 {0}},

	{{NET_PROTO_CONF_DEFAULT, "default", NULL, 0, 0555, NULL},{0}},
	{{0, "neigh", NULL, 0, 0555, NULL},{0}},
	{{0, NULL, NULL, 0, 0555, NULL},{0}},
	{{CTL_NET, "net", NULL, 0, 0555, NULL},{0}}
};

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name)
{
	struct neigh_sysctl_table *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data = &p->mcast_probes;
	t->neigh_vars[1].data = &p->ucast_probes;
	t->neigh_vars[2].data = &p->app_probes;
	t->neigh_vars[3].data = &p->retrans_time;
	t->neigh_vars[4].data = &p->base_reachable_time;
	t->neigh_vars[5].data = &p->delay_probe_time;
	t->neigh_vars[6].data = &p->gc_staletime;
	t->neigh_vars[7].data = &p->queue_len;
	t->neigh_vars[8].data = &p->proxy_qlen;
	t->neigh_vars[9].data = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;
	if (dev) {
		t->neigh_dev[0].procname = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
	} else {
		t->neigh_vars[12].data = (int*)(p+1);
		t->neigh_vars[13].data = (int*)(p+1) + 1;
		t->neigh_vars[14].data = (int*)(p+1) + 2;
		t->neigh_vars[15].data = (int*)(p+1) + 3;
	}
	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (t->sysctl_header == NULL) {
		kfree(t);
		return -ENOBUFS;
	}
	p->sysctl_table = t;
	return 0;
}

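/*
 * A hedged usage sketch, never compiled: a protocol registers its
 * per-device tunables under /proc/sys/net/<p_name>/neigh/<dev>/ with a
 * call like the one below. The NET_IPV4 / NET_IPV4_NEIGH ids and the
 * "ipv4" name are what ARP would plausibly pass, shown here purely for
 * illustration.
 */
#if 0
static int example_register(struct net_device *dev, struct neigh_parms *p)
{
	return neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH, "ipv4");
}
#endif
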
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}

#endif /* CONFIG_SYSCTL */