2 * Generic address resolution entity
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
17 #include <linux/config.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/socket.h>
21 #include <linux/sched.h>
22 #include <linux/netdevice.h>
24 #include <linux/sysctl.h>
26 #include <net/neighbour.h>
29 #include <linux/rtnetlink.h>
/* Debug print helpers: NEIGH_PRINTK always prints via printk(),
 * NEIGH_NOPRINTK compiles to nothing.  Levels 1 and 2 are silent by
 * default. */
33 #define NEIGH_PRINTK(x...) printk(x)
34 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
35 #define NEIGH_PRINTK0 NEIGH_PRINTK
36 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
37 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
/* NOTE(review): the two redefinitions below are only valid under the
 * NEIGH_DEBUG >= 1 / NEIGH_DEBUG >= 2 preprocessor guards (with
 * matching #undef lines), which were lost in extraction -- confirm
 * against the original file. */
41 #define NEIGH_PRINTK1 NEIGH_PRINTK
45 #define NEIGH_PRINTK2 NEIGH_PRINTK
48 static void neigh_timer_handler(unsigned long arg
);
50 static void neigh_app_notify(struct neighbour
*n
);
52 static int pneigh_ifdown(struct neigh_table
*tbl
, struct net_device
*dev
);
54 static int neigh_glbl_allocs
;
55 static struct neigh_table
*neigh_tables
;
57 #if defined(__i386__) && defined(CONFIG_SMP)
58 #define ASSERT_WL(n) if ((int)((n)->lock.lock) > 0) { printk("WL assertion failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
60 #define ASSERT_WL(n) do { } while(0)
64 Neighbour hash table buckets are protected with rwlock tbl->lock.
66 - All the scans/updates to hash buckets MUST be made under this lock.
67 - NOTHING clever should be made under this lock: no callbacks
68 to protocol backends, no attempts to send something to network.
69 It will result in deadlocks, if backend/driver wants to use neighbour
71 - If the entry requires some non-trivial actions, increase
72 its reference count and release table lock.
74 Neighbour entries are protected:
75 - with reference count.
76 - with rwlock neigh->lock
78 Reference count prevents destruction.
80 neigh->lock mainly serializes ll address data and its validity state.
81 However, the same lock is used to protect another entry fields:
85 Again, nothing clever shall be made under neigh->lock,
86 the most complicated procedure, which we allow is dev->hard_header.
87 It is supposed, that dev->hard_header is simplistic and does
88 not make callbacks to neighbour tables.
90 The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
91 list of neighbour tables. This list is used only in process context,
94 static rwlock_t neigh_tbl_lock
= RW_LOCK_UNLOCKED
;
/*
 * neigh_blackhole(): output handler installed on neighbour entries
 * being torn down, so packets routed through them are discarded
 * instead of transmitted.
 * NOTE(review): the body was lost in extraction; upstream it frees the
 * skb and returns -ENETDOWN -- confirm against the original file.
 */
96 static int neigh_blackhole(struct sk_buff
*skb
)
103 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
104 * It corresponds to default IPv6 settings and is not overridable,
105 * because it is really a reasonable choice.
/* Pick a pseudo-random reachable time, uniformly distributed over
 * (base/2, 3*base/2), as mandated by the default IPv6 NUD settings. */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	unsigned long half = base >> 1;

	return half + (net_random() % base);
}
114 static int neigh_forced_gc(struct neigh_table
*tbl
)
119 for (i
=0; i
<=NEIGH_HASHMASK
; i
++) {
120 struct neighbour
*n
, **np
;
122 np
= &tbl
->hash_buckets
[i
];
123 write_lock_bh(&tbl
->lock
);
124 while ((n
= *np
) != NULL
) {
125 /* Neighbour record may be discarded if:
126 - nobody refers to it.
127 - it is not premanent
128 - (NEW and probably wrong)
129 INCOMPLETE entries are kept at least for
130 n->parms->retrans_time, otherwise we could
131 flood network with resolution requests.
132 It is not clear, what is better table overflow
135 write_lock(&n
->lock
);
136 if (atomic_read(&n
->refcnt
) == 1 &&
137 !(n
->nud_state
&NUD_PERMANENT
) &&
138 (n
->nud_state
!= NUD_INCOMPLETE
||
139 jiffies
- n
->used
> n
->parms
->retrans_time
)) {
143 write_unlock(&n
->lock
);
147 write_unlock(&n
->lock
);
150 write_unlock_bh(&tbl
->lock
);
153 tbl
->last_flush
= jiffies
;
/*
 * neigh_del_timer(): cancel a pending state-machine timer on @n.
 * Only entries in a NUD_IN_TIMER state can have a timer armed.
 * NOTE(review): the tail of this function (the reference drop after a
 * successful del_timer() and the return statements) was lost in
 * extraction -- confirm against the original file.
 */
157 static int neigh_del_timer(struct neighbour
*n
)
159 if (n
->nud_state
& NUD_IN_TIMER
) {
160 if (del_timer(&n
->timer
)) {
/*
 * pneigh_queue_purge(): drain a proxy skb queue, dequeueing every
 * buffered packet.  Used when a table or device goes away.
 * NOTE(review): the loop body (freeing each dequeued skb) was lost in
 * extraction -- confirm against the original file.
 */
168 static void pneigh_queue_purge(struct sk_buff_head
*list
)
172 while ((skb
= skb_dequeue(list
)) != NULL
) {
178 int neigh_ifdown(struct neigh_table
*tbl
, struct net_device
*dev
)
182 write_lock_bh(&tbl
->lock
);
184 for (i
=0; i
<=NEIGH_HASHMASK
; i
++) {
185 struct neighbour
*n
, **np
;
187 np
= &tbl
->hash_buckets
[i
];
188 while ((n
= *np
) != NULL
) {
189 if (dev
&& n
->dev
!= dev
) {
194 write_lock(&n
->lock
);
198 if (atomic_read(&n
->refcnt
) != 1) {
199 /* The most unpleasant situation.
200 We must destroy neighbour entry,
201 but someone still uses it.
203 The destroy will be delayed until
204 the last user releases us, but
205 we must kill timers etc. and move
208 n
->parms
= &tbl
->parms
;
209 skb_queue_purge(&n
->arp_queue
);
210 n
->output
= neigh_blackhole
;
211 if (n
->nud_state
&NUD_VALID
)
212 n
->nud_state
= NUD_NOARP
;
214 n
->nud_state
= NUD_NONE
;
215 NEIGH_PRINTK2("neigh %p is stray.\n", n
);
217 write_unlock(&n
->lock
);
222 pneigh_ifdown(tbl
, dev
);
223 write_unlock_bh(&tbl
->lock
);
225 del_timer_sync(&tbl
->proxy_timer
);
226 pneigh_queue_purge(&tbl
->proxy_queue
);
230 static struct neighbour
*neigh_alloc(struct neigh_table
*tbl
)
233 unsigned long now
= jiffies
;
235 if (tbl
->entries
> tbl
->gc_thresh3
||
236 (tbl
->entries
> tbl
->gc_thresh2
&&
237 now
- tbl
->last_flush
> 5*HZ
)) {
238 if (neigh_forced_gc(tbl
) == 0 &&
239 tbl
->entries
> tbl
->gc_thresh3
)
243 n
= kmem_cache_alloc(tbl
->kmem_cachep
, SLAB_ATOMIC
);
247 memset(n
, 0, tbl
->entry_size
);
249 skb_queue_head_init(&n
->arp_queue
);
250 n
->lock
= RW_LOCK_UNLOCKED
;
251 n
->updated
= n
->used
= now
;
252 n
->nud_state
= NUD_NONE
;
253 n
->output
= neigh_blackhole
;
254 n
->parms
= &tbl
->parms
;
255 init_timer(&n
->timer
);
256 n
->timer
.function
= neigh_timer_handler
;
257 n
->timer
.data
= (unsigned long)n
;
262 atomic_set(&n
->refcnt
, 1);
/*
 * neigh_lookup(): find an existing neighbour entry for @pkey/@dev in
 * @tbl's hash table.  Takes tbl->lock on the read side only.
 * NOTE(review): extraction dropped several lines here (local
 * declarations, the n->dev == dev half of the match test, the
 * reference grab on a hit, and the return statements) -- confirm
 * against the original file.
 */
267 struct neighbour
*neigh_lookup(struct neigh_table
*tbl
, const void *pkey
,
268 struct net_device
*dev
)
/* Key length is protocol-specific (e.g. 4 for IPv4). */
272 int key_len
= tbl
->key_len
;
274 hash_val
= tbl
->hash(pkey
, dev
);
276 read_lock_bh(&tbl
->lock
);
/* Walk the collision chain for this bucket. */
277 for (n
= tbl
->hash_buckets
[hash_val
]; n
; n
= n
->next
) {
279 memcmp(n
->primary_key
, pkey
, key_len
) == 0) {
284 read_unlock_bh(&tbl
->lock
);
288 struct neighbour
* neigh_create(struct neigh_table
*tbl
, const void *pkey
,
289 struct net_device
*dev
)
291 struct neighbour
*n
, *n1
;
293 int key_len
= tbl
->key_len
;
296 n
= neigh_alloc(tbl
);
298 return ERR_PTR(-ENOBUFS
);
300 memcpy(n
->primary_key
, pkey
, key_len
);
304 /* Protocol specific setup. */
305 if (tbl
->constructor
&& (error
= tbl
->constructor(n
)) < 0) {
307 return ERR_PTR(error
);
310 /* Device specific setup. */
311 if (n
->parms
&& n
->parms
->neigh_setup
&&
312 (error
= n
->parms
->neigh_setup(n
)) < 0) {
314 return ERR_PTR(error
);
317 n
->confirmed
= jiffies
- (n
->parms
->base_reachable_time
<<1);
319 hash_val
= tbl
->hash(pkey
, dev
);
321 write_lock_bh(&tbl
->lock
);
322 for (n1
= tbl
->hash_buckets
[hash_val
]; n1
; n1
= n1
->next
) {
323 if (dev
== n1
->dev
&&
324 memcmp(n1
->primary_key
, pkey
, key_len
) == 0) {
326 write_unlock_bh(&tbl
->lock
);
332 n
->next
= tbl
->hash_buckets
[hash_val
];
333 tbl
->hash_buckets
[hash_val
] = n
;
336 write_unlock_bh(&tbl
->lock
);
337 NEIGH_PRINTK2("neigh %p is created.\n", n
);
341 struct pneigh_entry
* pneigh_lookup(struct neigh_table
*tbl
, const void *pkey
,
342 struct net_device
*dev
, int creat
)
344 struct pneigh_entry
*n
;
346 int key_len
= tbl
->key_len
;
348 hash_val
= *(u32
*)(pkey
+ key_len
- 4);
349 hash_val
^= (hash_val
>>16);
350 hash_val
^= hash_val
>>8;
351 hash_val
^= hash_val
>>4;
352 hash_val
&= PNEIGH_HASHMASK
;
354 read_lock_bh(&tbl
->lock
);
356 for (n
= tbl
->phash_buckets
[hash_val
]; n
; n
= n
->next
) {
357 if (memcmp(n
->key
, pkey
, key_len
) == 0 &&
358 (n
->dev
== dev
|| !n
->dev
)) {
359 read_unlock_bh(&tbl
->lock
);
363 read_unlock_bh(&tbl
->lock
);
367 n
= kmalloc(sizeof(*n
) + key_len
, GFP_KERNEL
);
371 memcpy(n
->key
, pkey
, key_len
);
374 if (tbl
->pconstructor
&& tbl
->pconstructor(n
)) {
379 write_lock_bh(&tbl
->lock
);
380 n
->next
= tbl
->phash_buckets
[hash_val
];
381 tbl
->phash_buckets
[hash_val
] = n
;
382 write_unlock_bh(&tbl
->lock
);
387 int pneigh_delete(struct neigh_table
*tbl
, const void *pkey
, struct net_device
*dev
)
389 struct pneigh_entry
*n
, **np
;
391 int key_len
= tbl
->key_len
;
393 hash_val
= *(u32
*)(pkey
+ key_len
- 4);
394 hash_val
^= (hash_val
>>16);
395 hash_val
^= hash_val
>>8;
396 hash_val
^= hash_val
>>4;
397 hash_val
&= PNEIGH_HASHMASK
;
399 for (np
= &tbl
->phash_buckets
[hash_val
]; (n
=*np
) != NULL
; np
= &n
->next
) {
400 if (memcmp(n
->key
, pkey
, key_len
) == 0 && n
->dev
== dev
) {
401 write_lock_bh(&tbl
->lock
);
403 write_unlock_bh(&tbl
->lock
);
404 if (tbl
->pdestructor
)
413 static int pneigh_ifdown(struct neigh_table
*tbl
, struct net_device
*dev
)
415 struct pneigh_entry
*n
, **np
;
418 for (h
=0; h
<=PNEIGH_HASHMASK
; h
++) {
419 np
= &tbl
->phash_buckets
[h
];
420 while ((n
=*np
) != NULL
) {
421 if (n
->dev
== dev
|| dev
== NULL
) {
423 if (tbl
->pdestructor
)
436 * neighbour must already be out of the table;
439 void neigh_destroy(struct neighbour
*neigh
)
444 printk("Destroying alive neighbour %p from %08lx\n", neigh
,
445 *(((unsigned long*)&neigh
)-1));
449 if (neigh_del_timer(neigh
))
450 printk("Impossible event.\n");
452 while ((hh
= neigh
->hh
) != NULL
) {
453 neigh
->hh
= hh
->hh_next
;
455 write_lock_bh(&hh
->hh_lock
);
456 hh
->hh_output
= neigh_blackhole
;
457 write_unlock_bh(&hh
->hh_lock
);
458 if (atomic_dec_and_test(&hh
->hh_refcnt
))
462 if (neigh
->ops
&& neigh
->ops
->destructor
)
463 (neigh
->ops
->destructor
)(neigh
);
465 skb_queue_purge(&neigh
->arp_queue
);
469 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh
);
472 neigh
->tbl
->entries
--;
473 kmem_cache_free(neigh
->tbl
->kmem_cachep
, neigh
);
476 /* Neighbour state is suspicious;
479 Called with write_locked neigh.
481 static void neigh_suspect(struct neighbour
*neigh
)
485 NEIGH_PRINTK2("neigh %p is suspecteded.\n", neigh
);
489 neigh
->output
= neigh
->ops
->output
;
491 for (hh
= neigh
->hh
; hh
; hh
= hh
->hh_next
)
492 hh
->hh_output
= neigh
->ops
->output
;
495 /* Neighbour state is OK;
498 Called with write_locked neigh.
500 static void neigh_connect(struct neighbour
*neigh
)
504 NEIGH_PRINTK2("neigh %p is connected.\n", neigh
);
508 neigh
->output
= neigh
->ops
->connected_output
;
510 for (hh
= neigh
->hh
; hh
; hh
= hh
->hh_next
)
511 hh
->hh_output
= neigh
->ops
->hh_output
;
515 Transitions NUD_STALE <-> NUD_REACHABLE do not occur
516 when fast path is built: we have no timers assotiated with
517 these states, we do not have time to check state when sending.
518 neigh_periodic_timer check periodically neigh->confirmed
519 time and moves NUD_REACHABLE -> NUD_STALE.
521 If a routine wants to know TRUE entry state, it calls
522 neigh_sync before checking state.
524 Called with write_locked neigh.
527 static void neigh_sync(struct neighbour
*n
)
529 unsigned long now
= jiffies
;
530 u8 state
= n
->nud_state
;
533 if (state
&(NUD_NOARP
|NUD_PERMANENT
))
535 if (state
&NUD_REACHABLE
) {
536 if (now
- n
->confirmed
> n
->parms
->reachable_time
) {
537 n
->nud_state
= NUD_STALE
;
540 } else if (state
&NUD_VALID
) {
541 if (now
- n
->confirmed
< n
->parms
->reachable_time
) {
543 n
->nud_state
= NUD_REACHABLE
;
549 static void SMP_TIMER_NAME(neigh_periodic_timer
)(unsigned long arg
)
551 struct neigh_table
*tbl
= (struct neigh_table
*)arg
;
552 unsigned long now
= jiffies
;
556 write_lock(&tbl
->lock
);
559 * periodicly recompute ReachableTime from random function
562 if (now
- tbl
->last_rand
> 300*HZ
) {
563 struct neigh_parms
*p
;
564 tbl
->last_rand
= now
;
565 for (p
=&tbl
->parms
; p
; p
= p
->next
)
566 p
->reachable_time
= neigh_rand_reach_time(p
->base_reachable_time
);
569 for (i
=0; i
<= NEIGH_HASHMASK
; i
++) {
570 struct neighbour
*n
, **np
;
572 np
= &tbl
->hash_buckets
[i
];
573 while ((n
= *np
) != NULL
) {
576 write_lock(&n
->lock
);
578 state
= n
->nud_state
;
579 if (state
&(NUD_PERMANENT
|NUD_IN_TIMER
)) {
580 write_unlock(&n
->lock
);
584 if ((long)(n
->used
- n
->confirmed
) < 0)
585 n
->used
= n
->confirmed
;
587 if (atomic_read(&n
->refcnt
) == 1 &&
588 (state
== NUD_FAILED
|| now
- n
->used
> n
->parms
->gc_staletime
)) {
591 write_unlock(&n
->lock
);
596 if (n
->nud_state
&NUD_REACHABLE
&&
597 now
- n
->confirmed
> n
->parms
->reachable_time
) {
598 n
->nud_state
= NUD_STALE
;
601 write_unlock(&n
->lock
);
608 mod_timer(&tbl
->gc_timer
, now
+ tbl
->gc_interval
);
609 write_unlock(&tbl
->lock
);
613 static void neigh_periodic_timer(unsigned long arg
)
615 struct neigh_table
*tbl
= (struct neigh_table
*)arg
;
617 tasklet_schedule(&tbl
->gc_task
);
621 static __inline__
int neigh_max_probes(struct neighbour
*n
)
623 struct neigh_parms
*p
= n
->parms
;
624 return p
->ucast_probes
+ p
->app_probes
+ p
->mcast_probes
;
628 /* Called when a timer expires for a neighbour entry. */
630 static void neigh_timer_handler(unsigned long arg
)
632 unsigned long now
= jiffies
;
633 struct neighbour
*neigh
= (struct neighbour
*)arg
;
637 write_lock(&neigh
->lock
);
639 state
= neigh
->nud_state
;
641 if (!(state
&NUD_IN_TIMER
)) {
643 printk("neigh: timer & !nud_in_timer\n");
648 if ((state
&NUD_VALID
) &&
649 now
- neigh
->confirmed
< neigh
->parms
->reachable_time
) {
650 neigh
->nud_state
= NUD_REACHABLE
;
651 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh
);
652 neigh_connect(neigh
);
655 if (state
== NUD_DELAY
) {
656 NEIGH_PRINTK2("neigh %p is probed.\n", neigh
);
657 neigh
->nud_state
= NUD_PROBE
;
658 atomic_set(&neigh
->probes
, 0);
661 if (atomic_read(&neigh
->probes
) >= neigh_max_probes(neigh
)) {
664 neigh
->nud_state
= NUD_FAILED
;
666 neigh
->tbl
->stats
.res_failed
++;
667 NEIGH_PRINTK2("neigh %p is failed.\n", neigh
);
669 /* It is very thin place. report_unreachable is very complicated
670 routine. Particularly, it can hit the same neighbour entry!
672 So that, we try to be accurate and avoid dead loop. --ANK
674 while(neigh
->nud_state
==NUD_FAILED
&& (skb
=__skb_dequeue(&neigh
->arp_queue
)) != NULL
) {
675 write_unlock(&neigh
->lock
);
676 neigh
->ops
->error_report(neigh
, skb
);
677 write_lock(&neigh
->lock
);
679 skb_queue_purge(&neigh
->arp_queue
);
683 neigh
->timer
.expires
= now
+ neigh
->parms
->retrans_time
;
684 add_timer(&neigh
->timer
);
685 write_unlock(&neigh
->lock
);
687 neigh
->ops
->solicit(neigh
, skb_peek(&neigh
->arp_queue
));
688 atomic_inc(&neigh
->probes
);
692 write_unlock(&neigh
->lock
);
694 if (notify
&& neigh
->parms
->app_probes
)
695 neigh_app_notify(neigh
);
697 neigh_release(neigh
);
700 int __neigh_event_send(struct neighbour
*neigh
, struct sk_buff
*skb
)
702 write_lock_bh(&neigh
->lock
);
703 if (!(neigh
->nud_state
&(NUD_CONNECTED
|NUD_DELAY
|NUD_PROBE
))) {
704 if (!(neigh
->nud_state
&(NUD_STALE
|NUD_INCOMPLETE
))) {
705 if (neigh
->parms
->mcast_probes
+ neigh
->parms
->app_probes
) {
706 atomic_set(&neigh
->probes
, neigh
->parms
->ucast_probes
);
707 neigh
->nud_state
= NUD_INCOMPLETE
;
709 neigh
->timer
.expires
= jiffies
+ neigh
->parms
->retrans_time
;
710 add_timer(&neigh
->timer
);
711 write_unlock_bh(&neigh
->lock
);
712 neigh
->ops
->solicit(neigh
, skb
);
713 atomic_inc(&neigh
->probes
);
714 write_lock_bh(&neigh
->lock
);
716 neigh
->nud_state
= NUD_FAILED
;
717 write_unlock_bh(&neigh
->lock
);
724 if (neigh
->nud_state
== NUD_INCOMPLETE
) {
726 if (skb_queue_len(&neigh
->arp_queue
) >= neigh
->parms
->queue_len
) {
727 struct sk_buff
*buff
;
728 buff
= neigh
->arp_queue
.prev
;
729 __skb_unlink(buff
, &neigh
->arp_queue
);
732 __skb_queue_head(&neigh
->arp_queue
, skb
);
734 write_unlock_bh(&neigh
->lock
);
737 if (neigh
->nud_state
== NUD_STALE
) {
738 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh
);
740 neigh
->nud_state
= NUD_DELAY
;
741 neigh
->timer
.expires
= jiffies
+ neigh
->parms
->delay_probe_time
;
742 add_timer(&neigh
->timer
);
745 write_unlock_bh(&neigh
->lock
);
749 static __inline__
void neigh_update_hhs(struct neighbour
*neigh
)
752 void (*update
)(struct hh_cache
*, struct net_device
*, unsigned char*) =
753 neigh
->dev
->header_cache_update
;
756 for (hh
=neigh
->hh
; hh
; hh
=hh
->hh_next
) {
757 write_lock_bh(&hh
->hh_lock
);
758 update(hh
, neigh
->dev
, neigh
->ha
);
759 write_unlock_bh(&hh
->hh_lock
);
766 /* Generic update routine.
767 -- lladdr is new lladdr or NULL, if it is not supplied.
769 -- override==1 allows to override existing lladdr, if it is different.
770 -- arp==0 means that the change is administrative.
772 Caller MUST hold reference count on the entry.
775 int neigh_update(struct neighbour
*neigh
, const u8
*lladdr
, u8
new, int override
, int arp
)
780 struct net_device
*dev
= neigh
->dev
;
782 write_lock_bh(&neigh
->lock
);
783 old
= neigh
->nud_state
;
786 if (arp
&& (old
&(NUD_NOARP
|NUD_PERMANENT
)))
789 if (!(new&NUD_VALID
)) {
790 neigh_del_timer(neigh
);
791 if (old
&NUD_CONNECTED
)
792 neigh_suspect(neigh
);
793 neigh
->nud_state
= new;
795 notify
= old
&NUD_VALID
;
799 /* Compare new lladdr with cached one */
800 if (dev
->addr_len
== 0) {
801 /* First case: device needs no address. */
804 /* The second case: if something is already cached
805 and a new address is proposed:
807 - if they are different, check override flag
810 if (memcmp(lladdr
, neigh
->ha
, dev
->addr_len
) == 0)
816 /* No address is supplied; if we know something,
817 use it, otherwise discard the request.
820 if (!(old
&NUD_VALID
))
826 old
= neigh
->nud_state
;
827 if (new&NUD_CONNECTED
)
828 neigh
->confirmed
= jiffies
;
829 neigh
->updated
= jiffies
;
831 /* If entry was valid and address is not changed,
832 do not change entry state, if new one is STALE.
836 if (lladdr
== neigh
->ha
)
837 if (new == old
|| (new == NUD_STALE
&& (old
&NUD_CONNECTED
)))
840 neigh_del_timer(neigh
);
841 neigh
->nud_state
= new;
842 if (lladdr
!= neigh
->ha
) {
843 memcpy(&neigh
->ha
, lladdr
, dev
->addr_len
);
844 neigh_update_hhs(neigh
);
845 if (!(new&NUD_CONNECTED
))
846 neigh
->confirmed
= jiffies
- (neigh
->parms
->base_reachable_time
<<1);
853 if (new&NUD_CONNECTED
)
854 neigh_connect(neigh
);
856 neigh_suspect(neigh
);
857 if (!(old
&NUD_VALID
)) {
860 /* Again: avoid dead loop if something went wrong */
862 while (neigh
->nud_state
&NUD_VALID
&&
863 (skb
=__skb_dequeue(&neigh
->arp_queue
)) != NULL
) {
864 struct neighbour
*n1
= neigh
;
865 write_unlock_bh(&neigh
->lock
);
866 /* On shaper/eql skb->dst->neighbour != neigh :( */
867 if (skb
->dst
&& skb
->dst
->neighbour
)
868 n1
= skb
->dst
->neighbour
;
870 write_lock_bh(&neigh
->lock
);
872 skb_queue_purge(&neigh
->arp_queue
);
875 write_unlock_bh(&neigh
->lock
);
877 if (notify
&& neigh
->parms
->app_probes
)
878 neigh_app_notify(neigh
);
/*
 * neigh_event_ns(): handle a received neighbour solicitation -- look
 * up (or create, when a link-layer address was supplied or the device
 * needs none) the entry for @saddr, then mark it NUD_STALE with the
 * new @lladdr (override=1, arp=1).
 * NOTE(review): the NULL check on the lookup result and the return of
 * the entry were lost in extraction -- confirm against the original.
 */
883 struct neighbour
* neigh_event_ns(struct neigh_table
*tbl
,
884 u8
*lladdr
, void *saddr
,
885 struct net_device
*dev
)
887 struct neighbour
*neigh
;
/* Create only when we have an lladdr or the device is address-less. */
889 neigh
= __neigh_lookup(tbl
, saddr
, dev
, lladdr
|| !dev
->addr_len
);
891 neigh_update(neigh
, lladdr
, NUD_STALE
, 1, 1);
895 static void neigh_hh_init(struct neighbour
*n
, struct dst_entry
*dst
, u16 protocol
)
897 struct hh_cache
*hh
= NULL
;
898 struct net_device
*dev
= dst
->dev
;
900 for (hh
=n
->hh
; hh
; hh
= hh
->hh_next
)
901 if (hh
->hh_type
== protocol
)
904 if (!hh
&& (hh
= kmalloc(sizeof(*hh
), GFP_ATOMIC
)) != NULL
) {
905 memset(hh
, 0, sizeof(struct hh_cache
));
906 hh
->hh_lock
= RW_LOCK_UNLOCKED
;
907 hh
->hh_type
= protocol
;
908 atomic_set(&hh
->hh_refcnt
, 0);
910 if (dev
->hard_header_cache(n
, hh
)) {
914 atomic_inc(&hh
->hh_refcnt
);
917 if (n
->nud_state
&NUD_CONNECTED
)
918 hh
->hh_output
= n
->ops
->hh_output
;
920 hh
->hh_output
= n
->ops
->output
;
924 atomic_inc(&hh
->hh_refcnt
);
929 /* This function can be used in contexts, where only old dev_queue_xmit
930 worked, e.g. if you want to override the normal output path (eql, shaper),
931 but resolution is not made yet.
934 int neigh_compat_output(struct sk_buff
*skb
)
936 struct net_device
*dev
= skb
->dev
;
938 __skb_pull(skb
, skb
->nh
.raw
- skb
->data
);
940 if (dev
->hard_header
&&
941 dev
->hard_header(skb
, dev
, ntohs(skb
->protocol
), NULL
, NULL
, skb
->len
) < 0 &&
942 dev
->rebuild_header(skb
))
945 return dev_queue_xmit(skb
);
948 /* Slow and careful. */
950 int neigh_resolve_output(struct sk_buff
*skb
)
952 struct dst_entry
*dst
= skb
->dst
;
953 struct neighbour
*neigh
;
955 if (!dst
|| !(neigh
= dst
->neighbour
))
958 __skb_pull(skb
, skb
->nh
.raw
- skb
->data
);
960 if (neigh_event_send(neigh
, skb
) == 0) {
962 struct net_device
*dev
= neigh
->dev
;
963 if (dev
->hard_header_cache
&& dst
->hh
== NULL
) {
964 write_lock_bh(&neigh
->lock
);
966 neigh_hh_init(neigh
, dst
, dst
->ops
->protocol
);
967 err
= dev
->hard_header(skb
, dev
, ntohs(skb
->protocol
), neigh
->ha
, NULL
, skb
->len
);
968 write_unlock_bh(&neigh
->lock
);
970 read_lock_bh(&neigh
->lock
);
971 err
= dev
->hard_header(skb
, dev
, ntohs(skb
->protocol
), neigh
->ha
, NULL
, skb
->len
);
972 read_unlock_bh(&neigh
->lock
);
975 return neigh
->ops
->queue_xmit(skb
);
982 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst
, dst
? dst
->neighbour
: NULL
);
987 /* As fast as possible without hh cache */
989 int neigh_connected_output(struct sk_buff
*skb
)
992 struct dst_entry
*dst
= skb
->dst
;
993 struct neighbour
*neigh
= dst
->neighbour
;
994 struct net_device
*dev
= neigh
->dev
;
996 __skb_pull(skb
, skb
->nh
.raw
- skb
->data
);
998 read_lock_bh(&neigh
->lock
);
999 err
= dev
->hard_header(skb
, dev
, ntohs(skb
->protocol
), neigh
->ha
, NULL
, skb
->len
);
1000 read_unlock_bh(&neigh
->lock
);
1002 return neigh
->ops
->queue_xmit(skb
);
1007 static void neigh_proxy_process(unsigned long arg
)
1009 struct neigh_table
*tbl
= (struct neigh_table
*)arg
;
1010 long sched_next
= 0;
1011 unsigned long now
= jiffies
;
1012 struct sk_buff
*skb
;
1014 spin_lock(&tbl
->proxy_queue
.lock
);
1016 skb
= tbl
->proxy_queue
.next
;
1018 while (skb
!= (struct sk_buff
*)&tbl
->proxy_queue
) {
1019 struct sk_buff
*back
= skb
;
1020 long tdif
= back
->stamp
.tv_usec
- now
;
1024 struct net_device
*dev
= back
->dev
;
1025 __skb_unlink(back
, &tbl
->proxy_queue
);
1026 if (tbl
->proxy_redo
&& netif_running(dev
))
1027 tbl
->proxy_redo(back
);
1032 } else if (!sched_next
|| tdif
< sched_next
)
1035 del_timer(&tbl
->proxy_timer
);
1037 mod_timer(&tbl
->proxy_timer
, jiffies
+ sched_next
);
1038 spin_unlock(&tbl
->proxy_queue
.lock
);
1041 void pneigh_enqueue(struct neigh_table
*tbl
, struct neigh_parms
*p
,
1042 struct sk_buff
*skb
)
1044 unsigned long now
= jiffies
;
1045 long sched_next
= net_random()%p
->proxy_delay
;
1047 if (tbl
->proxy_queue
.qlen
> p
->proxy_qlen
) {
1051 skb
->stamp
.tv_sec
= 0;
1052 skb
->stamp
.tv_usec
= now
+ sched_next
;
1054 spin_lock(&tbl
->proxy_queue
.lock
);
1055 if (del_timer(&tbl
->proxy_timer
)) {
1056 long tval
= tbl
->proxy_timer
.expires
- now
;
1057 if (tval
< sched_next
)
1060 dst_release(skb
->dst
);
1063 __skb_queue_tail(&tbl
->proxy_queue
, skb
);
1064 mod_timer(&tbl
->proxy_timer
, now
+ sched_next
);
1065 spin_unlock(&tbl
->proxy_queue
.lock
);
1069 struct neigh_parms
*neigh_parms_alloc(struct net_device
*dev
, struct neigh_table
*tbl
)
1071 struct neigh_parms
*p
;
1072 p
= kmalloc(sizeof(*p
), GFP_KERNEL
);
1074 memcpy(p
, &tbl
->parms
, sizeof(*p
));
1076 p
->reachable_time
= neigh_rand_reach_time(p
->base_reachable_time
);
1077 if (dev
&& dev
->neigh_setup
) {
1078 if (dev
->neigh_setup(dev
, p
)) {
1083 write_lock_bh(&tbl
->lock
);
1084 p
->next
= tbl
->parms
.next
;
1085 tbl
->parms
.next
= p
;
1086 write_unlock_bh(&tbl
->lock
);
1091 void neigh_parms_release(struct neigh_table
*tbl
, struct neigh_parms
*parms
)
1093 struct neigh_parms
**p
;
1095 if (parms
== NULL
|| parms
== &tbl
->parms
)
1097 write_lock_bh(&tbl
->lock
);
1098 for (p
= &tbl
->parms
.next
; *p
; p
= &(*p
)->next
) {
1101 write_unlock_bh(&tbl
->lock
);
1102 #ifdef CONFIG_SYSCTL
1103 neigh_sysctl_unregister(parms
);
1109 write_unlock_bh(&tbl
->lock
);
1110 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1114 void neigh_table_init(struct neigh_table
*tbl
)
1116 unsigned long now
= jiffies
;
1118 tbl
->parms
.reachable_time
= neigh_rand_reach_time(tbl
->parms
.base_reachable_time
);
1120 if (tbl
->kmem_cachep
== NULL
)
1121 tbl
->kmem_cachep
= kmem_cache_create(tbl
->id
,
1122 (tbl
->entry_size
+15)&~15,
1123 0, SLAB_HWCACHE_ALIGN
,
1127 tasklet_init(&tbl
->gc_task
, SMP_TIMER_NAME(neigh_periodic_timer
), (unsigned long)tbl
);
1129 init_timer(&tbl
->gc_timer
);
1130 tbl
->lock
= RW_LOCK_UNLOCKED
;
1131 tbl
->gc_timer
.data
= (unsigned long)tbl
;
1132 tbl
->gc_timer
.function
= neigh_periodic_timer
;
1133 tbl
->gc_timer
.expires
= now
+ tbl
->gc_interval
+ tbl
->parms
.reachable_time
;
1134 add_timer(&tbl
->gc_timer
);
1136 init_timer(&tbl
->proxy_timer
);
1137 tbl
->proxy_timer
.data
= (unsigned long)tbl
;
1138 tbl
->proxy_timer
.function
= neigh_proxy_process
;
1139 skb_queue_head_init(&tbl
->proxy_queue
);
1141 tbl
->last_flush
= now
;
1142 tbl
->last_rand
= now
+ tbl
->parms
.reachable_time
*20;
1143 write_lock(&neigh_tbl_lock
);
1144 tbl
->next
= neigh_tables
;
1146 write_unlock(&neigh_tbl_lock
);
1149 int neigh_table_clear(struct neigh_table
*tbl
)
1151 struct neigh_table
**tp
;
1153 /* It is not clean... Fix it to unload IPv6 module safely */
1154 del_timer_sync(&tbl
->gc_timer
);
1155 tasklet_kill(&tbl
->gc_task
);
1156 del_timer_sync(&tbl
->proxy_timer
);
1157 pneigh_queue_purge(&tbl
->proxy_queue
);
1158 neigh_ifdown(tbl
, NULL
);
1160 printk(KERN_CRIT
"neighbour leakage\n");
1161 write_lock(&neigh_tbl_lock
);
1162 for (tp
= &neigh_tables
; *tp
; tp
= &(*tp
)->next
) {
1168 write_unlock(&neigh_tbl_lock
);
1169 #ifdef CONFIG_SYSCTL
1170 neigh_sysctl_unregister(&tbl
->parms
);
1175 #ifdef CONFIG_RTNETLINK
1178 int neigh_delete(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1180 struct ndmsg
*ndm
= NLMSG_DATA(nlh
);
1181 struct rtattr
**nda
= arg
;
1182 struct neigh_table
*tbl
;
1183 struct net_device
*dev
= NULL
;
1186 if (ndm
->ndm_ifindex
) {
1187 if ((dev
= dev_get_by_index(ndm
->ndm_ifindex
)) == NULL
)
1191 read_lock(&neigh_tbl_lock
);
1192 for (tbl
=neigh_tables
; tbl
; tbl
= tbl
->next
) {
1193 struct neighbour
*n
;
1195 if (tbl
->family
!= ndm
->ndm_family
)
1197 read_unlock(&neigh_tbl_lock
);
1200 if (nda
[NDA_DST
-1] == NULL
||
1201 nda
[NDA_DST
-1]->rta_len
!= RTA_LENGTH(tbl
->key_len
))
1204 if (ndm
->ndm_flags
&NTF_PROXY
) {
1205 err
= pneigh_delete(tbl
, RTA_DATA(nda
[NDA_DST
-1]), dev
);
1212 n
= neigh_lookup(tbl
, RTA_DATA(nda
[NDA_DST
-1]), dev
);
1214 err
= neigh_update(n
, NULL
, NUD_FAILED
, 1, 0);
1222 read_unlock(&neigh_tbl_lock
);
1227 return -EADDRNOTAVAIL
;
1230 int neigh_add(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1232 struct ndmsg
*ndm
= NLMSG_DATA(nlh
);
1233 struct rtattr
**nda
= arg
;
1234 struct neigh_table
*tbl
;
1235 struct net_device
*dev
= NULL
;
1237 if (ndm
->ndm_ifindex
) {
1238 if ((dev
= dev_get_by_index(ndm
->ndm_ifindex
)) == NULL
)
1242 read_lock(&neigh_tbl_lock
);
1243 for (tbl
=neigh_tables
; tbl
; tbl
= tbl
->next
) {
1245 struct neighbour
*n
;
1247 if (tbl
->family
!= ndm
->ndm_family
)
1249 read_unlock(&neigh_tbl_lock
);
1252 if (nda
[NDA_DST
-1] == NULL
||
1253 nda
[NDA_DST
-1]->rta_len
!= RTA_LENGTH(tbl
->key_len
))
1255 if (ndm
->ndm_flags
&NTF_PROXY
) {
1257 if (pneigh_lookup(tbl
, RTA_DATA(nda
[NDA_DST
-1]), dev
, 1))
1264 if (nda
[NDA_LLADDR
-1] != NULL
&&
1265 nda
[NDA_LLADDR
-1]->rta_len
!= RTA_LENGTH(dev
->addr_len
))
1268 n
= neigh_lookup(tbl
, RTA_DATA(nda
[NDA_DST
-1]), dev
);
1270 if (nlh
->nlmsg_flags
&NLM_F_EXCL
)
1272 } else if (!(nlh
->nlmsg_flags
&NLM_F_CREATE
))
1275 n
= __neigh_lookup_errno(tbl
, RTA_DATA(nda
[NDA_DST
-1]), dev
);
1282 err
= neigh_update(n
, nda
[NDA_LLADDR
-1] ? RTA_DATA(nda
[NDA_LLADDR
-1]) : NULL
,
1284 nlh
->nlmsg_flags
&NLM_F_REPLACE
, 0);
1293 read_unlock(&neigh_tbl_lock
);
1297 return -EADDRNOTAVAIL
;
1301 static int neigh_fill_info(struct sk_buff
*skb
, struct neighbour
*n
,
1302 u32 pid
, u32 seq
, int event
)
1304 unsigned long now
= jiffies
;
1306 struct nlmsghdr
*nlh
;
1307 unsigned char *b
= skb
->tail
;
1308 struct nda_cacheinfo ci
;
1311 nlh
= NLMSG_PUT(skb
, pid
, seq
, event
, sizeof(*ndm
));
1312 ndm
= NLMSG_DATA(nlh
);
1313 ndm
->ndm_family
= n
->ops
->family
;
1314 ndm
->ndm_flags
= n
->flags
;
1315 ndm
->ndm_type
= n
->type
;
1316 ndm
->ndm_ifindex
= n
->dev
->ifindex
;
1317 RTA_PUT(skb
, NDA_DST
, n
->tbl
->key_len
, n
->primary_key
);
1318 read_lock_bh(&n
->lock
);
1320 ndm
->ndm_state
= n
->nud_state
;
1321 if (n
->nud_state
&NUD_VALID
)
1322 RTA_PUT(skb
, NDA_LLADDR
, n
->dev
->addr_len
, n
->ha
);
1323 ci
.ndm_used
= now
- n
->used
;
1324 ci
.ndm_confirmed
= now
- n
->confirmed
;
1325 ci
.ndm_updated
= now
- n
->updated
;
1326 ci
.ndm_refcnt
= atomic_read(&n
->refcnt
) - 1;
1327 read_unlock_bh(&n
->lock
);
1329 RTA_PUT(skb
, NDA_CACHEINFO
, sizeof(ci
), &ci
);
1330 nlh
->nlmsg_len
= skb
->tail
- b
;
1336 read_unlock_bh(&n
->lock
);
1337 skb_trim(skb
, b
- skb
->data
);
1342 static int neigh_dump_table(struct neigh_table
*tbl
, struct sk_buff
*skb
, struct netlink_callback
*cb
)
1344 struct neighbour
*n
;
1349 s_idx
= idx
= cb
->args
[2];
1350 for (h
=0; h
<= NEIGH_HASHMASK
; h
++) {
1351 if (h
< s_h
) continue;
1354 read_lock_bh(&tbl
->lock
);
1355 for (n
= tbl
->hash_buckets
[h
], idx
= 0; n
;
1356 n
= n
->next
, idx
++) {
1359 if (neigh_fill_info(skb
, n
, NETLINK_CB(cb
->skb
).pid
,
1360 cb
->nlh
->nlmsg_seq
, RTM_NEWNEIGH
) <= 0) {
1361 read_unlock_bh(&tbl
->lock
);
1367 read_unlock_bh(&tbl
->lock
);
1375 int neigh_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1379 struct neigh_table
*tbl
;
1380 int family
= ((struct rtgenmsg
*)NLMSG_DATA(cb
->nlh
))->rtgen_family
;
1384 read_lock(&neigh_tbl_lock
);
1385 for (tbl
=neigh_tables
, t
=0; tbl
; tbl
= tbl
->next
, t
++) {
1386 if (t
< s_t
) continue;
1387 if (family
&& tbl
->family
!= family
)
1390 memset(&cb
->args
[1], 0, sizeof(cb
->args
)-sizeof(cb
->args
[0]));
1391 if (neigh_dump_table(tbl
, skb
, cb
) < 0)
1394 read_unlock(&neigh_tbl_lock
);
1402 void neigh_app_ns(struct neighbour
*n
)
1404 struct sk_buff
*skb
;
1405 struct nlmsghdr
*nlh
;
1406 int size
= NLMSG_SPACE(sizeof(struct ndmsg
)+256);
1408 skb
= alloc_skb(size
, GFP_ATOMIC
);
1412 if (neigh_fill_info(skb
, n
, 0, 0, RTM_GETNEIGH
) < 0) {
1416 nlh
= (struct nlmsghdr
*)skb
->data
;
1417 nlh
->nlmsg_flags
= NLM_F_REQUEST
;
1418 NETLINK_CB(skb
).dst_groups
= RTMGRP_NEIGH
;
1419 netlink_broadcast(rtnl
, skb
, 0, RTMGRP_NEIGH
, GFP_ATOMIC
);
1422 static void neigh_app_notify(struct neighbour
*n
)
1424 struct sk_buff
*skb
;
1425 struct nlmsghdr
*nlh
;
1426 int size
= NLMSG_SPACE(sizeof(struct ndmsg
)+256);
1428 skb
= alloc_skb(size
, GFP_ATOMIC
);
1432 if (neigh_fill_info(skb
, n
, 0, 0, RTM_NEWNEIGH
) < 0) {
1436 nlh
= (struct nlmsghdr
*)skb
->data
;
1437 NETLINK_CB(skb
).dst_groups
= RTMGRP_NEIGH
;
1438 netlink_broadcast(rtnl
, skb
, 0, RTMGRP_NEIGH
, GFP_ATOMIC
);
1448 #ifdef CONFIG_SYSCTL
1450 struct neigh_sysctl_table
1452 struct ctl_table_header
*sysctl_header
;
1453 ctl_table neigh_vars
[17];
1454 ctl_table neigh_dev
[2];
1455 ctl_table neigh_neigh_dir
[2];
1456 ctl_table neigh_proto_dir
[2];
1457 ctl_table neigh_root_dir
[2];
1458 } neigh_sysctl_template
= {
1460 {{NET_NEIGH_MCAST_SOLICIT
, "mcast_solicit",
1461 NULL
, sizeof(int), 0644, NULL
,
1463 {NET_NEIGH_UCAST_SOLICIT
, "ucast_solicit",
1464 NULL
, sizeof(int), 0644, NULL
,
1466 {NET_NEIGH_APP_SOLICIT
, "app_solicit",
1467 NULL
, sizeof(int), 0644, NULL
,
1469 {NET_NEIGH_RETRANS_TIME
, "retrans_time",
1470 NULL
, sizeof(int), 0644, NULL
,
1472 {NET_NEIGH_REACHABLE_TIME
, "base_reachable_time",
1473 NULL
, sizeof(int), 0644, NULL
,
1474 &proc_dointvec_jiffies
},
1475 {NET_NEIGH_DELAY_PROBE_TIME
, "delay_first_probe_time",
1476 NULL
, sizeof(int), 0644, NULL
,
1477 &proc_dointvec_jiffies
},
1478 {NET_NEIGH_GC_STALE_TIME
, "gc_stale_time",
1479 NULL
, sizeof(int), 0644, NULL
,
1480 &proc_dointvec_jiffies
},
1481 {NET_NEIGH_UNRES_QLEN
, "unres_qlen",
1482 NULL
, sizeof(int), 0644, NULL
,
1484 {NET_NEIGH_PROXY_QLEN
, "proxy_qlen",
1485 NULL
, sizeof(int), 0644, NULL
,
1487 {NET_NEIGH_ANYCAST_DELAY
, "anycast_delay",
1488 NULL
, sizeof(int), 0644, NULL
,
1490 {NET_NEIGH_PROXY_DELAY
, "proxy_delay",
1491 NULL
, sizeof(int), 0644, NULL
,
1493 {NET_NEIGH_LOCKTIME
, "locktime",
1494 NULL
, sizeof(int), 0644, NULL
,
1496 {NET_NEIGH_GC_INTERVAL
, "gc_interval",
1497 NULL
, sizeof(int), 0644, NULL
,
1498 &proc_dointvec_jiffies
},
1499 {NET_NEIGH_GC_THRESH1
, "gc_thresh1",
1500 NULL
, sizeof(int), 0644, NULL
,
1502 {NET_NEIGH_GC_THRESH2
, "gc_thresh2",
1503 NULL
, sizeof(int), 0644, NULL
,
1505 {NET_NEIGH_GC_THRESH3
, "gc_thresh3",
1506 NULL
, sizeof(int), 0644, NULL
,
1510 {{NET_PROTO_CONF_DEFAULT
, "default", NULL
, 0, 0555, NULL
},{0}},
1511 {{0, "neigh", NULL
, 0, 0555, NULL
},{0}},
1512 {{0, NULL
, NULL
, 0, 0555, NULL
},{0}},
1513 {{CTL_NET
, "net", NULL
, 0, 0555, NULL
},{0}}
1516 int neigh_sysctl_register(struct net_device
*dev
, struct neigh_parms
*p
,
1517 int p_id
, int pdev_id
, char *p_name
)
1519 struct neigh_sysctl_table
*t
;
1521 t
= kmalloc(sizeof(*t
), GFP_KERNEL
);
1524 memcpy(t
, &neigh_sysctl_template
, sizeof(*t
));
1525 t
->neigh_vars
[0].data
= &p
->mcast_probes
;
1526 t
->neigh_vars
[1].data
= &p
->ucast_probes
;
1527 t
->neigh_vars
[2].data
= &p
->app_probes
;
1528 t
->neigh_vars
[3].data
= &p
->retrans_time
;
1529 t
->neigh_vars
[4].data
= &p
->base_reachable_time
;
1530 t
->neigh_vars
[5].data
= &p
->delay_probe_time
;
1531 t
->neigh_vars
[6].data
= &p
->gc_staletime
;
1532 t
->neigh_vars
[7].data
= &p
->queue_len
;
1533 t
->neigh_vars
[8].data
= &p
->proxy_qlen
;
1534 t
->neigh_vars
[9].data
= &p
->anycast_delay
;
1535 t
->neigh_vars
[10].data
= &p
->proxy_delay
;
1536 t
->neigh_vars
[11].data
= &p
->locktime
;
1538 t
->neigh_dev
[0].procname
= dev
->name
;
1539 t
->neigh_dev
[0].ctl_name
= dev
->ifindex
;
1540 memset(&t
->neigh_vars
[12], 0, sizeof(ctl_table
));
1542 t
->neigh_vars
[12].data
= (int*)(p
+1);
1543 t
->neigh_vars
[13].data
= (int*)(p
+1) + 1;
1544 t
->neigh_vars
[14].data
= (int*)(p
+1) + 2;
1545 t
->neigh_vars
[15].data
= (int*)(p
+1) + 3;
1547 t
->neigh_neigh_dir
[0].ctl_name
= pdev_id
;
1549 t
->neigh_proto_dir
[0].procname
= p_name
;
1550 t
->neigh_proto_dir
[0].ctl_name
= p_id
;
1552 t
->neigh_dev
[0].child
= t
->neigh_vars
;
1553 t
->neigh_neigh_dir
[0].child
= t
->neigh_dev
;
1554 t
->neigh_proto_dir
[0].child
= t
->neigh_neigh_dir
;
1555 t
->neigh_root_dir
[0].child
= t
->neigh_proto_dir
;
1557 t
->sysctl_header
= register_sysctl_table(t
->neigh_root_dir
, 0);
1558 if (t
->sysctl_header
== NULL
) {
1562 p
->sysctl_table
= t
;
1566 void neigh_sysctl_unregister(struct neigh_parms
*p
)
1568 if (p
->sysctl_table
) {
1569 struct neigh_sysctl_table
*t
= p
->sysctl_table
;
1570 p
->sysctl_table
= NULL
;
1571 unregister_sysctl_table(t
->sysctl_header
);
1576 #endif /* CONFIG_SYSCTL */