/*
 * Connection oriented routing
 * Copyright (C) 2007-2019 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/delay.h>

/*
 * Broadcast data format:
 * version [2]: is 0, may be increased if the protocol changes
 * minversion [2]: is 0, must be increased if a future version of the protocol
 *	is incompatible with the current version
 *
 * Data format of the announce packet "data" field:
 * {command [2] commandlength [2] commanddata [commandlength]}[...]
 */

/* NEIGHCMD_VERSION: version[2] minversion[2] */
#define NEIGHCMD_VERSION 1

/* NEIGHCMD_ADDR: addrlen[2] addr[addrlen] */
#define NEIGHCMD_ADDR 2

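/*
 * Illustrative example (an assumption for illustration, not taken from a
 * protocol spec): a node announcing a 2-byte address 0xab 0xcd would send a
 * "data" field consisting of two commands:
 *
 *   NEIGHCMD_VERSION, commandlength 4, version 0, minversion 0
 *   NEIGHCMD_ADDR,    commandlength 4, addrlen 2, addr 0xab 0xcd
 *
 * i.e. 8 + 8 bytes after the sessionid; the byte order of the __u16 fields
 * is whatever put_u16()/parse_u16() implement.
 */
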
struct neighbor_discdata {
	unsigned long jiffies_created;

	struct list_head lh;
	__be32 sessionid;

	struct net_device *dev;
	char mac[MAX_ADDR_LEN];

	__u8 nb_allocated;

	__u8 rcvd_version;
	__u8 rcvd_addr;

	__u16 version;
	__u16 minversion;

	char *addr;
	__u16 addrlen;
};

static atomic_t packets_in_workqueue = ATOMIC_INIT(0);

static DEFINE_MUTEX(announce_rcv_lock);
static DEFINE_SPINLOCK(announce_snd_lock);
static DEFINE_SPINLOCK(neighbor_list_lock);

static DEFINE_MUTEX(neigh_up_lock);

static DEFINE_SPINLOCK(local_addr_lock);
static char *local_addr;
static __u32 local_addrlen;
static __be32 local_addr_sessionid;

static LIST_HEAD(nb_dd_list); /* protected by announce_rcv_lock */
static __u32 num_nb_dd = 0;
static struct kmem_cache *nb_dd_slab;

static LIST_HEAD(nb_list);
static struct kmem_cache *nb_slab;
static atomic_t num_neighs;

static LIST_HEAD(announce_out_list);

static struct notifier_block netdev_notify;
__u8 netdev_notify_registered = 0;

void neighbor_free(struct kref *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, ref);

	/* printk(KERN_ERR "neighbor free"); */
	BUG_ON(nb->nb_list.next != LIST_POISON1);
	BUG_ON(nb->nb_list.prev != LIST_POISON2);

	if (nb->addr != 0)
		kfree(nb->addr);
	nb->addr = 0;

	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;

	kref_put(&(nb->queue->ref), free_qos);

	kmem_cache_free(nb_slab, nb);
	atomic_dec(&num_neighs);
}

static void stall_timer(struct work_struct *work);

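/*
 * Allocates and initializes a struct neighbor. Returns 0 if the allocation
 * failed or if MAX_NEIGHBORS would be exceeded. The caller still has to set
 * dev/mac/addr and insert the neighbor into nb_list (see add_neighbor()).
 */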
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb;
	__u64 seqno;

	if (atomic_inc_return(&num_neighs) >= MAX_NEIGHBORS) {
		atomic_dec(&num_neighs);
		return 0;
	}

	nb = kmem_cache_alloc(nb_slab, allocflags);
	if (unlikely(nb == 0))
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	kref_init(&(nb->ref));
	atomic_set(&(nb->sessionid_rcv_needed), 1);
	atomic_set(&(nb->sessionid_snd_needed), 1);
	timer_setup(&(nb->cmsg_timer), controlmsg_timerfunc, 0);
	tasklet_init(&(nb->cmsg_task), controlmsg_taskfunc, (unsigned long) nb);
	atomic_set(&(nb->cmsg_task_scheduled), 0);
	atomic_set(&(nb->cmsg_timer_running), 0);
	spin_lock_init(&(nb->cmsg_lock));
	spin_lock_init(&(nb->send_cmsg_lock));
	INIT_LIST_HEAD(&(nb->cmsg_queue_pong));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ack));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ackconn));
	INIT_LIST_HEAD(&(nb->cmsg_queue_conndata));
	INIT_LIST_HEAD(&(nb->cmsg_queue_other));
	nb->last_ping_time = jiffies;
	nb->cmsg_interval = 1000000;
	atomic_set(&(nb->latency_retrans_us), PING_GUESSLATENCY_MS * 1000);
	atomic_set(&(nb->latency_advertised_us), PING_GUESSLATENCY_MS * 1000);
	atomic_set(&(nb->max_remote_ack_delay_us), 1000000);
	atomic_set(&(nb->max_remote_ackconn_delay_us), 1000000);
	atomic_set(&(nb->max_remote_other_delay_us), 1000000);
	spin_lock_init(&(nb->state_lock));
	INIT_DELAYED_WORK(&(nb->stalltimeout_timer), stall_timer);
	spin_lock_init(&(nb->connid_lock));
	spin_lock_init(&(nb->connid_reuse_lock));
	INIT_LIST_HEAD(&(nb->connid_reuse_list));
	spin_lock_init(&(nb->kp_retransmits_lock));
	get_random_bytes((char *) &seqno, sizeof(seqno));
	atomic64_set(&(nb->kpacket_seqno), seqno);
	atomic64_set(&(nb->priority_sum), 0);
	spin_lock_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->stalledconn_list));
	spin_lock_init(&(nb->stalledconn_lock));
	INIT_WORK(&(nb->stalledconn_work), resume_nbstalled_conns);
	spin_lock_init(&(nb->retrans_lock));
	INIT_LIST_HEAD(&(nb->retrans_list));
	INIT_LIST_HEAD(&(nb->retrans_list_conn));

	return nb;
}

int is_from_nb(struct sk_buff *skb, struct neighbor *nb)
{
	int rc;
	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	rc = (skb->dev == nb->dev && memcmp(nb->mac, source_hw,
			MAX_ADDR_LEN) == 0);
	return rc;
}

static struct neighbor *_get_neigh_by_mac(struct net_device *dev,
		char *source_hw)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->dev == dev && memcmp(curr->mac, source_hw,
				MAX_ADDR_LEN) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	return ret;
}

struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
{
	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	return _get_neigh_by_mac(skb->dev, source_hw);
}

struct neighbor *find_neigh(char *addr, __u16 addrlen)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (addr == 0 || addrlen == 0)
		return 0;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addr != 0 && curr->addrlen != 0 &&
				curr->addrlen == addrlen &&
				memcmp(curr->addr, addr, addrlen) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	return ret;
}

//TODO throughput field
__u32 generate_neigh_list(char *buf, __u32 buflen)
{
	struct list_head *currlh;

	int rc;
	__u32 cnt = 0;
	__u32 buf_offset = 4;

	/*
	 * The variable length header rowcount needs to be generated after the
	 * data. This is done by reserving the maximum space they could take.
	 * If they end up being smaller, the data is moved so that there is no
	 * gap.
	 */

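	/*
	 * Worked example of this scheme (assuming encode_len() emits 1 byte
	 * for small values): 4 bytes at buf[0..3] are reserved for the
	 * rowcount and the rows are written starting at buf[4]. If the final
	 * encode_len(buf, 4, cnt) needs only 1 byte, the rows are memmove()d
	 * down by 3 bytes so no gap remains.
	 */
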
	BUG_ON(buflen < buf_offset);
	rc = encode_len(buf + buf_offset, buflen - buf_offset, 2);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	BUG_ON(buflen < buf_offset + 2);
	put_u16(buf + buf_offset, LIST_NEIGH_FIELD_ADDR);
	buf_offset += 2;

	rc = encode_len(buf + buf_offset, buflen - buf_offset, 0);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	BUG_ON(buflen < buf_offset + 2);
	put_u16(buf + buf_offset, LIST_NEIGH_FIELD_LATENCY);
	buf_offset += 2;

	rc = encode_len(buf + buf_offset, buflen - buf_offset, 1);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);
		int state;

		state = get_neigh_state(curr);

		if (state != NEIGHBOR_STATE_ACTIVE)
			goto cont;

		BUG_ON((curr->addr == 0) != (curr->addrlen == 0));
		if (curr->addr == 0 || curr->addrlen == 0)
			goto cont;

		if (unlikely(buflen < buf_offset + 4 + curr->addrlen + 1))
			break;

		rc = encode_len(buf + buf_offset, buflen - buf_offset,
				curr->addrlen);
		BUG_ON(rc <= 0);
		buf_offset += rc;

		BUG_ON(curr->addrlen > buflen - buf_offset);
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		buf[buf_offset] = enc_log_64_11(atomic_read(
				&(curr->latency_advertised_us)));
		buf_offset += 1;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	rc = encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);

	memmove(buf + ((__u32) rc), buf + 4, buf_offset);

	return buf_offset - 4 + ((__u32) rc);
}

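/*
 * Sends a reset for and tears down every conn attached to this neighbor.
 * If sending a reset fails, the stall timer is rescheduled so the remaining
 * conns are retried later.
 */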
static void reset_all_conns(struct neighbor *nb)
{
	while (1) {
		unsigned long iflags;
		struct conn *src_in;
		int rc;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);

		if (list_empty(&(nb->rcv_conn_list))) {
			spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
			break;
		}

		src_in = container_of(nb->rcv_conn_list.next, struct conn,
				source.in.nb_list);
		kref_get(&(src_in->ref));

		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		if (src_in->is_client) {
			spin_lock_bh(&(src_in->rcv_lock));
			spin_lock_bh(&(src_in->reversedir->rcv_lock));
		} else {
			spin_lock_bh(&(src_in->reversedir->rcv_lock));
			spin_lock_bh(&(src_in->rcv_lock));
		}

		if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
				unlikely(src_in->source.in.nb != nb))) {
			rc = 1;
			goto unlock;
		}

		rc = send_reset_conn(nb, src_in->reversedir->target.out.conn_id,
				src_in->source.in.conn_id, 0);

		if (unlikely(rc != 0))
			goto unlock;

		if (src_in->reversedir->isreset == 0)
			src_in->reversedir->isreset = 1;

unlock:
		if (src_in->is_client) {
			spin_unlock_bh(&(src_in->rcv_lock));
			spin_unlock_bh(&(src_in->reversedir->rcv_lock));
		} else {
			spin_unlock_bh(&(src_in->reversedir->rcv_lock));
			spin_unlock_bh(&(src_in->rcv_lock));
		}

		if (likely(rc == 0)) {
			reset_conn(src_in);
			kref_put(&(src_in->ref), free_conn);
		} else {
			kref_put(&(src_in->ref), free_conn);
			kref_get(&(nb->ref));
			schedule_delayed_work(&(nb->stalltimeout_timer), HZ);
			break;
		}
	}
}

static void reset_neighbor(struct neighbor *nb)
{
	int removenblist;
	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	removenblist = (nb->state != NEIGHBOR_STATE_KILLED);
	nb->state = NEIGHBOR_STATE_KILLED;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	/* printk(KERN_ERR "reset_neighbor"); */

	reset_all_conns(nb);
#warning todo empty cmsg queues

	if (removenblist) {
		spin_lock_bh(&neighbor_list_lock);
		list_del(&(nb->nb_list));
		spin_unlock_bh(&neighbor_list_lock);

		kref_put(&(nb->ref), neighbor_free); /* nb_list */
	}
}

#warning todo neighbor does not get reset???
#warning todo reset neighbor if stuck in initial state
static void reset_neighbors(struct net_device *dev)
{
	struct list_head *currlh;

restart:
	spin_lock_bh(&neighbor_list_lock);

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		unsigned long iflags;
		struct neighbor *currnb = container_of(currlh, struct neighbor,
				nb_list);
		int state;

		if (dev != 0 && currnb->dev != dev)
			goto cont;

		spin_lock_irqsave(&(currnb->state_lock), iflags);
		state = currnb->state;
		spin_unlock_irqrestore(&(currnb->state_lock), iflags);

		if (state != NEIGHBOR_STATE_KILLED) {
			spin_unlock_bh(&neighbor_list_lock);
			reset_neighbor(currnb);
			goto restart;
		}

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&neighbor_list_lock);
}

static void stall_timer(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, stalltimeout_timer);

	int nbstate;
	__u32 stall_time_ms;

	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	if (unlikely(nbstate != NEIGHBOR_STATE_STALLED))
		nb->str_timer_pending = 0;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (unlikely(nbstate == NEIGHBOR_STATE_ACTIVE))
		goto out;

	stall_time_ms = jiffies_to_msecs(jiffies -
			nb->state_time.last_roundtrip);

	if (nbstate == NEIGHBOR_STATE_STALLED &&
			stall_time_ms < NB_KILL_TIME_MS) {
		schedule_delayed_work(&(nb->stalltimeout_timer),
				msecs_to_jiffies(NB_KILL_TIME_MS -
				stall_time_ms));
		return;
	}

	reset_neighbor(nb);

out:
	kref_put(&(nb->ref), neighbor_free); /* stall_timer */
}

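/*
 * Returns the neighbor state and, as a side effect, moves an ACTIVE
 * neighbor to STALLED when no roundtrip succeeded for NB_STALL_TIME_MS
 * while enough pings are in transit. stall_timer() then kills the neighbor
 * after NB_KILL_TIME_MS unless enough pings succeed first (see ping_resp()).
 */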
int get_neigh_state(struct neighbor *nb)
{
	int ret;
	unsigned long iflags;
	__u32 stall_time_ms;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	stall_time_ms = jiffies_to_msecs(jiffies -
			nb->state_time.last_roundtrip);

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(stall_time_ms > NB_STALL_TIME_MS) && (
			nb->ping_intransit >= NB_STALL_MINPINGS ||
			nb->ping_intransit >= PING_COOKIES_PER_NEIGH)) {
		nb->state = NEIGHBOR_STATE_STALLED;
		nb->ping_success = 0;
		if (nb->str_timer_pending == 0) {
			nb->str_timer_pending = 1;
			kref_get(&(nb->ref));

			schedule_delayed_work(&(nb->stalltimeout_timer),
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
		}

		/* printk(KERN_ERR "switched to stalled"); */
		BUG_ON(nb->ping_intransit > PING_COOKIES_PER_NEIGH);
	}

	ret = nb->state;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return ret;
}

static struct ping_cookie *find_cookie(struct neighbor *nb, __u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &(nb->cookies[i]);
	}

	return 0;
}

static void reset_cookie(struct neighbor *nb, struct ping_cookie *c)
{
	if (c->cookie == 0)
		return;

	if (nb->cookie_unsent != c->cookie)
		nb->ping_intransit--;

	c->cookie = 0;
}

static __u32 sqrt(__u64 x)
{
	int i;
	__u64 y = 65536;

	if (unlikely(x <= 1))
		return 0;

	/* integer Newton iteration; a fixed number of rounds suffices */
	for (i = 0; i < 30; i++) {
		y = y/2 + div64_u64(x/2, y);
		if (unlikely(y == 0))
			y = 1;
	}

	if (unlikely(y > U32_MAX))
		y = U32_MAX;

	return (__u32) y;
}

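/*
 * Example of the Newton iteration above, with the start value y = 65536
 * used in this sketch: for x = 10^9 the sequence is 65536, 40397, 32575,
 * 31636, 31622, reaching floor(sqrt(10^9)) = 31622 after four rounds and
 * staying there; the error roughly squares away with each step.
 */
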
static __u32 calc_newlatency(struct neighbor *nb_statelocked,
		__u32 oldlatency_us, __s64 newlatency_ns)
{
	__s64 oldlatency = oldlatency_us * 1000L;
	__s64 newlatency;

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL)&&
			nb_statelocked->ping_success < 16))
		newlatency = div64_s64(
				oldlatency * nb_statelocked->ping_success +
				newlatency_ns,
				nb_statelocked->ping_success + 1);
	else
		newlatency = (oldlatency * 15 + newlatency_ns) / 16;

	newlatency = div_s64(newlatency + 500, 1000);

	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > U32_MAX))
		newlatency = U32_MAX;

	return (__u32) newlatency;
}

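/*
 * Worked example of the steady-state branch of calc_newlatency(): with
 * oldlatency_us = 2000 and a 10 ms sample (newlatency_ns = 10000000), the
 * EWMA yields (2000000 * 15 + 10000000) / 16 = 2500000 ns, rounded to
 * 2500 us. A single outlier thus moves the estimate by only 1/16 of the
 * difference.
 */
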
static void update_nb_latency(struct neighbor *nb_statelocked,
		struct ping_cookie *c, __u32 respdelay)
{
	ktime_t now = ktime_get();

	__s64 pinglatency_retrans_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_sent) - respdelay * 1000LL;
	__s64 pinglatency_advertised_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_created) - respdelay * 1000LL;

	__u32 oldlatency_retrans_us =
			atomic_read(&(nb_statelocked->latency_retrans_us));

	__u32 newlatency_retrans_us = calc_newlatency(nb_statelocked,
			oldlatency_retrans_us, pinglatency_retrans_ns);

	atomic_set(&(nb_statelocked->latency_retrans_us),
			newlatency_retrans_us);

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL)&&
			nb_statelocked->ping_success < 16)) {
		nb_statelocked->latency_variance_retrans_us =
				((__u64) newlatency_retrans_us) *
				newlatency_retrans_us;
		atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
				nb_statelocked->latency_variance_retrans_us));
	} else if (pinglatency_retrans_ns > oldlatency_retrans_us *
			((__s64) 1000)) {
		__s64 newdiff = div_s64(pinglatency_retrans_ns -
				oldlatency_retrans_us * ((__s64) 1000), 1000);
		__u32 newdiff32 = (__u32) (unlikely(newdiff >= U32_MAX) ?
				U32_MAX : newdiff);
		__u64 newvar = ((__u64) newdiff32) * newdiff32;

		__u64 oldval = nb_statelocked->latency_variance_retrans_us;

		if (unlikely(unlikely(newvar > (1LL << 55)) || unlikely(
				oldval > (1LL << 55)))) {
			nb_statelocked->latency_variance_retrans_us =
					(oldval / 16) * 15 + newvar/16;
		} else {
			nb_statelocked->latency_variance_retrans_us =
					(oldval * 15 + newvar) / 16;
		}

		atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
				nb_statelocked->latency_variance_retrans_us));
	}

	atomic_set(&(nb_statelocked->latency_advertised_us),
			calc_newlatency(nb_statelocked,
			atomic_read(&(nb_statelocked->latency_advertised_us)),
			pinglatency_advertised_ns));

	nb_statelocked->last_roundtrip_end = now;
}

void ping_resp(struct neighbor *nb, __u32 cookie, __u32 respdelay)
{
	unsigned long iflags;

	struct ping_cookie *c;
	int i;

	int stalledresume = 0;

	int call_connidreuse = 0;

	if (unlikely(cookie == 0))
		return;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	c = find_cookie(nb, cookie);

	if (unlikely(c == 0))
		goto out;

	atomic_set(&(nb->sessionid_snd_needed), 0);

	call_connidreuse = ktime_before_eq(nb->last_roundtrip_end,
			c->time_created);

	update_nb_latency(nb, c, respdelay);

	nb->ping_success++;

	reset_cookie(nb, c);

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 && ktime_before(
				nb->cookies[i].time_created, c->time_created)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
				reset_cookie(nb, &(nb->cookies[i]));
			}
		}
	}

	if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
			nb->state == NEIGHBOR_STATE_STALLED)) {
		call_connidreuse = 0;

		if (nb->state == NEIGHBOR_STATE_INITIAL) {
			__u64 jiffies64 = get_jiffies_64();
			if (nb->state_time.last_state_change == 0)
				nb->state_time.last_state_change = jiffies64;
			if (jiffies64 <= (nb->state_time.last_state_change +
					msecs_to_jiffies(INITIAL_TIME_MS)))
				goto out;
		}

		if ((nb->state == NEIGHBOR_STATE_INITIAL &&
				nb->ping_success >= PING_SUCCESS_CNT_INIT) || (
				nb->state == NEIGHBOR_STATE_STALLED &&
				nb->ping_success >= PING_SUCCESS_CNT_STALLED)) {
			stalledresume =
					(nb->state == NEIGHBOR_STATE_STALLED);
			nb->state = NEIGHBOR_STATE_ACTIVE;
			/* printk(KERN_ERR "changed to active"); */
		}
	}

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) ||
			nb->state == NEIGHBOR_STATE_STALLED)
		nb->state_time.last_roundtrip = c->jiffies_sent;

out:
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (call_connidreuse)
		connid_used_pingsuccess(nb);

	if (unlikely(stalledresume)) {
		spin_lock_bh(&(nb->retrans_lock));
		reschedule_conn_retrans_timer(nb);
		spin_unlock_bh(&(nb->retrans_lock));

		spin_lock_bh(&(nb->stalledconn_lock));
		if (nb->stalledconn_work_scheduled == 0) {
			kref_get(&(nb->ref));
			schedule_work(&(nb->stalledconn_work));
			nb->stalledconn_work_scheduled = 1;
		}
		spin_unlock_bh(&(nb->stalledconn_lock));
	}
}

__u32 add_ping_req(struct neighbor *nb, unsigned long *last_ping_time,
		ktime_t now)
{
	unsigned long iflags;
	struct ping_cookie *c;
	__u32 i;

	__u32 cookie;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (nb->cookie_unsent != 0) {
		c = find_cookie(nb, nb->cookie_unsent);
		if (c != 0)
			goto found;
		nb->cookie_unsent = 0;
	}

	c = find_cookie(nb, 0);
	if (c == 0) {
		get_random_bytes((char *) &i, sizeof(i));
		i = (i % PING_COOKIES_PER_NEIGH);
		c = &(nb->cookies[i]);
		reset_cookie(nb, c);
	}

	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;
	c->time_created = now;

found:
	c->time_sent = now;
	c->jiffies_sent = jiffies;
	c->pongs = 0;

	nb->ping_intransit++;

	*last_ping_time = nb->last_ping_time;
	nb->last_ping_time = c->jiffies_sent;

	cookie = c->cookie;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return cookie;
}

void ping_sent(struct neighbor *nb, __u32 cookie)
{
	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (nb->cookie_unsent == cookie)
		nb->cookie_unsent = 0;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}

void unadd_ping_req(struct neighbor *nb, __u32 cookie,
		unsigned long last_ping_time, int congested)
{
	unsigned long iflags;

	struct ping_cookie *c;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (congested) {
		BUG_ON(nb->cookie_unsent != 0 && nb->cookie_unsent != cookie);
		nb->cookie_unsent = cookie;
	}

	c = find_cookie(nb, cookie);
	if (likely(c != 0)) {
		if (congested == 0)
			c->cookie = 0;
		nb->ping_intransit--;
	}

	nb->last_ping_time = last_ping_time;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}

static int get_ping_forcetime(struct neighbor *nb)
{
	unsigned long iflags;
	int fast;
	int idle;

	if (unlikely(get_neigh_state(nb) != NEIGHBOR_STATE_ACTIVE))
		return PING_FORCETIME_MS;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	fast = ((nb->ping_success < PING_ACTIVE_FASTINITIAL_COUNT) ||
			(nb->ping_intransit > 0));
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (fast)
		return PING_FORCETIME_ACTIVE_FAST_MS;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	idle = list_empty(&(nb->rcv_conn_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

	if (idle)
		return PING_FORCETIME_ACTIVEIDLE_MS;
	else
		return PING_FORCETIME_ACTIVE_MS;
}

static __u32 get_ping_mindelay(struct neighbor *nb_statelocked)
{
	__u32 latency_us = ((__u32) atomic_read(
			&(nb_statelocked->latency_advertised_us)));
	__u32 max_remote_other_delay_us = ((__u32) atomic_read(
			&(nb_statelocked->max_remote_other_delay_us)));
	__u32 mindelay_ms;

#warning todo millisecs???
	if (latency_us < PING_GUESSLATENCY_MS * 1000)
		latency_us = PING_GUESSLATENCY_MS * 1000;

	if (unlikely(nb_statelocked->state != NEIGHBOR_STATE_ACTIVE))
		mindelay_ms = latency_us/1000;
	else
		mindelay_ms = ((latency_us/2 +
				max_remote_other_delay_us/2)/500);

	if (likely(nb_statelocked->ping_intransit < PING_COOKIES_THROTTLESTART))
		return mindelay_ms;

	mindelay_ms = mindelay_ms * (1 + 9 * (nb_statelocked->ping_intransit *
			nb_statelocked->ping_intransit /
			(PING_COOKIES_PER_NEIGH * PING_COOKIES_PER_NEIGH)));

	return mindelay_ms;
}

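/*
 * Example of the throttling above: the factor uses integer division, so it
 * stays 1 while ping_intransit is below PING_COOKIES_PER_NEIGH and becomes
 * 1 + 9 * 1 = 10 once ping_intransit reaches PING_COOKIES_PER_NEIGH, i.e.
 * pings are then sent ten times less often.
 */
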
/*
 * Check whether we want to send a ping now:
 * 0... Do not send ping.
 * 1... Send ping now, but only if it can be merged with other messages. This
 *      can happen way before the time requested by get_next_ping_time().
 * 2... Send ping now, even if a packet has to be created just for the ping
 *      alone.
 */
int time_to_send_ping(struct neighbor *nb)
{
	unsigned long iflags;
	int rc = TIMETOSENDPING_YES;

	__u32 ms_since_last_ping;

	__u32 forcetime = get_ping_forcetime(nb);
	__u32 mindelay;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	ms_since_last_ping = jiffies_to_msecs(jiffies - nb->last_ping_time);

	mindelay = get_ping_mindelay(nb);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;
	else if (forcetime > (mindelay * 3))
		mindelay = forcetime/3;

	if (ms_since_last_ping < mindelay || ms_since_last_ping < (forcetime/4))
		rc = TIMETOSENDPING_NO;
	else if (ms_since_last_ping >= forcetime)
		rc = TIMETOSENDPING_FORCE;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return rc;
}

unsigned long get_next_ping_time(struct neighbor *nb)
{
	unsigned long iflags;
	__u32 mindelay;

	__u32 forcetime = get_ping_forcetime(nb);

	spin_lock_irqsave(&(nb->state_lock), iflags);
	mindelay = get_ping_mindelay(nb);
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;

	return nb->last_ping_time + msecs_to_jiffies(forcetime);
}

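/*
 * Creates a struct neighbor from the discovery data once both version and
 * address have been received. Duplicates (same dev/mac or same address) are
 * detected under neighbor_list_lock and the freshly allocated neighbor is
 * discarded in that case.
 */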
static void add_neighbor(struct neighbor_discdata *nb_dd)
{
	struct neighbor *nb;
	struct list_head *currlh;

	nb = alloc_neighbor(GFP_KERNEL);
	if (unlikely(nb == 0))
		return;

	nb->queue = get_queue(nb_dd->dev);
	if (nb->queue == 0) {
		kmem_cache_free(nb_slab, nb);
		atomic_dec(&num_neighs);
		return;
	}

	dev_hold(nb_dd->dev);
	nb->dev = nb_dd->dev;

	memcpy(nb->mac, nb_dd->mac, MAX_ADDR_LEN);

	nb->addr = nb_dd->addr;
	nb->addrlen = nb_dd->addrlen;

	nb_dd->nb_allocated = 1;
	nb_dd->addr = 0;
	nb_dd->addrlen = 0;

	spin_lock_bh(&neighbor_list_lock);

	currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (is_clientmode() && (nb->addr == 0 || nb->addrlen == 0))
		goto already_present;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		BUG_ON((curr->addr == 0) != (curr->addrlen == 0));

		if (curr->dev == nb->dev &&
				memcmp(curr->mac, nb->mac, MAX_ADDR_LEN) == 0)
			goto already_present;

		if (curr->addr != 0 && curr->addrlen != 0 &&
				nb->addr != 0 && nb->addrlen != 0 &&
				curr->addrlen == nb->addrlen &&
				memcmp(curr->addr, nb->addr, curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

	/* printk(KERN_ERR "add_neigh"); */

	spin_lock_bh(&(local_addr_lock));
	nb->sessionid = local_addr_sessionid ^ nb_dd->sessionid;
	spin_unlock_bh(&(local_addr_lock));

	timer_setup(&(nb->retrans_timer), retransmit_timerfunc, 0);
	tasklet_init(&(nb->retrans_task), retransmit_taskfunc,
			(unsigned long) nb);

	timer_setup(&(nb->retrans_timer_conn), retransmit_conn_timerfunc, 0);
	tasklet_init(&(nb->retrans_task_conn), retransmit_conn_taskfunc,
			(unsigned long) nb);

	spin_lock_bh(&(nb->cmsg_lock));
	nb->last_ping_time = jiffies;
	nb->cmsg_interval = 1000000;
	schedule_controlmsg_timer(nb);
	spin_unlock_bh(&(nb->cmsg_lock));

	list_add_tail(&(nb->nb_list), &nb_list);

	if (0) {
already_present:
		kmem_cache_free(nb_slab, nb);
		atomic_dec(&num_neighs);
	}

	spin_unlock_bh(&neighbor_list_lock);
}

static int parse_announce_version(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	__u16 version;
	__u16 minversion;

	if (unlikely(len < 4))
		return 1;

	version = parse_u16(cmddata);
	cmddata += 2;
	len -= 2;

	minversion = parse_u16(cmddata);
	cmddata += 2;
	len -= 2;

	if (minversion != 0)
		return 1;

	if (nb_dd->rcvd_version != 0) {
		if (nb_dd->version != version ||
				nb_dd->minversion != minversion)
			return 1;
	} else {
		nb_dd->version = version;
		nb_dd->minversion = minversion;
		nb_dd->rcvd_version = 1;
	}

	return 0;
}

static int parse_announce_addaddr(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	__u16 addrlen;
	char *addr;

	BUG_ON(cmd != NEIGHCMD_ADDR);
	BUG_ON((nb_dd->addr == 0) != (nb_dd->addrlen == 0));
	BUG_ON(nb_dd->rcvd_addr == 0 && nb_dd->addr != 0);

	if (unlikely(len < 2))
		return 1;

	addrlen = parse_u16(cmddata);
	cmddata += 2;
	len -= 2;

	if (unlikely(len < addrlen))
		return 1;

	addr = cmddata;

	if (nb_dd->rcvd_addr != 0) {
		if (nb_dd->addrlen != addrlen)
			return 1;
		if (addrlen != 0 && memcmp(nb_dd->addr, addr, addrlen) != 0)
			return 1;
	} else {
		if (addrlen != 0) {
			nb_dd->addr = kmalloc(addrlen, GFP_KERNEL);
			if (unlikely(nb_dd->addr == 0))
				return 1;

			memcpy(nb_dd->addr, addr, addrlen);

			nb_dd->addrlen = addrlen;
		}
		nb_dd->rcvd_addr = 1;
	}

	return 0;
}

static int parse_announce_cmd(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	if (cmd == NEIGHCMD_VERSION) {
		return parse_announce_version(nb_dd, cmd, cmddata, len);
	} else if (cmd == NEIGHCMD_ADDR) {
		return parse_announce_addaddr(nb_dd, cmd, cmddata, len);
	} else {
		return 1;
	}
}

static int parse_announce_cmds(struct neighbor_discdata *nb_dd,
		char *msg, __u32 len)
{
	__u32 zeros = 0;

	while (zeros < len) {
		if (msg[len-zeros-1] != 0)
			break;
		zeros++;
	}

	while (len >= 4 && len > zeros) {
		__u16 cmd;
		__u16 cmdlen;

		cmd = parse_u16(msg);
		msg += 2;
		len -= 2;

		cmdlen = parse_u16(msg);
		msg += 2;
		len -= 2;

		if (cmdlen > len)
			return 1;

		if (parse_announce_cmd(nb_dd, cmd, msg, cmdlen) != 0)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0 && len < zeros)
		return 1;

	return 0;
}

static void neighbor_discdata_free(struct neighbor_discdata *nb_dd)
{
	list_del(&(nb_dd->lh));

	BUG_ON(nb_dd->dev == 0);
	dev_put(nb_dd->dev);

	if (nb_dd->addr != 0) {
		kfree(nb_dd->addr);
		nb_dd->addr = 0;
	}

	kmem_cache_free(nb_dd_slab, nb_dd);

	BUG_ON(num_nb_dd == 0);
	num_nb_dd--;
}

static void announce_send_start(struct net_device *dev, char *mac, int type);

static struct neighbor_discdata *findoralloc_neighbor_discdata(
		struct net_device *dev, char *source_hw, __be32 sessionid)
{
	unsigned long jiffies_tmp = jiffies;
	struct list_head *currlh;

	__u32 neighs;
	struct neighbor_discdata *nb_dd;

	currlh = nb_dd_list.next;
	while (currlh != &nb_dd_list) {
		struct neighbor_discdata *curr = container_of(currlh,
				struct neighbor_discdata, lh);

		currlh = currlh->next;

		if (time_after(jiffies_tmp, curr->jiffies_created +
				HZ * NEIGHBOR_DISCOVERY_TIMEOUT_SEC)) {
			neighbor_discdata_free(curr);
			continue;
		}

		if (curr->sessionid == sessionid && curr->dev == dev &&
				memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0)
			return curr;
	}

	neighs = atomic_read(&num_neighs);
	if (neighs + num_nb_dd < neighs || neighs + num_nb_dd >= MAX_NEIGHBORS)
		return 0;

	nb_dd = kmem_cache_alloc(nb_dd_slab, GFP_KERNEL);
	if (unlikely(nb_dd == 0))
		return 0;

	memset(nb_dd, 0, sizeof(struct neighbor_discdata));

	nb_dd->sessionid = sessionid;

	dev_hold(dev);
	nb_dd->dev = dev;

	memcpy(nb_dd->mac, source_hw, MAX_ADDR_LEN);

	list_add_tail(&(nb_dd->lh), &nb_dd_list);
	nb_dd->jiffies_created = jiffies_tmp;

	num_nb_dd++;

	if (is_clientmode())
		announce_send_start(dev, source_hw, ANNOUNCE_TYPE_UNICAST);

	return nb_dd;
}

static void parse_announce(struct net_device *dev, char *source_hw,
		char *msg, __u32 len)
{
	__be32 sessionid;
	struct neighbor_discdata *nb_dd;

	if (unlikely(len < 4))
		return;

	sessionid = parse_be32(msg);
	msg += 4;
	len -= 4;

	nb_dd = findoralloc_neighbor_discdata(dev, source_hw, sessionid);
	if (unlikely(nb_dd == 0))
		return;

	if (parse_announce_cmds(nb_dd, msg, len) != 0)
		goto discard;

	if (nb_dd->rcvd_version != 0 && nb_dd->rcvd_addr != 0) {
		add_neighbor(nb_dd);

discard:
		neighbor_discdata_free(nb_dd);
	}
}

static void _rcv_announce(struct work_struct *work)
{
	struct skb_procstate *ps = container_of(work,
			struct skb_procstate, funcstate.announce1.work);
	struct sk_buff *skb = skb_from_pstate(ps);

	char source_hw[MAX_ADDR_LEN];

	struct neighbor *nb;

	char *msg;
	__u16 len;

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	nb = _get_neigh_by_mac(skb->dev, source_hw);
	if (nb != 0) {
		kref_put(&(nb->ref), neighbor_free);
		goto discard;
	}

	if (unlikely(skb->len > 65535 || skb->len < 0))
		goto discard;
	len = (__u16) skb->len;

	msg = cor_pull_skb(skb, len);
	if (msg == 0)
		goto discard;

	mutex_lock(&(announce_rcv_lock));
	parse_announce(skb->dev, source_hw, msg, len);
	mutex_unlock(&(announce_rcv_lock));

discard:
	kfree_skb(skb);

	atomic_dec(&packets_in_workqueue);
}

int rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	int queuelen;

	queuelen = atomic_inc_return(&packets_in_workqueue);

	BUG_ON(queuelen <= 0);

	if (queuelen > MAX_PACKETS_IN_RCVQUEUE) {
		atomic_dec(&packets_in_workqueue);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	INIT_WORK(&(ps->funcstate.announce1.work), _rcv_announce);
	schedule_work(&(ps->funcstate.announce1.work));
	return NET_RX_SUCCESS;
}

static int __send_announce(struct announce_data *ann)
{
	__u32 len;
	__u32 offset = 0;

	__u32 local_addrlen_tmp;

	char *msg = 0;

	struct sk_buff *skb;
	__u32 headroom;

	headroom = LL_RESERVED_SPACE(ann->dev) +
			ann->dev->needed_tailroom;

retry:
	spin_lock_bh(&(local_addr_lock));

	BUG_ON(local_addrlen > 64);

	local_addrlen_tmp = local_addrlen;

	spin_unlock_bh(&(local_addr_lock));

	len = 1 + 4 + 8 + 6 + local_addrlen_tmp;

	skb = alloc_skb(headroom + len, GFP_ATOMIC);
	if (unlikely(skb == 0))
		return 1;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

#warning net_device locking? (other places too)
	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->mac, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	msg = skb_put(skb, len);
	if (unlikely(msg == 0))
		goto out_err;

	spin_lock_bh(&(local_addr_lock));

	if (unlikely(local_addrlen != local_addrlen_tmp)) {
		spin_unlock_bh(&(local_addr_lock));
		kfree_skb(skb);
		goto retry;
	}

	msg[0] = PACKET_TYPE_ANNOUNCE;
	offset++;

	put_be32(msg + offset, local_addr_sessionid); /* sessionid */
	offset += 4;

	put_u16(msg + offset, NEIGHCMD_VERSION); /* command */
	offset += 2;
	put_u16(msg + offset, 4); /* command length */
	offset += 2;
	put_u16(msg + offset, 0); /* version */
	offset += 2;
	put_u16(msg + offset, 0); /* minversion */
	offset += 2;

	put_u16(msg + offset, NEIGHCMD_ADDR); /* command */
	offset += 2;
	put_u16(msg + offset, 2 + local_addrlen); /* command length */
	offset += 2;
	put_u16(msg + offset, local_addrlen); /* addrlen */
	offset += 2;
	if (local_addrlen != 0) {
		memcpy(msg + offset, local_addr, local_addrlen); /* addr */
		offset += local_addrlen;
	}

	spin_unlock_bh(&(local_addr_lock));

	BUG_ON(offset != len);

	return cor_dev_queue_xmit(skb, QOS_CALLER_ANNOUNCE);

out_err:
	kfree_skb(skb);
	return 1;
}

void announce_data_free(struct kref *ref)
{
	struct announce_data *ann = container_of(ref, struct announce_data,
			ref);
	kfree(ann);
}

int _send_announce(struct announce_data *ann, int fromqos)
{
	int reschedule = 0;
	int rc = 0;

	spin_lock_bh(&(announce_snd_lock));

	if (unlikely(ann->dev == 0))
		goto out;

	reschedule = 1;

#warning todo reactivate may_send_announce + set rc
	/* if (fromqos == 0 && may_send_announce(ann->dev) == 0)
		rc = 1;
	else */
	/*rc = */__send_announce(ann);

	if (rc == 0 && ann->type != ANNOUNCE_TYPE_BROADCAST) {
		ann->sndcnt++;
		reschedule = (ann->sndcnt < ANNOUNCE_SEND_UNICAST_MAXCNT ?
				1 : 0);

		if (reschedule == 0) {
			list_del(&(ann->lh));
			kref_put(&(ann->ref), kreffree_bug);
		}
	}

out:
	spin_unlock_bh(&(announce_snd_lock));

	if (unlikely(reschedule == 0)) {
	} else if (rc != 0) {
		struct qos_queue *q = get_queue(ann->dev);
		if (q != 0) {
			qos_enqueue(q, &(ann->rb), QOS_CALLER_ANNOUNCE);
			kref_put(&(q->ref), free_qos);
		}
	} else {
		kref_get(&(ann->ref));
		schedule_delayed_work(&(ann->announce_work), msecs_to_jiffies(
				ANNOUNCE_SEND_PACKETINTELVAL_MS));
	}

	kref_put(&(ann->ref), announce_data_free);

	return rc == 0 ? QOS_RESUME_DONE : QOS_RESUME_CONG_NOPROGRESS;
}

static void send_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	_send_announce(ann, 0);
}

static void announce_send_start(struct net_device *dev, char *mac, int type)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (unlikely(ann == 0)) {
		printk(KERN_ERR "cor cannot allocate memory for sending "
				"announces");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	kref_init(&(ann->ref));

	dev_hold(dev);
	ann->dev = dev;
	ann->type = type;
	memcpy(ann->mac, mac, MAX_ADDR_LEN);

	spin_lock_bh(&(announce_snd_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	spin_unlock_bh(&(announce_snd_lock));

	INIT_DELAYED_WORK(&(ann->announce_work), send_announce);
	kref_get(&(ann->ref));
	schedule_delayed_work(&(ann->announce_work), 1);
}

void announce_send_stop(struct net_device *dev, char *mac, int type)
{
	struct list_head *lh;

	spin_lock_bh(&(announce_snd_lock));

	lh = announce_out_list.next;
	while (lh != &announce_out_list) {
		struct announce_data *ann = container_of(lh,
				struct announce_data, lh);

		lh = lh->next;

		if (dev != 0 && (ann->dev != dev || (
				type != ANNOUNCE_TYPE_BROADCAST && (
				ann->type != type ||
				memcmp(ann->mac, mac, MAX_ADDR_LEN) != 0))))
			continue;

		list_del(&(ann->lh));
		kref_put(&(ann->ref), kreffree_bug);
	}

	spin_unlock_bh(&(announce_snd_lock));
}

int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int rc;

	switch (event) {
	case NETDEV_UP:
		if (dev->flags & IFF_LOOPBACK)
			break;

		rc = create_queue(dev);
		if (rc != 0)
			return 1;

		if (is_clientmode() == 0)
			announce_send_start(dev, dev->broadcast,
					ANNOUNCE_TYPE_BROADCAST);
		break;
	case NETDEV_DOWN:
		printk(KERN_ERR "down 1");
		printk(KERN_ERR "down 2");
		announce_send_stop(dev, 0, ANNOUNCE_TYPE_BROADCAST);
		printk(KERN_ERR "down 3");
		reset_neighbors(dev);
		printk(KERN_ERR "down 4");
		printk(KERN_ERR "down 5");
		break;
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}

void _cor_neighbor_down(void)
{
	spin_lock_bh(&(local_addr_lock));
	if (local_addr != 0) {
		kfree(local_addr);
		local_addr = 0;
		local_addrlen = 0;
	}
	spin_unlock_bh(&(local_addr_lock));

	announce_send_stop(0, 0, ANNOUNCE_TYPE_BROADCAST);

	if (netdev_notify_registered != 0 &&
			unregister_netdevice_notifier(&netdev_notify) != 0) {
		printk(KERN_WARNING "warning: cor_neighbor_down: "
				"unregister_netdevice_notifier failed");
	}

	netdev_notify_registered = 0;
}

void cor_neighbor_down(void)
{
	mutex_lock(&(neigh_up_lock));
	_cor_neighbor_down();
	mutex_unlock(&(neigh_up_lock));
}

int cor_neighbor_up(char *addr2, __u32 addrlen2)
{
	int rc = 0;

	char *addr2_copy = kmalloc(addrlen2, GFP_KERNEL);
	if (unlikely(addr2_copy == 0))
		return 1;

	memcpy(addr2_copy, addr2, addrlen2);

	mutex_lock(&(neigh_up_lock));

	_cor_neighbor_down();

	spin_lock_bh(&(local_addr_lock));

	BUG_ON(local_addr != 0);
	BUG_ON(local_addrlen != 0);

	local_addr = addr2_copy;
	local_addrlen = addrlen2;
	get_random_bytes((char *) &local_addr_sessionid,
			sizeof(local_addr_sessionid));

	spin_unlock_bh(&(local_addr_lock));

	BUG_ON(netdev_notify_registered != 0);

	if (register_netdevice_notifier(&netdev_notify) != 0)
		goto out_err;

	netdev_notify_registered = 1;

	goto out;

out_err:
	rc = 1;

	spin_lock_bh(&(local_addr_lock));
	kfree(local_addr);
	local_addr = 0;
	local_addrlen = 0;
	spin_unlock_bh(&(local_addr_lock));

out:
	mutex_unlock(&(neigh_up_lock));

	return rc;
}

int is_clientmode(void)
{
	int rc;
	spin_lock_bh(&(local_addr_lock));
	rc = (local_addrlen == 0 ? 1 : 0);
	spin_unlock_bh(&(local_addr_lock));
	return rc;
}

int __init cor_neighbor_init(void)
{
	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
			0, 0);
	if (unlikely(nb_slab == 0))
		return -ENOMEM;

	nb_dd_slab = kmem_cache_create("cor_neighbor_discoverydata",
			sizeof(struct neighbor_discdata), 8, 0, 0);
	if (unlikely(nb_dd_slab == 0))
		return -ENOMEM;

	atomic_set(&num_neighs, 0);

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;

	return 0;
}

MODULE_LICENSE("GPL");