/*
 * Connection oriented routing
 * Copyright (C) 2007-2020 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/delay.h>
/*
 * Broadcast data format:
 *
 * version [2]
 * is 0, may be increased if the protocol changes
 *
 * min_version [2]
 * is 0, must be increased if a future version of the protocol is incompatible
 * to the current version
 *
 * Data format of the announce packet "data" field:
 * {command [2] commandlength [2] commanddata [commandlength]}[...]
 */

/* NEIGHCMD_VERSION: version[2] minversion[2] */
#define NEIGHCMD_VERSION 1

/* NEIGHCMD_ADDR: addrlen[2] addr[addrlen] */
#define NEIGHCMD_ADDR 2
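
/*
 * Illustrative example (a sketch, not a normative part of the format
 * description above): for a node whose address is the 4 bytes "abcd", the
 * announce "data" field as built by __send_announce() below would contain,
 * after the sessionid, the two commands like this (sessionid bytes made up):
 *
 *   xx xx xx xx                     sessionid
 *   00 01 00 04 00 00 00 00         NEIGHCMD_VERSION, length 4,
 *                                   version 0, minversion 0
 *   00 02 00 06 00 04 61 62 63 64   NEIGHCMD_ADDR, length 6,
 *                                   addrlen 4, "abcd"
 */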
struct neighbor_discdata {
	struct list_head lh;
	unsigned long jiffies_created;

	__be32 sessionid;

	struct net_device *dev;
	char mac[MAX_ADDR_LEN];

	__u8 nb_allocated;

	__u8 rcvd_version;
	__u8 rcvd_addr;

	__u16 version;
	__u16 minversion;

	char *addr;
	__u16 addrlen;
};
static atomic_t packets_in_workqueue = ATOMIC_INIT(0);

static DEFINE_MUTEX(announce_rcv_lock);
static DEFINE_SPINLOCK(announce_snd_lock);
static DEFINE_SPINLOCK(neighbor_list_lock);

static DEFINE_MUTEX(neigh_up_lock);

static DEFINE_SPINLOCK(local_addr_lock);
static char *local_addr;
static __u32 local_addrlen;
static __be32 local_addr_sessionid;

static LIST_HEAD(nb_dd_list); /* protected by announce_rcv_lock */
static __u32 num_nb_dd = 0;
static struct kmem_cache *nb_dd_slab;

static LIST_HEAD(nb_list);
static struct kmem_cache *nb_slab;
static atomic_t num_neighs;

static LIST_HEAD(announce_out_list);

static struct notifier_block netdev_notify;
__u8 netdev_notify_registered = 0;
void neighbor_free(struct kref *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, ref);

	WARN_ONCE(list_empty(&(nb->cmsg_queue_pong)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_pong is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ack)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_ack is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ackconn)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_ackconn is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_conndata_lowlat)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_conndata_lowlat is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_conndata_highlat)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_conndata_highlat is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_other)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_other is not empty");
	WARN_ONCE(nb->pending_conn_resets_rb.rb_node != 0,
			"cor neighbor_free(): nb->pending_conn_resets_rb is not empty");
	WARN_ONCE(nb->rb_kp.in_queue != RB_INQUEUE_FALSE,
			"cor neighbor_free(): nb->rb_kp.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb_cr.in_queue != RB_INQUEUE_FALSE,
			"cor neighbor_free(): nb->rb_cr.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb.in_queue != RB_INQUEUE_FALSE,
			"cor neighbor_free(): nb->rb.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(list_empty(&(nb->conns_waiting.lh)) == 0,
			"cor neighbor_free(): nb->conns_waiting.lh is not empty");
	WARN_ONCE(list_empty(&(nb->conns_waiting.lh_nextpass)) == 0,
			"cor neighbor_free(): nb->conns_waiting.lh_nextpass is not empty");
	WARN_ONCE(nb->str_timer_pending != 0,
			"cor neighbor_free(): nb->str_timer_pending is not 0");
	WARN_ONCE(nb->connid_rb.rb_node != 0,
			"cor neighbor_free(): nb->connid_rb is not empty");
	WARN_ONCE(nb->connid_reuse_rb.rb_node != 0,
			"cor neighbor_free(): nb->connid_reuse_rb is not empty");
	WARN_ONCE(list_empty(&(nb->connid_reuse_list)) == 0,
			"cor neighbor_free(): nb->connid_reuse_list is not empty");
	WARN_ONCE(nb->kp_retransmits_rb.rb_node != 0,
			"cor neighbor_free(): nb->kp_retransmits_rb is not empty");
	WARN_ONCE(list_empty(&(nb->rcv_conn_list)) == 0,
			"cor neighbor_free(): nb->rcv_conn_list is not empty");
	WARN_ONCE(nb->stalledconn_work_scheduled != 0,
			"cor neighbor_free(): nb->stalledconn_work_scheduled is not 0");
	WARN_ONCE(list_empty(&(nb->stalledconn_list)) == 0,
			"cor neighbor_free(): nb->stalledconn_list is not empty");
	WARN_ONCE(list_empty(&(nb->retrans_list)) == 0,
			"cor neighbor_free(): nb->retrans_list is not empty");
	WARN_ONCE(list_empty(&(nb->retrans_conn_list)) == 0,
			"cor neighbor_free(): nb->retrans_conn_list is not empty");

	/* printk(KERN_ERR "neighbor free"); */
	BUG_ON(nb->nb_list.next != LIST_POISON1);
	BUG_ON(nb->nb_list.prev != LIST_POISON2);

	if (nb->addr != 0)
		kfree(nb->addr);
	if (nb->dev != 0)
		dev_put(nb->dev);

	kref_put(&(nb->queue->ref), free_qos);
	kmem_cache_free(nb_slab, nb);
	atomic_dec(&num_neighs);
}
static void stall_timer(struct work_struct *work);
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb;
	__u64 seqno;

	if (atomic_inc_return(&num_neighs) >= MAX_NEIGHBORS) {
		atomic_dec(&num_neighs);
		return 0;
	}

	nb = kmem_cache_alloc(nb_slab, allocflags);
	if (unlikely(nb == 0))
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	kref_init(&(nb->ref));
	atomic_set(&(nb->sessionid_rcv_needed), 1);
	atomic_set(&(nb->sessionid_snd_needed), 1);
	timer_setup(&(nb->cmsg_timer), controlmsg_timerfunc, 0);
	spin_lock_init(&(nb->cmsg_lock));
	INIT_LIST_HEAD(&(nb->cmsg_queue_pong));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ack));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ackconn));
	INIT_LIST_HEAD(&(nb->cmsg_queue_conndata_lowlat));
	INIT_LIST_HEAD(&(nb->cmsg_queue_conndata_highlat));
	INIT_LIST_HEAD(&(nb->cmsg_queue_other));
	atomic_set(&(nb->cmsg_pongs_retrans_cnt), 0);
	atomic_set(&(nb->cmsg_othercnt), 0);
	atomic_set(&(nb->cmsg_bulk_readds), 0);
	atomic_set(&(nb->cmsg_delay_conndata), 0);
	nb->last_ping_time = jiffies;
	atomic_set(&(nb->latency_retrans_us), PING_GUESSLATENCY_MS * 1000);
	atomic_set(&(nb->latency_advertised_us), PING_GUESSLATENCY_MS * 1000);
	atomic_set(&(nb->max_remote_ack_delay_us), 1000000);
	atomic_set(&(nb->max_remote_ackconn_delay_us), 1000000);
	atomic_set(&(nb->max_remote_other_delay_us), 1000000);
	spin_lock_init(&(nb->conns_waiting.lock));
	INIT_LIST_HEAD(&(nb->conns_waiting.lh));
	INIT_LIST_HEAD(&(nb->conns_waiting.lh_nextpass));
	spin_lock_init(&(nb->nbcongwin.lock));
	atomic64_set(&(nb->nbcongwin.data_intransit), 0);
	atomic64_set(&(nb->nbcongwin.cwin), 0);
	spin_lock_init(&(nb->state_lock));
	nb->state = NEIGHBOR_STATE_INITIAL;
	nb->state_time.initial_state_since = jiffies;
	INIT_DELAYED_WORK(&(nb->stalltimeout_timer), stall_timer);
	spin_lock_init(&(nb->connid_lock));
	spin_lock_init(&(nb->connid_reuse_lock));
	INIT_LIST_HEAD(&(nb->connid_reuse_list));
	get_random_bytes((char *) &seqno, sizeof(seqno));
	nb->kpacket_seqno = seqno;
	atomic64_set(&(nb->priority_sum), 0);
	spin_lock_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->stalledconn_list));
	spin_lock_init(&(nb->stalledconn_lock));
	INIT_WORK(&(nb->stalledconn_work), resume_nbstalled_conns);
	spin_lock_init(&(nb->retrans_lock));
	INIT_LIST_HEAD(&(nb->retrans_list));
	spin_lock_init(&(nb->retrans_conn_lock));
	INIT_LIST_HEAD(&(nb->retrans_conn_list));

	return nb;
}
int is_from_nb(struct sk_buff *skb, struct neighbor *nb)
{
	int rc;

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	rc = (skb->dev == nb->dev && memcmp(nb->mac, source_hw,
			MAX_ADDR_LEN) == 0);
	return rc;
}
static struct neighbor *_get_neigh_by_mac(struct net_device *dev,
		char *source_hw)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->dev == dev && memcmp(curr->mac, source_hw,
				MAX_ADDR_LEN) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	return ret;
}
struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
{
	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	return _get_neigh_by_mac(skb->dev, source_hw);
}
struct neighbor *find_neigh(char *addr, __u16 addrlen)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (addr == 0 || addrlen == 0)
		return 0;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addr != 0 && curr->addrlen != 0 &&
				curr->addrlen == addrlen &&
				memcmp(curr->addr, addr, addrlen) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	return ret;
}
//TODO throughput field
__u32 generate_neigh_list(char *buf, __u32 buflen)
{
	struct list_head *currlh;

	int rc;
	__u32 cnt = 0;

	__u32 buf_offset = 4;

	/*
	 * The variable length header rowcount needs to be generated after the
	 * data. This is done by reserving the maximum space they could take.
	 * If they end up being smaller, the data is moved so that there is no
	 * gap.
	 */

	BUG_ON(buflen < buf_offset);

	/* field count */
	rc = encode_len(buf + buf_offset, buflen - buf_offset, 2);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	BUG_ON(buflen < buf_offset + 2);
	put_u16(buf + buf_offset, LIST_NEIGH_FIELD_ADDR);
	buf_offset += 2;

	rc = encode_len(buf + buf_offset, buflen - buf_offset, 0);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	BUG_ON(buflen < buf_offset + 2);
	put_u16(buf + buf_offset, LIST_NEIGH_FIELD_LATENCY);
	buf_offset += 2;

	rc = encode_len(buf + buf_offset, buflen - buf_offset, 1);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);
		int state;

		state = get_neigh_state(curr);

		if (state != NEIGHBOR_STATE_ACTIVE)
			goto cont;

		BUG_ON((curr->addr == 0) != (curr->addrlen == 0));
		if (curr->addr == 0 || curr->addrlen == 0)
			goto cont;

		if (unlikely(buflen < buf_offset + 4 + curr->addrlen + 1))
			break;

		rc = encode_len(buf + buf_offset, buflen - buf_offset,
				curr->addrlen); /* addrlen */
		BUG_ON(rc <= 0);
		buf_offset += rc;

		BUG_ON(curr->addrlen > buflen - buf_offset);
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		buf[buf_offset] = enc_log_64_11(atomic_read(
				&(curr->latency_advertised_us)));
		buf_offset += 1;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	rc = encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);

	memmove(buf + ((__u32) rc), buf + 4, buf_offset);

	return buf_offset - 4 + ((__u32) rc);
}
static void reset_all_conns(struct neighbor *nb)
{
	while (1) {
		unsigned long iflags;
		struct conn *src_in;
		int rc;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);

		if (list_empty(&(nb->rcv_conn_list))) {
			spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
			break;
		}

		src_in = container_of(nb->rcv_conn_list.next, struct conn,
				source.in.nb_list);
		kref_get(&(src_in->ref));

		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		if (src_in->is_client) {
			spin_lock_bh(&(src_in->rcv_lock));
			spin_lock_bh(&(src_in->reversedir->rcv_lock));
		} else {
			spin_lock_bh(&(src_in->reversedir->rcv_lock));
			spin_lock_bh(&(src_in->rcv_lock));
		}

		if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
				unlikely(src_in->source.in.nb != nb)))
			goto unlock;

		rc = send_reset_conn(nb, src_in->reversedir->target.out.conn_id
				/* remaining arguments elided in this listing */);

		if (unlikely(rc != 0))
			goto unlock;

		if (src_in->reversedir->isreset == 0)
			src_in->reversedir->isreset = 1;

unlock:
		if (src_in->is_client) {
			spin_unlock_bh(&(src_in->rcv_lock));
			spin_unlock_bh(&(src_in->reversedir->rcv_lock));
		} else {
			spin_unlock_bh(&(src_in->reversedir->rcv_lock));
			spin_unlock_bh(&(src_in->rcv_lock));
		}

		if (likely(rc == 0)) {
			/* ... */
			kref_put(&(src_in->ref), free_conn);
		} else {
			kref_put(&(src_in->ref), free_conn);
			kref_get(&(nb->ref));
			schedule_delayed_work(&(nb->stalltimeout_timer), HZ);
			break;
		}
	}
}
static void reset_neighbor(struct neighbor *nb)
{
	int removenblist;
	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	removenblist = (nb->state != NEIGHBOR_STATE_KILLED);
	nb->state = NEIGHBOR_STATE_KILLED;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	/* if (removenblist)
		printk(KERN_ERR "reset_neighbor"); */

	reset_all_conns(nb);
	delete_connid_reuse_items(nb);

	if (removenblist) {
		spin_lock_bh(&neighbor_list_lock);
		list_del(&(nb->nb_list));
		spin_unlock_bh(&neighbor_list_lock);

		kref_put(&(nb->ref), neighbor_free); /* nb_list */
	}
}
static void reset_neighbors(struct net_device *dev)
{
	struct list_head *currlh;

restart:
	spin_lock_bh(&neighbor_list_lock);

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		unsigned long iflags;
		struct neighbor *currnb = container_of(currlh, struct neighbor,
				nb_list);
		int state;

		if (dev != 0 && currnb->dev != dev)
			goto cont;

		spin_lock_irqsave(&(currnb->state_lock), iflags);
		state = currnb->state;
		spin_unlock_irqrestore(&(currnb->state_lock), iflags);

		if (state != NEIGHBOR_STATE_KILLED) {
			spin_unlock_bh(&neighbor_list_lock);
			reset_neighbor(currnb);
			goto restart;
		}

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&neighbor_list_lock);
}
static void stall_timer(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, stalltimeout_timer);

	int stall_time_ms;
	int nbstate;

	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (nbstate == NEIGHBOR_STATE_STALLED) {
		stall_time_ms = jiffies_to_msecs(jiffies -
				nb->state_time.last_roundtrip);

		if (stall_time_ms < NB_KILL_TIME_MS) {
			schedule_delayed_work(&(nb->stalltimeout_timer),
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
			return;
		}

		reset_neighbor(nb);
	}

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nb->str_timer_pending = 0;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	kref_put(&(nb->ref), neighbor_free); /* stall_timer */
}
#warning todo reset_neighbor acquires locks which are held by callers (e.g. free_conn_acks --> nb->cmsg_lock)
int get_neigh_state(struct neighbor *nb)
{
	int ret;
	unsigned long iflags;
	int stall_time_ms;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	stall_time_ms = jiffies_to_msecs(jiffies -
			nb->state_time.last_roundtrip);

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(stall_time_ms > NB_STALL_TIME_MS) && (
			nb->ping_intransit >= NB_STALL_MINPINGS ||
			nb->ping_intransit >= PING_COOKIES_PER_NEIGH)) {
		nb->state = NEIGHBOR_STATE_STALLED;
		nb->ping_success = 0;
		if (nb->str_timer_pending == 0) {
			nb->str_timer_pending = 1;
			kref_get(&(nb->ref));

			schedule_delayed_work(&(nb->stalltimeout_timer),
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
		}

		/* printk(KERN_ERR "changed to stalled"); */
		BUG_ON(nb->ping_intransit > PING_COOKIES_PER_NEIGH);
	} else if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL) &&
			time_after(jiffies, nb->state_time.initial_state_since +
			INITIAL_TIME_LIMIT_SEC * HZ)) {
		spin_unlock_irqrestore(&(nb->state_lock), iflags);
		reset_neighbor(nb);
		return NEIGHBOR_STATE_KILLED;
	}

	ret = nb->state;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return ret;
}
static struct ping_cookie *find_cookie(struct neighbor *nb, __u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &(nb->cookies[i]);
	}

	return 0;
}
static void reset_cookie(struct neighbor *nb, struct ping_cookie *c)
{
	if (c->cookie == 0)
		return;

	if (nb->cookie_unsent != c->cookie)
		nb->ping_intransit--;

	c->cookie = 0;
}
static __u32 sqrt(__u64 x)
{
	int i;
	__u64 y = 65536;

	if (unlikely(x <= 1))
		return (__u32) x;

	/* Newton iteration for the integer square root */
	for (i = 0; i < 32; i++) {
		y = y/2 + div64_u64(x/2, y);
		if (unlikely(y == 0))
			y = 1;
	}

	if (unlikely(y > U32_MAX))
		y = U32_MAX;

	return (__u32) y;
}
static __u32 calc_newlatency(struct neighbor *nb_statelocked,
		__u32 oldlatency_us, __s64 newlatency_ns)
{
	__s64 oldlatency = oldlatency_us * 1000LL;
	__s64 newlatency;

	if (unlikely(unlikely(nb_statelocked->state ==
			NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16))
		newlatency = div64_s64(
				oldlatency * nb_statelocked->ping_success +
				newlatency_ns,
				nb_statelocked->ping_success + 1);
	else
		newlatency = (oldlatency * 15 + newlatency_ns) / 16;

	newlatency = div_s64(newlatency + 500, 1000);

	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > U32_MAX))
		newlatency = U32_MAX;

	return (__u32) newlatency;
}
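
/*
 * Worked example (illustrative): with oldlatency_us == 2000 and a new
 * measurement newlatency_ns == 10000000 (10ms), the steady state branch
 * computes (2000000 * 15 + 10000000) / 16 = 2500000ns, rounded to 2500us.
 * A single outlier therefore moves the estimate by only 1/16 of the
 * difference.
 */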
static void update_nb_latency(struct neighbor *nb_statelocked,
		struct ping_cookie *c, __u32 respdelay)
{
	ktime_t now = ktime_get();

	__s64 pinglatency_retrans_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_sent) - respdelay * 1000LL;
	__s64 pinglatency_advertised_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_created) - respdelay * 1000LL;

	__u32 oldlatency_retrans_us =
			atomic_read(&(nb_statelocked->latency_retrans_us));

	__u32 newlatency_retrans_us = calc_newlatency(nb_statelocked,
			oldlatency_retrans_us, pinglatency_retrans_ns);

	atomic_set(&(nb_statelocked->latency_retrans_us),
			newlatency_retrans_us);

	if (unlikely(unlikely(nb_statelocked->state ==
			NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16)) {
		nb_statelocked->latency_variance_retrans_us =
				((__u64) newlatency_retrans_us) *
				newlatency_retrans_us;
		atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
				nb_statelocked->latency_variance_retrans_us));
	} else if (pinglatency_retrans_ns > oldlatency_retrans_us *
			((__s64) 1000)) {
		__s64 newdiff = div_s64(pinglatency_retrans_ns -
				oldlatency_retrans_us * ((__s64) 1000), 1000);
		__u32 newdiff32 = (__u32) (unlikely(newdiff >= U32_MAX) ?
				U32_MAX : newdiff);
		__u64 newvar = ((__u64) newdiff32) * newdiff32;

		__u64 oldval = nb_statelocked->latency_variance_retrans_us;

		if (unlikely(unlikely(newvar > (1LL << 55)) || unlikely(
				oldval > (1LL << 55)))) {
			/* divide first to avoid overflow */
			nb_statelocked->latency_variance_retrans_us =
					(oldval / 16) * 15 + newvar/16;
		} else {
			nb_statelocked->latency_variance_retrans_us =
					(oldval * 15 + newvar) / 16;
		}

		atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
				nb_statelocked->latency_variance_retrans_us));
	}

	atomic_set(&(nb_statelocked->latency_advertised_us),
			calc_newlatency(nb_statelocked,
			atomic_read(&(nb_statelocked->latency_advertised_us)),
			pinglatency_advertised_ns));

	nb_statelocked->last_roundtrip_end = now;
}
void ping_resp(struct neighbor *nb, __u32 cookie, __u32 respdelay)
{
	unsigned long iflags;

	struct ping_cookie *c;
	int i;

	int stalledresume = 0;

	int call_connidreuse = 0;

	if (unlikely(cookie == 0))
		return;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	c = find_cookie(nb, cookie);

	if (unlikely(c == 0))
		goto out;

	atomic_set(&(nb->sessionid_snd_needed), 0);

	call_connidreuse = ktime_before_eq(nb->last_roundtrip_end,
			c->time_created);

	update_nb_latency(nb, c, respdelay);

	nb->ping_success++;
	nb->ping_intransit--;

	c->cookie = 0;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 && ktime_before(
				nb->cookies[i].time_created, c->time_created)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT)
				reset_cookie(nb, &(nb->cookies[i]));
		}
	}

	if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
			nb->state == NEIGHBOR_STATE_STALLED)) {
		call_connidreuse = 0;

		if ((nb->state == NEIGHBOR_STATE_INITIAL &&
				nb->ping_success >= PING_SUCCESS_CNT_INIT) || (
				nb->state == NEIGHBOR_STATE_STALLED &&
				nb->ping_success >= PING_SUCCESS_CNT_STALLED)) {
			stalledresume = (nb->state == NEIGHBOR_STATE_STALLED);
			nb->state = NEIGHBOR_STATE_ACTIVE;
			/* printk(KERN_ERR "changed to active"); */
		}
	}

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) ||
			nb->state == NEIGHBOR_STATE_STALLED)
		nb->state_time.last_roundtrip = c->jiffies_sent;

out:
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (call_connidreuse)
		connid_used_pingsuccess(nb);

	if (unlikely(stalledresume)) {
		spin_lock_bh(&(nb->retrans_conn_lock));
		reschedule_conn_retrans_timer(nb);
		spin_unlock_bh(&(nb->retrans_conn_lock));

		spin_lock_bh(&(nb->stalledconn_lock));
		if (nb->stalledconn_work_scheduled == 0) {
			kref_get(&(nb->ref));
			schedule_work(&(nb->stalledconn_work));
			nb->stalledconn_work_scheduled = 1;
		}
		spin_unlock_bh(&(nb->stalledconn_lock));
	}
}
__u32 add_ping_req(struct neighbor *nb, unsigned long *last_ping_time,
		ktime_t now)
{
	unsigned long iflags;
	struct ping_cookie *c;
	__u32 i;

	__u32 cookie;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (nb->cookie_unsent != 0) {
		c = find_cookie(nb, nb->cookie_unsent);
		if (c != 0)
			goto found;
		nb->cookie_unsent = 0;
	}

	c = find_cookie(nb, 0);
	if (c == 0) {
		get_random_bytes((char *) &i, sizeof(i));
		i = (i % PING_COOKIES_PER_NEIGH);
		c = &(nb->cookies[i]);
		reset_cookie(nb, c);
	}

	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;
	c->time_created = now;

found:
	c->pongs = 0;
	c->time_sent = now;
	c->jiffies_sent = jiffies;

	nb->ping_intransit++;

	*last_ping_time = nb->last_ping_time;
	nb->last_ping_time = c->jiffies_sent;

	cookie = c->cookie;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return cookie;
}
void ping_sent(struct neighbor *nb, __u32 cookie)
{
	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (nb->cookie_unsent == cookie)
		nb->cookie_unsent = 0;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}
void unadd_ping_req(struct neighbor *nb, __u32 cookie,
		unsigned long last_ping_time, int congested)
{
	unsigned long iflags;

	struct ping_cookie *c;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (congested) {
		BUG_ON(nb->cookie_unsent != 0 && nb->cookie_unsent != cookie);
		nb->cookie_unsent = cookie;
	}

	c = find_cookie(nb, cookie);
	if (likely(c != 0)) {
		if (congested == 0)
			c->cookie = 0;
		nb->ping_intransit--;
	}

	nb->last_ping_time = last_ping_time;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}
static int get_ping_forcetime_ms(struct neighbor *nb)
{
	unsigned long iflags;
	int fast;
	int idle;

	if (unlikely(get_neigh_state(nb) != NEIGHBOR_STATE_ACTIVE))
		return PING_FORCETIME_MS;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	fast = ((nb->ping_success < PING_ACTIVE_FASTINITIAL_COUNT) ||
			(nb->ping_intransit > 0));
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (fast)
		return PING_FORCETIME_ACTIVE_FAST_MS;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	idle = list_empty(&(nb->rcv_conn_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

	if (idle)
		return PING_FORCETIME_ACTIVEIDLE_MS;
	else
		return PING_FORCETIME_ACTIVE_MS;
}
static __u32 get_ping_mindelay_ms(struct neighbor *nb_statelocked)
{
	__u32 latency_us = ((__u32) atomic_read(
			&(nb_statelocked->latency_advertised_us)));
	__u32 max_remote_other_delay_us = ((__u32) atomic_read(
			&(nb_statelocked->max_remote_other_delay_us)));
	__u32 mindelay_ms;

	if (latency_us < PING_GUESSLATENCY_MS * 1000)
		latency_us = PING_GUESSLATENCY_MS * 1000;

	if (unlikely(nb_statelocked->state != NEIGHBOR_STATE_ACTIVE))
		mindelay_ms = latency_us/1000;
	else
		mindelay_ms = ((latency_us/2 +
				max_remote_other_delay_us/2)/500);

	if (likely(nb_statelocked->ping_intransit < PING_COOKIES_THROTTLESTART))
		return mindelay_ms;

	mindelay_ms = mindelay_ms * (1 + 9 * (nb_statelocked->ping_intransit *
			nb_statelocked->ping_intransit /
			(PING_COOKIES_PER_NEIGH * PING_COOKIES_PER_NEIGH)));

	return mindelay_ms;
}
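
/*
 * Worked example (illustrative): below PING_COOKIES_THROTTLESTART pings in
 * transit the mindelay is returned unchanged; at
 * ping_intransit == PING_COOKIES_PER_NEIGH the factor above becomes
 * 1 + 9 * 1 = 10, i.e. the minimum delay between pings grows tenfold.
 */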
/*
 * Check whether we want to send a ping now:
 *  0... Do not send ping.
 *  1... Send ping now, but only if it can be merged with other messages.
 *       This can happen way before the time requested by
 *       get_next_ping_time().
 *  2... Send ping now, even if a packet has to be created just for the ping
 *       alone.
 */
int time_to_send_ping(struct neighbor *nb)
{
	unsigned long iflags;
	int rc = TIMETOSENDPING_YES;

	__u32 ms_since_last_ping;

	__u32 forcetime = get_ping_forcetime_ms(nb);
	__u32 mindelay;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	ms_since_last_ping = jiffies_to_msecs(jiffies - nb->last_ping_time);

	mindelay = get_ping_mindelay_ms(nb);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;
	else if (forcetime > (mindelay * 3))
		mindelay = forcetime/3;

	if (ms_since_last_ping < mindelay || ms_since_last_ping < (forcetime/4))
		rc = TIMETOSENDPING_NO;
	else if (ms_since_last_ping >= forcetime)
		rc = TIMETOSENDPING_FORCE;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return rc;
}
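
/*
 * Usage sketch (hypothetical caller, not taken from this file): a send path
 * would typically do
 *
 *	int t = time_to_send_ping(nb);
 *	if (t == TIMETOSENDPING_FORCE)
 *		send a dedicated ping packet
 *	else if (t == TIMETOSENDPING_YES)
 *		attach the ping only if a packet is sent anyway
 *	else
 *		send nothing
 */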
unsigned long get_next_ping_time(struct neighbor *nb)
{
	unsigned long iflags;
	__u32 mindelay;
	__u32 forcetime = get_ping_forcetime_ms(nb);

	spin_lock_irqsave(&(nb->state_lock), iflags);
	mindelay = get_ping_mindelay_ms(nb);
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;

	return nb->last_ping_time + msecs_to_jiffies(forcetime);
}
static void add_neighbor(struct neighbor_discdata *nb_dd)
{
	struct neighbor *nb;
	struct list_head *currlh;

	nb = alloc_neighbor(GFP_KERNEL);
	if (unlikely(nb == 0))
		return;

	nb->queue = get_queue(nb_dd->dev);
	if (nb->queue == 0) {
		kmem_cache_free(nb_slab, nb);
		atomic_dec(&num_neighs);
		return;
	}

	dev_hold(nb_dd->dev);
	nb->dev = nb_dd->dev;

	memcpy(nb->mac, nb_dd->mac, MAX_ADDR_LEN);

	nb->addr = nb_dd->addr;
	nb->addrlen = nb_dd->addrlen;

	nb_dd->nb_allocated = 1;

	spin_lock_bh(&neighbor_list_lock);

	currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (is_clientmode() && (nb->addr == 0 || nb->addrlen == 0))
		goto already_present;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		BUG_ON((curr->addr == 0) != (curr->addrlen == 0));

		if (curr->dev == nb->dev &&
				memcmp(curr->mac, nb->mac, MAX_ADDR_LEN) == 0)
			goto already_present;

		if (curr->addr != 0 && curr->addrlen != 0 &&
				nb->addr != 0 && nb->addrlen != 0 &&
				curr->addrlen == nb->addrlen &&
				memcmp(curr->addr, nb->addr, curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

	/* printk(KERN_ERR "add_neigh"); */

	spin_lock_bh(&(local_addr_lock));
	nb->sessionid = local_addr_sessionid ^ nb_dd->sessionid;
	spin_unlock_bh(&(local_addr_lock));

	timer_setup(&(nb->retrans_timer), retransmit_timerfunc, 0);

	timer_setup(&(nb->retrans_conn_timer), retransmit_conn_timerfunc, 0);

	spin_lock_bh(&(nb->cmsg_lock));
	nb->last_ping_time = jiffies;
	schedule_controlmsg_timer(nb);
	spin_unlock_bh(&(nb->cmsg_lock));

	list_add_tail(&(nb->nb_list), &nb_list);

	if (0) {
already_present:
		kmem_cache_free(nb_slab, nb);
		atomic_dec(&num_neighs);
	}

	spin_unlock_bh(&neighbor_list_lock);
}
static int parse_announce_version(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	__u16 version;
	__u16 minversion;

	if (unlikely(len < 4))
		return 1;

	version = parse_u16(cmddata);
	cmddata += 2;

	minversion = parse_u16(cmddata);
	cmddata += 2;

	if (minversion != 0)
		return 1;

	if (nb_dd->rcvd_version != 0) {
		if (nb_dd->version != version ||
				nb_dd->minversion != minversion)
			return 1;
	} else {
		nb_dd->version = version;
		nb_dd->minversion = minversion;
		nb_dd->rcvd_version = 1;
	}

	return 0;
}
static int parse_announce_addaddr(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	__u16 addrlen;
	char *addr;

	BUG_ON(cmd != NEIGHCMD_ADDR);
	BUG_ON((nb_dd->addr == 0) != (nb_dd->addrlen == 0));
	BUG_ON(nb_dd->rcvd_addr == 0 && nb_dd->addr != 0);

	if (unlikely(len < 2))
		return 1;

	addrlen = parse_u16(cmddata);
	cmddata += 2;
	len -= 2;

	if (unlikely(len < addrlen))
		return 1;
	addr = cmddata;

	if (nb_dd->rcvd_addr != 0) {
		if (nb_dd->addrlen != addrlen)
			return 1;
		if (addrlen != 0 && memcmp(nb_dd->addr, addr, addrlen) != 0)
			return 1;
	} else {
		if (addrlen != 0) {
			nb_dd->addr = kmalloc(addrlen, GFP_KERNEL);
			if (unlikely(nb_dd->addr == 0))
				return 1;

			memcpy(nb_dd->addr, addr, addrlen);
		}
		nb_dd->addrlen = addrlen;

		nb_dd->rcvd_addr = 1;
	}

	return 0;
}
static int parse_announce_cmd(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	if (cmd == NEIGHCMD_VERSION) {
		return parse_announce_version(nb_dd, cmd, cmddata, len);
	} else if (cmd == NEIGHCMD_ADDR) {
		return parse_announce_addaddr(nb_dd, cmd, cmddata, len);
	} else {
		return 1;
	}
}
static int parse_announce_cmds(struct neighbor_discdata *nb_dd,
		char *msg, __u32 len)
{
	__u32 zeros = 0;

	while (zeros < len) {
		if (msg[len - zeros - 1] != 0)
			break;
		zeros++;
	}

	while (len >= 4 && len > zeros) {
		__u16 cmd;
		__u16 cmdlen;

		cmd = parse_u16(msg);
		msg += 2;
		len -= 2;

		cmdlen = parse_u16(msg);
		msg += 2;
		len -= 2;

		if (unlikely(cmdlen > len))
			return 1;

		if (parse_announce_cmd(nb_dd, cmd, msg, cmdlen) != 0)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0 && len < zeros)
		return 1;

	return 0;
}
static void neighbor_discdata_free(struct neighbor_discdata *nb_dd)
{
	list_del(&(nb_dd->lh));

	BUG_ON(nb_dd->dev == 0);
	dev_put(nb_dd->dev);
	nb_dd->dev = 0;

	if (nb_dd->addr != 0) {
		if (nb_dd->nb_allocated == 0)
			kfree(nb_dd->addr);
		nb_dd->addr = 0;
		nb_dd->addrlen = 0;
	}

	kmem_cache_free(nb_dd_slab, nb_dd);

	BUG_ON(num_nb_dd == 0);
	num_nb_dd--;
}
static void announce_send_start(struct net_device *dev, char *mac, int type);
static struct neighbor_discdata *findoralloc_neighbor_discdata(
		struct net_device *dev, char *source_hw, __be32 sessionid)
{
	unsigned long jiffies_tmp = jiffies;
	struct list_head *currlh;

	__u32 neighs;
	struct neighbor_discdata *nb_dd;

	currlh = nb_dd_list.next;
	while (currlh != &nb_dd_list) {
		struct neighbor_discdata *curr = container_of(currlh,
				struct neighbor_discdata, lh);

		currlh = currlh->next;

		if (time_after(jiffies_tmp, curr->jiffies_created +
				HZ * NEIGHBOR_DISCOVERY_TIMEOUT_SEC)) {
			neighbor_discdata_free(curr);
			continue;
		}

		if (curr->sessionid == sessionid && curr->dev == dev &&
				memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0)
			return curr;
	}

	neighs = atomic_read(&num_neighs);
	if (neighs + num_nb_dd < neighs || neighs + num_nb_dd >= MAX_NEIGHBORS)
		return 0;
	num_nb_dd++;

	nb_dd = kmem_cache_alloc(nb_dd_slab, GFP_KERNEL);
	if (unlikely(nb_dd == 0))
		return 0;

	memset(nb_dd, 0, sizeof(struct neighbor_discdata));

	nb_dd->sessionid = sessionid;

	dev_hold(dev);
	nb_dd->dev = dev;

	memcpy(nb_dd->mac, source_hw, MAX_ADDR_LEN);

	list_add_tail(&(nb_dd->lh), &nb_dd_list);
	nb_dd->jiffies_created = jiffies_tmp;

	if (is_clientmode())
		announce_send_start(dev, source_hw, ANNOUNCE_TYPE_UNICAST);

	return nb_dd;
}
static void parse_announce(struct net_device *dev, char *source_hw,
		char *msg, __u32 len)
{
	__be32 sessionid;
	struct neighbor_discdata *nb_dd;

	if (unlikely(len < 4))
		return;

	sessionid = parse_be32(msg);
	msg += 4;
	len -= 4;

	nb_dd = findoralloc_neighbor_discdata(dev, source_hw, sessionid);
	if (unlikely(nb_dd == 0))
		return;

	if (parse_announce_cmds(nb_dd, msg, len) != 0)
		goto free_dd;

	if (nb_dd->rcvd_version != 0 && nb_dd->rcvd_addr != 0) {
		add_neighbor(nb_dd);
free_dd:
		neighbor_discdata_free(nb_dd);
	}
}
static void _rcv_announce(struct work_struct *work)
{
	struct skb_procstate *ps = container_of(work,
			struct skb_procstate, funcstate.announce1.work);
	struct sk_buff *skb = skb_from_pstate(ps);

	char source_hw[MAX_ADDR_LEN];

	struct neighbor *nb;

	__u16 len;
	char *msg;

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	nb = _get_neigh_by_mac(skb->dev, source_hw);
	if (nb != 0) {
		kref_put(&(nb->ref), neighbor_free);
		goto discard;
	}

	if (unlikely(skb->len > 65535 || skb->len < 0))
		goto discard;
	len = (__u16) skb->len;

	msg = cor_pull_skb(skb, len);
	if (msg == 0)
		goto discard;

	mutex_lock(&(announce_rcv_lock));
	parse_announce(skb->dev, source_hw, msg, len);
	mutex_unlock(&(announce_rcv_lock));

discard:
	kfree_skb(skb);

	atomic_dec(&packets_in_workqueue);
}
int rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	int queuelen;

	queuelen = atomic_inc_return(&packets_in_workqueue);

	BUG_ON(queuelen <= 0);

	if (queuelen > MAX_PACKETS_IN_ANNOUNCE_RCVQUEUE) {
		atomic_dec(&packets_in_workqueue);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	INIT_WORK(&(ps->funcstate.announce1.work), _rcv_announce);
	schedule_work(&(ps->funcstate.announce1.work));
	return NET_RX_SUCCESS;
}
static int ___send_announce(struct sk_buff *skb)
{
	int rc;
	struct qos_queue *q = get_queue(skb->dev);

	rc = cor_dev_queue_xmit(skb, q, QOS_CALLER_ANNOUNCE);
	kref_put(&(q->ref), free_qos);

	return rc;
}
static int __send_announce(struct announce_data *ann)
{
	int offset = 0;
	__u32 len;
	__u32 local_addrlen_tmp;
	char *msg;

	struct sk_buff *skb;
	__u32 headroom;

	headroom = LL_RESERVED_SPACE(ann->dev) +
			ann->dev->needed_tailroom;

	spin_lock_bh(&(local_addr_lock));

	BUG_ON(local_addrlen > 64);

	local_addrlen_tmp = local_addrlen;

	spin_unlock_bh(&(local_addr_lock));

	len = 1 + 4 + 8 + 6 + local_addrlen_tmp;

	skb = alloc_skb(headroom + len, GFP_ATOMIC);
	if (unlikely(skb == 0))
		return 1;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

#warning net_device locking? (other places too)
	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->mac, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	msg = skb_put(skb, len);
	if (unlikely(msg == 0))
		goto out_err;

	spin_lock_bh(&(local_addr_lock));

	if (unlikely(local_addrlen != local_addrlen_tmp)) {
		spin_unlock_bh(&(local_addr_lock));
		goto out_err;
	}

	msg[0] = PACKET_TYPE_ANNOUNCE;
	offset++;

	put_be32(msg + offset, local_addr_sessionid); /* sessionid */
	offset += 4;

	put_u16(msg + offset, NEIGHCMD_VERSION); /* command */
	offset += 2;
	put_u16(msg + offset, 4); /* command length */
	offset += 2;
	put_u16(msg + offset, 0); /* version */
	offset += 2;
	put_u16(msg + offset, 0); /* minversion */
	offset += 2;

	put_u16(msg + offset, NEIGHCMD_ADDR); /* command */
	offset += 2;
	put_u16(msg + offset, 2 + local_addrlen); /* command length */
	offset += 2;
	put_u16(msg + offset, local_addrlen); /* addrlen */
	offset += 2;
	if (local_addrlen != 0) {
		memcpy(msg + offset, local_addr, local_addrlen); /* addr */
		offset += local_addrlen;
	}

	spin_unlock_bh(&(local_addr_lock));

	BUG_ON(offset != len);

	return ___send_announce(skb);

out_err:
	kfree_skb(skb);
	return 1;
}
void announce_data_free(struct kref *ref)
{
	struct announce_data *ann = container_of(ref, struct announce_data,
			ref);

	if (ann->dev != 0)
		dev_put(ann->dev);

	kfree(ann);
}
*ann
, int fromqos
, int *sent
)
1524 spin_lock_bh(&(announce_snd_lock
));
1526 if (unlikely(ann
->dev
== 0))
1531 #warning todo reactivate qos_fastsend_allowed_announce + set rc
1532 /* if (fromqos == 0 && qos_fastsend_allowed_announce(ann->dev) == 0)
1535 /*rc = */__send_announce(ann
);
1537 if (rc
== 0 && ann
->type
!= ANNOUNCE_TYPE_BROADCAST
) {
1539 reschedule
= (ann
->sndcnt
< ANNOUNCE_SEND_UNICAST_MAXCNT
?
1542 if (reschedule
== 0) {
1546 list_del(&(ann
->lh
));
1547 kref_put(&(ann
->ref
), kreffree_bug
);
1552 spin_unlock_bh(&(announce_snd_lock
));
1554 if (unlikely(reschedule
== 0)) {
1555 } else if (rc
!= 0) {
1557 struct qos_queue
*q
= get_queue(ann
->dev
);
1559 qos_enqueue(q
, &(ann
->rb
), QOS_CALLER_ANNOUNCE
);
1560 kref_put(&(q
->ref
), free_qos
);
1564 kref_get(&(ann
->ref
));
1565 schedule_delayed_work(&(ann
->announce_work
), msecs_to_jiffies(
1566 ANNOUNCE_SEND_PACKETINTELVAL_MS
));
1570 kref_put(&(ann
->ref
), announce_data_free
);
1573 return QOS_RESUME_CONG
;
1577 return QOS_RESUME_DONE
;
static void send_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int sent = 0;

	_send_announce(ann, 0, &sent);
}
static void announce_send_start(struct net_device *dev, char *mac, int type)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (unlikely(ann == 0)) {
		printk(KERN_ERR "cor cannot allocate memory for sending "
				"announces");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	kref_init(&(ann->ref));

	dev_hold(dev);
	ann->dev = dev;
	ann->type = type;
	memcpy(ann->mac, mac, MAX_ADDR_LEN);

	spin_lock_bh(&(announce_snd_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	spin_unlock_bh(&(announce_snd_lock));

	INIT_DELAYED_WORK(&(ann->announce_work), send_announce);
	kref_get(&(ann->ref));
	schedule_delayed_work(&(ann->announce_work), 1);
}
void announce_send_stop(struct net_device *dev, char *mac, int type)
{
	struct list_head *lh;

	spin_lock_bh(&(announce_snd_lock));

	lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *ann = container_of(lh,
				struct announce_data, lh);

		lh = lh->next;

		if (dev != 0 && (ann->dev != dev || (
				type != ANNOUNCE_TYPE_BROADCAST && (
				ann->type != type ||
				memcmp(ann->mac, mac, MAX_ADDR_LEN) != 0))))
			continue;

		list_del(&(ann->lh));
		kref_put(&(ann->ref), kreffree_bug);
	}

	spin_unlock_bh(&(announce_snd_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int rc;

	if (dev->flags & IFF_LOOPBACK)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		rc = create_queue(dev);
		if (rc != 0)
			break;

		if (is_clientmode() == 0)
			announce_send_start(dev, dev->broadcast,
					ANNOUNCE_TYPE_BROADCAST);
		break;
	case NETDEV_DOWN:
		printk(KERN_ERR "down 1");
		/* ... */
		printk(KERN_ERR "down 2");
		announce_send_stop(dev, 0, ANNOUNCE_TYPE_BROADCAST);
		printk(KERN_ERR "down 3");
		reset_neighbors(dev);
		printk(KERN_ERR "down 4");
		/* ... */
		printk(KERN_ERR "down 5");
		break;
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
void _cor_neighbor_down(void)
{
	spin_lock_bh(&(local_addr_lock));
	if (local_addr != 0) {
		kfree(local_addr);
		local_addr = 0;
	}
	local_addrlen = 0;
	spin_unlock_bh(&(local_addr_lock));

	reset_neighbors(0);

	announce_send_stop(0, 0, ANNOUNCE_TYPE_BROADCAST);

	if (netdev_notify_registered != 0 &&
			unregister_netdevice_notifier(&netdev_notify) != 0) {
		printk(KERN_WARNING "warning: cor_neighbor_down: "
				"unregister_netdevice_notifier failed");
	}

	netdev_notify_registered = 0;
}
void cor_neighbor_down(void)
{
	mutex_lock(&(neigh_up_lock));
	_cor_neighbor_down();
	mutex_unlock(&(neigh_up_lock));
}
int cor_neighbor_up(char *addr2, __u32 addrlen2)
{
	int rc = 0;

	char *addr2_copy = kmalloc(addrlen2, GFP_KERNEL);
	if (unlikely(addr2_copy == 0))
		return 1;

	memcpy(addr2_copy, addr2, addrlen2);

	mutex_lock(&(neigh_up_lock));

	_cor_neighbor_down();

	spin_lock_bh(&(local_addr_lock));

	BUG_ON(local_addr != 0);
	BUG_ON(local_addrlen != 0);

	local_addr = addr2_copy;
	addr2_copy = 0;
	local_addrlen = addrlen2;
	get_random_bytes((char *) &local_addr_sessionid,
			sizeof(local_addr_sessionid));

	spin_unlock_bh(&(local_addr_lock));

	BUG_ON(netdev_notify_registered != 0);

	if (register_netdevice_notifier(&netdev_notify) != 0)
		goto out_err;

	netdev_notify_registered = 1;

	goto out;

out_err:
	rc = 1;

	spin_lock_bh(&(local_addr_lock));
	kfree(local_addr);
	local_addr = 0;
	local_addrlen = 0;
	spin_unlock_bh(&(local_addr_lock));

out:
	mutex_unlock(&(neigh_up_lock));

	return rc;
}
int is_clientmode(void)
{
	int rc;
	spin_lock_bh(&(local_addr_lock));
	rc = (local_addrlen == 0 ? 1 : 0);
	spin_unlock_bh(&(local_addr_lock));
	return rc;
}
cor_neighbor_init(void)
1796 nb_slab
= kmem_cache_create("cor_neighbor", sizeof(struct neighbor
), 8,
1798 if (unlikely(nb_slab
== 0))
1801 nb_dd_slab
= kmem_cache_create("cor_neighbor_discoverydata",
1802 sizeof(struct neighbor_discdata
), 8, 0, 0);
1803 if (unlikely(nb_dd_slab
== 0))
1806 atomic_set(&num_neighs
, 0);
1808 memset(&netdev_notify
, 0, sizeof(netdev_notify
));
1809 netdev_notify
.notifier_call
= netdev_notify_func
;
MODULE_LICENSE("GPL");