/*
 * Connection oriented routing
 * Copyright (C) 2007-2020 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/delay.h>
/* Broadcast data format:
 * version [2]
 * is 0, may be increased if the protocol changes
 * min_version [2]
 * is 0, must be increased if a future version of the protocol is incompatible
 * with the current version
 * [data]
 */

/* Data format of the announce packet "data" field:
 * {command [2] commandlength [2] commanddata [commandlength]}[...]
 */

/* NEIGHCMD_VERSION: version[2] minversion[2] */
#define NEIGHCMD_VERSION 1

/* NEIGHCMD_ADDR: addrlen[2] addr[addrlen] */
#define NEIGHCMD_ADDR 2
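
/*
 * For illustration only: a possible on-wire announce packet as assembled by
 * __send_announce() below. The sessionid bytes and the 4-byte address
 * 0a 0b 0c 0d are made-up example values, and the PACKET_TYPE_ANNOUNCE byte
 * is shown symbolically because its value is defined elsewhere:
 *
 *   <PACKET_TYPE_ANNOUNCE>  packet type (1 byte)
 *   12 34 56 78             sessionid (be32)
 *   00 01 00 04             NEIGHCMD_VERSION, commandlength 4
 *   00 00 00 00             version 0, minversion 0
 *   00 02 00 06             NEIGHCMD_ADDR, commandlength 2 + addrlen
 *   00 04 0a 0b 0c 0d       addrlen 4, addr
 */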
struct neighbor_discdata {
	struct list_head lh;
	unsigned long jiffies_created;

	__be32 sessionid;

	struct net_device *dev;
	char mac[MAX_ADDR_LEN];

	__u8 nb_allocated;

	__u8 rcvd_version;
	__u8 rcvd_addr;

	__u16 version;
	__u16 minversion;

	char *addr;
	__u16 addrlen;
};
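
/*
 * Discovery flow implemented below: rcv_announce() queues the packet to a
 * workqueue, _rcv_announce() extracts the payload, parse_announce() looks up
 * or allocates per-sender state via findoralloc_neighbor_discdata(), and once
 * both NEIGHCMD_VERSION and NEIGHCMD_ADDR have been received, add_neighbor()
 * turns the neighbor_discdata into a cor_neighbor on nb_list.
 */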
static atomic_t packets_in_workqueue = ATOMIC_INIT(0);

static DEFINE_MUTEX(announce_rcv_lock);
static DEFINE_SPINLOCK(announce_snd_lock);
static DEFINE_SPINLOCK(neighbor_list_lock);

static DEFINE_MUTEX(neigh_up_lock);

static DEFINE_SPINLOCK(local_addr_lock);
static char *local_addr;
static __u32 local_addrlen;
static __be32 local_addr_sessionid;

static LIST_HEAD(nb_dd_list); /* protected by announce_rcv_lock */
static __u32 num_nb_dd = 0;
static struct kmem_cache *nb_dd_slab;

static LIST_HEAD(nb_list);
static struct kmem_cache *nb_slab;
static atomic_t num_neighs;

static LIST_HEAD(announce_out_list);

static struct notifier_block netdev_notify;
__u8 netdev_notify_registered = 0;
void neighbor_free(struct kref *ref)
{
	struct cor_neighbor *nb = container_of(ref, struct cor_neighbor, ref);

	WARN_ONCE(list_empty(&(nb->cmsg_queue_pong)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_pong is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ack)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_ack is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ackconn)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_ackconn is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_conndata_lowlat)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_conndata_lowlat is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_conndata_highlat)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_conndata_highlat is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_other)) == 0,
			"cor neighbor_free(): nb->cmsg_queue_other is not empty");
	WARN_ONCE(nb->pending_conn_resets_rb.rb_node != 0,
			"cor neighbor_free(): nb->pending_conn_resets_rb is not empty");
	WARN_ONCE(nb->rb_kp.in_queue != RB_INQUEUE_FALSE,
			"cor neighbor_free(): nb->rb_kp.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb_cr.in_queue != RB_INQUEUE_FALSE,
			"cor neighbor_free(): nb->rb_cr.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb.in_queue != RB_INQUEUE_FALSE,
			"cor neighbor_free(): nb->rb.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(list_empty(&(nb->conns_waiting.lh)) == 0,
			"cor neighbor_free(): nb->conns_waiting.lh is not empty");
	WARN_ONCE(list_empty(&(nb->conns_waiting.lh_nextpass)) == 0,
			"cor neighbor_free(): nb->conns_waiting.lh_nextpass is not empty");
	WARN_ONCE(nb->str_timer_pending != 0,
			"cor neighbor_free(): nb->str_timer_pending is not 0");
	WARN_ONCE(nb->connid_rb.rb_node != 0,
			"cor neighbor_free(): nb->connid_rb is not empty");
	WARN_ONCE(nb->connid_reuse_rb.rb_node != 0,
			"cor neighbor_free(): nb->connid_reuse_rb is not empty");
	WARN_ONCE(list_empty(&(nb->connid_reuse_list)) == 0,
			"cor neighbor_free(): nb->connid_reuse_list is not empty");
	WARN_ONCE(nb->kp_retransmits_rb.rb_node != 0,
			"cor neighbor_free(): nb->kp_retransmits_rb is not empty");
	WARN_ONCE(list_empty(&(nb->rcv_conn_list)) == 0,
			"cor neighbor_free(): nb->rcv_conn_list is not empty");
	WARN_ONCE(nb->stalledconn_work_scheduled != 0,
			"cor neighbor_free(): nb->stalledconn_work_scheduled is not 0");
	WARN_ONCE(list_empty(&(nb->stalledconn_list)) == 0,
			"cor neighbor_free(): nb->stalledconn_list is not empty");
	WARN_ONCE(list_empty(&(nb->retrans_list)) == 0,
			"cor neighbor_free(): nb->retrans_list is not empty");
	WARN_ONCE(list_empty(&(nb->retrans_conn_list)) == 0,
			"cor neighbor_free(): nb->retrans_conn_list is not empty");

	/* printk(KERN_ERR "neighbor free"); */
	BUG_ON(nb->nb_list.next != LIST_POISON1);
	BUG_ON(nb->nb_list.prev != LIST_POISON2);

	if (nb->addr != 0)
		kfree(nb->addr);
	if (nb->dev != 0)
		dev_put(nb->dev);
	if (nb->queue != 0)
		kref_put(&(nb->queue->ref), free_qos);

	kmem_cache_free(nb_slab, nb);
	atomic_dec(&num_neighs);
}
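
/*
 * Reference counting sketch (as far as visible in this file): nb_list, the
 * stall timer and reset_neigh_work each hold their own kref on the neighbor;
 * the matching kref_put() calls are annotated below (e.g. the one marked
 * nb_list in reset_neighbor()). neighbor_free() only runs once the last
 * reference has been dropped.
 */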
static void stall_timer(struct work_struct *work);

static void _reset_neighbor(struct work_struct *work);
static struct cor_neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct cor_neighbor *nb;
	__u64 seqno;

	if (atomic_inc_return(&num_neighs) >= MAX_NEIGHBORS) {
		atomic_dec(&num_neighs);
		return 0;
	}

	nb = kmem_cache_alloc(nb_slab, allocflags);
	if (unlikely(nb == 0))
		return 0;

	memset(nb, 0, sizeof(struct cor_neighbor));

	kref_init(&(nb->ref));
	atomic_set(&(nb->sessionid_rcv_needed), 1);
	atomic_set(&(nb->sessionid_snd_needed), 1);
	timer_setup(&(nb->cmsg_timer), controlmsg_timerfunc, 0);
	spin_lock_init(&(nb->cmsg_lock));
	INIT_LIST_HEAD(&(nb->cmsg_queue_pong));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ack));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ackconn));
	INIT_LIST_HEAD(&(nb->cmsg_queue_conndata_lowlat));
	INIT_LIST_HEAD(&(nb->cmsg_queue_conndata_highlat));
	INIT_LIST_HEAD(&(nb->cmsg_queue_other));
	atomic_set(&(nb->cmsg_pongs_retrans_cnt), 0);
	atomic_set(&(nb->cmsg_othercnt), 0);
	atomic_set(&(nb->cmsg_bulk_readds), 0);
	atomic_set(&(nb->cmsg_delay_conndata), 0);
	nb->last_ping_time = jiffies;
	atomic_set(&(nb->latency_retrans_us), PING_GUESSLATENCY_MS * 1000);
	atomic_set(&(nb->latency_advertised_us), PING_GUESSLATENCY_MS * 1000);
	atomic_set(&(nb->max_remote_ack_delay_us), 1000000);
	atomic_set(&(nb->max_remote_ackconn_delay_us), 1000000);
	atomic_set(&(nb->max_remote_other_delay_us), 1000000);
	spin_lock_init(&(nb->conns_waiting.lock));
	INIT_LIST_HEAD(&(nb->conns_waiting.lh));
	INIT_LIST_HEAD(&(nb->conns_waiting.lh_nextpass));
	spin_lock_init(&(nb->nbcongwin.lock));
	atomic64_set(&(nb->nbcongwin.data_intransit), 0);
	atomic64_set(&(nb->nbcongwin.cwin), 0);
	spin_lock_init(&(nb->state_lock));
	nb->state = NEIGHBOR_STATE_INITIAL;
	nb->state_time.initial_state_since = jiffies;
	INIT_DELAYED_WORK(&(nb->stalltimeout_timer), stall_timer);
	spin_lock_init(&(nb->connid_lock));
	spin_lock_init(&(nb->connid_reuse_lock));
	INIT_LIST_HEAD(&(nb->connid_reuse_list));
	get_random_bytes((char *) &seqno, sizeof(seqno));
	nb->kpacket_seqno = seqno;
	atomic64_set(&(nb->priority_sum), 0);
	spin_lock_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->stalledconn_list));
	spin_lock_init(&(nb->stalledconn_lock));
	INIT_WORK(&(nb->stalledconn_work), resume_nbstalled_conns);
	spin_lock_init(&(nb->retrans_lock));
	INIT_LIST_HEAD(&(nb->retrans_list));
	spin_lock_init(&(nb->retrans_conn_lock));
	INIT_LIST_HEAD(&(nb->retrans_conn_list));
	INIT_WORK(&(nb->reset_neigh_work), _reset_neighbor);

	return nb;
}
int is_from_nb(struct sk_buff *skb, struct cor_neighbor *nb)
{
	int rc;
	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	rc = (skb->dev == nb->dev && memcmp(nb->mac, source_hw,
			MAX_ADDR_LEN) == 0);
	return rc;
}
static struct cor_neighbor *_get_neigh_by_mac(struct net_device *dev,
		char *source_hw)
{
	struct list_head *currlh;
	struct cor_neighbor *ret = 0;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		if (curr->dev == dev && memcmp(curr->mac, source_hw,
				MAX_ADDR_LEN) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	return ret;
}
struct cor_neighbor *get_neigh_by_mac(struct sk_buff *skb)
{
	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	return _get_neigh_by_mac(skb->dev, source_hw);
}
struct cor_neighbor *find_neigh(char *addr, __u16 addrlen)
{
	struct list_head *currlh;
	struct cor_neighbor *ret = 0;

	if (addr == 0 || addrlen == 0)
		return 0;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		if (curr->addr != 0 && curr->addrlen != 0 &&
				curr->addrlen == addrlen &&
				memcmp(curr->addr, addr, addrlen) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	return ret;
}
__u32 generate_neigh_list(char *buf, __u32 buflen)
{
	struct list_head *currlh;

	int rc;
	__u32 cnt = 0;

	__u32 buf_offset = 4;

	/*
	 * The variable length header (rowcount) needs to be generated after
	 * the data. This is done by reserving the maximum space it could
	 * take. If it ends up being smaller, the data is moved so that there
	 * is no gap.
	 */

	BUG_ON(buflen < buf_offset);

	/* num_fields */
	rc = encode_len(buf + buf_offset, buflen - buf_offset, 2);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	/* addr field */
	BUG_ON(buflen < buf_offset + 2);
	put_u16(buf + buf_offset, LIST_NEIGH_FIELD_ADDR);
	buf_offset += 2;

	rc = encode_len(buf + buf_offset, buflen - buf_offset, 0);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	/* latency field */
	BUG_ON(buflen < buf_offset + 2);
	put_u16(buf + buf_offset, LIST_NEIGH_FIELD_LATENCY);
	buf_offset += 2;

	rc = encode_len(buf + buf_offset, buflen - buf_offset, 1);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	spin_lock_bh(&(neighbor_list_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);
		int state;

		state = get_neigh_state(curr);

		if (state != NEIGHBOR_STATE_ACTIVE)
			goto cont;

		BUG_ON((curr->addr == 0) != (curr->addrlen == 0));
		if (curr->addr == 0 || curr->addrlen == 0)
			goto cont;

		if (unlikely(buflen < buf_offset + 4 + curr->addrlen + 1))
			break;

		rc = encode_len(buf + buf_offset, buflen - buf_offset,
				curr->addrlen); /* addrlen */
		BUG_ON(rc <= 0);
		buf_offset += rc;

		BUG_ON(curr->addrlen > buflen - buf_offset);
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		buf[buf_offset] = enc_log_64_11(atomic_read(
				&(curr->latency_advertised_us)));
		buf_offset += 1;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&(neighbor_list_lock));

	rc = encode_len(buf, 4, cnt); /* rowcount */
	BUG_ON(rc <= 0);

	memmove(buf + ((__u32) rc), buf + 4, buf_offset - 4);

	return buf_offset - 4 + ((__u32) rc);
}
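
/*
 * Worked example of the reserve-and-move scheme above, assuming encode_len()
 * emits a variable length encoding and returns the number of bytes it wrote:
 * 4 bytes are reserved for the rowcount and the rows are written starting at
 * buf + 4. If encode_len(buf, 4, cnt) then needs only rc == 1 byte, the
 * memmove() shifts the data block 3 bytes down so that header and data are
 * contiguous, and buf_offset - 4 + rc bytes are in use in total.
 */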
static void reset_all_conns(struct cor_neighbor *nb)
{
	while (1) {
		unsigned long iflags;
		struct cor_conn *src_in;
		int rc = 0;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);

		if (list_empty(&(nb->rcv_conn_list))) {
			spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
			break;
		}

		src_in = container_of(nb->rcv_conn_list.next, struct cor_conn,
				source.in.nb_list);
		kref_get(&(src_in->ref));

		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		if (src_in->is_client) {
			spin_lock_bh(&(src_in->rcv_lock));
			spin_lock_bh(&(src_in->reversedir->rcv_lock));
		} else {
			spin_lock_bh(&(src_in->reversedir->rcv_lock));
			spin_lock_bh(&(src_in->rcv_lock));
		}

		if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
				unlikely(src_in->source.in.nb != nb)))
			goto unlock;

		rc = send_reset_conn(nb, src_in->reversedir->target.out.conn_id,
				1);

		if (unlikely(rc != 0))
			goto unlock;

		if (src_in->reversedir->isreset == 0)
			src_in->reversedir->isreset = 1;

unlock:
		if (src_in->is_client) {
			spin_unlock_bh(&(src_in->rcv_lock));
			spin_unlock_bh(&(src_in->reversedir->rcv_lock));
		} else {
			spin_unlock_bh(&(src_in->reversedir->rcv_lock));
			spin_unlock_bh(&(src_in->rcv_lock));
		}

		if (likely(rc == 0)) {
			kref_put(&(src_in->ref), free_conn);
			continue;
		}

		/* sending failed, retry later via the stall timer */
		kref_put(&(src_in->ref), free_conn);
		kref_get(&(nb->ref));
		schedule_delayed_work(&(nb->stalltimeout_timer), HZ);
		break;
	}
}
static void _reset_neighbor(struct work_struct *work)
{
	struct cor_neighbor *nb = container_of(work, struct cor_neighbor,
			reset_neigh_work);

	reset_all_conns(nb);
	delete_connid_reuse_items(nb);

	kref_put(&(nb->ref), neighbor_free);
}
static void reset_neighbor(struct cor_neighbor *nb, int use_workqueue)
{
	int removenblist;
	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	removenblist = (nb->state != NEIGHBOR_STATE_KILLED);
	nb->state = NEIGHBOR_STATE_KILLED;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	/* if (removenblist) {
		printk(KERN_ERR "reset_neighbor");
	} */

	if (use_workqueue) {
		kref_get(&(nb->ref)); /* reference for reset_neigh_work */
		schedule_work(&(nb->reset_neigh_work));
	} else {
		reset_all_conns(nb);
		delete_connid_reuse_items(nb);
	}

	if (removenblist) {
		spin_lock_bh(&neighbor_list_lock);
		list_del(&(nb->nb_list));
		spin_unlock_bh(&neighbor_list_lock);

		kref_put(&(nb->ref), neighbor_free); /* nb_list */
	}
}
static void reset_neighbors(struct net_device *dev)
{
	struct list_head *currlh;

restart:
	spin_lock_bh(&neighbor_list_lock);

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		unsigned long iflags;
		int state;
		struct cor_neighbor *currnb = container_of(currlh,
				struct cor_neighbor, nb_list);

		if (dev != 0 && currnb->dev != dev)
			goto cont;

		spin_lock_irqsave(&(currnb->state_lock), iflags);
		state = currnb->state;
		spin_unlock_irqrestore(&(currnb->state_lock), iflags);

		if (state != NEIGHBOR_STATE_KILLED) {
			spin_unlock_bh(&neighbor_list_lock);
			reset_neighbor(currnb, 0);
			goto restart;
		}

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&neighbor_list_lock);
}
static void stall_timer(struct work_struct *work)
{
	struct cor_neighbor *nb = container_of(to_delayed_work(work),
			struct cor_neighbor, stalltimeout_timer);

	int stall_time_ms;
	__u8 nbstate;

	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (nbstate == NEIGHBOR_STATE_STALLED) {
		stall_time_ms = jiffies_to_msecs(jiffies -
				nb->state_time.last_roundtrip);

		if (stall_time_ms < NB_KILL_TIME_MS) {
			schedule_delayed_work(&(nb->stalltimeout_timer),
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
			return;
		}

		reset_neighbor(nb, 1);
	}

	nb->str_timer_pending = 0;
	kref_put(&(nb->ref), neighbor_free); /* stall_timer */
}
int get_neigh_state(struct cor_neighbor *nb)
{
	int ret;
	unsigned long iflags;
	int stall_time_ms;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	stall_time_ms = jiffies_to_msecs(jiffies -
			nb->state_time.last_roundtrip);

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(stall_time_ms > NB_STALL_TIME_MS) && (
			nb->ping_intransit >= NB_STALL_MINPINGS ||
			nb->ping_intransit >= PING_COOKIES_PER_NEIGH)) {
		nb->state = NEIGHBOR_STATE_STALLED;
		nb->ping_success = 0;
		if (nb->str_timer_pending == 0) {
			nb->str_timer_pending = 1;
			kref_get(&(nb->ref)); /* stall_timer */

			schedule_delayed_work(&(nb->stalltimeout_timer),
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
		}

		/* printk(KERN_ERR "changed to stalled"); */
		BUG_ON(nb->ping_intransit > PING_COOKIES_PER_NEIGH);
	} else if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL) &&
			time_after(jiffies, nb->state_time.initial_state_since +
			INITIAL_TIME_LIMIT_SEC * HZ)) {
		spin_unlock_irqrestore(&(nb->state_lock), iflags);
		reset_neighbor(nb, 1);
		return NEIGHBOR_STATE_KILLED;
	}

	ret = nb->state;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return ret;
}
static struct cor_ping_cookie *find_cookie(struct cor_neighbor *nb,
		__u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &(nb->cookies[i]);
	}

	return 0;
}
static void reset_cookie(struct cor_neighbor *nb, struct cor_ping_cookie *c)
{
	if (c->cookie == 0)
		return;

	if (nb->cookie_unsent != c->cookie)
		nb->ping_intransit--;

	c->cookie = 0;
}
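
/*
 * Integer square root via Newton's (Babylonian) iteration: each step below
 * computes y = (y + x/y) / 2, which converges towards sqrt(x); e.g. for
 * x = 1000000 the iteration settles at y = 1000. The result is used for the
 * latency standard deviation, so it only needs to be approximate.
 */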
static __u32 sqrt(__u64 x)
{
	int i;
	__u64 y;

	if (unlikely(x <= 1))
		return (__u32) x;

	y = x;
	for (i = 0; i < 64; i++) {
		y = y/2 + div64_u64(x/2, y);
		if (unlikely(y == 0))
			y = 1;
	}

	if (unlikely(y > U32_MAX))
		y = U32_MAX;

	return (__u32) y;
}
static __u32 calc_newlatency(struct cor_neighbor *nb_statelocked,
		__u32 oldlatency_us, __s64 newlatency_ns)
{
	__s64 oldlatency = oldlatency_us * 1000LL;
	__s64 newlatency;

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16))
		newlatency = div64_s64(
				oldlatency * nb_statelocked->ping_success +
				newlatency_ns,
				nb_statelocked->ping_success + 1);
	else
		newlatency = (oldlatency * 15 + newlatency_ns) / 16;

	newlatency = div_s64(newlatency + 500, 1000);

	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > U32_MAX))
		newlatency = U32_MAX;

	return (__u32) newlatency;
}
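
/*
 * The else branch above is a 15/16 exponentially weighted moving average,
 * rounded to microseconds. Example with made-up numbers: oldlatency_us = 2000
 * and a new sample of 4000000 ns give (2000000 * 15 + 4000000) / 16 =
 * 2125000 ns, i.e. 2125 us. In NEIGHBOR_STATE_INITIAL the first branch
 * instead takes a plain running average over the first pings:
 * (old * n + new) / (n + 1) with n = ping_success.
 */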
static void update_nb_latency(struct cor_neighbor *nb_statelocked,
		struct cor_ping_cookie *c, __u32 respdelay)
{
	ktime_t now = ktime_get();

	__s64 pinglatency_retrans_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_sent) - respdelay * 1000LL;
	__s64 pinglatency_advertised_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_created) - respdelay * 1000LL;

	__u32 oldlatency_retrans_us =
			atomic_read(&(nb_statelocked->latency_retrans_us));

	__u32 newlatency_retrans_us = calc_newlatency(nb_statelocked,
			oldlatency_retrans_us, pinglatency_retrans_ns);

	atomic_set(&(nb_statelocked->latency_retrans_us),
			newlatency_retrans_us);

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16)) {
		nb_statelocked->latency_variance_retrans_us =
				((__u64) newlatency_retrans_us) *
				newlatency_retrans_us;
		atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
				nb_statelocked->latency_variance_retrans_us));
	} else if (pinglatency_retrans_ns > oldlatency_retrans_us *
			((__s64) 1000)) {
		__s64 newdiff = div_s64(pinglatency_retrans_ns -
				oldlatency_retrans_us * ((__s64) 1000), 1000);
		__u32 newdiff32 = (__u32) (unlikely(newdiff >= U32_MAX) ?
				U32_MAX : newdiff);
		__u64 newvar = ((__u64) newdiff32) * newdiff32;

		__u64 oldval = nb_statelocked->latency_variance_retrans_us;

		if (unlikely(unlikely(newvar > (1LL << 55)) || unlikely(
				oldval > (1LL << 55)))) {
			/* divide before adding to avoid overflow */
			nb_statelocked->latency_variance_retrans_us =
					(oldval / 16) * 15 + newvar/16;
		} else {
			nb_statelocked->latency_variance_retrans_us =
					(oldval * 15 + newvar) / 16;
		}

		atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
				nb_statelocked->latency_variance_retrans_us));
	}

	atomic_set(&(nb_statelocked->latency_advertised_us),
			calc_newlatency(nb_statelocked,
			atomic_read(&(nb_statelocked->latency_advertised_us)),
			pinglatency_advertised_ns));

	nb_statelocked->last_roundtrip_end = now;
}
void ping_resp(struct cor_neighbor *nb, __u32 cookie, __u32 respdelay)
{
	unsigned long iflags;

	struct cor_ping_cookie *c;
	int i;

	int stalledresume = 0;

	int call_connidreuse = 0;

	if (unlikely(cookie == 0))
		return;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	c = find_cookie(nb, cookie);

	if (unlikely(c == 0))
		goto out;

	atomic_set(&(nb->sessionid_snd_needed), 0);

	call_connidreuse = ktime_before_eq(nb->last_roundtrip_end,
			c->time_created);

	update_nb_latency(nb, c, respdelay);

	nb->ping_success++;

	c->cookie = 0;
	nb->ping_intransit--;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 && ktime_before(
				nb->cookies[i].time_created, c->time_created)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
				reset_cookie(nb, &(nb->cookies[i]));
			}
		}
	}

	if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
			nb->state == NEIGHBOR_STATE_STALLED)) {
		call_connidreuse = 0;

		if ((nb->state == NEIGHBOR_STATE_INITIAL &&
				nb->ping_success >= PING_SUCCESS_CNT_INIT) || (
				nb->state == NEIGHBOR_STATE_STALLED &&
				nb->ping_success >= PING_SUCCESS_CNT_STALLED)) {
			stalledresume = (nb->state == NEIGHBOR_STATE_STALLED);
			nb->state = NEIGHBOR_STATE_ACTIVE;
			/* printk(KERN_ERR "changed to active"); */
		}
	}

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) ||
			nb->state == NEIGHBOR_STATE_STALLED)
		nb->state_time.last_roundtrip = c->jiffies_sent;

out:
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (call_connidreuse)
		connid_used_pingsuccess(nb);

	if (unlikely(stalledresume)) {
		spin_lock_bh(&(nb->retrans_conn_lock));
		reschedule_conn_retrans_timer(nb);
		spin_unlock_bh(&(nb->retrans_conn_lock));

		spin_lock_bh(&(nb->stalledconn_lock));
		if (nb->stalledconn_work_scheduled == 0) {
			kref_get(&(nb->ref));
			schedule_work(&(nb->stalledconn_work));
			nb->stalledconn_work_scheduled = 1;
		}
		spin_unlock_bh(&(nb->stalledconn_lock));
	}
}
__u32 add_ping_req(struct cor_neighbor *nb, unsigned long *last_ping_time)
{
	unsigned long iflags;
	struct cor_ping_cookie *c;

	__u32 i;
	__u32 cookie;

	ktime_t now = ktime_get();

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (nb->cookie_unsent != 0) {
		c = find_cookie(nb, nb->cookie_unsent);
		if (c != 0)
			goto found;
		nb->cookie_unsent = 0;
	}

	c = find_cookie(nb, 0);
	if (c == 0) {
		get_random_bytes((char *) &i, sizeof(i));
		i = (i % PING_COOKIES_PER_NEIGH);
		c = &(nb->cookies[i]);
		reset_cookie(nb, c);
	}

	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;
	c->time_created = now;

found:
	c->pongs = 0;
	c->time_sent = now;
	c->jiffies_sent = jiffies;
	cookie = c->cookie;

	nb->ping_intransit++;

	*last_ping_time = nb->last_ping_time;
	nb->last_ping_time = c->jiffies_sent;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return cookie;
}
void ping_sent(struct cor_neighbor *nb, __u32 cookie)
{
	unsigned long iflags;

	BUG_ON(cookie == 0);

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (nb->cookie_unsent == cookie)
		nb->cookie_unsent = 0;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}
void unadd_ping_req(struct cor_neighbor *nb, __u32 cookie,
		unsigned long last_ping_time, int congested)
{
	unsigned long iflags;

	struct cor_ping_cookie *c;

	BUG_ON(cookie == 0);

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (congested) {
		BUG_ON(nb->cookie_unsent != 0 && nb->cookie_unsent != cookie);
		nb->cookie_unsent = cookie;
	}

	c = find_cookie(nb, cookie);
	if (likely(c != 0)) {
		if (congested == 0)
			c->cookie = 0;
		nb->ping_intransit--;
	}

	nb->last_ping_time = last_ping_time;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}
static int get_ping_forcetime_ms(struct cor_neighbor *nb)
{
	unsigned long iflags;
	int fast;
	int idle;

	if (unlikely(get_neigh_state(nb) != NEIGHBOR_STATE_ACTIVE))
		return PING_FORCETIME_MS;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	fast = ((nb->ping_success < PING_ACTIVE_FASTINITIAL_COUNT) ||
			(nb->ping_intransit > 0));
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (fast)
		return PING_FORCETIME_ACTIVE_FAST_MS;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	idle = list_empty(&(nb->rcv_conn_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

	if (idle)
		return PING_FORCETIME_ACTIVEIDLE_MS;
	else
		return PING_FORCETIME_ACTIVE_MS;
}
static __u32 get_ping_mindelay_ms(struct cor_neighbor *nb_statelocked)
{
	__u32 latency_us = ((__u32) atomic_read(
			&(nb_statelocked->latency_advertised_us)));
	__u32 max_remote_other_delay_us = ((__u32) atomic_read(
			&(nb_statelocked->max_remote_other_delay_us)));
	__u32 mindelay_ms;

	if (latency_us < PING_GUESSLATENCY_MS * 1000)
		latency_us = PING_GUESSLATENCY_MS * 1000;

	if (unlikely(nb_statelocked->state != NEIGHBOR_STATE_ACTIVE))
		mindelay_ms = latency_us/1000;
	else
		mindelay_ms = ((latency_us/2 +
				max_remote_other_delay_us/2)/500);

	if (likely(nb_statelocked->ping_intransit < PING_COOKIES_THROTTLESTART))
		return mindelay_ms;

	mindelay_ms = mindelay_ms * (1 + 9 * (nb_statelocked->ping_intransit *
			nb_statelocked->ping_intransit /
			(PING_COOKIES_PER_NEIGH * PING_COOKIES_PER_NEIGH)));

	return mindelay_ms;
}
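
/*
 * Throttling example: once ping_intransit reaches PING_COOKIES_THROTTLESTART,
 * mindelay_ms grows with the square of the number of unanswered pings; at
 * ping_intransit == PING_COOKIES_PER_NEIGH the factor is 1 + 9 * 1 = 10,
 * i.e. pings are sent at a tenth of the normal rate. Note the integer
 * division: smaller intransit counts can round the quadratic term to 0.
 */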
/*
 * Check whether we want to send a ping now:
 * 0... Do not send ping.
 * 1... Send ping now, but only if it can be merged with other messages. This
 *      can happen way before the time requested by get_next_ping_time().
 * 2... Send ping now, even if a packet has to be created just for the ping
 *      alone.
 */
int time_to_send_ping(struct cor_neighbor *nb)
{
	unsigned long iflags;
	int rc = TIMETOSENDPING_YES;

	__u32 ms_since_last_ping;
	__u32 mindelay;

	__u32 forcetime = get_ping_forcetime_ms(nb);

	spin_lock_irqsave(&(nb->state_lock), iflags);

	ms_since_last_ping = jiffies_to_msecs(jiffies - nb->last_ping_time);

	mindelay = get_ping_mindelay_ms(nb);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;
	else if (forcetime > (mindelay * 3))
		mindelay = forcetime/3;

	if (ms_since_last_ping < mindelay || ms_since_last_ping < (forcetime/4))
		rc = TIMETOSENDPING_NO;
	else if (ms_since_last_ping >= forcetime)
		rc = TIMETOSENDPING_FORCE;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return rc;
}
unsigned long get_next_ping_time(struct cor_neighbor *nb)
{
	unsigned long iflags;
	__u32 mindelay;
	__u32 forcetime = get_ping_forcetime_ms(nb);

	spin_lock_irqsave(&(nb->state_lock), iflags);
	mindelay = get_ping_mindelay_ms(nb);
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;

	return nb->last_ping_time + msecs_to_jiffies(forcetime);
}
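
/*
 * Example with made-up numbers of how the two limits interact: with
 * mindelay_ms = 250 and forcetime = 750 (exactly 3 * mindelay, so neither
 * value is adjusted), time_to_send_ping() returns TIMETOSENDPING_NO below
 * 250 ms, TIMETOSENDPING_YES (merge with other messages only) from 250 ms,
 * and TIMETOSENDPING_FORCE from 750 ms; get_next_ping_time() accordingly
 * returns last_ping_time + 750 ms converted to jiffies.
 */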
static void add_neighbor(struct neighbor_discdata *nb_dd)
{
	struct cor_neighbor *nb;
	struct list_head *currlh;

	nb = alloc_neighbor(GFP_KERNEL);
	if (unlikely(nb == 0))
		return;

	nb->queue = cor_get_queue(nb_dd->dev);
	if (nb->queue == 0) {
		kmem_cache_free(nb_slab, nb);
		atomic_dec(&num_neighs);
		return;
	}

	dev_hold(nb_dd->dev);
	nb->dev = nb_dd->dev;

	memcpy(nb->mac, nb_dd->mac, MAX_ADDR_LEN);

	nb->addr = nb_dd->addr;
	nb->addrlen = nb_dd->addrlen;

	nb_dd->nb_allocated = 1;

	spin_lock_bh(&neighbor_list_lock);

	currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (is_clientmode() && (nb->addr == 0 || nb->addrlen == 0))
		goto already_present;

	while (currlh != &nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON((curr->addr == 0) != (curr->addrlen == 0));

		if (curr->dev == nb->dev &&
				memcmp(curr->mac, nb->mac, MAX_ADDR_LEN) == 0)
			goto already_present;

		if (curr->addr != 0 && curr->addrlen != 0 &&
				nb->addr != 0 && nb->addrlen != 0 &&
				curr->addrlen == nb->addrlen &&
				memcmp(curr->addr, nb->addr, curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

	/* printk(KERN_ERR "add_neigh"); */

	spin_lock_bh(&(local_addr_lock));
	nb->sessionid = local_addr_sessionid ^ nb_dd->sessionid;
	spin_unlock_bh(&(local_addr_lock));

	timer_setup(&(nb->retrans_timer), retransmit_timerfunc, 0);

	timer_setup(&(nb->retrans_conn_timer), retransmit_conn_timerfunc, 0);

	spin_lock_bh(&(nb->cmsg_lock));
	nb->last_ping_time = jiffies;
	schedule_controlmsg_timer(nb);
	spin_unlock_bh(&(nb->cmsg_lock));

	list_add_tail(&(nb->nb_list), &nb_list);

	if (0) {
already_present:
		kref_put(&(nb->queue->ref), free_qos);
		dev_put(nb->dev);
		if (nb->addr != 0)
			kfree(nb->addr);
		kmem_cache_free(nb_slab, nb);
		atomic_dec(&num_neighs);
	}

	spin_unlock_bh(&neighbor_list_lock);
}
static int parse_announce_version(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	__u16 version;
	__u16 minversion;

	if (unlikely(len < 4))
		return 1;

	version = parse_u16(cmddata);
	cmddata += 2;
	len -= 2;

	minversion = parse_u16(cmddata);
	cmddata += 2;
	len -= 2;

	if (minversion != 0)
		return 1;

	if (nb_dd->rcvd_version != 0) {
		if (nb_dd->version != version ||
				nb_dd->minversion != minversion)
			return 1;
	} else {
		nb_dd->version = version;
		nb_dd->minversion = minversion;
		nb_dd->rcvd_version = 1;
	}

	return 0;
}
static int parse_announce_addaddr(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	__u16 addrlen;
	char *addr;

	BUG_ON(cmd != NEIGHCMD_ADDR);
	BUG_ON((nb_dd->addr == 0) != (nb_dd->addrlen == 0));
	BUG_ON(nb_dd->rcvd_addr == 0 && nb_dd->addr != 0);

	if (unlikely(len < 2))
		return 1;

	addrlen = parse_u16(cmddata);
	cmddata += 2;
	len -= 2;

	if (unlikely(len < addrlen))
		return 1;

	addr = cmddata;

	if (nb_dd->rcvd_addr != 0) {
		if (nb_dd->addrlen != addrlen)
			return 1;
		if (addrlen != 0 && memcmp(nb_dd->addr, addr, addrlen) != 0)
			return 1;
	} else {
		if (addrlen != 0) {
			nb_dd->addr = kmalloc(addrlen, GFP_KERNEL);
			if (unlikely(nb_dd->addr == 0))
				return 1;

			memcpy(nb_dd->addr, addr, addrlen);

			nb_dd->addrlen = addrlen;
		}
		nb_dd->rcvd_addr = 1;
	}

	return 0;
}
static int parse_announce_cmd(struct neighbor_discdata *nb_dd,
		__u16 cmd, char *cmddata, __u16 len)
{
	if (cmd == NEIGHCMD_VERSION) {
		return parse_announce_version(nb_dd, cmd, cmddata, len);
	} else if (cmd == NEIGHCMD_ADDR) {
		return parse_announce_addaddr(nb_dd, cmd, cmddata, len);
	} else {
		/* skip unknown commands for forward compatibility */
		return 0;
	}
}
static int parse_announce_cmds(struct neighbor_discdata *nb_dd,
		char *msg, __u32 len)
{
	__u32 zeros = 0;

	/* count the zero padding at the end of the message */
	while (zeros < len) {
		if (msg[len-zeros-1] != 0)
			break;
		zeros++;
	}

	while (len >= 4 && len > zeros) {
		__u16 cmd;
		__u16 cmdlen;

		cmd = parse_u16(msg);
		msg += 2;
		len -= 2;

		cmdlen = parse_u16(msg);
		msg += 2;
		len -= 2;

		if (unlikely(cmdlen > len))
			return 1;

		if (parse_announce_cmd(nb_dd, cmd, msg, cmdlen) != 0)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0 && len < zeros)
		return 1;

	return 0;
}
static void neighbor_discdata_free(struct neighbor_discdata *nb_dd)
{
	list_del(&(nb_dd->lh));

	BUG_ON(nb_dd->dev == 0);
	dev_put(nb_dd->dev);
	nb_dd->dev = 0;

	if (nb_dd->addr != 0) {
		/* ownership of addr moves to the cor_neighbor once
		   add_neighbor() has run */
		if (nb_dd->nb_allocated == 0)
			kfree(nb_dd->addr);
		nb_dd->addr = 0;
		nb_dd->addrlen = 0;
	}

	kmem_cache_free(nb_dd_slab, nb_dd);

	BUG_ON(num_nb_dd == 0);
	num_nb_dd--;
}
static void announce_send_start(struct net_device *dev, char *mac, int type);
static struct neighbor_discdata *findoralloc_neighbor_discdata(
		struct net_device *dev, char *source_hw, __be32 sessionid)
{
	unsigned long jiffies_tmp = jiffies;
	struct list_head *currlh;

	__u32 neighs;
	struct neighbor_discdata *nb_dd;

	currlh = nb_dd_list.next;
	while (currlh != &nb_dd_list) {
		struct neighbor_discdata *curr = container_of(currlh,
				struct neighbor_discdata, lh);

		currlh = currlh->next;

		if (time_after(jiffies_tmp, curr->jiffies_created +
				HZ * NEIGHBOR_DISCOVERY_TIMEOUT_SEC)) {
			neighbor_discdata_free(curr);
			continue;
		}

		if (curr->sessionid == sessionid && curr->dev == dev &&
				memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0)
			return curr;
	}

	neighs = atomic_read(&num_neighs);
	if (neighs + num_nb_dd < neighs || neighs + num_nb_dd >= MAX_NEIGHBORS)
		return 0;

	nb_dd = kmem_cache_alloc(nb_dd_slab, GFP_KERNEL);
	if (unlikely(nb_dd == 0))
		return 0;

	memset(nb_dd, 0, sizeof(struct neighbor_discdata));

	nb_dd->sessionid = sessionid;

	dev_hold(dev);
	nb_dd->dev = dev;

	memcpy(nb_dd->mac, source_hw, MAX_ADDR_LEN);

	list_add_tail(&(nb_dd->lh), &nb_dd_list);
	num_nb_dd++;
	nb_dd->jiffies_created = jiffies_tmp;

	if (is_clientmode())
		announce_send_start(dev, source_hw, ANNOUNCE_TYPE_UNICAST);

	return nb_dd;
}
static void parse_announce(struct net_device *dev, char *source_hw,
		char *msg, __u32 len)
{
	__be32 sessionid;
	struct neighbor_discdata *nb_dd;

	if (unlikely(len < 4))
		return;

	sessionid = parse_be32(msg);
	msg += 4;
	len -= 4;

	nb_dd = findoralloc_neighbor_discdata(dev, source_hw, sessionid);
	if (unlikely(nb_dd == 0))
		return;

	if (parse_announce_cmds(nb_dd, msg, len) != 0)
		goto discard;

	if (nb_dd->rcvd_version != 0 && nb_dd->rcvd_addr != 0) {
		add_neighbor(nb_dd);

discard:
		neighbor_discdata_free(nb_dd);
	}
}
static void _rcv_announce(struct work_struct *work)
{
	struct cor_skb_procstate *ps = container_of(work,
			struct cor_skb_procstate, funcstate.announce1.work);
	struct sk_buff *skb = skb_from_pstate(ps);

	char source_hw[MAX_ADDR_LEN];

	struct cor_neighbor *nb;

	char *msg;
	__u16 len;

	if (is_device_configurated(skb->dev) == 0)
		goto discard;

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	nb = _get_neigh_by_mac(skb->dev, source_hw);
	if (nb != 0) {
		/* sender is already a known neighbor */
		kref_put(&(nb->ref), neighbor_free);
		goto discard;
	}

	if (unlikely(skb->len > 65535))
		goto discard;
	len = (__u16) skb->len;

	msg = cor_pull_skb(skb, len);
	if (msg == 0)
		goto discard;

	mutex_lock(&(announce_rcv_lock));
	parse_announce(skb->dev, source_hw, msg, len);
	mutex_unlock(&(announce_rcv_lock));

discard:
	kfree_skb(skb);

	atomic_dec(&packets_in_workqueue);
}
int rcv_announce(struct sk_buff *skb)
{
	struct cor_skb_procstate *ps = skb_pstate(skb);
	int queuelen;

	queuelen = atomic_inc_return(&packets_in_workqueue);

	BUG_ON(queuelen <= 0);

	if (queuelen > MAX_PACKETS_IN_ANNOUNCE_RCVQUEUE) {
		atomic_dec(&packets_in_workqueue);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	INIT_WORK(&(ps->funcstate.announce1.work), _rcv_announce);
	schedule_work(&(ps->funcstate.announce1.work));
	return NET_RX_SUCCESS;
}
static int ___send_announce(struct sk_buff *skb, int *sent)
{
	int rc;
	struct cor_qos_queue *q = cor_get_queue(skb->dev);

	if (q == 0) {
		kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	rc = cor_dev_queue_xmit(skb, q, QOS_CALLER_ANNOUNCE);
	kref_put(&(q->ref), free_qos);
	if (rc != NET_XMIT_DROP)
		*sent = 1;

	return rc;
}
static int __send_announce(struct cor_announce_data *ann, int *sent)
{
	__u32 len;
	__u32 offset = 0;

	__u32 local_addrlen_tmp;

	char *msg = 0;

	struct sk_buff *skb;
	__u32 headroom;

	headroom = LL_RESERVED_SPACE(ann->dev) +
			ann->dev->needed_tailroom;

	spin_lock_bh(&(local_addr_lock));

retry:
	BUG_ON(local_addrlen > 64);

	local_addrlen_tmp = local_addrlen;

	spin_unlock_bh(&(local_addr_lock));

	len = 1 + 4 + 8 + 6 + local_addrlen_tmp;

	skb = alloc_skb(headroom + len, GFP_ATOMIC);
	if (unlikely(skb == 0))
		return NET_XMIT_SUCCESS;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

#warning net_device locking? (other places too)
	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->mac, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	msg = skb_put(skb, len);
	if (unlikely(msg == 0))
		goto out_err;

	spin_lock_bh(&(local_addr_lock));

	/* local_addr changed while the buffer was allocated */
	if (unlikely(local_addrlen != local_addrlen_tmp)) {
		kfree_skb(skb);
		skb = 0;
		msg = 0;
		goto retry;
	}

	msg[0] = PACKET_TYPE_ANNOUNCE;
	offset++;

	put_be32(msg + offset, local_addr_sessionid); /* sessionid */
	offset += 4;

	put_u16(msg + offset, NEIGHCMD_VERSION); /* command */
	offset += 2;
	put_u16(msg + offset, 4); /* command length */
	offset += 2;
	put_u16(msg + offset, 0); /* version */
	offset += 2;
	put_u16(msg + offset, 0); /* minversion */
	offset += 2;

	put_u16(msg + offset, NEIGHCMD_ADDR); /* command */
	offset += 2;
	put_u16(msg + offset, 2 + local_addrlen); /* command length */
	offset += 2;
	put_u16(msg + offset, local_addrlen); /* addrlen */
	offset += 2;
	if (local_addrlen != 0) {
		memcpy(msg + offset, local_addr, local_addrlen); /* addr */
		offset += local_addrlen;
	}

	spin_unlock_bh(&(local_addr_lock));

	BUG_ON(offset != len);

	return ___send_announce(skb, sent);

out_err:
	if (skb != 0)
		kfree_skb(skb);

	return NET_XMIT_SUCCESS;
}
void announce_data_free(struct kref *ref)
{
	struct cor_announce_data *ann = container_of(ref,
			struct cor_announce_data, ref);
	kfree(ann);
}
int _send_announce(struct cor_announce_data *ann, int fromqos, int *sent)
{
	int reschedule = 0;
	int rc = 0;

	spin_lock_bh(&(announce_snd_lock));

	if (unlikely(ann->dev == 0)) {
		rc = NET_XMIT_SUCCESS;
		goto out;
	}

	if (is_device_configurated(ann->dev) == 0)
		rc = NET_XMIT_SUCCESS;
	else if (fromqos == 0 && qos_fastsend_allowed_announce(ann->dev) == 0)
		rc = NET_XMIT_DROP;
	else
		rc = __send_announce(ann, sent);

	if (rc != NET_XMIT_DROP && ann->type != ANNOUNCE_TYPE_BROADCAST) {
		ann->sndcnt++;
		reschedule = (ann->sndcnt < ANNOUNCE_SEND_UNICAST_MAXCNT ?
				1 : 0);

		if (reschedule == 0) {
			dev_put(ann->dev);
			ann->dev = 0;

			list_del(&(ann->lh));
			kref_put(&(ann->ref), kreffree_bug);
		}
	} else {
		reschedule = 1;
	}

out:
	spin_unlock_bh(&(announce_snd_lock));

	if (unlikely(reschedule == 0)) {
		kref_put(&(ann->ref), announce_data_free);
	} else if (rc == NET_XMIT_DROP) {
		struct cor_qos_queue *q = cor_get_queue(ann->dev);

		if (q != 0) {
			cor_qos_enqueue(q, &(ann->rb), ns_to_ktime(0),
					QOS_CALLER_ANNOUNCE);
			kref_put(&(q->ref), free_qos);
		}
	} else {
		schedule_delayed_work(&(ann->announce_work), msecs_to_jiffies(
				ANNOUNCE_SEND_PACKETINTELVAL_MS));
	}

	if (rc != NET_XMIT_SUCCESS)
		return QOS_RESUME_CONG;

	return QOS_RESUME_DONE;
}
static void send_announce(struct work_struct *work)
{
	struct cor_announce_data *ann = container_of(to_delayed_work(work),
			struct cor_announce_data, announce_work);
	int sent = 0;

	_send_announce(ann, 0, &sent);
}
static void announce_send_start(struct net_device *dev, char *mac, int type)
{
	struct cor_announce_data *ann;

	ann = kmalloc(sizeof(struct cor_announce_data), GFP_KERNEL);

	if (unlikely(ann == 0)) {
		printk(KERN_ERR "cor cannot allocate memory for sending "
				"announces");
		return;
	}

	memset(ann, 0, sizeof(struct cor_announce_data));

	kref_init(&(ann->ref));

	dev_hold(dev);
	ann->dev = dev;
	memcpy(ann->mac, mac, MAX_ADDR_LEN);
	ann->type = type;

	spin_lock_bh(&(announce_snd_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	spin_unlock_bh(&(announce_snd_lock));

	INIT_DELAYED_WORK(&(ann->announce_work), send_announce);
	kref_get(&(ann->ref)); /* announce_work */
	schedule_delayed_work(&(ann->announce_work), 1);
}
void announce_send_stop(struct net_device *dev, char *mac, int type)
{
	struct list_head *lh;

	spin_lock_bh(&(announce_snd_lock));

	lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct cor_announce_data *ann = container_of(lh,
				struct cor_announce_data, lh);

		lh = lh->next;

		if (dev != 0 && (ann->dev != dev || (
				type != ANNOUNCE_TYPE_BROADCAST && (
				ann->type != type ||
				memcmp(ann->mac, mac, MAX_ADDR_LEN) != 0))))
			continue;

		dev_put(ann->dev);
		ann->dev = 0;

		list_del(&(ann->lh));
		kref_put(&(ann->ref), kreffree_bug);
	}

	spin_unlock_bh(&(announce_snd_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int rc;

	switch (event) {
	case NETDEV_UP:
		if (dev->flags & IFF_LOOPBACK)
			break;

		rc = create_queue(dev);
		if (rc != 0)
			return 1;

		if (is_clientmode() == 0)
			announce_send_start(dev, dev->broadcast,
					ANNOUNCE_TYPE_BROADCAST);
		break;
	case NETDEV_DOWN:
		printk(KERN_ERR "down 1");
		printk(KERN_ERR "down 2");

		announce_send_stop(dev, 0, ANNOUNCE_TYPE_BROADCAST);
		printk(KERN_ERR "down 3");

		reset_neighbors(dev);
		printk(KERN_ERR "down 4");

		printk(KERN_ERR "down 5");
		break;
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}
void _cor_neighbor_down(void)
{
	spin_lock_bh(&(local_addr_lock));
	if (local_addr != 0) {
		kfree(local_addr);
		local_addr = 0;
	}
	local_addrlen = 0;
	spin_unlock_bh(&(local_addr_lock));

	reset_neighbors(0);

	announce_send_stop(0, 0, ANNOUNCE_TYPE_BROADCAST);

	if (netdev_notify_registered != 0 &&
			unregister_netdevice_notifier(&netdev_notify) != 0) {
		printk(KERN_WARNING "warning: cor_neighbor_down: "
				"unregister_netdevice_notifier failed");
	}

	netdev_notify_registered = 0;
}
void cor_neighbor_down(void)
{
	mutex_lock(&(neigh_up_lock));
	_cor_neighbor_down();
	mutex_unlock(&(neigh_up_lock));
}
int cor_neighbor_up(char *addr2, __u32 addrlen2)
{
	int rc = 0;

	char *addr2_copy = kmalloc(addrlen2, GFP_KERNEL);
	if (unlikely(addr2_copy == 0))
		return 1;

	memcpy(addr2_copy, addr2, addrlen2);

	mutex_lock(&(neigh_up_lock));

	_cor_neighbor_down();

	spin_lock_bh(&(local_addr_lock));

	BUG_ON(local_addr != 0);
	BUG_ON(local_addrlen != 0);

	local_addr = addr2_copy;
	addr2_copy = 0;
	local_addrlen = addrlen2;
	get_random_bytes((char *) &local_addr_sessionid,
			sizeof(local_addr_sessionid));

	spin_unlock_bh(&(local_addr_lock));

	BUG_ON(netdev_notify_registered != 0);

	if (register_netdevice_notifier(&netdev_notify) != 0)
		goto out_err;

	netdev_notify_registered = 1;

	goto out;

out_err:
	spin_lock_bh(&(local_addr_lock));
	kfree(local_addr);
	local_addr = 0;
	local_addrlen = 0;
	spin_unlock_bh(&(local_addr_lock));
	rc = 1;

out:
	mutex_unlock(&(neigh_up_lock));

	return rc;
}
int is_clientmode(void)
{
	int rc;

	spin_lock_bh(&(local_addr_lock));
	rc = (local_addrlen == 0 ? 1 : 0);
	spin_unlock_bh(&(local_addr_lock));

	return rc;
}
int __init cor_neighbor_init(void)
{
	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct cor_neighbor),
			8, 0, 0);
	if (unlikely(nb_slab == 0))
		return 1;

	nb_dd_slab = kmem_cache_create("cor_neighbor_discoverydata",
			sizeof(struct neighbor_discdata), 8, 0, 0);
	if (unlikely(nb_dd_slab == 0))
		return 1;

	atomic_set(&num_neighs, 0);

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;

	return 0;
}
void __exit cor_neighbor_exit1(void)
{
	flush_scheduled_work();
}
void __exit cor_neighbor_exit2(void)
{
	BUG_ON(atomic_read(&num_neighs) != 0);

	kmem_cache_destroy(nb_dd_slab);
	nb_dd_slab = 0;

	kmem_cache_destroy(nb_slab);
	nb_slab = 0;
}

MODULE_LICENSE("GPL");