/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/delay.h>

#include "cor.h"

static DEFINE_SPINLOCK(cor_neighbor_list_lock);
static LIST_HEAD(cor_nb_list);
static struct kmem_cache *cor_nb_slab;
atomic_t cor_num_neighs;

static DEFINE_SPINLOCK(cor_connid_gen);
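
/*
 * All known neighbors are kept in cor_nb_list, protected by
 * cor_neighbor_list_lock. Every neighbor is reference counted through
 * cor_neighbor.ref; membership in the list holds one reference (tagged
 * "neigh_list"), and lookups take a temporary "stack" reference before
 * the list lock is dropped.
 */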

void cor_neighbor_free(struct kref *ref)
{
	struct cor_neighbor *nb = container_of(ref, struct cor_neighbor, ref);

	WARN_ONCE(list_empty(&(nb->cmsg_queue_pong)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_pong is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ack_fast)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ack_fast is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ack_slow)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ack_slow is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ackconn_urgent)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ackconn_urgent is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ackconn_lowlat)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ackconn_lowlat is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_ackconn_highlat)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ackconn_highlat is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_conndata_lowlat)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_conndata_lowlat is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_conndata_highlat)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_conndata_highlat is not empty");
	WARN_ONCE(list_empty(&(nb->cmsg_queue_other)) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_other is not empty");
	WARN_ONCE(nb->pending_conn_resets_rb.rb_node != 0,
			"cor_neighbor_free(): nb->pending_conn_resets_rb is not empty");
	WARN_ONCE(nb->rb_kp.in_queue != RB_INQUEUE_FALSE,
			"cor_neighbor_free(): nb->rb_kp.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb_cr.in_queue != RB_INQUEUE_FALSE,
			"cor_neighbor_free(): nb->rb_cr.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb.in_queue != RB_INQUEUE_FALSE,
			"cor_neighbor_free(): nb->rb.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(list_empty(&(nb->conns_waiting.lh)) == 0,
			"cor_neighbor_free(): nb->conns_waiting.lh is not empty");
	WARN_ONCE(list_empty(&(nb->conns_waiting.lh_nextpass)) == 0,
			"cor_neighbor_free(): nb->conns_waiting.lh_nextpass is not empty");
	WARN_ONCE(nb->str_timer_pending != 0,
			"cor_neighbor_free(): nb->str_timer_pending is not 0");
	WARN_ONCE(nb->connid_rb.rb_node != 0,
			"cor_neighbor_free(): nb->connid_rb is not empty");
	WARN_ONCE(nb->connid_reuse_rb.rb_node != 0,
			"cor_neighbor_free(): nb->connid_reuse_rb is not empty");
	WARN_ONCE(list_empty(&(nb->connid_reuse_list)) == 0,
			"cor_neighbor_free(): nb->connid_reuse_list is not empty");
	WARN_ONCE(nb->kp_retransmits_rb.rb_node != 0,
			"cor_neighbor_free(): nb->kp_retransmits_rb is not empty");
	WARN_ONCE(list_empty(&(nb->snd_conn_idle_list)) == 0,
			"cor_neighbor_free(): nb->snd_conn_idle_list is not empty");
	WARN_ONCE(list_empty(&(nb->snd_conn_busy_list)) == 0,
			"cor_neighbor_free(): nb->snd_conn_busy_list is not empty");
	WARN_ONCE(list_empty(&(nb->retrans_fast_list)) == 0,
			"cor_neighbor_free(): nb->retrans_fast_list is not empty");
	WARN_ONCE(list_empty(&(nb->retrans_slow_list)) == 0,
			"cor_neighbor_free(): nb->retrans_slow_list is not empty");
	WARN_ONCE(list_empty(&(nb->retrans_conn_lowlatency_list)) == 0,
			"cor_neighbor_free(): nb->retrans_conn_lowlatency_list is not empty");
	WARN_ONCE(list_empty(&(nb->retrans_conn_highlatency_list)) == 0,
			"cor_neighbor_free(): nb->retrans_conn_highlatency_list is not empty");

	/* printk(KERN_ERR "neighbor free\n"); */
	BUG_ON(nb->nb_list.next != LIST_POISON1);
	BUG_ON(nb->nb_list.prev != LIST_POISON2);

	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;

	kref_put(&(nb->queue->ref), cor_free_qos);

	kmem_cache_free(cor_nb_slab, nb);
	atomic_dec(&cor_num_neighs);
}

static void cor_stall_timer(struct work_struct *work);

static void _cor_reset_neighbor(struct work_struct *work);

static struct cor_neighbor *cor_alloc_neighbor(gfp_t allocflags)
{
	struct cor_neighbor *nb;
	__u64 seqno;

	if (atomic_inc_return(&cor_num_neighs) >= MAX_NEIGHBORS) {
		atomic_dec(&cor_num_neighs);
		return 0;
	}

	nb = kmem_cache_alloc(cor_nb_slab, allocflags);
	if (unlikely(nb == 0))
		return 0;

	memset(nb, 0, sizeof(struct cor_neighbor));

	kref_init(&(nb->ref));
	atomic_set(&(nb->sessionid_rcv_needed), 1);
	atomic_set(&(nb->sessionid_snd_needed), 1);
	timer_setup(&(nb->cmsg_timer), cor_controlmsg_timerfunc, 0);
	spin_lock_init(&(nb->cmsg_lock));
	INIT_LIST_HEAD(&(nb->cmsg_queue_pong));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ack_fast));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ack_slow));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ackconn_urgent));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ackconn_lowlat));
	INIT_LIST_HEAD(&(nb->cmsg_queue_ackconn_highlat));
	INIT_LIST_HEAD(&(nb->cmsg_queue_conndata_lowlat));
	INIT_LIST_HEAD(&(nb->cmsg_queue_conndata_highlat));
	INIT_LIST_HEAD(&(nb->cmsg_queue_other));
	atomic_set(&(nb->cmsg_pongs_retrans_cnt), 0);
	atomic_set(&(nb->cmsg_othercnt), 0);
	atomic_set(&(nb->cmsg_bulk_readds), 0);
	atomic_set(&(nb->cmsg_delay_conndata), 0);
	atomic_set(&(nb->rcvmtu_sendneeded), 1);
	nb->last_ping_time = jiffies;
	atomic_set(&(nb->latency_retrans_us), PING_GUESSLATENCY_MS*1000);
	atomic_set(&(nb->latency_advertised_us), PING_GUESSLATENCY_MS*1000);
	atomic_set(&(nb->max_remote_ack_fast_delay_us), 1000000);
	atomic_set(&(nb->max_remote_ack_slow_delay_us), 1000000);
	atomic_set(&(nb->max_remote_ackconn_lowlat_delay_us), 1000000);
	atomic_set(&(nb->max_remote_ackconn_highlat_delay_us), 1000000);
	atomic_set(&(nb->max_remote_pong_delay_us), 1000000);
	atomic_set(&(nb->remote_rcvmtu), 128);
	spin_lock_init(&(nb->conns_waiting.lock));
	INIT_LIST_HEAD(&(nb->conns_waiting.lh));
	INIT_LIST_HEAD(&(nb->conns_waiting.lh_nextpass));
	spin_lock_init(&(nb->nbcongwin.lock));
	atomic64_set(&(nb->nbcongwin.data_intransit), 0);
	atomic64_set(&(nb->nbcongwin.cwin), 0);
	spin_lock_init(&(nb->state_lock));
	nb->state = NEIGHBOR_STATE_INITIAL;
	nb->state_time.initial_state_since = jiffies;
	INIT_DELAYED_WORK(&(nb->stalltimeout_timer), cor_stall_timer);
	spin_lock_init(&(nb->connid_lock));
	spin_lock_init(&(nb->connid_reuse_lock));
	INIT_LIST_HEAD(&(nb->connid_reuse_list));
	get_random_bytes((char *) &seqno, sizeof(seqno));
	nb->kpacket_seqno = seqno;
	atomic64_set(&(nb->priority_sum), 0);
	spin_lock_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->snd_conn_idle_list));
	INIT_LIST_HEAD(&(nb->snd_conn_busy_list));
	spin_lock_init(&(nb->retrans_lock));
	INIT_LIST_HEAD(&(nb->retrans_fast_list));
	INIT_LIST_HEAD(&(nb->retrans_slow_list));
	spin_lock_init(&(nb->retrans_conn_lock));
	INIT_LIST_HEAD(&(nb->retrans_conn_lowlatency_list));
	INIT_LIST_HEAD(&(nb->retrans_conn_highlatency_list));
	INIT_WORK(&(nb->reset_neigh_work), _cor_reset_neighbor);

	return nb;
}

int cor_is_from_nb(struct sk_buff *skb, struct cor_neighbor *nb)
{
	int rc;

	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	rc = (skb->dev == nb->dev && memcmp(nb->mac, source_hw,
			MAX_ADDR_LEN) == 0);
	return rc;
}

struct cor_neighbor *_cor_get_neigh_by_mac(struct net_device *dev,
		char *source_hw)
{
	struct list_head *currlh;
	struct cor_neighbor *ret = 0;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON(curr->in_nb_list == 0);

		if (curr->dev == dev && memcmp(curr->mac, source_hw,
				MAX_ADDR_LEN) == 0) {
			ret = curr;
			cor_nb_kref_get(ret, "stack");
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);

	if (ret != 0 && unlikely(cor_get_neigh_state(ret) ==
			NEIGHBOR_STATE_KILLED)) {
		cor_nb_kref_put(ret, "stack");
		ret = 0;
	}

	return ret;
}

struct cor_neighbor *cor_get_neigh_by_mac(struct sk_buff *skb)
{
	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	return _cor_get_neigh_by_mac(skb->dev, source_hw);
}

struct cor_neighbor *cor_find_neigh(__be64 addr)
{
	struct list_head *currlh;
	struct cor_neighbor *ret = 0;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON(curr->in_nb_list == 0);

		BUG_ON((curr->has_addr == 0) && (curr->addr != 0));
		if (curr->has_addr != 0 && curr->addr == addr) {
			ret = curr;
			cor_nb_kref_get(ret, "stack");
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);

	if (ret != 0 && unlikely(cor_get_neigh_state(ret) ==
			NEIGHBOR_STATE_KILLED)) {
		cor_nb_kref_put(ret, "stack");
		ret = 0;
	}

	return ret;
}

void cor_resend_rcvmtu(struct net_device *dev)
{
	struct list_head *currlh;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *nb = container_of(currlh,
				struct cor_neighbor, nb_list);

		unsigned long iflags;

		if (nb->dev != dev)
			goto cont;

		/* request a fresh rcvmtu announcement for this neighbor */
		atomic_set(&(nb->rcvmtu_sendneeded), 1);

		spin_lock_irqsave(&(nb->state_lock), iflags);

		if (nb->rcvmtu_allowed_countdown != 0)
			nb->rcvmtu_delayed_send_needed = 1;

		nb->rcvmtu_allowed_countdown = 3;

		spin_unlock_irqrestore(&(nb->state_lock), iflags);

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);
}
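
/*
 * Wire format written by cor_generate_neigh_list() below: a row count
 * (up to 4 bytes, prepended at the end), a header announcing the two
 * per-row fields (addr: 8 bytes; latency: 1 byte, log-encoded), and
 * then one row per ACTIVE neighbor that has an address.
 */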

__u32 cor_generate_neigh_list(char *buf, __u32 buflen)
{
	struct list_head *currlh;

	int rc;
	int state;

	__u32 cnt = 0;
	__u32 buf_offset = 4;

	/*
	 * The variable length header (the row count) needs to be generated
	 * after the data. This is done by reserving the maximum space it
	 * could take. If it ends up being smaller, the data is moved so that
	 * there is no gap.
	 */
	BUG_ON(buflen < buf_offset);

	/* field header: addr, 8 bytes */
	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 2);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	BUG_ON(buflen < buf_offset + 2);
	cor_put_u16(buf + buf_offset, LIST_NEIGH_FIELD_ADDR);
	buf_offset += 2;

	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 8);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	/* field header: latency, 1 byte */
	BUG_ON(buflen < buf_offset + 2);
	cor_put_u16(buf + buf_offset, LIST_NEIGH_FIELD_LATENCY);
	buf_offset += 2;

	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 1);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON(curr->in_nb_list == 0);

		state = cor_get_neigh_state(curr);
		if (state != NEIGHBOR_STATE_ACTIVE)
			goto cont;

		BUG_ON((curr->has_addr == 0) && (curr->addr != 0));
		if (curr->has_addr == 0)
			goto cont;

		if (unlikely(buflen < buf_offset + 8 + 1))
			break;

		cor_put_be64(buf + buf_offset, curr->addr);
		buf_offset += 8;

		buf[buf_offset] = cor_enc_log_64_11(atomic_read(
				&(curr->latency_advertised_us)));
		buf_offset += 1;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);

	rc = cor_encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);

	memmove(buf + ((__u32) rc), buf + 4, buf_offset);

	return buf_offset - 4 + ((__u32) rc);
}

static void cor_reset_all_conns(struct cor_neighbor *nb)
{
	while (1) {
		unsigned long iflags;
		struct cor_conn *trgt_out;
		struct cor_conn *src_in;
		struct cor_conn_bidir *cnb;
		int rc;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);

		if (!list_empty(&(nb->snd_conn_busy_list))) {
			trgt_out = container_of(nb->snd_conn_busy_list.next,
					struct cor_conn, trgt.out.nb_list);
		} else if (!list_empty(&(nb->snd_conn_idle_list))) {
			trgt_out = container_of(nb->snd_conn_idle_list.next,
					struct cor_conn, trgt.out.nb_list);
		} else {
			spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
			break;
		}

		cor_conn_kref_get(trgt_out, "stack");

		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		src_in = cor_get_conn_reversedir(trgt_out);
		cnb = cor_get_conn_bidir(trgt_out);

		spin_lock_bh(&(cnb->cli.rcv_lock));
		spin_lock_bh(&(cnb->srv.rcv_lock));

		if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
				unlikely(src_in->src.in.nb != nb))) {
			spin_unlock_bh(&(cnb->srv.rcv_lock));
			spin_unlock_bh(&(cnb->cli.rcv_lock));
			cor_conn_kref_put(src_in, "stack");
			continue;
		}

		rc = cor_send_reset_conn(nb, trgt_out->trgt.out.conn_id, 1);

		if (unlikely(rc != 0))
			goto failed;

		if (trgt_out->isreset == 0)
			trgt_out->isreset = 1;

		spin_unlock_bh(&(cnb->srv.rcv_lock));
		spin_unlock_bh(&(cnb->cli.rcv_lock));

		cor_reset_conn(src_in);
		cor_conn_kref_put(src_in, "stack");
		continue;

failed:
		/* sending the reset failed - retry from the timer later */
		spin_unlock_bh(&(cnb->srv.rcv_lock));
		spin_unlock_bh(&(cnb->cli.rcv_lock));
		cor_conn_kref_put(src_in, "stack");
		cor_nb_kref_get(nb, "stalltimeout_timer");
		schedule_delayed_work(&(nb->stalltimeout_timer), HZ);
		break;
	}
}

static void cor_delete_connid_reuse_items(struct cor_neighbor *nb);

static void _cor_reset_neighbor(struct work_struct *work)
{
	struct cor_neighbor *nb = container_of(work, struct cor_neighbor,
			reset_neigh_work);

	cor_reset_all_conns(nb);
	cor_delete_connid_reuse_items(nb);

	spin_lock_bh(&cor_neighbor_list_lock);
	if (nb->in_nb_list != 0) {
		list_del(&(nb->nb_list));
		nb->in_nb_list = 0;
		cor_nb_kref_put_bug(nb, "neigh_list");
	}
	spin_unlock_bh(&cor_neighbor_list_lock);

	cor_nb_kref_put(nb, "reset_neigh_work");
}

static void cor_reset_neighbor(struct cor_neighbor *nb, int use_workqueue)
{
	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	/* if (nb->state != NEIGHBOR_STATE_KILLED) {
		printk(KERN_ERR "cor_reset_neighbor\n");
	} */
	nb->state = NEIGHBOR_STATE_KILLED;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (use_workqueue) {
		schedule_work(&(nb->reset_neigh_work));
		cor_nb_kref_get(nb, "reset_neigh_work");
	} else {
		cor_reset_all_conns(nb);
		cor_delete_connid_reuse_items(nb);

		spin_lock_bh(&cor_neighbor_list_lock);
		if (nb->in_nb_list != 0) {
			list_del(&(nb->nb_list));
			nb->in_nb_list = 0;
		}
		spin_unlock_bh(&cor_neighbor_list_lock);

		cor_nb_kref_put(nb, "neigh_list");
	}
}

void cor_reset_neighbors(struct net_device *dev)
{
	struct list_head *currlh;

restart:
	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		unsigned long iflags;
		struct cor_neighbor *currnb = container_of(currlh,
				struct cor_neighbor, nb_list);
		int state;

		BUG_ON(currnb->in_nb_list == 0);

		if (dev != 0 && currnb->dev != dev)
			goto cont;

		spin_lock_irqsave(&(currnb->state_lock), iflags);
		state = currnb->state;
		spin_unlock_irqrestore(&(currnb->state_lock), iflags);

		if (state != NEIGHBOR_STATE_KILLED) {
			spin_unlock_bh(&cor_neighbor_list_lock);
			cor_reset_neighbor(currnb, 0);
			goto restart;
		}

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);
}

static void cor_stall_timer(struct work_struct *work)
{
	struct cor_neighbor *nb = container_of(to_delayed_work(work),
			struct cor_neighbor, stalltimeout_timer);

	int stall_time_ms;
	int nbstate;

	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (nbstate == NEIGHBOR_STATE_STALLED) {
		stall_time_ms = jiffies_to_msecs(jiffies -
				nb->state_time.last_roundtrip);

		if (stall_time_ms < NB_KILL_TIME_MS) {
			schedule_delayed_work(&(nb->stalltimeout_timer),
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
			return;
		}

		cor_reset_neighbor(nb, 1);
	}

	nb->str_timer_pending = 0;
	cor_nb_kref_put(nb, "stalltimeout_timer");
}

int cor_get_neigh_state(struct cor_neighbor *nb)
{
	int ret;
	unsigned long iflags;
	int stall_time_ms;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	stall_time_ms = jiffies_to_msecs(jiffies -
			nb->state_time.last_roundtrip);

	WARN_ONCE(likely(nb->state == NEIGHBOR_STATE_ACTIVE) && unlikely(
			jiffies_to_msecs(jiffies - nb->last_ping_time) >
			PING_FORCETIME_ACTIVEIDLE_MS * 4) &&
			nb->ping_intransit == 0,
			"We have stopped sending pings to a neighbor!?");

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(stall_time_ms > NB_STALL_TIME_MS) && (
			nb->ping_intransit >= NB_STALL_MINPINGS ||
			nb->ping_intransit >= PING_COOKIES_PER_NEIGH)) {
		nb->state = NEIGHBOR_STATE_STALLED;
		nb->ping_success = 0;
		if (nb->str_timer_pending == 0) {
			nb->str_timer_pending = 1;
			cor_nb_kref_get(nb, "stalltimeout_timer");

			schedule_delayed_work(&(nb->stalltimeout_timer),
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
		}

		/* printk(KERN_ERR "changed to stalled\n"); */
		BUG_ON(nb->ping_intransit > PING_COOKIES_PER_NEIGH);
	} else if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL) &&
			time_after(jiffies, nb->state_time.initial_state_since +
			INITIAL_TIME_LIMIT_SEC * HZ)) {
		spin_unlock_irqrestore(&(nb->state_lock), iflags);
		cor_reset_neighbor(nb, 1);
		return NEIGHBOR_STATE_KILLED;
	}

	ret = nb->state;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return ret;
}
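
/*
 * RTT measurement: every ping carries a cookie from a small per-neighbor
 * table (PING_COOKIES_PER_NEIGH entries). Each entry records when the
 * ping was created and when it was last (re)sent, so that a pong can be
 * matched to its ping and yields two latency samples: one relative to
 * the last send (retransmit latency) and one relative to creation time
 * (advertised latency).
 */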

static struct cor_ping_cookie *cor_find_cookie(struct cor_neighbor *nb,
		__u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &(nb->cookies[i]);
	}

	return 0;
}

static void cor_reset_cookie(struct cor_neighbor *nb, struct cor_ping_cookie *c)
{
	if (c->cookie == 0)
		return;

	if (nb->cookie_unsent != c->cookie)
		nb->ping_intransit--;

	c->cookie = 0;
}
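
/*
 * Integer square root, used to derive the latency standard deviation
 * from the running variance kept in latency_variance_retrans_us.
 */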

static __u32 sqrt(__u64 x)
{
	int i;
	__u64 y;

	if (unlikely(x <= 1))
		return (__u32) x;

	/* integer Newton iteration: y converges towards sqrt(x) */
	y = x;
	for (i = 0; i < 64; i++) {
		y = y/2 + div64_u64(x/2, y);
		if (unlikely(y == 0))
			break;
	}

	if (unlikely(y > U32_MAX))
		y = U32_MAX;

	return (__u32) y;
}
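
/*
 * Latency smoothing: during the first 16 pings in the INITIAL state the
 * estimate is the plain average of all samples so far; afterwards it is
 * an exponentially weighted moving average,
 *
 *	new_us = ((old_us * 1000) * 15 + sample_ns) / 16, rounded to us
 *
 * clamped to [0, U32_MAX].
 */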

static __u32 cor_calc_newlatency(struct cor_neighbor *nb_statelocked,
		__u32 oldlatency_us, __s64 newlatency_ns)
{
	__s64 oldlatency = oldlatency_us * 1000LL;
	__s64 newlatency;

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16))
		newlatency = div64_s64(
				oldlatency * nb_statelocked->ping_success +
				newlatency_ns,
				nb_statelocked->ping_success + 1);
	else
		newlatency = (oldlatency * 15 + newlatency_ns) / 16;

	newlatency = div_s64(newlatency + 500, 1000);

	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > U32_MAX))
		newlatency = U32_MAX;

	return (__u32) newlatency;
}

static void cor_update_nb_latency(struct cor_neighbor *nb_statelocked,
		struct cor_ping_cookie *c, __u32 respdelay)
{
	ktime_t now = ktime_get();

	__s64 pinglatency_retrans_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_sent) - respdelay * 1000LL;
	__s64 pinglatency_advertised_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_created) - respdelay * 1000LL;

	__u32 oldlatency_retrans_us =
			atomic_read(&(nb_statelocked->latency_retrans_us));

	__u32 newlatency_retrans_us = cor_calc_newlatency(nb_statelocked,
			oldlatency_retrans_us, pinglatency_retrans_ns);

	atomic_set(&(nb_statelocked->latency_retrans_us),
			newlatency_retrans_us);

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16)) {
		nb_statelocked->latency_variance_retrans_us =
				((__u64) newlatency_retrans_us) *
				newlatency_retrans_us;
		atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
				nb_statelocked->latency_variance_retrans_us));
	} else if (pinglatency_retrans_ns > oldlatency_retrans_us *
			((__s64) 1000)) {
		__s64 newdiff = div_s64(pinglatency_retrans_ns -
				oldlatency_retrans_us * ((__s64) 1000), 1000);
		__u32 newdiff32 = (__u32) (unlikely(newdiff >= U32_MAX) ?
				U32_MAX : newdiff);
		__u64 newvar = ((__u64) newdiff32) * newdiff32;

		__u64 oldval = nb_statelocked->latency_variance_retrans_us;

		if (unlikely(unlikely(newvar > (1LL << 55)) || unlikely(
				oldval > (1LL << 55)))) {
			/* divide first to avoid overflow */
			nb_statelocked->latency_variance_retrans_us =
					(oldval / 16) * 15 + newvar/16;
		} else {
			nb_statelocked->latency_variance_retrans_us =
					(oldval * 15 + newvar) / 16;
		}

		atomic_set(&(nb_statelocked->latency_stddev_retrans_us), sqrt(
				nb_statelocked->latency_variance_retrans_us));
	}

	atomic_set(&(nb_statelocked->latency_advertised_us),
			cor_calc_newlatency(nb_statelocked,
			atomic_read(&(nb_statelocked->latency_advertised_us)),
			pinglatency_advertised_ns));

	nb_statelocked->last_roundtrip_end = now;
}

static void cor_connid_used_pingsuccess(struct cor_neighbor *nb);

void cor_ping_resp(struct cor_neighbor *nb, __u32 cookie, __u32 respdelay)
{
	unsigned long iflags;

	struct cor_ping_cookie *c;
	int i;

	int stalledresume = 0;

	int call_connidreuse = 0;
	int call_send_rcvmtu = 0;

	if (unlikely(cookie == 0))
		return;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	c = cor_find_cookie(nb, cookie);

	if (unlikely(c == 0))
		goto out;

	atomic_set(&(nb->sessionid_snd_needed), 0);

	call_connidreuse = ktime_before_eq(nb->last_roundtrip_end,
			c->time_created);

	cor_update_nb_latency(nb, c, respdelay);

	nb->ping_success++;

	cor_reset_cookie(nb, c);

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 && ktime_before(
				nb->cookies[i].time_created, c->time_created)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
				cor_reset_cookie(nb, &(nb->cookies[i]));
			}
		}
	}

	if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
			nb->state == NEIGHBOR_STATE_STALLED)) {
		call_connidreuse = 0;

		if ((nb->state == NEIGHBOR_STATE_INITIAL &&
				nb->ping_success >= PING_SUCCESS_CNT_INIT) || (
				nb->state == NEIGHBOR_STATE_STALLED &&
				nb->ping_success >= PING_SUCCESS_CNT_STALLED)) {
			stalledresume = (nb->state == NEIGHBOR_STATE_STALLED);
			nb->state = NEIGHBOR_STATE_ACTIVE;
			/* printk(KERN_ERR "changed to active\n"); */
		}
	}

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) ||
			nb->state == NEIGHBOR_STATE_STALLED)
		nb->state_time.last_roundtrip = c->jiffies_sent;

	if (c == &(nb->cookies[0]) &&
			unlikely(nb->rcvmtu_allowed_countdown != 0)) {
		nb->rcvmtu_allowed_countdown--;

		if (unlikely(nb->rcvmtu_allowed_countdown == 0 &&
				nb->rcvmtu_delayed_send_needed != 0)) {
			nb->rcvmtu_allowed_countdown = 3;
			nb->rcvmtu_delayed_send_needed = 0;
			call_send_rcvmtu = 1;
		}
	}

out:
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (call_connidreuse)
		cor_connid_used_pingsuccess(nb);

	if (unlikely(call_send_rcvmtu))
		cor_send_rcvmtu(nb);

	if (unlikely(stalledresume)) {
		spin_lock_bh(&(nb->retrans_conn_lock));
		cor_reschedule_conn_retrans_timer(nb);
		spin_unlock_bh(&(nb->retrans_conn_lock));

		cor_qos_enqueue(nb->queue, &(nb->rb), 0, ns_to_ktime(0),
				QOS_CALLER_NEIGHBOR, 1);
	}
}

__u32 cor_add_ping_req(struct cor_neighbor *nb, unsigned long *last_ping_time)
{
	unsigned long iflags;
	struct cor_ping_cookie *c;
	__u32 i;

	__u32 cookie;

	ktime_t now = ktime_get();

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (nb->cookie_unsent != 0) {
		c = cor_find_cookie(nb, nb->cookie_unsent);
		if (c != 0)
			goto found;
		nb->cookie_unsent = 0;
	}

	c = cor_find_cookie(nb, 0);
	if (c == 0) {
		get_random_bytes((char *) &i, sizeof(i));
		i = (i % PING_COOKIES_PER_NEIGH);
		c = &(nb->cookies[i]);
		cor_reset_cookie(nb, c);
	}

	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;
	c->time_created = now;

found:
	cookie = c->cookie;
	c->pongs = 0;
	c->time_sent = now;
	c->jiffies_sent = jiffies;
	nb->ping_intransit++;

	*last_ping_time = nb->last_ping_time;
	nb->last_ping_time = c->jiffies_sent;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return cookie;
}

void cor_ping_sent(struct cor_neighbor *nb, __u32 cookie)
{
	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (nb->cookie_unsent == cookie)
		nb->cookie_unsent = 0;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}

void cor_unadd_ping_req(struct cor_neighbor *nb, __u32 cookie,
		unsigned long last_ping_time, int congested)
{
	unsigned long iflags;

	struct cor_ping_cookie *c;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (congested) {
		BUG_ON(nb->cookie_unsent != 0 && nb->cookie_unsent != cookie);
		nb->cookie_unsent = cookie;
	}

	c = cor_find_cookie(nb, cookie);
	if (likely(c != 0)) {
		if (congested == 0)
			c->cookie = 0;
		nb->ping_intransit--;
	}

	nb->last_ping_time = last_ping_time;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}

static int cor_get_ping_forcetime_ms(struct cor_neighbor *nb)
{
	unsigned long iflags;
	int fast;
	int idle;

	if (unlikely(cor_get_neigh_state(nb) != NEIGHBOR_STATE_ACTIVE))
		return PING_FORCETIME_MS;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	fast = ((nb->ping_success < PING_ACTIVE_FASTINITIAL_COUNT) ||
			(nb->ping_intransit > 0));
	if (unlikely(nb->rcvmtu_delayed_send_needed != 0)) {
		BUG_ON(nb->rcvmtu_allowed_countdown == 0);
		fast = 1;
	}
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (fast)
		return PING_FORCETIME_ACTIVE_FAST_MS;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	idle = list_empty(&(nb->snd_conn_idle_list)) &&
			list_empty(&(nb->snd_conn_busy_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

	if (idle)
		return PING_FORCETIME_ACTIVEIDLE_MS;
	else
		return PING_FORCETIME_ACTIVE_MS;
}
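
/*
 * Ping scheduling uses two values: "mindelay" (send no ping before this,
 * derived from the advertised latency and the remote's maximum pong
 * delay) and "forcetime" (send a ping at the latest after this,
 * depending on state and idleness). cor_time_to_send_ping() keeps
 * forcetime at no less than 3 * mindelay.
 */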

static __u32 cor_get_ping_mindelay_ms(struct cor_neighbor *nb_statelocked)
{
	__u32 latency_us = ((__u32) atomic_read(
			&(nb_statelocked->latency_advertised_us)));
	__u32 max_remote_pong_delay_us = ((__u32) atomic_read(
			&(nb_statelocked->max_remote_pong_delay_us)));
	__u32 mindelay_ms;

	if (latency_us < PING_GUESSLATENCY_MS * 1000)
		latency_us = PING_GUESSLATENCY_MS * 1000;

	if (unlikely(nb_statelocked->state != NEIGHBOR_STATE_ACTIVE))
		mindelay_ms = latency_us/1000;
	else
		mindelay_ms = ((latency_us/2 +
				max_remote_pong_delay_us/2)/500);

	if (likely(nb_statelocked->ping_intransit < PING_COOKIES_THROTTLESTART))
		return mindelay_ms;

	/* throttle pings when many are already in transit */
	mindelay_ms = mindelay_ms * (1 + 9 * (nb_statelocked->ping_intransit *
			nb_statelocked->ping_intransit /
			(PING_COOKIES_PER_NEIGH * PING_COOKIES_PER_NEIGH)));

	return mindelay_ms;
}

/*
 * Check whether we want to send a ping now:
 * 0... Do not send ping.
 * 1... Send ping now, but only if it can be merged with other messages. This
 *      can happen way before the time requested by cor_get_next_ping_time().
 * 2... Send ping now, even if a packet has to be created just for the ping
 *      alone.
 */
int cor_time_to_send_ping(struct cor_neighbor *nb)
{
	unsigned long iflags;
	int rc = TIMETOSENDPING_YES;

	__u32 ms_since_last_ping;

	__u32 forcetime = cor_get_ping_forcetime_ms(nb);
	__u32 mindelay;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	ms_since_last_ping = jiffies_to_msecs(jiffies - nb->last_ping_time);

	mindelay = cor_get_ping_mindelay_ms(nb);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;
	else if (forcetime > (mindelay * 3))
		mindelay = forcetime/3;

	if (ms_since_last_ping < mindelay || ms_since_last_ping < (forcetime/4))
		rc = TIMETOSENDPING_NO;
	else if (ms_since_last_ping >= forcetime)
		rc = TIMETOSENDPING_FORCE;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	return rc;
}

unsigned long cor_get_next_ping_time(struct cor_neighbor *nb)
{
	unsigned long iflags;
	__u32 mindelay;

	__u32 forcetime = cor_get_ping_forcetime_ms(nb);

	spin_lock_irqsave(&(nb->state_lock), iflags);
	mindelay = cor_get_ping_mindelay_ms(nb);
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;

	return nb->last_ping_time + msecs_to_jiffies(forcetime);
}
*nb_dd
)
1071 struct cor_neighbor
*nb
;
1072 struct list_head
*currlh
;
1074 nb
= cor_alloc_neighbor(GFP_KERNEL
);
1075 if (unlikely(nb
== 0))
1078 nb
->queue
= cor_get_queue(nb_dd
->dev
);
1079 if (nb
->queue
== 0) {
1080 kmem_cache_free(cor_nb_slab
, nb
);
1081 atomic_dec(&cor_num_neighs
);
1085 dev_hold(nb_dd
->dev
);
1086 nb
->dev
= nb_dd
->dev
;
1088 memcpy(nb
->mac
, nb_dd
->mac
, MAX_ADDR_LEN
);
1090 nb
->has_addr
= nb_dd
->has_addr
;
1091 nb
->addr
= nb_dd
->addr
;
1093 nb_dd
->nb_allocated
= 1;
1095 spin_lock_bh(&cor_neighbor_list_lock
);
1097 BUG_ON((nb
->has_addr
== 0) && (nb
->addr
!= 0));
1099 if (cor_is_clientmode() && nb
->has_addr
== 0)
1100 goto already_present
;
1102 currlh
= cor_nb_list
.next
;
1103 while (currlh
!= &cor_nb_list
) {
1104 struct cor_neighbor
*curr
= container_of(currlh
,
1105 struct cor_neighbor
, nb_list
);
1107 BUG_ON((curr
->has_addr
== 0) && (curr
->addr
!= 0));
1109 if (curr
->dev
== nb
->dev
&&
1110 memcmp(curr
->mac
, nb
->mac
, MAX_ADDR_LEN
) == 0)
1111 goto already_present
;
1113 if (curr
->has_addr
!= 0 && curr
->addr
== nb
->addr
)
1114 goto already_present
;
1116 currlh
= currlh
->next
;
1119 /* printk(KERN_ERR "add_neigh\n"); */
1121 spin_lock_bh(&cor_local_addr_lock
);
1122 nb
->sessionid
= cor_local_addr_sessionid
^ nb_dd
->sessionid
;
1123 spin_unlock_bh(&cor_local_addr_lock
);
1125 timer_setup(&(nb
->retrans_timer
), cor_retransmit_timerfunc
, 0);
1127 timer_setup(&(nb
->retrans_conn_timer
), cor_retransmit_conn_timerfunc
, 0);
1129 spin_lock_bh(&(nb
->cmsg_lock
));
1130 nb
->last_ping_time
= jiffies
;
1131 cor_schedule_controlmsg_timer(nb
);
1132 spin_unlock_bh(&(nb
->cmsg_lock
));
1134 list_add_tail(&(nb
->nb_list
), &cor_nb_list
);
1136 cor_nb_kref_get(nb
, "neigh_list");
1137 cor_nb_kref_put_bug(nb
, "alloc");
1141 kmem_cache_free(cor_nb_slab
, nb
);
1142 atomic_dec(&cor_num_neighs
);
1145 spin_unlock_bh(&cor_neighbor_list_lock
);

struct cor_conn *cor_get_conn(struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct rb_node *n = 0;
	struct cor_conn *ret = 0;

	spin_lock_irqsave(&(nb->connid_lock), iflags);

	n = nb->connid_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct cor_conn *src_in_o = container_of(n, struct cor_conn,
				src.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		if (conn_id < src_in_o->src.in.conn_id)
			n = n->rb_left;
		else if (conn_id > src_in_o->src.in.conn_id)
			n = n->rb_right;
		else
			ret = src_in_o;
	}

	if (likely(ret != 0))
		cor_conn_kref_get(ret, "stack");

	spin_unlock_irqrestore(&(nb->connid_lock), iflags);

	return ret;
}

int cor_insert_connid(struct cor_neighbor *nb, struct cor_conn *src_in_ll)
{
	int rc = 0;

	unsigned long iflags;

	__u32 conn_id = src_in_ll->src.in.conn_id;

	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);

	spin_lock_irqsave(&(nb->connid_lock), iflags);

	root = &(nb->connid_rb);
	p = &(root->rb_node);

	while ((*p) != 0) {
		struct cor_conn *src_in_o = container_of(*p, struct cor_conn,
				src.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		parent = *p;
		if (unlikely(conn_id == src_in_o->src.in.conn_id)) {
			goto duplicate;
		} else if (conn_id < src_in_o->src.in.conn_id) {
			p = &(*p)->rb_left;
		} else if (conn_id > src_in_o->src.in.conn_id) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	cor_conn_kref_get(src_in_ll, "connid table");
	rb_link_node(&(src_in_ll->src.in.rbn), parent, p);
	rb_insert_color(&(src_in_ll->src.in.rbn), root);

	if (0) {
duplicate:
		rc = 1;
	}

	spin_unlock_irqrestore(&(nb->connid_lock), iflags);

	return rc;
}
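
/*
 * conn_id reuse quarantine: when a connection dies, its conn_id is
 * remembered for CONNID_REUSE_RTTS ping round trips before it may be
 * handed out again, so that packets still in flight for the old
 * connection cannot be misattributed to a new one. The items are kept
 * both in an rb-tree (lookup by conn_id) and in a list ordered by
 * creation time (expiry in cor_connid_used_pingsuccess()).
 */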

static struct cor_connid_reuse_item *cor_get_connid_reuseitem(
		struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct rb_node *n = 0;
	struct cor_connid_reuse_item *ret = 0;

	spin_lock_irqsave(&(nb->connid_reuse_lock), iflags);

	n = nb->connid_reuse_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct cor_connid_reuse_item *cir = container_of(n,
				struct cor_connid_reuse_item, rbn);

		BUG_ON(cir->conn_id == 0);

		if (conn_id < cir->conn_id)
			n = n->rb_left;
		else if (conn_id > cir->conn_id)
			n = n->rb_right;
		else
			ret = cir;
	}

	if (likely(ret != 0))
		kref_get(&(ret->ref));

	spin_unlock_irqrestore(&(nb->connid_reuse_lock), iflags);

	return ret;
}

/* nb->connid_reuse_lock must be held by the caller */
static void _cor_insert_connid_reuse_insertrb(struct cor_neighbor *nb,
		struct cor_connid_reuse_item *ins)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(ins->conn_id == 0);

	root = &(nb->connid_reuse_rb);
	p = &(root->rb_node);

	while ((*p) != 0) {
		struct cor_connid_reuse_item *curr = container_of(*p,
				struct cor_connid_reuse_item, rbn);

		BUG_ON(curr->conn_id == 0);

		parent = *p;
		if (unlikely(ins->conn_id == curr->conn_id)) {
			BUG();
		} else if (ins->conn_id < curr->conn_id) {
			p = &(*p)->rb_left;
		} else if (ins->conn_id > curr->conn_id) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	kref_get(&(ins->ref));
	rb_link_node(&(ins->rbn), parent, p);
	rb_insert_color(&(ins->rbn), root);
}

void cor_insert_connid_reuse(struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct cor_connid_reuse_item *cir = kmem_cache_alloc(
			cor_connid_reuse_slab, GFP_ATOMIC);

	if (unlikely(cir == 0)) {
		BUILD_BUG_ON(CONNID_REUSE_RTTS > 255);

		spin_lock_irqsave(&(nb->connid_reuse_lock), iflags);
		nb->connid_reuse_oom_countdown = CONNID_REUSE_RTTS;
		spin_unlock_irqrestore(&(nb->connid_reuse_lock), iflags);

		return;
	}

	memset(cir, 0, sizeof(struct cor_connid_reuse_item));

	kref_init(&(cir->ref));
	cir->conn_id = conn_id;

	spin_lock_irqsave(&(nb->connid_reuse_lock), iflags);

	cir->pingcnt = nb->connid_reuse_pingcnt;

	_cor_insert_connid_reuse_insertrb(nb, cir);
	list_add_tail(&(cir->lh), &(nb->connid_reuse_list));

	spin_unlock_irqrestore(&(nb->connid_reuse_lock), iflags);
}

static void cor_free_connid_reuse(struct kref *ref)
{
	struct cor_connid_reuse_item *cir = container_of(ref,
			struct cor_connid_reuse_item, ref);

	kmem_cache_free(cor_connid_reuse_slab, cir);
}

static void cor_delete_connid_reuse_items(struct cor_neighbor *nb)
{
	unsigned long iflags;
	struct cor_connid_reuse_item *cri;

	spin_lock_irqsave(&(nb->connid_reuse_lock), iflags);

	while (list_empty(&(nb->connid_reuse_list)) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct cor_connid_reuse_item, lh);

		rb_erase(&(cri->rbn), &(nb->connid_reuse_rb));
		kref_put(&(cri->ref), cor_kreffree_bug);

		list_del(&(cri->lh));
		kref_put(&(cri->ref), cor_free_connid_reuse);
	}

	spin_unlock_irqrestore(&(nb->connid_reuse_lock), iflags);
}

static void cor_connid_used_pingsuccess(struct cor_neighbor *nb)
{
	unsigned long iflags;
	struct cor_connid_reuse_item *cri;

	spin_lock_irqsave(&(nb->connid_reuse_lock), iflags);

	nb->connid_reuse_pingcnt++;
	while (list_empty(&(nb->connid_reuse_list)) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct cor_connid_reuse_item, lh);
		if ((cri->pingcnt + CONNID_REUSE_RTTS -
				nb->connid_reuse_pingcnt) < 32768)
			break; /* not expired yet (wraparound-safe check) */

		rb_erase(&(cri->rbn), &(nb->connid_reuse_rb));
		kref_put(&(cri->ref), cor_kreffree_bug);

		list_del(&(cri->lh));
		kref_put(&(cri->ref), cor_free_connid_reuse);
	}

	if (unlikely(nb->connid_reuse_oom_countdown != 0))
		nb->connid_reuse_oom_countdown--;

	spin_unlock_irqrestore(&(nb->connid_reuse_lock), iflags);
}

static int cor_connid_used(struct cor_neighbor *nb, __u32 conn_id)
{
	struct cor_conn *cn;
	struct cor_connid_reuse_item *cir;

	cn = cor_get_conn(nb, conn_id);
	if (unlikely(cn != 0)) {
		cor_conn_kref_put(cn, "stack");
		return 1;
	}

	cir = cor_get_connid_reuseitem(nb, conn_id);
	if (unlikely(cir != 0)) {
		kref_put(&(cir->ref), cor_free_connid_reuse);
		return 1;
	}

	return 0;
}
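
/*
 * conn_id values are allocated with the top bit cleared; the reverse
 * direction of the same connection presumably uses the id returned by
 * cor_get_connid_reverse() (the same value with the top bit set), so
 * one random draw covers both directions.
 */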

int cor_connid_alloc(struct cor_neighbor *nb, struct cor_conn *src_in_ll)
{
	unsigned long iflags;
	struct cor_conn *trgt_out_ll = cor_get_conn_reversedir(src_in_ll);
	__u32 conn_id;
	int i;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);
	BUG_ON(trgt_out_ll->targettype != TARGET_OUT);

	spin_lock_irqsave(&cor_connid_gen, iflags);
	for (i = 0; i < 16; i++) {
		get_random_bytes((char *) &conn_id, sizeof(conn_id));
		conn_id = (conn_id & ~(1 << 31));

		if (unlikely(conn_id == 0))
			continue;

		if (unlikely(cor_connid_used(nb, conn_id)))
			continue;

		goto found;
	}
	spin_unlock_irqrestore(&cor_connid_gen, iflags);

	return 1;

found:
	spin_unlock_irqrestore(&cor_connid_gen, iflags);

	spin_lock_irqsave(&(nb->connid_reuse_lock), iflags);
	if (unlikely(nb->connid_reuse_oom_countdown != 0)) {
		spin_unlock_irqrestore(&(nb->connid_reuse_lock), iflags);
		return 1;
	}
	spin_unlock_irqrestore(&(nb->connid_reuse_lock), iflags);

	src_in_ll->src.in.conn_id = conn_id;
	trgt_out_ll->trgt.out.conn_id = cor_get_connid_reverse(conn_id);
	if (unlikely(cor_insert_connid(nb, src_in_ll) != 0))
		return 1;

	return 0;
}

int __init cor_neighbor_init(void)
{
	cor_nb_slab = kmem_cache_create("cor_neighbor",
			sizeof(struct cor_neighbor), 8, 0, 0);
	if (unlikely(cor_nb_slab == 0))
		return -ENOMEM;

	atomic_set(&cor_num_neighs, 0);

	return 0;
}

void __exit cor_neighbor_exit2(void)
{
	BUG_ON(atomic_read(&cor_num_neighs) != 0);

	kmem_cache_destroy(cor_nb_slab);
	cor_nb_slab = 0;
}


MODULE_LICENSE("GPL");