/*
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>

#include "cor.h"
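/*
 * Global neighbor state: all known neighbors are kept in cor_nb_list,
 * protected by cor_neighbor_list_lock, and are allocated from cor_nb_slab.
 * cor_num_neighs counts live neighbors so that the MAX_NEIGHBORS limit can
 * be enforced and module unload can check that none leaked. cor_connid_gen
 * serializes connection id allocation in cor_connid_alloc().
 */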
static DEFINE_SPINLOCK(cor_neighbor_list_lock);
static LIST_HEAD(cor_nb_list);
static struct kmem_cache *cor_nb_slab;
atomic_t cor_num_neighs;

static DEFINE_SPINLOCK(cor_connid_gen);
void cor_neighbor_free(struct kref *ref)
{
	struct cor_neighbor *nb = container_of(ref, struct cor_neighbor, ref);

	WARN_ONCE(list_empty(&nb->cmsg_queue_pong) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_pong is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ack_fast) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ack_fast is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ack_slow) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ack_slow is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ackconn_urgent) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ackconn_urgent is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ackconn_lowlat) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ackconn_lowlat is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ackconn_highlat) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_ackconn_highlat is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_conndata_lowlat) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_conndata_lowlat is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_conndata_highlat) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_conndata_highlat is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_other) == 0,
			"cor_neighbor_free(): nb->cmsg_queue_other is not empty");
	WARN_ONCE(nb->pending_conn_resets_rb.rb_node != 0,
			"cor_neighbor_free(): nb->pending_conn_resets_rb is not empty");
	WARN_ONCE(nb->rb_kp.in_queue != RB_INQUEUE_FALSE,
			"cor_neighbor_free(): nb->rb_kp.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb_cr.in_queue != RB_INQUEUE_FALSE,
			"cor_neighbor_free(): nb->rb_cr.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb.in_queue != RB_INQUEUE_FALSE,
			"cor_neighbor_free(): nb->rb.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(list_empty(&nb->conns_waiting.lh) == 0,
			"cor_neighbor_free(): nb->conns_waiting.lh is not empty");
	WARN_ONCE(list_empty(&nb->conns_waiting.lh_nextpass) == 0,
			"cor_neighbor_free(): nb->conns_waiting.lh_nextpass is not empty");
	WARN_ONCE(nb->str_timer_pending != 0,
			"cor_neighbor_free(): nb->str_timer_pending is not 0");
	WARN_ONCE(nb->connid_rb.rb_node != 0,
			"cor_neighbor_free(): nb->connid_rb is not empty");
	WARN_ONCE(nb->connid_reuse_rb.rb_node != 0,
			"cor_neighbor_free(): nb->connid_reuse_rb is not empty");
	WARN_ONCE(list_empty(&nb->connid_reuse_list) == 0,
			"cor_neighbor_free(): nb->connid_reuse_list is not empty");
	WARN_ONCE(nb->kp_retransmits_rb.rb_node != 0,
			"cor_neighbor_free(): nb->kp_retransmits_rb is not empty");
	WARN_ONCE(list_empty(&nb->snd_conn_idle_list) == 0,
			"cor_neighbor_free(): nb->snd_conn_idle_list is not empty");
	WARN_ONCE(list_empty(&nb->snd_conn_busy_list) == 0,
			"cor_neighbor_free(): nb->snd_conn_busy_list is not empty");
	WARN_ONCE(list_empty(&nb->retrans_fast_list) == 0,
			"cor_neighbor_free(): nb->retrans_fast_list is not empty");
	WARN_ONCE(list_empty(&nb->retrans_slow_list) == 0,
			"cor_neighbor_free(): nb->retrans_slow_list is not empty");
	WARN_ONCE(list_empty(&nb->retrans_conn_lowlatency_list) == 0,
			"cor_neighbor_free(): nb->retrans_conn_lowlatency_list is not empty");
	WARN_ONCE(list_empty(&nb->retrans_conn_highlatency_list) == 0,
			"cor_neighbor_free(): nb->retrans_conn_highlatency_list is not empty");

	/* printk(KERN_ERR "neighbor free\n"); */
	BUG_ON(nb->nb_list.next != LIST_POISON1);
	BUG_ON(nb->nb_list.prev != LIST_POISON2);
	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;
	if (nb->queue != 0)
		kref_put(&nb->queue->ref, cor_free_qos);
	nb->queue = 0;
	kmem_cache_free(cor_nb_slab, nb);
	atomic_dec(&cor_num_neighs);
}
static void cor_stall_timer(struct work_struct *work);

static void _cor_reset_neighbor(struct work_struct *work);
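/*
 * Allocate and initialize a neighbor. Fails (returns 0) when MAX_NEIGHBORS
 * is reached or the slab allocation fails. Latency estimates start at
 * PING_GUESSLATENCY_MS until real measurements arrive, and the neighbor
 * starts out in NEIGHBOR_STATE_INITIAL.
 */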
static struct cor_neighbor *cor_alloc_neighbor(gfp_t allocflags)
{
	struct cor_neighbor *nb;
	__u64 seqno;

	if (atomic_inc_return(&cor_num_neighs) >= MAX_NEIGHBORS) {
		atomic_dec(&cor_num_neighs);
		return 0;
	}

	nb = kmem_cache_alloc(cor_nb_slab, allocflags);
	if (unlikely(nb == 0))
		return 0;

	memset(nb, 0, sizeof(struct cor_neighbor));

	kref_init(&nb->ref);
	atomic_set(&nb->sessionid_rcv_needed, 1);
	atomic_set(&nb->sessionid_snd_needed, 1);
	timer_setup(&nb->cmsg_timer, cor_controlmsg_timerfunc, 0);
	spin_lock_init(&nb->cmsg_lock);
	INIT_LIST_HEAD(&nb->cmsg_queue_pong);
	INIT_LIST_HEAD(&nb->cmsg_queue_ack_fast);
	INIT_LIST_HEAD(&nb->cmsg_queue_ack_slow);
	INIT_LIST_HEAD(&nb->cmsg_queue_ackconn_urgent);
	INIT_LIST_HEAD(&nb->cmsg_queue_ackconn_lowlat);
	INIT_LIST_HEAD(&nb->cmsg_queue_ackconn_highlat);
	INIT_LIST_HEAD(&nb->cmsg_queue_conndata_lowlat);
	INIT_LIST_HEAD(&nb->cmsg_queue_conndata_highlat);
	INIT_LIST_HEAD(&nb->cmsg_queue_other);
	atomic_set(&nb->cmsg_pongs_retrans_cnt, 0);
	atomic_set(&nb->cmsg_othercnt, 0);
	atomic_set(&nb->cmsg_bulk_readds, 0);
	atomic_set(&nb->cmsg_delay_conndata, 0);
	atomic_set(&nb->rcvmtu_sendneeded, 1);
	nb->last_ping_time = jiffies;
	atomic_set(&nb->latency_retrans_us, PING_GUESSLATENCY_MS * 1000);
	atomic_set(&nb->latency_advertised_us, PING_GUESSLATENCY_MS * 1000);
	atomic_set(&nb->max_remote_ack_fast_delay_us, 1000000);
	atomic_set(&nb->max_remote_ack_slow_delay_us, 1000000);
	atomic_set(&nb->max_remote_ackconn_lowlat_delay_us, 1000000);
	atomic_set(&nb->max_remote_ackconn_highlat_delay_us, 1000000);
	atomic_set(&nb->max_remote_pong_delay_us, 1000000);
	atomic_set(&nb->remote_rcvmtu, 128);
	spin_lock_init(&nb->conns_waiting.lock);
	INIT_LIST_HEAD(&nb->conns_waiting.lh);
	INIT_LIST_HEAD(&nb->conns_waiting.lh_nextpass);
	spin_lock_init(&nb->nbcongwin.lock);
	atomic64_set(&nb->nbcongwin.data_intransit, 0);
	atomic64_set(&nb->nbcongwin.cwin, 0);
	spin_lock_init(&nb->state_lock);
	nb->state = NEIGHBOR_STATE_INITIAL;
	nb->state_time.initial_state_since = jiffies;
	INIT_DELAYED_WORK(&nb->stalltimeout_timer, cor_stall_timer);
	spin_lock_init(&nb->connid_lock);
	spin_lock_init(&nb->connid_reuse_lock);
	INIT_LIST_HEAD(&nb->connid_reuse_list);
	get_random_bytes((char *) &seqno, sizeof(seqno));
	nb->kpacket_seqno = seqno;
	atomic64_set(&nb->priority_sum, 0);
	spin_lock_init(&nb->conn_list_lock);
	INIT_LIST_HEAD(&nb->snd_conn_idle_list);
	INIT_LIST_HEAD(&nb->snd_conn_busy_list);
	spin_lock_init(&nb->retrans_lock);
	INIT_LIST_HEAD(&nb->retrans_fast_list);
	INIT_LIST_HEAD(&nb->retrans_slow_list);
	spin_lock_init(&nb->retrans_conn_lock);
	INIT_LIST_HEAD(&nb->retrans_conn_lowlatency_list);
	INIT_LIST_HEAD(&nb->retrans_conn_highlatency_list);
	INIT_WORK(&nb->reset_neigh_work, _cor_reset_neighbor);

	return nb;
}
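/*
 * Check whether skb was received from neighbor nb: it must have arrived on
 * the same net_device and carry nb's MAC as its link layer source address.
 */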
int cor_is_from_nb(struct sk_buff *skb, struct cor_neighbor *nb)
{
	int rc;

	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	rc = (skb->dev == nb->dev && memcmp(nb->mac, source_hw,
			MAX_ADDR_LEN) == 0);
	return rc;
}
struct cor_neighbor *_cor_get_neigh_by_mac(struct net_device *dev,
		char *source_hw)
{
	struct list_head *currlh;
	struct cor_neighbor *ret = 0;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON(curr->in_nb_list == 0);

		if (curr->dev == dev && memcmp(curr->mac, source_hw,
				MAX_ADDR_LEN) == 0) {
			ret = curr;
			cor_nb_kref_get(ret, "stack");
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);

	if (ret != 0 && unlikely(cor_get_neigh_state(ret) ==
			NEIGHBOR_STATE_KILLED)) {
		cor_nb_kref_put(ret, "stack");
		return 0;
	}

	return ret;
}
struct cor_neighbor *cor_get_neigh_by_mac(struct sk_buff *skb)
{
	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	return _cor_get_neigh_by_mac(skb->dev, source_hw);
}
struct cor_neighbor *cor_find_neigh(__be64 addr)
{
	struct list_head *currlh;
	struct cor_neighbor *ret = 0;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON(curr->in_nb_list == 0);

		BUG_ON((curr->has_addr == 0) && (curr->addr != 0));
		if (curr->has_addr != 0 && curr->addr == addr) {
			ret = curr;
			cor_nb_kref_get(ret, "stack");
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);

	if (ret != 0 && unlikely(cor_get_neigh_state(ret) ==
			NEIGHBOR_STATE_KILLED)) {
		cor_nb_kref_put(ret, "stack");
		return 0;
	}

	return ret;
}
void cor_resend_rcvmtu(struct net_device *dev)
{
	struct list_head *currlh;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *nb = container_of(currlh,
				struct cor_neighbor, nb_list);

		unsigned long iflags;

		if (nb->dev != dev)
			goto cont;

		cor_send_rcvmtu(nb);

		spin_lock_irqsave(&nb->state_lock, iflags);

		if (nb->rcvmtu_allowed_countdown != 0)
			nb->rcvmtu_delayed_send_needed = 1;
		nb->rcvmtu_allowed_countdown = 3;

		spin_unlock_irqrestore(&nb->state_lock, iflags);

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);
}
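/*
 * Serialize the list of active neighbors into buf: a row count, a header
 * describing the two per-row fields (LIST_NEIGH_FIELD_ADDR, 8 bytes, and
 * LIST_NEIGH_FIELD_LATENCY, 1 byte, log-encoded), then one row per active
 * neighbor that has an address. Returns the number of bytes written.
 */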
__u32 cor_generate_neigh_list(char *buf, __u32 buflen)
{
	struct list_head *currlh;

	__u32 cnt = 0;

	__u32 buf_offset = 4;

	int rc;

	/*
	 * The variable length header rowcount needs to be generated after
	 * the data. This is done by reserving the maximum space it could
	 * take. If it ends up being smaller, the data is moved so that
	 * there is no gap.
	 */

	BUG_ON(buflen < buf_offset);
	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 2);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	BUG_ON(buflen < buf_offset + 2);
	cor_put_u16(buf + buf_offset, LIST_NEIGH_FIELD_ADDR);
	buf_offset += 2;

	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 8);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	BUG_ON(buflen < buf_offset + 2);
	cor_put_u16(buf + buf_offset, LIST_NEIGH_FIELD_LATENCY);
	buf_offset += 2;

	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 1);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);
		int state;

		BUG_ON(curr->in_nb_list == 0);

		state = cor_get_neigh_state(curr);
		if (state != NEIGHBOR_STATE_ACTIVE)
			goto cont;

		BUG_ON((curr->has_addr == 0) && (curr->addr != 0));
		if (curr->has_addr == 0)
			goto cont;

		if (unlikely(buflen < buf_offset + 8 + 1))
			break;

		cor_put_be64(buf + buf_offset, curr->addr);
		buf_offset += 8;

		buf[buf_offset] = cor_enc_log_64_11(atomic_read(
				&curr->latency_advertised_us));
		buf_offset += 1;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);

	rc = cor_encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);
	BUG_ON(rc > 4);

	if (((__u32) rc) < 4)
		memmove(buf + ((__u32) rc), buf + 4, buf_offset - 4);

	return buf_offset - 4 + ((__u32) rc);
}
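/*
 * Tear down all conns routed through this neighbor: pick conns off
 * snd_conn_busy_list/snd_conn_idle_list, send a reset to the peer and reset
 * the local conn state. If sending a reset fails, stop and schedule the
 * stalltimeout timer to retry later.
 */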
static void cor_reset_all_conns(struct cor_neighbor *nb)
{
	while (1) {
		unsigned long iflags;
		struct cor_conn *trgt_out;
		struct cor_conn *src_in;
		struct cor_conn_bidir *cnb;
		int rc;

		spin_lock_irqsave(&nb->conn_list_lock, iflags);

		if (!list_empty(&nb->snd_conn_busy_list)) {
			trgt_out = container_of(nb->snd_conn_busy_list.next,
					struct cor_conn, trgt.out.nb_list);
		} else if (!list_empty(&nb->snd_conn_idle_list)) {
			trgt_out = container_of(nb->snd_conn_idle_list.next,
					struct cor_conn, trgt.out.nb_list);
		} else {
			spin_unlock_irqrestore(&nb->conn_list_lock, iflags);
			break;
		}

		cor_conn_kref_get(trgt_out, "stack");

		spin_unlock_irqrestore(&nb->conn_list_lock, iflags);

		src_in = cor_get_conn_reversedir(trgt_out);
		cnb = cor_get_conn_bidir(trgt_out);

		spin_lock_bh(&cnb->cli.rcv_lock);
		spin_lock_bh(&cnb->srv.rcv_lock);

		if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
				unlikely(src_in->src.in.nb != nb))) {
			spin_unlock_bh(&cnb->srv.rcv_lock);
			spin_unlock_bh(&cnb->cli.rcv_lock);
			cor_conn_kref_put(src_in, "stack");
			continue;
		}

		rc = cor_send_reset_conn(nb, trgt_out->trgt.out.conn_id, 1);

		if (unlikely(rc != 0))
			goto unlock;

		if (trgt_out->isreset == 0)
			trgt_out->isreset = 1;

unlock:
		spin_unlock_bh(&cnb->srv.rcv_lock);
		spin_unlock_bh(&cnb->cli.rcv_lock);

		if (likely(rc == 0)) {
			cor_reset_conn(src_in);
			cor_conn_kref_put(src_in, "stack");
		} else {
			cor_conn_kref_put(src_in, "stack");
			cor_nb_kref_get(nb, "stalltimeout_timer");
			schedule_delayed_work(&nb->stalltimeout_timer, HZ);
			break;
		}
	}
}
static void cor_delete_connid_reuse_items(struct cor_neighbor *nb);
static void _cor_reset_neighbor(struct work_struct *work)
{
	struct cor_neighbor *nb = container_of(work, struct cor_neighbor,
			reset_neigh_work);

	cor_reset_all_conns(nb);
	cor_delete_connid_reuse_items(nb);

	spin_lock_bh(&cor_neighbor_list_lock);
	if (nb->in_nb_list != 0) {
		list_del(&nb->nb_list);
		nb->in_nb_list = 0;
		cor_nb_kref_put_bug(nb, "neigh_list");
	}
	spin_unlock_bh(&cor_neighbor_list_lock);

	cor_nb_kref_put(nb, "reset_neigh_work");
}
static void cor_reset_neighbor(struct cor_neighbor *nb, int use_workqueue)
{
	unsigned long iflags;

	spin_lock_irqsave(&nb->state_lock, iflags);
	/* if (nb->state != NEIGHBOR_STATE_KILLED) {
		printk(KERN_ERR "cor_reset_neighbor\n");
	} */
	nb->state = NEIGHBOR_STATE_KILLED;
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (use_workqueue != 0) {
		schedule_work(&nb->reset_neigh_work);
		cor_nb_kref_get(nb, "reset_neigh_work");
	} else {
		int put_neigh_list = 0;

		cor_reset_all_conns(nb);
		cor_delete_connid_reuse_items(nb);

		spin_lock_bh(&cor_neighbor_list_lock);
		if (nb->in_nb_list != 0) {
			list_del(&nb->nb_list);
			nb->in_nb_list = 0;
			put_neigh_list = 1;
		}
		spin_unlock_bh(&cor_neighbor_list_lock);

		if (put_neigh_list != 0)
			cor_nb_kref_put(nb, "neigh_list");
	}
}
void cor_reset_neighbors(struct net_device *dev)
{
	struct list_head *currlh;

restart:
	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		unsigned long iflags;
		int state;
		struct cor_neighbor *currnb = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON(currnb->in_nb_list == 0);

		if (dev != 0 && currnb->dev != dev)
			goto cont;

		spin_lock_irqsave(&currnb->state_lock, iflags);
		state = currnb->state;
		spin_unlock_irqrestore(&currnb->state_lock, iflags);

		if (state != NEIGHBOR_STATE_KILLED) {
			spin_unlock_bh(&cor_neighbor_list_lock);
			cor_reset_neighbor(currnb, 0);
			goto restart;
		}

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);
}
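/*
 * Delayed work for stalled neighbors: once a neighbor has been in
 * NEIGHBOR_STATE_STALLED for NB_KILL_TIME_MS, measured from the last
 * successful roundtrip, it is killed via cor_reset_neighbor(). Until then
 * the work reschedules itself for the remaining time.
 */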
static void cor_stall_timer(struct work_struct *work)
{
	struct cor_neighbor *nb = container_of(to_delayed_work(work),
			struct cor_neighbor, stalltimeout_timer);

	__u32 stall_time_ms;
	int nbstate;

	unsigned long iflags;

	spin_lock_irqsave(&nb->state_lock, iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (nbstate == NEIGHBOR_STATE_STALLED) {
		stall_time_ms = jiffies_to_msecs(jiffies -
				nb->state_time.last_roundtrip);

		if (stall_time_ms < NB_KILL_TIME_MS) {
			schedule_delayed_work(&nb->stalltimeout_timer,
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
			return;
		}

		cor_reset_neighbor(nb, 1);
	}

	nb->str_timer_pending = 0;
	cor_nb_kref_put(nb, "stalltimeout_timer");
}
int cor_get_neigh_state(struct cor_neighbor *nb)
{
	int ret;
	unsigned long iflags;
	__u32 stall_time_ms;

	spin_lock_irqsave(&nb->state_lock, iflags);

	stall_time_ms = jiffies_to_msecs(jiffies -
			nb->state_time.last_roundtrip);

	WARN_ONCE(likely(nb->state == NEIGHBOR_STATE_ACTIVE) && unlikely(
			jiffies_to_msecs(jiffies - nb->last_ping_time) >
			PING_FORCETIME_ACTIVEIDLE_MS * 4) &&
			nb->ping_intransit == 0,
			"We have stopped sending pings to a neighbor!?");

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(stall_time_ms > NB_STALL_TIME_MS) && (
			nb->ping_intransit >= NB_STALL_MINPINGS ||
			nb->ping_intransit >= PING_COOKIES_PER_NEIGH)) {
		nb->state = NEIGHBOR_STATE_STALLED;
		nb->ping_success = 0;
		if (nb->str_timer_pending == 0) {
			nb->str_timer_pending = 1;
			cor_nb_kref_get(nb, "stalltimeout_timer");

			schedule_delayed_work(&nb->stalltimeout_timer,
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
		}

		/* printk(KERN_ERR "changed to stalled\n"); */
		BUG_ON(nb->ping_intransit > PING_COOKIES_PER_NEIGH);
	} else if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL) &&
			time_after(jiffies, nb->state_time.initial_state_since +
			INITIAL_TIME_LIMIT_SEC * HZ)) {
		spin_unlock_irqrestore(&nb->state_lock, iflags);
		cor_reset_neighbor(nb, 1);
		return NEIGHBOR_STATE_KILLED;
	}

	ret = nb->state;

	spin_unlock_irqrestore(&nb->state_lock, iflags);

	return ret;
}
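/*
 * Ping cookies: every neighbor has a fixed array of PING_COOKIES_PER_NEIGH
 * cookies identifying pings in transit. A cookie value of 0 marks a free
 * slot, so cor_find_cookie(nb, 0) returns a free slot.
 */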
static struct cor_ping_cookie *cor_find_cookie(struct cor_neighbor *nb,
		__u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &nb->cookies[i];
	}
	return 0;
}
static void cor_reset_cookie(struct cor_neighbor *nb, struct cor_ping_cookie *c)
{
	if (c->cookie == 0)
		return;

	if (nb->cookie_unsent != c->cookie)
		nb->ping_intransit--;

	c->cookie = 0;
}
static __u32 sqrt(__u64 x)
{
	int i;
	__u64 y = 65536;

	if (unlikely(x <= 1))
		return (__u32) x;

	for (i = 0; i < 20; i++) {
		y = y / 2 + div64_u64(x / 2, y);
		if (unlikely(y == 0))
			break;
	}

	if (unlikely(y > U32_MAX))
		y = U32_MAX;

	return (__u32) y;
}
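/*
 * Latency smoothing: while the neighbor is in NEIGHBOR_STATE_INITIAL with
 * fewer than 16 successful pings, the estimate is the plain average
 *	newlatency = (oldlatency * ping_success + sample) / (ping_success + 1)
 * afterwards it is an exponentially weighted moving average
 *	newlatency = (oldlatency * 15 + sample) / 16
 * Samples are in nanoseconds, the stored estimate is in microseconds,
 * rounded and clamped to [0, U32_MAX].
 */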
static __u32 cor_calc_newlatency(struct cor_neighbor *nb_statelocked,
		__u32 oldlatency_us, __s64 newlatency_ns)
{
	__s64 oldlatency = oldlatency_us * 1000LL;
	__s64 newlatency;

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16))
		newlatency = div64_s64(
				oldlatency * nb_statelocked->ping_success +
				newlatency_ns,
				nb_statelocked->ping_success + 1);
	else
		newlatency = (oldlatency * 15 + newlatency_ns) / 16;

	newlatency = div_s64(newlatency + 500, 1000);

	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > U32_MAX))
		newlatency = U32_MAX;

	return (__u32) newlatency;
}
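/*
 * Feed one pong into the latency estimators. The retransmit latency is
 * measured from the time the ping was last sent, the advertised latency
 * from the time its cookie was created; the peer's reported processing
 * delay (respdelay, in microseconds) is subtracted from both. A variance
 * estimate with the same 15/16 weighting is kept for the retransmit
 * latency, raised only by samples above the current mean, and its square
 * root is published as latency_stddev_retrans_us.
 */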
static void cor_update_nb_latency(struct cor_neighbor *nb_statelocked,
		struct cor_ping_cookie *c, __u32 respdelay)
{
	ktime_t now = ktime_get();

	__s64 pinglatency_retrans_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_sent) - respdelay * 1000LL;
	__s64 pinglatency_advertised_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_created) - respdelay * 1000LL;

	__u32 oldlatency_retrans_us =
			atomic_read(&nb_statelocked->latency_retrans_us);

	__u32 newlatency_retrans_us = cor_calc_newlatency(nb_statelocked,
			oldlatency_retrans_us, pinglatency_retrans_ns);

	atomic_set(&nb_statelocked->latency_retrans_us, newlatency_retrans_us);

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16)) {
		nb_statelocked->latency_variance_retrans_us =
				((__u64) newlatency_retrans_us) *
				newlatency_retrans_us;
		atomic_set(&nb_statelocked->latency_stddev_retrans_us, sqrt(
				nb_statelocked->latency_variance_retrans_us));
	} else if (pinglatency_retrans_ns > oldlatency_retrans_us *
			((__s64) 1000)) {
		__s64 newdiff = div_s64(pinglatency_retrans_ns -
				oldlatency_retrans_us * ((__s64) 1000), 1000);
		__u32 newdiff32 = (__u32) (unlikely(newdiff >= U32_MAX) ?
				U32_MAX : newdiff);
		__u64 newvar = ((__u64) newdiff32) * newdiff32;

		__u64 oldval = nb_statelocked->latency_variance_retrans_us;

		if (unlikely(unlikely(newvar > (1LL << 55)) || unlikely(
				oldval > (1LL << 55)))) {
			nb_statelocked->latency_variance_retrans_us =
					(oldval / 16) * 15 + newvar / 16;
		} else {
			nb_statelocked->latency_variance_retrans_us =
					(oldval * 15 + newvar) / 16;
		}

		atomic_set(&nb_statelocked->latency_stddev_retrans_us, sqrt(
				nb_statelocked->latency_variance_retrans_us));
	}

	atomic_set(&nb_statelocked->latency_advertised_us,
			cor_calc_newlatency(nb_statelocked,
			atomic_read(&nb_statelocked->latency_advertised_us),
			pinglatency_advertised_ns));

	nb_statelocked->last_roundtrip_end = now;
}
static void cor_connid_used_pingsuccess(struct cor_neighbor *nb);
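/*
 * Process an incoming pong: feed the latency estimators, age all older
 * in-flight cookies (a ping that is overtaken by PING_PONGLIMIT newer
 * pongs is given up), drive the INITIAL/STALLED -> ACTIVE transition once
 * enough pings succeeded, and unthrottle delayed rcvmtu sends via
 * rcvmtu_allowed_countdown.
 */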
void cor_ping_resp(struct cor_neighbor *nb, __u32 cookie, __u32 respdelay)
{
	unsigned long iflags;

	struct cor_ping_cookie *c;
	int i;

	int stalledresume = 0;

	int call_connidreuse = 0;
	int call_send_rcvmtu = 0;

	if (unlikely(cookie == 0))
		return;

	spin_lock_irqsave(&nb->state_lock, iflags);

	c = cor_find_cookie(nb, cookie);

	if (unlikely(c == 0))
		goto out;

	atomic_set(&nb->sessionid_snd_needed, 0);

	call_connidreuse = ktime_before_eq(nb->last_roundtrip_end,
			c->time_created);

	cor_update_nb_latency(nb, c, respdelay);

	nb->ping_success++;

	cor_reset_cookie(nb, c);

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 && ktime_before(
				nb->cookies[i].time_created, c->time_created)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
				cor_reset_cookie(nb, &nb->cookies[i]);
			}
		}
	}

	if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
			nb->state == NEIGHBOR_STATE_STALLED)) {
		call_connidreuse = 0;

		if ((nb->state == NEIGHBOR_STATE_INITIAL &&
				nb->ping_success >= PING_SUCCESS_CNT_INIT) || (
				nb->state == NEIGHBOR_STATE_STALLED &&
				nb->ping_success >= PING_SUCCESS_CNT_STALLED)) {
			stalledresume = (nb->state == NEIGHBOR_STATE_STALLED);
			nb->state = NEIGHBOR_STATE_ACTIVE;
			/* printk(KERN_ERR "changed to active\n"); */
		}
	}

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) ||
			nb->state == NEIGHBOR_STATE_STALLED)
		nb->state_time.last_roundtrip = c->jiffies_sent;

	if (c == &nb->cookies[0] &&
			unlikely(nb->rcvmtu_allowed_countdown != 0)) {
		nb->rcvmtu_allowed_countdown--;

		if (unlikely(nb->rcvmtu_allowed_countdown == 0 &&
				nb->rcvmtu_delayed_send_needed != 0)) {
			nb->rcvmtu_allowed_countdown = 3;
			nb->rcvmtu_delayed_send_needed = 0;
			call_send_rcvmtu = 1;
		}
	}

out:
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (call_connidreuse)
		cor_connid_used_pingsuccess(nb);

	if (unlikely(call_send_rcvmtu))
		cor_send_rcvmtu(nb);

	if (unlikely(stalledresume)) {
		spin_lock_bh(&nb->retrans_conn_lock);
		cor_reschedule_conn_retrans_timer(nb);
		spin_unlock_bh(&nb->retrans_conn_lock);

		cor_qos_enqueue(nb->queue, &nb->rb, 0, ns_to_ktime(0),
				QOS_CALLER_NEIGHBOR, 1);
	}
}
__u32 cor_add_ping_req(struct cor_neighbor *nb, unsigned long *last_ping_time)
{
	unsigned long iflags;
	struct cor_ping_cookie *c;
	__u32 i;

	__u32 cookie;

	ktime_t now = ktime_get();

	spin_lock_irqsave(&nb->state_lock, iflags);

	if (nb->cookie_unsent != 0) {
		c = cor_find_cookie(nb, nb->cookie_unsent);
		if (c != 0)
			goto found;
		nb->cookie_unsent = 0;
	}

	c = cor_find_cookie(nb, 0);
	if (c == 0) {
		get_random_bytes((char *) &i, sizeof(i));
		i = (i % PING_COOKIES_PER_NEIGH);
		c = &nb->cookies[i];
		cor_reset_cookie(nb, c);
	}

	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;
	c->time_created = now;

found:
	c->pongs = 0;
	c->time_sent = now;
	c->jiffies_sent = jiffies;
	cookie = c->cookie;

	nb->ping_intransit++;

	*last_ping_time = nb->last_ping_time;
	nb->last_ping_time = c->jiffies_sent;

	spin_unlock_irqrestore(&nb->state_lock, iflags);

	return cookie;
}
void cor_ping_sent(struct cor_neighbor *nb, __u32 cookie)
{
	unsigned long iflags;

	spin_lock_irqsave(&nb->state_lock, iflags);

	if (nb->cookie_unsent == cookie)
		nb->cookie_unsent = 0;

	spin_unlock_irqrestore(&nb->state_lock, iflags);
}
void cor_unadd_ping_req(struct cor_neighbor *nb, __u32 cookie,
		unsigned long last_ping_time, int congested)
{
	unsigned long iflags;

	struct cor_ping_cookie *c;

	spin_lock_irqsave(&nb->state_lock, iflags);

	if (congested) {
		BUG_ON(nb->cookie_unsent != 0 && nb->cookie_unsent != cookie);
		nb->cookie_unsent = cookie;
	}

	c = cor_find_cookie(nb, cookie);
	if (likely(c != 0)) {
		if (congested == 0)
			c->cookie = 0;
		nb->ping_intransit--;
	}

	nb->last_ping_time = last_ping_time;

	spin_unlock_irqrestore(&nb->state_lock, iflags);
}
static int cor_get_ping_forcetime_ms(struct cor_neighbor *nb)
{
	unsigned long iflags;
	int fast;
	int idle;

	if (unlikely(cor_get_neigh_state(nb) != NEIGHBOR_STATE_ACTIVE))
		return PING_FORCETIME_MS;

	spin_lock_irqsave(&nb->state_lock, iflags);
	fast = ((nb->ping_success < PING_ACTIVE_FASTINITIAL_COUNT) ||
			(nb->ping_intransit > 0));
	if (unlikely(nb->rcvmtu_delayed_send_needed != 0)) {
		BUG_ON(nb->rcvmtu_allowed_countdown == 0);
		fast = 1;
	}
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (fast)
		return PING_FORCETIME_ACTIVE_FAST_MS;

	spin_lock_irqsave(&nb->conn_list_lock, iflags);
	idle = list_empty(&nb->snd_conn_idle_list) &&
			list_empty(&nb->snd_conn_busy_list);
	spin_unlock_irqrestore(&nb->conn_list_lock, iflags);

	if (idle)
		return PING_FORCETIME_ACTIVEIDLE_MS;
	else
		return PING_FORCETIME_ACTIVE_MS;
}
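/*
 * Minimum delay between two pings. Outside of NEIGHBOR_STATE_ACTIVE it is
 * one advertised roundtrip; otherwise it is the sum of the advertised
 * latency and the remote's maximum pong delay, converted to ms. Once
 * PING_COOKIES_THROTTLESTART pings are in transit, the delay is scaled up
 * by up to a factor of 10 as the cookie array fills.
 */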
static __u32 cor_get_ping_mindelay_ms(struct cor_neighbor *nb_statelocked)
{
	__u32 latency_us = ((__u32) atomic_read(
			&nb_statelocked->latency_advertised_us));
	__u32 max_remote_pong_delay_us = ((__u32) atomic_read(
			&nb_statelocked->max_remote_pong_delay_us));
	__u32 mindelay_ms;

	if (latency_us < PING_GUESSLATENCY_MS * 1000)
		latency_us = PING_GUESSLATENCY_MS * 1000;

	if (unlikely(nb_statelocked->state != NEIGHBOR_STATE_ACTIVE))
		mindelay_ms = latency_us / 1000;
	else
		mindelay_ms = ((latency_us / 2 +
				max_remote_pong_delay_us / 2) / 500);

	if (likely(nb_statelocked->ping_intransit < PING_COOKIES_THROTTLESTART))
		return mindelay_ms;

	mindelay_ms = mindelay_ms * (1 + 9 * (nb_statelocked->ping_intransit *
			nb_statelocked->ping_intransit /
			(PING_COOKIES_PER_NEIGH * PING_COOKIES_PER_NEIGH)));

	return mindelay_ms;
}
/*
 * Check whether we want to send a ping now:
 * 0... Do not send ping.
 * 1... Send ping now, but only if it can be merged with other messages. This
 *      can happen way before the time requested by cor_get_next_ping_time().
 * 2... Send ping now, even if a packet has to be created just for the ping
 *      alone.
 */
int cor_time_to_send_ping(struct cor_neighbor *nb)
{
	unsigned long iflags;
	int rc = TIMETOSENDPING_YES;

	__u32 ms_since_last_ping;

	__u32 forcetime = cor_get_ping_forcetime_ms(nb);
	__u32 mindelay;

	spin_lock_irqsave(&nb->state_lock, iflags);

	ms_since_last_ping = jiffies_to_msecs(jiffies - nb->last_ping_time);

	mindelay = cor_get_ping_mindelay_ms(nb);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;
	else if (forcetime > (mindelay * 3))
		mindelay = forcetime / 3;

	if (ms_since_last_ping < mindelay ||
			ms_since_last_ping < (forcetime / 4))
		rc = TIMETOSENDPING_NO;
	else if (ms_since_last_ping >= forcetime)
		rc = TIMETOSENDPING_FORCE;

	spin_unlock_irqrestore(&nb->state_lock, iflags);

	return rc;
}
unsigned long cor_get_next_ping_time(struct cor_neighbor *nb)
{
	unsigned long iflags;

	__u32 forcetime = cor_get_ping_forcetime_ms(nb);
	__u32 mindelay;

	spin_lock_irqsave(&nb->state_lock, iflags);
	mindelay = cor_get_ping_mindelay_ms(nb);
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;

	return nb->last_ping_time + msecs_to_jiffies(forcetime);
}
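/*
 * Create a neighbor from discovery data and add it to cor_nb_list, unless
 * a neighbor with the same device and MAC, or with the same address,
 * already exists (or clientmode is active and the peer has no address).
 * The new neighbor holds references on its qos queue and its net_device.
 */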
void cor_add_neighbor(struct cor_neighbor_discdata *nb_dd)
{
	struct cor_neighbor *nb;
	struct list_head *currlh;

	nb = cor_alloc_neighbor(GFP_KERNEL);
	if (unlikely(nb == 0))
		return;

	nb->queue = cor_get_queue(nb_dd->dev);
	if (nb->queue == 0) {
		kmem_cache_free(cor_nb_slab, nb);
		atomic_dec(&cor_num_neighs);
		return;
	}

	dev_hold(nb_dd->dev);
	nb->dev = nb_dd->dev;

	memcpy(nb->mac, nb_dd->mac, MAX_ADDR_LEN);

	nb->has_addr = nb_dd->has_addr;
	nb->addr = nb_dd->addr;

	nb_dd->nb_allocated = 1;

	spin_lock_bh(&cor_neighbor_list_lock);

	BUG_ON((nb->has_addr == 0) && (nb->addr != 0));

	if (cor_is_clientmode() && nb->has_addr == 0)
		goto already_present;

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON((curr->has_addr == 0) && (curr->addr != 0));

		if (curr->dev == nb->dev &&
				memcmp(curr->mac, nb->mac, MAX_ADDR_LEN) == 0)
			goto already_present;

		if (curr->has_addr != 0 && curr->addr == nb->addr)
			goto already_present;

		currlh = currlh->next;
	}

	/* printk(KERN_ERR "add_neigh\n"); */

	spin_lock_bh(&cor_local_addr_lock);
	nb->sessionid = cor_local_addr_sessionid ^ nb_dd->sessionid;
	spin_unlock_bh(&cor_local_addr_lock);

	timer_setup(&nb->retrans_timer, cor_retransmit_timerfunc, 0);

	timer_setup(&nb->retrans_conn_timer, cor_retransmit_conn_timerfunc, 0);

	spin_lock_bh(&nb->cmsg_lock);
	nb->last_ping_time = jiffies;
	cor_schedule_controlmsg_timer(nb);
	spin_unlock_bh(&nb->cmsg_lock);

	list_add_tail(&nb->nb_list, &cor_nb_list);
	nb->in_nb_list = 1;
	cor_nb_kref_get(nb, "neigh_list");
	cor_nb_kref_put_bug(nb, "alloc");

	if (0) {
already_present:
		kmem_cache_free(cor_nb_slab, nb);
		atomic_dec(&cor_num_neighs);
	}

	spin_unlock_bh(&cor_neighbor_list_lock);
}
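/*
 * Look up the incoming conn with the given connection id in the
 * per-neighbor rb-tree nb->connid_rb. Returns the conn with a "stack"
 * reference held, or 0 if the id is unknown.
 */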
struct cor_conn *cor_get_conn(struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct rb_node *n = 0;
	struct cor_conn *ret = 0;

	spin_lock_irqsave(&nb->connid_lock, iflags);

	n = nb->connid_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct cor_conn *src_in_o = container_of(n, struct cor_conn,
				src.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		if (conn_id < src_in_o->src.in.conn_id)
			n = n->rb_left;
		else if (conn_id > src_in_o->src.in.conn_id)
			n = n->rb_right;
		else
			ret = src_in_o;
	}

	if (ret != 0)
		cor_conn_kref_get(ret, "stack");

	spin_unlock_irqrestore(&nb->connid_lock, iflags);

	return ret;
}
int cor_insert_connid(struct cor_neighbor *nb, struct cor_conn *src_in_ll)
{
	int rc = 0;

	unsigned long iflags;

	__u32 conn_id = src_in_ll->src.in.conn_id;

	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);

	spin_lock_irqsave(&nb->connid_lock, iflags);

	root = &nb->connid_rb;
	p = &root->rb_node;

	while ((*p) != 0) {
		struct cor_conn *src_in_o = container_of(*p, struct cor_conn,
				src.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		parent = *p;
		if (unlikely(conn_id == src_in_o->src.in.conn_id)) {
			goto duplicate;
		} else if (conn_id < src_in_o->src.in.conn_id) {
			p = &(*p)->rb_left;
		} else if (conn_id > src_in_o->src.in.conn_id) {
			p = &(*p)->rb_right;
		}
	}

	cor_conn_kref_get(src_in_ll, "connid table");
	rb_link_node(&src_in_ll->src.in.rbn, parent, p);
	rb_insert_color(&src_in_ll->src.in.rbn, root);

	if (0) {
duplicate:
		rc = 1;
	}

	spin_unlock_irqrestore(&nb->connid_lock, iflags);

	return rc;
}
static struct cor_connid_reuse_item *cor_get_connid_reuseitem(
		struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct rb_node *n = 0;
	struct cor_connid_reuse_item *ret = 0;

	spin_lock_irqsave(&nb->connid_reuse_lock, iflags);

	n = nb->connid_reuse_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct cor_connid_reuse_item *cir = container_of(n,
				struct cor_connid_reuse_item, rbn);

		BUG_ON(cir->conn_id == 0);

		if (conn_id < cir->conn_id)
			n = n->rb_left;
		else if (conn_id > cir->conn_id)
			n = n->rb_right;
		else
			ret = cir;
	}

	if (ret != 0)
		kref_get(&ret->ref);

	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);

	return ret;
}
/* nb->connid_reuse_lock must be held by the caller */
static void _cor_insert_connid_reuse_insertrb(struct cor_neighbor *nb,
		struct cor_connid_reuse_item *ins)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(ins->conn_id == 0);

	root = &nb->connid_reuse_rb;
	p = &root->rb_node;

	while ((*p) != 0) {
		struct cor_connid_reuse_item *curr = container_of(*p,
				struct cor_connid_reuse_item, rbn);

		BUG_ON(curr->conn_id == 0);

		parent = *p;
		if (unlikely(ins->conn_id == curr->conn_id)) {
			BUG();
		} else if (ins->conn_id < curr->conn_id) {
			p = &(*p)->rb_left;
		} else if (ins->conn_id > curr->conn_id) {
			p = &(*p)->rb_right;
		}
	}

	kref_get(&ins->ref);
	rb_link_node(&ins->rbn, parent, p);
	rb_insert_color(&ins->rbn, root);
}
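/*
 * Connection id reuse quarantine: when a conn dies, its id is kept in
 * nb->connid_reuse_rb/connid_reuse_list for CONNID_REUSE_RTTS successful
 * ping rounds before it may be handed out again, so that late packets for
 * the old conn cannot be taken for a new one. If the tracking item cannot
 * be allocated, connid_reuse_oom_countdown blocks all connection id
 * allocation for the same period instead.
 */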
void cor_insert_connid_reuse(struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct cor_connid_reuse_item *cir = kmem_cache_alloc(
			cor_connid_reuse_slab, GFP_ATOMIC);

	if (unlikely(cir == 0)) {
		BUILD_BUG_ON(CONNID_REUSE_RTTS > 255);

		spin_lock_irqsave(&nb->connid_reuse_lock, iflags);
		nb->connid_reuse_oom_countdown = CONNID_REUSE_RTTS;
		spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);

		return;
	}

	memset(cir, 0, sizeof(struct cor_connid_reuse_item));

	kref_init(&cir->ref);
	cir->conn_id = conn_id;

	spin_lock_irqsave(&nb->connid_reuse_lock, iflags);

	cir->pingcnt = nb->connid_reuse_pingcnt;

	_cor_insert_connid_reuse_insertrb(nb, cir);
	list_add_tail(&cir->lh, &nb->connid_reuse_list);

	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);
}
static void cor_free_connid_reuse(struct kref *ref)
{
	struct cor_connid_reuse_item *cir = container_of(ref,
			struct cor_connid_reuse_item, ref);

	kmem_cache_free(cor_connid_reuse_slab, cir);
}
static void cor_delete_connid_reuse_items(struct cor_neighbor *nb)
{
	unsigned long iflags;
	struct cor_connid_reuse_item *cri;

	spin_lock_irqsave(&nb->connid_reuse_lock, iflags);

	while (list_empty(&nb->connid_reuse_list) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct cor_connid_reuse_item, lh);

		rb_erase(&cri->rbn, &nb->connid_reuse_rb);
		kref_put(&cri->ref, cor_kreffree_bug);

		list_del(&cri->lh);
		kref_put(&cri->ref, cor_free_connid_reuse);
	}

	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);
}
static void cor_connid_used_pingsuccess(struct cor_neighbor *nb)
{
	unsigned long iflags;
	struct cor_connid_reuse_item *cri;

	spin_lock_irqsave(&nb->connid_reuse_lock, iflags);

	nb->connid_reuse_pingcnt++;
	while (list_empty(&nb->connid_reuse_list) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct cor_connid_reuse_item, lh);
		if ((cri->pingcnt + CONNID_REUSE_RTTS -
				nb->connid_reuse_pingcnt) < 32768)
			break;

		rb_erase(&cri->rbn, &nb->connid_reuse_rb);
		kref_put(&cri->ref, cor_kreffree_bug);

		list_del(&cri->lh);
		kref_put(&cri->ref, cor_free_connid_reuse);
	}

	if (unlikely(nb->connid_reuse_oom_countdown != 0))
		nb->connid_reuse_oom_countdown--;

	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);
}
static int cor_connid_used(struct cor_neighbor *nb, __u32 conn_id)
{
	struct cor_conn *cn;
	struct cor_connid_reuse_item *cir;

	cn = cor_get_conn(nb, conn_id);
	if (unlikely(cn != 0)) {
		cor_conn_kref_put(cn, "stack");
		return 1;
	}

	cir = cor_get_connid_reuseitem(nb, conn_id);
	if (unlikely(cir != 0)) {
		kref_put(&cir->ref, cor_free_connid_reuse);
		return 1;
	}

	return 0;
}
int cor_connid_alloc(struct cor_neighbor *nb, struct cor_conn *src_in_ll)
{
	unsigned long iflags;
	struct cor_conn *trgt_out_ll = cor_get_conn_reversedir(src_in_ll);
	__u32 conn_id;
	int i;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);
	BUG_ON(trgt_out_ll->targettype != TARGET_OUT);

	spin_lock_irqsave(&cor_connid_gen, iflags);
	for (i = 0; i < 16; i++) {
		get_random_bytes((char *) &conn_id, sizeof(conn_id));
		conn_id = (conn_id & ~(1 << 31));

		if (unlikely(conn_id == 0))
			continue;

		if (unlikely(cor_connid_used(nb, conn_id)))
			continue;

		goto found;
	}
	spin_unlock_irqrestore(&cor_connid_gen, iflags);

	return 1;

found:
	spin_lock(&nb->connid_reuse_lock);
	if (unlikely(nb->connid_reuse_oom_countdown != 0)) {
		spin_unlock(&nb->connid_reuse_lock);
		spin_unlock_irqrestore(&cor_connid_gen, iflags);
		return 1;
	}
	spin_unlock(&nb->connid_reuse_lock);

	src_in_ll->src.in.conn_id = conn_id;
	trgt_out_ll->trgt.out.conn_id = cor_get_connid_reverse(conn_id);
	if (unlikely(cor_insert_connid(nb, src_in_ll) != 0)) {
		BUG();
	}
	spin_unlock_irqrestore(&cor_connid_gen, iflags);

	return 0;
}
int __init cor_neighbor_init(void)
{
	cor_nb_slab = kmem_cache_create("cor_neighbor",
			sizeof(struct cor_neighbor), 8, 0, 0);
	if (unlikely(cor_nb_slab == 0))
		return -ENOMEM;

	atomic_set(&cor_num_neighs, 0);

	return 0;
}
void __exit cor_neighbor_exit2(void)
{
	BUG_ON(atomic_read(&cor_num_neighs) != 0);

	kmem_cache_destroy(cor_nb_slab);
	cor_nb_slab = 0;
}
MODULE_LICENSE("GPL");