net/cor/neigh.c  (cor.git, "checkpatch fixes")
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include "cor.h"

static DEFINE_SPINLOCK(cor_neighbor_list_lock);
static LIST_HEAD(cor_nb_list);
static struct kmem_cache *cor_nb_slab;
atomic_t cor_num_neighs;

static DEFINE_SPINLOCK(cor_connid_gen);
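
/*
 * kref release function of struct cor_neighbor. When the last reference is
 * dropped, all control message queues, retransmit lists and rb trees of the
 * neighbor must already be empty; the WARN_ONCEs below only report violations
 * of that invariant before the device/queue references are dropped and the
 * object is returned to the slab.
 */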
void cor_neighbor_free(struct kref *ref)
{
	struct cor_neighbor *nb = container_of(ref, struct cor_neighbor, ref);

	WARN_ONCE(list_empty(&nb->cmsg_queue_pong) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_pong is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ack_fast) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_ack_fast is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ack_slow) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_ack_slow is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ackconn_urgent) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_ackconn_urgent is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ackconn_lowlat) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_ackconn_lowlat is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_ackconn_highlat) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_ackconn_highlat is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_conndata_lowlat) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_conndata_lowlat is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_conndata_highlat) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_conndata_highlat is not empty");
	WARN_ONCE(list_empty(&nb->cmsg_queue_other) == 0,
		"cor_neighbor_free(): nb->cmsg_queue_other is not empty");
	WARN_ONCE(nb->pending_conn_resets_rb.rb_node != 0,
		"cor_neighbor_free(): nb->pending_conn_resets_rb is not empty");
	WARN_ONCE(nb->rb_kp.in_queue != RB_INQUEUE_FALSE,
		"cor_neighbor_free(): nb->rb_kp.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb_cr.in_queue != RB_INQUEUE_FALSE,
		"cor_neighbor_free(): nb->rb_cr.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(nb->rb.in_queue != RB_INQUEUE_FALSE,
		"cor_neighbor_free(): nb->rb.in_queue is not RB_INQUEUE_FALSE");
	WARN_ONCE(list_empty(&nb->conns_waiting.lh) == 0,
		"cor_neighbor_free(): nb->conns_waiting.lh is not empty");
	WARN_ONCE(list_empty(&nb->conns_waiting.lh_nextpass) == 0,
		"cor_neighbor_free(): nb->conns_waiting.lh_nextpass is not empty");
	WARN_ONCE(nb->str_timer_pending != 0,
		"cor_neighbor_free(): nb->str_timer_pending is not 0");
	WARN_ONCE(nb->connid_rb.rb_node != 0,
65 "cor _neighbor_free(): nb->connid_rb is not empty");
	WARN_ONCE(nb->connid_reuse_rb.rb_node != 0,
		"cor_neighbor_free(): nb->connid_reuse_rb is not empty");
	WARN_ONCE(list_empty(&nb->connid_reuse_list) == 0,
		"cor_neighbor_free(): nb->connid_reuse_list is not empty");
	WARN_ONCE(nb->kp_retransmits_rb.rb_node != 0,
		"cor_neighbor_free(): nb->kp_retransmits_rb is not empty");
	WARN_ONCE(list_empty(&nb->snd_conn_idle_list) == 0,
		"cor_neighbor_free(): nb->snd_conn_idle_list is not empty");
	WARN_ONCE(list_empty(&nb->snd_conn_busy_list) == 0,
		"cor_neighbor_free(): nb->snd_conn_busy_list is not empty");
	WARN_ONCE(list_empty(&nb->retrans_fast_list) == 0,
		"cor_neighbor_free(): nb->retrans_fast_list is not empty");
	WARN_ONCE(list_empty(&nb->retrans_slow_list) == 0,
		"cor_neighbor_free(): nb->retrans_slow_list is not empty");
	WARN_ONCE(list_empty(&nb->retrans_conn_lowlatency_list) == 0,
		"cor_neighbor_free(): nb->retrans_conn_lowlatency_list is not empty");
	WARN_ONCE(list_empty(&nb->retrans_conn_highlatency_list) == 0,
		"cor_neighbor_free(): nb->retrans_conn_highlatency_list is not empty");

	/* printk(KERN_ERR "neighbor free\n"); */
	BUG_ON(nb->nb_list.next != LIST_POISON1);
	BUG_ON(nb->nb_list.prev != LIST_POISON2);
	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;
	if (nb->queue != 0)
		kref_put(&nb->queue->ref, cor_free_qos);
	nb->queue = 0;
	kmem_cache_free(cor_nb_slab, nb);
	atomic_dec(&cor_num_neighs);
}

static void cor_stall_timer(struct work_struct *work);

static void _cor_reset_neighbor(struct work_struct *work);

static struct cor_neighbor *cor_alloc_neighbor(gfp_t allocflags)
{
	struct cor_neighbor *nb;
	__u64 seqno;

	if (atomic_inc_return(&cor_num_neighs) >= MAX_NEIGHBORS) {
		atomic_dec(&cor_num_neighs);
		return 0;
	}

	nb = kmem_cache_alloc(cor_nb_slab, allocflags);
	if (unlikely(nb == 0))
		return 0;

	memset(nb, 0, sizeof(struct cor_neighbor));

	kref_init(&nb->ref);
	atomic_set(&nb->sessionid_rcv_needed, 1);
	atomic_set(&nb->sessionid_snd_needed, 1);
	timer_setup(&nb->cmsg_timer, cor_controlmsg_timerfunc, 0);
	spin_lock_init(&nb->cmsg_lock);
	INIT_LIST_HEAD(&nb->cmsg_queue_pong);
	INIT_LIST_HEAD(&nb->cmsg_queue_ack_fast);
	INIT_LIST_HEAD(&nb->cmsg_queue_ack_slow);
	INIT_LIST_HEAD(&nb->cmsg_queue_ackconn_urgent);
	INIT_LIST_HEAD(&nb->cmsg_queue_ackconn_lowlat);
	INIT_LIST_HEAD(&nb->cmsg_queue_ackconn_highlat);
	INIT_LIST_HEAD(&nb->cmsg_queue_conndata_lowlat);
	INIT_LIST_HEAD(&nb->cmsg_queue_conndata_highlat);
	INIT_LIST_HEAD(&nb->cmsg_queue_other);
	atomic_set(&nb->cmsg_pongs_retrans_cnt, 0);
	atomic_set(&nb->cmsg_othercnt, 0);
	atomic_set(&nb->cmsg_bulk_readds, 0);
	atomic_set(&nb->cmsg_delay_conndata, 0);
	atomic_set(&nb->rcvmtu_sendneeded, 1);
	nb->last_ping_time = jiffies;
	atomic_set(&nb->latency_retrans_us, PING_GUESSLATENCY_MS * 1000);
	atomic_set(&nb->latency_advertised_us, PING_GUESSLATENCY_MS * 1000);
	atomic_set(&nb->max_remote_ack_fast_delay_us, 1000000);
	atomic_set(&nb->max_remote_ack_slow_delay_us, 1000000);
	atomic_set(&nb->max_remote_ackconn_lowlat_delay_us, 1000000);
	atomic_set(&nb->max_remote_ackconn_highlat_delay_us, 1000000);
	atomic_set(&nb->max_remote_pong_delay_us, 1000000);
	atomic_set(&nb->remote_rcvmtu, 128);
	spin_lock_init(&nb->conns_waiting.lock);
	INIT_LIST_HEAD(&nb->conns_waiting.lh);
	INIT_LIST_HEAD(&nb->conns_waiting.lh_nextpass);
	spin_lock_init(&nb->nbcongwin.lock);
	atomic64_set(&nb->nbcongwin.data_intransit, 0);
	atomic64_set(&nb->nbcongwin.cwin, 0);
	spin_lock_init(&nb->state_lock);
	nb->state = NEIGHBOR_STATE_INITIAL;
	nb->state_time.initial_state_since = jiffies;
	INIT_DELAYED_WORK(&nb->stalltimeout_timer, cor_stall_timer);
	spin_lock_init(&nb->connid_lock);
	spin_lock_init(&nb->connid_reuse_lock);
	INIT_LIST_HEAD(&nb->connid_reuse_list);
	get_random_bytes((char *) &seqno, sizeof(seqno));
	nb->kpacket_seqno = seqno;
	atomic64_set(&nb->priority_sum, 0);
	spin_lock_init(&nb->conn_list_lock);
	INIT_LIST_HEAD(&nb->snd_conn_idle_list);
	INIT_LIST_HEAD(&nb->snd_conn_busy_list);
	spin_lock_init(&nb->retrans_lock);
	INIT_LIST_HEAD(&nb->retrans_fast_list);
	INIT_LIST_HEAD(&nb->retrans_slow_list);
	spin_lock_init(&nb->retrans_conn_lock);
	INIT_LIST_HEAD(&nb->retrans_conn_lowlatency_list);
	INIT_LIST_HEAD(&nb->retrans_conn_highlatency_list);
	INIT_WORK(&nb->reset_neigh_work, _cor_reset_neighbor);

	return nb;
}

int cor_is_from_nb(struct sk_buff *skb, struct cor_neighbor *nb)
{
	int rc;

	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	rc = (skb->dev == nb->dev && memcmp(nb->mac, source_hw,
			MAX_ADDR_LEN) == 0);
	return rc;
}

struct cor_neighbor *_cor_get_neigh_by_mac(struct net_device *dev,
		char *source_hw)
{
	struct list_head *currlh;
	struct cor_neighbor *ret = 0;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON(curr->in_nb_list == 0);

		if (curr->dev == dev && memcmp(curr->mac, source_hw,
				MAX_ADDR_LEN) == 0) {
			ret = curr;
			cor_nb_kref_get(ret, "stack");
			break;
		}

		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);

	if (ret != 0 && unlikely(cor_get_neigh_state(ret) ==
			NEIGHBOR_STATE_KILLED)) {
		cor_nb_kref_put(ret, "stack");
		return 0;
	} else {
		return ret;
	}
}

struct cor_neighbor *cor_get_neigh_by_mac(struct sk_buff *skb)
{
	char source_hw[MAX_ADDR_LEN];

	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	return _cor_get_neigh_by_mac(skb->dev, source_hw);
}

struct cor_neighbor *cor_find_neigh(__be64 addr)
{
	struct list_head *currlh;
	struct cor_neighbor *ret = 0;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON(curr->in_nb_list == 0);

		BUG_ON((curr->has_addr == 0) && (curr->addr != 0));
		if (curr->has_addr != 0 && curr->addr == addr) {
			ret = curr;
			cor_nb_kref_get(ret, "stack");

			goto out;
		}

		currlh = currlh->next;
	}

out:
	spin_unlock_bh(&cor_neighbor_list_lock);

	if (ret != 0 && unlikely(cor_get_neigh_state(ret) ==
			NEIGHBOR_STATE_KILLED)) {
		cor_nb_kref_put(ret, "stack");
		return 0;
	} else {
		return ret;
	}
}

void cor_resend_rcvmtu(struct net_device *dev)
{
	struct list_head *currlh;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *nb = container_of(currlh,
				struct cor_neighbor, nb_list);

		unsigned long iflags;

		if (nb->dev != dev)
			goto cont;

		cor_send_rcvmtu(nb);

		spin_lock_irqsave(&nb->state_lock, iflags);

		if (nb->rcvmtu_allowed_countdown != 0)
			nb->rcvmtu_delayed_send_needed = 1;
		nb->rcvmtu_allowed_countdown = 3;

		spin_unlock_irqrestore(&nb->state_lock, iflags);

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);
}

__u32 cor_generate_neigh_list(char *buf, __u32 buflen)
{
	struct list_head *currlh;

	__u32 cnt = 0;

	__u32 buf_offset = 4;

	int rc;

	/*
	 * The variable length header rowcount needs to be generated after the
	 * data. This is done by reserving the maximum space it could take. If
	 * it ends up being smaller, the data is moved so that there is no gap.
	 */

	BUG_ON(buf == 0);
	BUG_ON(buflen < buf_offset);

	/* num_fields */
	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 2);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	/* addr field */
	BUG_ON(buflen < buf_offset + 2);
	cor_put_u16(buf + buf_offset, LIST_NEIGH_FIELD_ADDR);
	buf_offset += 2;

	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 8);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	/* latency field */
	BUG_ON(buflen < buf_offset + 2);
	cor_put_u16(buf + buf_offset, LIST_NEIGH_FIELD_LATENCY);
	buf_offset += 2;

	rc = cor_encode_len(buf + buf_offset, buflen - buf_offset, 1);
	BUG_ON(rc <= 0);
	buf_offset += rc;

	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);
		int state;

		BUG_ON(curr->in_nb_list == 0);

		state = cor_get_neigh_state(curr);
		if (state != NEIGHBOR_STATE_ACTIVE)
			goto cont;

		BUG_ON((curr->has_addr == 0) && (curr->addr != 0));
		if (curr->has_addr == 0)
			goto cont;

		if (unlikely(buflen < buf_offset + 8 + 1))
			break;

		cor_put_be64(buf + buf_offset, curr->addr);
		buf_offset += 8;

		buf[buf_offset] = cor_enc_log_64_11(atomic_read(
				&curr->latency_advertised_us));
		buf_offset += 1;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);

	rc = cor_encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);
	BUG_ON(rc > 4);

	if (likely(rc < 4))
		memmove(buf + ((__u32) rc), buf + 4, buf_offset);

	return buf_offset - 4 + ((__u32) rc);
}
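
/*
 * Reset all conns routed via this neighbor: repeatedly take the first conn
 * from the busy list (or, if that is empty, from the idle list), send a
 * reset for its connection id to the neighbor and reset the local conn
 * state. If sending the reset fails, the loop stops and another attempt is
 * scheduled via the stalltimeout_timer one HZ later.
 */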
static void cor_reset_all_conns(struct cor_neighbor *nb)
{
	while (1) {
		unsigned long iflags;
		struct cor_conn *trgt_out;
		struct cor_conn *src_in;
		struct cor_conn_bidir *cnb;
		int rc;

		spin_lock_irqsave(&nb->conn_list_lock, iflags);

		if (!list_empty(&nb->snd_conn_busy_list)) {
			trgt_out = container_of(nb->snd_conn_busy_list.next,
					struct cor_conn, trgt.out.nb_list);
		} else if (!list_empty(&nb->snd_conn_idle_list)) {
			trgt_out = container_of(nb->snd_conn_idle_list.next,
					struct cor_conn, trgt.out.nb_list);
		} else {
			spin_unlock_irqrestore(&nb->conn_list_lock, iflags);
			break;
		}

		cor_conn_kref_get(trgt_out, "stack");

		spin_unlock_irqrestore(&nb->conn_list_lock, iflags);

		src_in = cor_get_conn_reversedir(trgt_out);
		cnb = cor_get_conn_bidir(trgt_out);

		spin_lock_bh(&cnb->cli.rcv_lock);
		spin_lock_bh(&cnb->srv.rcv_lock);

		if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
				unlikely(src_in->src.in.nb != nb))) {
			rc = 1;
			goto unlock;
		}

		rc = cor_send_reset_conn(nb, trgt_out->trgt.out.conn_id, 1);

		if (unlikely(rc != 0))
			goto unlock;

		if (trgt_out->isreset == 0)
			trgt_out->isreset = 1;

unlock:
		spin_unlock_bh(&cnb->srv.rcv_lock);
		spin_unlock_bh(&cnb->cli.rcv_lock);

		if (rc == 0) {
			cor_reset_conn(src_in);
			cor_conn_kref_put(src_in, "stack");
		} else {
			cor_conn_kref_put(src_in, "stack");
			cor_nb_kref_get(nb, "stalltimeout_timer");
			schedule_delayed_work(&nb->stalltimeout_timer, HZ);
			break;
		}
	}
}

static void cor_delete_connid_reuse_items(struct cor_neighbor *nb);

static void _cor_reset_neighbor(struct work_struct *work)
{
	struct cor_neighbor *nb = container_of(work, struct cor_neighbor,
			reset_neigh_work);

	cor_reset_all_conns(nb);
	cor_delete_connid_reuse_items(nb);

	spin_lock_bh(&cor_neighbor_list_lock);
	if (nb->in_nb_list != 0) {
		list_del(&nb->nb_list);
		nb->in_nb_list = 0;
		cor_nb_kref_put_bug(nb, "neigh_list");
	}
	spin_unlock_bh(&cor_neighbor_list_lock);

	cor_nb_kref_put(nb, "reset_neigh_work");
}

static void cor_reset_neighbor(struct cor_neighbor *nb, int use_workqueue)
{
	unsigned long iflags;

	spin_lock_irqsave(&nb->state_lock, iflags);
	/* if (nb->state != NEIGHBOR_STATE_KILLED) {
		printk(KERN_ERR "cor_reset_neighbor\n");
		show_stack(0, 0);
	} */
	nb->state = NEIGHBOR_STATE_KILLED;
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (use_workqueue) {
		schedule_work(&nb->reset_neigh_work);
		cor_nb_kref_get(nb, "reset_neigh_work");
	} else {
		int krefput = 0;

		cor_reset_all_conns(nb);
		cor_delete_connid_reuse_items(nb);

		spin_lock_bh(&cor_neighbor_list_lock);
		if (nb->in_nb_list != 0) {
			list_del(&nb->nb_list);
			nb->in_nb_list = 0;
			krefput = 1;
		}
		spin_unlock_bh(&cor_neighbor_list_lock);

		if (krefput)
			cor_nb_kref_put(nb, "neigh_list");
	}
}

void cor_reset_neighbors(struct net_device *dev)
{
	struct list_head *currlh;

restart:
	spin_lock_bh(&cor_neighbor_list_lock);

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		unsigned long iflags;
		struct cor_neighbor *currnb = container_of(currlh,
				struct cor_neighbor, nb_list);
		__u8 state;

		BUG_ON(currnb->in_nb_list == 0);

		if (dev != 0 && currnb->dev != dev)
			goto cont;

		spin_lock_irqsave(&currnb->state_lock, iflags);
		state = currnb->state;
		spin_unlock_irqrestore(&currnb->state_lock, iflags);

		if (state != NEIGHBOR_STATE_KILLED) {
			spin_unlock_bh(&cor_neighbor_list_lock);
			cor_reset_neighbor(currnb, 0);
			goto restart;
		}

cont:
		currlh = currlh->next;
	}

	spin_unlock_bh(&cor_neighbor_list_lock);
}

static void cor_stall_timer(struct work_struct *work)
{
	struct cor_neighbor *nb = container_of(to_delayed_work(work),
			struct cor_neighbor, stalltimeout_timer);

	int stall_time_ms;
	__u8 nbstate;

	unsigned long iflags;

	spin_lock_irqsave(&nb->state_lock, iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (nbstate == NEIGHBOR_STATE_STALLED) {
		stall_time_ms = jiffies_to_msecs(jiffies -
				nb->state_time.last_roundtrip);

		if (stall_time_ms < NB_KILL_TIME_MS) {
			schedule_delayed_work(&nb->stalltimeout_timer,
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
			return;
		}

		cor_reset_neighbor(nb, 1);
	}

	nb->str_timer_pending = 0;
	cor_nb_kref_put(nb, "stalltimeout_timer");
}
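
/*
 * Return the current neighbor state, applying the time based transitions as
 * a side effect: an ACTIVE neighbor with no round trip for NB_STALL_TIME_MS
 * and enough pings in transit is moved to STALLED and the stall timeout
 * timer is armed; a neighbor that stays in INITIAL for longer than
 * INITIAL_TIME_LIMIT_SEC is killed via cor_reset_neighbor().
 */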
int cor_get_neigh_state(struct cor_neighbor *nb)
{
	int ret;
	unsigned long iflags;
	int stall_time_ms;

	BUG_ON(nb == 0);

	spin_lock_irqsave(&nb->state_lock, iflags);

	stall_time_ms = jiffies_to_msecs(jiffies -
			nb->state_time.last_roundtrip);

	WARN_ONCE(likely(nb->state == NEIGHBOR_STATE_ACTIVE) && unlikely(
			jiffies_to_msecs(jiffies - nb->last_ping_time) >
			PING_FORCETIME_ACTIVEIDLE_MS * 4) &&
			nb->ping_intransit == 0,
			"We have stopped sending pings to a neighbor!?");

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(stall_time_ms > NB_STALL_TIME_MS) && (
			nb->ping_intransit >= NB_STALL_MINPINGS ||
			nb->ping_intransit >= PING_COOKIES_PER_NEIGH)) {
		nb->state = NEIGHBOR_STATE_STALLED;
		nb->ping_success = 0;
		if (nb->str_timer_pending == 0) {
			nb->str_timer_pending = 1;
			cor_nb_kref_get(nb, "stalltimeout_timer");

			schedule_delayed_work(&nb->stalltimeout_timer,
					msecs_to_jiffies(NB_KILL_TIME_MS -
					stall_time_ms));
		}

		/* printk(KERN_ERR "changed to stalled\n"); */
		BUG_ON(nb->ping_intransit > PING_COOKIES_PER_NEIGH);
	} else if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL) &&
			time_after(jiffies, nb->state_time.initial_state_since +
			INITIAL_TIME_LIMIT_SEC * HZ)) {
		spin_unlock_irqrestore(&nb->state_lock, iflags);
		cor_reset_neighbor(nb, 1);
		return NEIGHBOR_STATE_KILLED;
	}

	ret = nb->state;

	spin_unlock_irqrestore(&nb->state_lock, iflags);

	return ret;
}

static struct cor_ping_cookie *cor_find_cookie(struct cor_neighbor *nb,
		__u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &nb->cookies[i];
	}
	return 0;
}

static void cor_reset_cookie(struct cor_neighbor *nb, struct cor_ping_cookie *c)
{
	if (c->cookie == 0)
		return;

	if (nb->cookie_unsent != c->cookie)
		nb->ping_intransit--;

	c->cookie = 0;
}
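
/*
 * Integer square root, used for the latency standard deviation below. This
 * is a fixed 20-step Newton/Heron iteration (y' ~= (y + x/y) / 2, written
 * with div64_u64 so it also works for 64 bit inputs on 32 bit machines)
 * starting from y = 65536; the result is clamped to U32_MAX.
 */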
static __u32 sqrt(__u64 x)
{
	int i;
	__u64 y = 65536;

	if (unlikely(x <= 1))
		return 0;

	for (i = 0; i < 20; i++) {
		y = y / 2 + div64_u64(x / 2, y);
		if (unlikely(y == 0))
			y = 1;
	}

	if (unlikely(y > U32_MAX))
		y = U32_MAX;

	return (__u32) y;
}
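
/*
 * Exponentially weighted moving average of the measured latency. While the
 * neighbor is in NEIGHBOR_STATE_INITIAL with fewer than 16 successful pings
 * a plain running average over all samples is used; afterwards the old value
 * is weighted 15:1 against the new sample. The result is rounded from
 * nanoseconds to microseconds, e.g. oldlatency_us == 1000 and
 * newlatency_ns == 2000000 give (15000000 + 2000000) / 16 = 1062500 ns,
 * returned as 1063 us.
 */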
static __u32 cor_calc_newlatency(struct cor_neighbor *nb_statelocked,
		__u32 oldlatency_us, __s64 newlatency_ns)
{
	__s64 oldlatency = oldlatency_us * 1000LL;
	__s64 newlatency;

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16))
		newlatency = div64_s64(
				oldlatency * nb_statelocked->ping_success +
				newlatency_ns,
				nb_statelocked->ping_success + 1);
	else
		newlatency = (oldlatency * 15 + newlatency_ns) / 16;

	newlatency = div_s64(newlatency + 500, 1000);

	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > U32_MAX))
		newlatency = U32_MAX;

	return (__u32) newlatency;
}

static void cor_update_nb_latency(struct cor_neighbor *nb_statelocked,
		struct cor_ping_cookie *c, __u32 respdelay)
{
	ktime_t now = ktime_get();

	__s64 pinglatency_retrans_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_sent) - respdelay * 1000LL;
	__s64 pinglatency_advertised_ns = ktime_to_ns(now) -
			ktime_to_ns(c->time_created) - respdelay * 1000LL;

	__u32 oldlatency_retrans_us =
			atomic_read(&nb_statelocked->latency_retrans_us);

	__u32 newlatency_retrans_us = cor_calc_newlatency(nb_statelocked,
			oldlatency_retrans_us, pinglatency_retrans_ns);

	atomic_set(&nb_statelocked->latency_retrans_us, newlatency_retrans_us);

	if (unlikely(unlikely(nb_statelocked->state == NEIGHBOR_STATE_INITIAL) &&
			nb_statelocked->ping_success < 16)) {
		nb_statelocked->latency_variance_retrans_us =
				((__u64) newlatency_retrans_us) *
				newlatency_retrans_us;
		atomic_set(&nb_statelocked->latency_stddev_retrans_us, sqrt(
				nb_statelocked->latency_variance_retrans_us));
	} else if (pinglatency_retrans_ns > oldlatency_retrans_us *
			((__s64) 1000)) {
		__s64 newdiff = div_s64(pinglatency_retrans_ns -
				oldlatency_retrans_us * ((__s64) 1000), 1000);
		__u32 newdiff32 = (__u32) (unlikely(newdiff >= U32_MAX) ?
				U32_MAX : newdiff);
		__u64 newvar = ((__u64) newdiff32) * newdiff32;

		__u64 oldval = nb_statelocked->latency_variance_retrans_us;

		if (unlikely(unlikely(newvar > (1LL << 55)) || unlikely(
				oldval > (1LL << 55)))) {
			nb_statelocked->latency_variance_retrans_us =
					(oldval / 16) * 15 + newvar / 16;
		} else {
			nb_statelocked->latency_variance_retrans_us =
					(oldval * 15 + newvar) / 16;
		}

		atomic_set(&nb_statelocked->latency_stddev_retrans_us, sqrt(
				nb_statelocked->latency_variance_retrans_us));
	}

	atomic_set(&nb_statelocked->latency_advertised_us,
			cor_calc_newlatency(nb_statelocked,
			atomic_read(&nb_statelocked->latency_advertised_us),
			pinglatency_advertised_ns));

	nb_statelocked->last_roundtrip_end = now;
}

static void cor_connid_used_pingsuccess(struct cor_neighbor *nb);

void cor_ping_resp(struct cor_neighbor *nb, __u32 cookie, __u32 respdelay)
{
	unsigned long iflags;

	struct cor_ping_cookie *c;
	int i;
	int stalledresume = 0;

	int call_connidreuse = 0;
	int call_send_rcvmtu = 0;

	if (unlikely(cookie == 0))
		return;

	spin_lock_irqsave(&nb->state_lock, iflags);

	c = cor_find_cookie(nb, cookie);

	if (unlikely(c == 0))
		goto out;

	atomic_set(&nb->sessionid_snd_needed, 0);

	call_connidreuse = ktime_before_eq(nb->last_roundtrip_end,
			c->time_created);

	cor_update_nb_latency(nb, c, respdelay);

	nb->ping_success++;

	cor_reset_cookie(nb, c);

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 && ktime_before(
				nb->cookies[i].time_created, c->time_created)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
				cor_reset_cookie(nb, &nb->cookies[i]);
			}
		}
	}

	if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
			nb->state == NEIGHBOR_STATE_STALLED)) {
		call_connidreuse = 0;

		if ((nb->state == NEIGHBOR_STATE_INITIAL &&
				nb->ping_success >= PING_SUCCESS_CNT_INIT) || (
				nb->state == NEIGHBOR_STATE_STALLED &&
				nb->ping_success >= PING_SUCCESS_CNT_STALLED)) {
			stalledresume = (nb->state == NEIGHBOR_STATE_STALLED);
			nb->state = NEIGHBOR_STATE_ACTIVE;
			/* printk(KERN_ERR "changed to active\n"); */
		}
	}

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) ||
			nb->state == NEIGHBOR_STATE_STALLED)
		nb->state_time.last_roundtrip = c->jiffies_sent;

	if (c == &nb->cookies[0] &&
			unlikely(nb->rcvmtu_allowed_countdown != 0)) {
		nb->rcvmtu_allowed_countdown--;

		if (unlikely(nb->rcvmtu_allowed_countdown == 0 &&
				nb->rcvmtu_delayed_send_needed != 0)) {
			nb->rcvmtu_allowed_countdown = 3;
			nb->rcvmtu_delayed_send_needed = 0;
			call_send_rcvmtu = 1;
		}
	}

out:
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (call_connidreuse)
		cor_connid_used_pingsuccess(nb);

	if (unlikely(call_send_rcvmtu))
		cor_send_rcvmtu(nb);

	if (unlikely(stalledresume)) {
		spin_lock_bh(&nb->retrans_conn_lock);
		cor_reschedule_conn_retrans_timer(nb);
		spin_unlock_bh(&nb->retrans_conn_lock);

		cor_qos_enqueue(nb->queue, &nb->rb, 0, ns_to_ktime(0),
				QOS_CALLER_NEIGHBOR, 1);
	}
}
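
/*
 * Allocate a ping cookie and return its value. A cookie that could not be
 * sent earlier (cookie_unsent) is reused first; otherwise a free slot is
 * taken, and if no slot is free a random one is recycled via
 * cor_reset_cookie(). The previous last_ping_time is handed back to the
 * caller so that cor_unadd_ping_req() can restore it if the ping is not
 * sent after all.
 */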
__u32 cor_add_ping_req(struct cor_neighbor *nb, unsigned long *last_ping_time)
{
	unsigned long iflags;
	struct cor_ping_cookie *c;
	__u32 i;

	__u32 cookie;

	ktime_t now = ktime_get();

	spin_lock_irqsave(&nb->state_lock, iflags);

	if (nb->cookie_unsent != 0) {
		c = cor_find_cookie(nb, nb->cookie_unsent);
		if (c != 0)
			goto unsent;
		c = 0;
		nb->cookie_unsent = 0;
	}

	c = cor_find_cookie(nb, 0);
	if (c != 0)
		goto found;

	get_random_bytes((char *) &i, sizeof(i));
	i = (i % PING_COOKIES_PER_NEIGH);
	c = &nb->cookies[i];
	cor_reset_cookie(nb, c);

found:
	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;
	c->time_created = now;

unsent:
	c->pongs = 0;
	c->time_sent = now;
	c->jiffies_sent = jiffies;
	cookie = c->cookie;

	nb->ping_intransit++;

	*last_ping_time = nb->last_ping_time;
	nb->last_ping_time = c->jiffies_sent;

	spin_unlock_irqrestore(&nb->state_lock, iflags);

	BUG_ON(cookie == 0);

	return cookie;
}

void cor_ping_sent(struct cor_neighbor *nb, __u32 cookie)
{
	unsigned long iflags;

	BUG_ON(cookie == 0);

	spin_lock_irqsave(&nb->state_lock, iflags);

	if (nb->cookie_unsent == cookie)
		nb->cookie_unsent = 0;

	spin_unlock_irqrestore(&nb->state_lock, iflags);
}

void cor_unadd_ping_req(struct cor_neighbor *nb, __u32 cookie,
		unsigned long last_ping_time, int congested)
{
	unsigned long iflags;

	struct cor_ping_cookie *c;

	BUG_ON(cookie == 0);

	spin_lock_irqsave(&nb->state_lock, iflags);

	if (congested) {
		BUG_ON(nb->cookie_unsent != 0 && nb->cookie_unsent != cookie);
		nb->cookie_unsent = cookie;
	}

	c = cor_find_cookie(nb, cookie);
	if (likely(c != 0)) {
		if (congested == 0)
			c->cookie = 0;
		nb->ping_intransit--;
	}

	nb->last_ping_time = last_ping_time;

	spin_unlock_irqrestore(&nb->state_lock, iflags);
}

static int cor_get_ping_forcetime_ms(struct cor_neighbor *nb)
{
	unsigned long iflags;
	int fast;
	int idle;

	if (unlikely(cor_get_neigh_state(nb) != NEIGHBOR_STATE_ACTIVE))
		return PING_FORCETIME_MS;

	spin_lock_irqsave(&nb->state_lock, iflags);
	fast = ((nb->ping_success < PING_ACTIVE_FASTINITIAL_COUNT) ||
			(nb->ping_intransit > 0));
	if (unlikely(nb->rcvmtu_delayed_send_needed != 0)) {
		BUG_ON(nb->rcvmtu_allowed_countdown == 0);
		fast = 1;
	}
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (fast)
		return PING_FORCETIME_ACTIVE_FAST_MS;

	spin_lock_irqsave(&nb->conn_list_lock, iflags);
	idle = list_empty(&nb->snd_conn_idle_list) &&
			list_empty(&nb->snd_conn_busy_list);
	spin_unlock_irqrestore(&nb->conn_list_lock, iflags);

	if (idle)
		return PING_FORCETIME_ACTIVEIDLE_MS;
	else
		return PING_FORCETIME_ACTIVE_MS;
}
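
/*
 * Minimum delay between two pings, derived from the advertised latency and
 * the maximum remote pong delay. Once ping_intransit reaches
 * PING_COOKIES_THROTTLESTART the delay is scaled up with the square of the
 * number of unanswered pings, e.g. with ping_intransit ==
 * PING_COOKIES_PER_NEIGH the factor becomes 1 + 9 * 1 = 10.
 */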
static __u32 cor_get_ping_mindelay_ms(struct cor_neighbor *nb_statelocked)
{
	__u32 latency_us = ((__u32) atomic_read(
			&nb_statelocked->latency_advertised_us));
	__u32 max_remote_pong_delay_us = ((__u32) atomic_read(
			&nb_statelocked->max_remote_pong_delay_us));
	__u32 mindelay_ms;

	if (latency_us < PING_GUESSLATENCY_MS * 1000)
		latency_us = PING_GUESSLATENCY_MS * 1000;

	if (unlikely(nb_statelocked->state != NEIGHBOR_STATE_ACTIVE))
		mindelay_ms = latency_us / 1000;
	else
		mindelay_ms = ((latency_us / 2 +
				max_remote_pong_delay_us / 2) / 500);

	if (likely(nb_statelocked->ping_intransit < PING_COOKIES_THROTTLESTART))
		return mindelay_ms;

	mindelay_ms = mindelay_ms * (1 + 9 * (nb_statelocked->ping_intransit *
			nb_statelocked->ping_intransit /
			(PING_COOKIES_PER_NEIGH * PING_COOKIES_PER_NEIGH)));

	return mindelay_ms;
}

/*
 * Check whether we want to send a ping now:
 *  0... Do not send ping.
 *  1... Send ping now, but only if it can be merged with other messages. This
 *       can happen way before the time requested by cor_get_next_ping_time().
 *  2... Send ping now, even if a packet has to be created just for the ping
 *       alone.
 */
int cor_time_to_send_ping(struct cor_neighbor *nb)
{
	unsigned long iflags;
	int rc = TIMETOSENDPING_YES;

	__u32 ms_since_last_ping;

	__u32 forcetime = cor_get_ping_forcetime_ms(nb);
	__u32 mindelay;

	spin_lock_irqsave(&nb->state_lock, iflags);

	ms_since_last_ping = jiffies_to_msecs(jiffies - nb->last_ping_time);

	mindelay = cor_get_ping_mindelay_ms(nb);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;
	else if (forcetime > (mindelay * 3))
		mindelay = forcetime / 3;

	if (ms_since_last_ping < mindelay || ms_since_last_ping < (forcetime / 4))
		rc = TIMETOSENDPING_NO;
	else if (ms_since_last_ping >= forcetime)
		rc = TIMETOSENDPING_FORCE;

	spin_unlock_irqrestore(&nb->state_lock, iflags);

	return rc;
}

unsigned long cor_get_next_ping_time(struct cor_neighbor *nb)
{
	unsigned long iflags;

	__u32 forcetime = cor_get_ping_forcetime_ms(nb);
	__u32 mindelay;

	spin_lock_irqsave(&nb->state_lock, iflags);
	mindelay = cor_get_ping_mindelay_ms(nb);
	spin_unlock_irqrestore(&nb->state_lock, iflags);

	if (forcetime < (mindelay * 3))
		forcetime = mindelay * 3;

	return nb->last_ping_time + msecs_to_jiffies(forcetime);
}

void cor_add_neighbor(struct cor_neighbor_discdata *nb_dd)
{
	struct cor_neighbor *nb;
	struct list_head *currlh;

	nb = cor_alloc_neighbor(GFP_KERNEL);
	if (unlikely(nb == 0))
		return;

	nb->queue = cor_get_queue(nb_dd->dev);
	if (nb->queue == 0) {
		kmem_cache_free(cor_nb_slab, nb);
		atomic_dec(&cor_num_neighs);
		return;
	}

	dev_hold(nb_dd->dev);
	nb->dev = nb_dd->dev;

	memcpy(nb->mac, nb_dd->mac, MAX_ADDR_LEN);

	nb->has_addr = nb_dd->has_addr;
	nb->addr = nb_dd->addr;

	nb_dd->nb_allocated = 1;

	spin_lock_bh(&cor_neighbor_list_lock);

	BUG_ON((nb->has_addr == 0) && (nb->addr != 0));

	if (cor_is_clientmode() && nb->has_addr == 0)
		goto already_present;

	currlh = cor_nb_list.next;
	while (currlh != &cor_nb_list) {
		struct cor_neighbor *curr = container_of(currlh,
				struct cor_neighbor, nb_list);

		BUG_ON((curr->has_addr == 0) && (curr->addr != 0));

		if (curr->dev == nb->dev &&
				memcmp(curr->mac, nb->mac, MAX_ADDR_LEN) == 0)
			goto already_present;

		if (curr->has_addr != 0 && curr->addr == nb->addr)
			goto already_present;

		currlh = currlh->next;
	}

	/* printk(KERN_ERR "add_neigh\n"); */

	spin_lock_bh(&cor_local_addr_lock);
	nb->sessionid = cor_local_addr_sessionid ^ nb_dd->sessionid;
	spin_unlock_bh(&cor_local_addr_lock);

	timer_setup(&nb->retrans_timer, cor_retransmit_timerfunc, 0);

	timer_setup(&nb->retrans_conn_timer, cor_retransmit_conn_timerfunc, 0);

	spin_lock_bh(&nb->cmsg_lock);
	nb->last_ping_time = jiffies;
	cor_schedule_controlmsg_timer(nb);
	spin_unlock_bh(&nb->cmsg_lock);

	list_add_tail(&nb->nb_list, &cor_nb_list);
	nb->in_nb_list = 1;
	cor_nb_kref_get(nb, "neigh_list");
	cor_nb_kref_put_bug(nb, "alloc");

	if (0) {
already_present:
		kmem_cache_free(cor_nb_slab, nb);
		atomic_dec(&cor_num_neighs);
	}

	spin_unlock_bh(&cor_neighbor_list_lock);
}
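
/*
 * Incoming conns are indexed per neighbor in the connid_rb red-black tree,
 * keyed by src.in.conn_id and protected by nb->connid_lock. cor_get_conn()
 * looks up a conn by its id and returns it with a "stack" reference held,
 * or 0 if the id is unknown.
 */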
struct cor_conn *cor_get_conn(struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct rb_node *n = 0;
	struct cor_conn *ret = 0;

	spin_lock_irqsave(&nb->connid_lock, iflags);

	n = nb->connid_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct cor_conn *src_in_o = container_of(n, struct cor_conn,
				src.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		if (conn_id < src_in_o->src.in.conn_id)
			n = n->rb_left;
		else if (conn_id > src_in_o->src.in.conn_id)
			n = n->rb_right;
		else
			ret = src_in_o;
	}

	if (ret != 0)
		cor_conn_kref_get(ret, "stack");

	spin_unlock_irqrestore(&nb->connid_lock, iflags);

	return ret;
}

int cor_insert_connid(struct cor_neighbor *nb, struct cor_conn *src_in_ll)
{
	int rc = 0;

	unsigned long iflags;

	__u32 conn_id = src_in_ll->src.in.conn_id;

	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);

	spin_lock_irqsave(&nb->connid_lock, iflags);

	root = &nb->connid_rb;
	p = &root->rb_node;

	while ((*p) != 0) {
		struct cor_conn *src_in_o = container_of(*p, struct cor_conn,
				src.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		parent = *p;
		if (unlikely(conn_id == src_in_o->src.in.conn_id)) {
			goto duplicate;
		} else if (conn_id < src_in_o->src.in.conn_id) {
			p = &(*p)->rb_left;
		} else if (conn_id > src_in_o->src.in.conn_id) {
			p = &(*p)->rb_right;
		}
	}

	cor_conn_kref_get(src_in_ll, "connid table");
	rb_link_node(&src_in_ll->src.in.rbn, parent, p);
	rb_insert_color(&src_in_ll->src.in.rbn, root);

	if (0) {
duplicate:
		rc = 1;
	}

	spin_unlock_irqrestore(&nb->connid_lock, iflags);

	return rc;
}

static struct cor_connid_reuse_item *cor_get_connid_reuseitem(
		struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct rb_node *n = 0;
	struct cor_connid_reuse_item *ret = 0;

	spin_lock_irqsave(&nb->connid_reuse_lock, iflags);

	n = nb->connid_reuse_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct cor_connid_reuse_item *cir = container_of(n,
				struct cor_connid_reuse_item, rbn);

		BUG_ON(cir->conn_id == 0);

		if (conn_id < cir->conn_id)
			n = n->rb_left;
		else if (conn_id > cir->conn_id)
			n = n->rb_right;
		else
			ret = cir;
	}

	if (ret != 0)
		kref_get(&ret->ref);

	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);

	return ret;
}

/* nb->connid_reuse_lock must be held by the caller */
static void _cor_insert_connid_reuse_insertrb(struct cor_neighbor *nb,
		struct cor_connid_reuse_item *ins)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(ins->conn_id == 0);

	root = &nb->connid_reuse_rb;
	p = &root->rb_node;

	while ((*p) != 0) {
		struct cor_connid_reuse_item *curr = container_of(*p,
				struct cor_connid_reuse_item, rbn);

		BUG_ON(curr->conn_id == 0);

		parent = *p;
		if (unlikely(ins->conn_id == curr->conn_id)) {
			BUG();
		} else if (ins->conn_id < curr->conn_id) {
			p = &(*p)->rb_left;
		} else if (ins->conn_id > curr->conn_id) {
			p = &(*p)->rb_right;
		}
	}

	kref_get(&ins->ref);
	rb_link_node(&ins->rbn, parent, p);
	rb_insert_color(&ins->rbn, root);
}

void cor_insert_connid_reuse(struct cor_neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;

	struct cor_connid_reuse_item *cir = kmem_cache_alloc(
			cor_connid_reuse_slab, GFP_ATOMIC);

	if (unlikely(cir == 0)) {
		BUILD_BUG_ON(CONNID_REUSE_RTTS > 255);

		spin_lock_irqsave(&nb->connid_reuse_lock, iflags);
		nb->connid_reuse_oom_countdown = CONNID_REUSE_RTTS;
		spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);

		return;
	}

	memset(cir, 0, sizeof(struct cor_connid_reuse_item));

	kref_init(&cir->ref);
	cir->conn_id = conn_id;

	spin_lock_irqsave(&nb->connid_reuse_lock, iflags);

	cir->pingcnt = nb->connid_reuse_pingcnt;

	_cor_insert_connid_reuse_insertrb(nb, cir);
	list_add_tail(&cir->lh, &nb->connid_reuse_list);

	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);
}

static void cor_free_connid_reuse(struct kref *ref)
{
	struct cor_connid_reuse_item *cir = container_of(ref,
			struct cor_connid_reuse_item, ref);

	kmem_cache_free(cor_connid_reuse_slab, cir);
}

static void cor_delete_connid_reuse_items(struct cor_neighbor *nb)
{
	unsigned long iflags;
	struct cor_connid_reuse_item *cri;

	spin_lock_irqsave(&nb->connid_reuse_lock, iflags);

	while (list_empty(&nb->connid_reuse_list) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct cor_connid_reuse_item, lh);

		rb_erase(&cri->rbn, &nb->connid_reuse_rb);
		kref_put(&cri->ref, cor_kreffree_bug);

		list_del(&cri->lh);
		kref_put(&cri->ref, cor_free_connid_reuse);
	}

	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);
}
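
/*
 * Called after a successful ping round trip: connid_reuse_pingcnt is
 * incremented and connid reuse entries that have been queued for at least
 * CONNID_REUSE_RTTS round trips are removed from the rb tree and the list
 * (the comparison against 32768 is meant to keep this age check stable
 * across wrap-around of the counter).
 */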
static void cor_connid_used_pingsuccess(struct cor_neighbor *nb)
{
	unsigned long iflags;
	struct cor_connid_reuse_item *cri;

	spin_lock_irqsave(&nb->connid_reuse_lock, iflags);

	nb->connid_reuse_pingcnt++;
	while (list_empty(&nb->connid_reuse_list) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct cor_connid_reuse_item, lh);
		if ((cri->pingcnt + CONNID_REUSE_RTTS -
				nb->connid_reuse_pingcnt) < 32768)
			break;

		rb_erase(&cri->rbn, &nb->connid_reuse_rb);
		kref_put(&cri->ref, cor_kreffree_bug);

		list_del(&cri->lh);
		kref_put(&cri->ref, cor_free_connid_reuse);
	}

	if (unlikely(nb->connid_reuse_oom_countdown != 0))
		nb->connid_reuse_oom_countdown--;

	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags);
}

static int cor_connid_used(struct cor_neighbor *nb, __u32 conn_id)
{
	struct cor_conn *cn;
	struct cor_connid_reuse_item *cir;

	cn = cor_get_conn(nb, conn_id);
	if (unlikely(cn != 0)) {
		cor_conn_kref_put(cn, "stack");
		return 1;
	}

	cir = cor_get_connid_reuseitem(nb, conn_id);
	if (unlikely(cir != 0)) {
		kref_put(&cir->ref, cor_free_connid_reuse);
		return 1;
	}

	return 0;
}
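
/*
 * Pick a connection id for a new incoming conn: up to 16 attempts are made
 * to draw a random, non-zero 31 bit id that is neither in the connid rb
 * tree nor in the connid reuse tree. On success the id is stored in the
 * conn pair (the target side gets the reversed id) and inserted into the
 * connid tree; a non-zero return value means no id could be allocated.
 */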
int cor_connid_alloc(struct cor_neighbor *nb, struct cor_conn *src_in_ll)
{
	unsigned long iflags;
	unsigned long iflags2;
	struct cor_conn *trgt_out_ll = cor_get_conn_reversedir(src_in_ll);
	__u32 conn_id;
	int i;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);
	BUG_ON(trgt_out_ll->targettype != TARGET_OUT);

	spin_lock_irqsave(&cor_connid_gen, iflags);
	for (i = 0; i < 16; i++) {
		conn_id = 0;
		get_random_bytes((char *) &conn_id, sizeof(conn_id));
		conn_id = (conn_id & ~(1 << 31));

		if (unlikely(conn_id == 0))
			continue;

		if (unlikely(cor_connid_used(nb, conn_id)))
			continue;

		goto found;
	}
	spin_unlock_irqrestore(&cor_connid_gen, iflags);

	return 1;

found:
	/* use a separate flags word for the nested lock so that the flags
	 * saved for cor_connid_gen are not clobbered
	 */
	spin_lock_irqsave(&nb->connid_reuse_lock, iflags2);
	if (unlikely(nb->connid_reuse_oom_countdown != 0)) {
		spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags2);
		/* do not leak cor_connid_gen on this error path */
		spin_unlock_irqrestore(&cor_connid_gen, iflags);
		return 1;
	}
	spin_unlock_irqrestore(&nb->connid_reuse_lock, iflags2);

	src_in_ll->src.in.conn_id = conn_id;
	trgt_out_ll->trgt.out.conn_id = cor_get_connid_reverse(conn_id);
	if (unlikely(cor_insert_connid(nb, src_in_ll) != 0)) {
		BUG();
	}
	spin_unlock_irqrestore(&cor_connid_gen, iflags);
	return 0;
}

int __init cor_neighbor_init(void)
{
	cor_nb_slab = kmem_cache_create("cor_neighbor",
			sizeof(struct cor_neighbor), 8, 0, 0);
	if (unlikely(cor_nb_slab == 0))
		return -ENOMEM;

	atomic_set(&cor_num_neighs, 0);

	return 0;
}

void __exit cor_neighbor_exit2(void)
{
	BUG_ON(atomic_read(&cor_num_neighs) != 0);

	kmem_cache_destroy(cor_nb_slab);
	cor_nb_slab = 0;
}

MODULE_LICENSE("GPL");