/**
 * Connection oriented routing
 * Copyright (C) 2007-2019 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/mutex.h>

#include "cor.h"

DEFINE_SPINLOCK(cor_bindnodes);
static DEFINE_SPINLOCK(conn_free);
static DEFINE_SPINLOCK(connid_gen);

static LIST_HEAD(openports);

static struct kmem_cache *conn_slab;
static struct kmem_cache *connid_reuse_slab;

atomic_t num_conns;

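/*
 * Incoming conns are indexed per neighbor in nb->connid_rb, keyed by
 * source.in.conn_id. get_conn() looks a conn up by its conn_id and returns
 * it with an extra reference (kref_get) taken, or 0 if the conn_id is not
 * known.
 */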
struct conn *get_conn(struct neighbor *nb, __u32 conn_id)
{
	struct rb_node *n = 0;
	struct conn *ret = 0;

	spin_lock_bh(&(nb->connid_lock));

	n = nb->connid_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct conn *src_in_o = container_of(n, struct conn,
				source.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		if (conn_id < src_in_o->source.in.conn_id)
			n = n->rb_left;
		else if (conn_id > src_in_o->source.in.conn_id)
			n = n->rb_right;
		else
			ret = src_in_o;
	}

	if (ret != 0)
		kref_get(&(ret->ref));

	spin_unlock_bh(&(nb->connid_lock));

	return ret;
}

static int insert_connid(struct neighbor *nb, struct conn *src_in_ll)
{
	int rc = 0;

	__u32 conn_id = src_in_ll->source.in.conn_id;

	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);

	spin_lock_bh(&(nb->connid_lock));

	root = &(nb->connid_rb);
	p = &(root->rb_node);

	while ((*p) != 0) {
		struct conn *src_in_o = container_of(*p, struct conn,
				source.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		parent = *p;
		if (unlikely(conn_id == src_in_o->source.in.conn_id)) {
			goto duplicate;
		} else if (conn_id < src_in_o->source.in.conn_id) {
			p = &(*p)->rb_left;
		} else if (conn_id > src_in_o->source.in.conn_id) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	kref_get(&(src_in_ll->ref));
	rb_link_node(&(src_in_ll->source.in.rbn), parent, p);
	rb_insert_color(&(src_in_ll->source.in.rbn), root);

	if (0) {
duplicate:
		rc = 1;
	}

	spin_unlock_bh(&(nb->connid_lock));

	return rc;
}

struct connid_reuse_item *get_connid_reuseitem(struct neighbor *nb,
		__u32 conn_id)
{
	struct rb_node *n = 0;
	struct connid_reuse_item *ret = 0;

	spin_lock_bh(&(nb->connid_reuse_lock));

	n = nb->connid_reuse_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct connid_reuse_item *cir = container_of(n,
				struct connid_reuse_item, rbn);

		BUG_ON(cir->conn_id == 0);

		if (conn_id < cir->conn_id)
			n = n->rb_left;
		else if (conn_id > cir->conn_id)
			n = n->rb_right;
		else
			ret = cir;
	}

	if (ret != 0)
		kref_get(&(ret->ref));

	spin_unlock_bh(&(nb->connid_reuse_lock));

	return ret;
}

/* nb->connid_reuse_lock must be held by the caller */
static void insert_connid_reuse(struct neighbor *nb,
		struct connid_reuse_item *ins)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(ins->conn_id == 0);

	root = &(nb->connid_reuse_rb);
	p = &(root->rb_node);

	while ((*p) != 0) {
		struct connid_reuse_item *curr = container_of(*p,
				struct connid_reuse_item, rbn);

		BUG_ON(curr->conn_id == 0);

		parent = *p;
		if (unlikely(ins->conn_id == curr->conn_id)) {
			BUG();
		} else if (ins->conn_id < curr->conn_id) {
			p = &(*p)->rb_left;
		} else if (ins->conn_id > curr->conn_id) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	kref_get(&(ins->ref));
	rb_link_node(&(ins->rbn), parent, p);
	rb_insert_color(&(ins->rbn), root);
}

static void free_connid_reuse(struct kref *ref)
{
	struct connid_reuse_item *cir = container_of(ref,
			struct connid_reuse_item, ref);

	kmem_cache_free(connid_reuse_slab, cir);
}

void delete_connid_reuse_items(struct neighbor *nb)
{
	struct connid_reuse_item *cri;

	spin_lock_bh(&(nb->connid_reuse_lock));

	while (list_empty(&(nb->connid_reuse_list)) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct connid_reuse_item, lh);

		rb_erase(&(cri->rbn), &(nb->connid_reuse_rb));
		kref_put(&(cri->ref), kreffree_bug);

		list_del(&(cri->lh));
		kref_put(&(cri->ref), free_connid_reuse);
	}

	spin_unlock_bh(&(nb->connid_reuse_lock));
}

void connid_used_pingsuccess(struct neighbor *nb)
{
	struct connid_reuse_item *cri;

	spin_lock_bh(&(nb->connid_reuse_lock));

	nb->connid_reuse_pingcnt++;
	while (list_empty(&(nb->connid_reuse_list)) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct connid_reuse_item, lh);
		if ((cri->pingcnt + CONNID_REUSE_RTTS -
				nb->connid_reuse_pingcnt) < 32768)
			break;

		rb_erase(&(cri->rbn), &(nb->connid_reuse_rb));
		kref_put(&(cri->ref), kreffree_bug);

		list_del(&(cri->lh));
		kref_put(&(cri->ref), free_connid_reuse);
	}

	spin_unlock_bh(&(nb->connid_reuse_lock));
}

static int connid_used(struct neighbor *nb, __u32 conn_id)
{
	struct conn *cn;
	struct connid_reuse_item *cir;

	cn = get_conn(nb, conn_id);
	if (unlikely(cn != 0)) {
		kref_put(&(cn->ref), free_conn);
		return 1;
	}

	cir = get_connid_reuseitem(nb, conn_id);
	if (unlikely(cir != 0)) {
		kref_put(&(cir->ref), free_connid_reuse);
		return 1;
	}

	return 0;
}

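/*
 * conn_id scheme: 0 means "no conn_id". connid_alloc() picks a random
 * nonzero id with bit 31 cleared for the receive direction and gives the
 * reverse (send) direction the same id with bit 31 set; conn_ids received
 * from the neighbor are expected to have bit 31 set (see the BUG_ON in
 * conn_init_out()). Only locally allocated ids are put on the connid reuse
 * list when the conn is reset (see _reset_conn()).
 */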
static int connid_alloc(struct neighbor *nb, struct conn *src_in_ll)
{
	__u32 conn_id;
	int i;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);
	BUG_ON(src_in_ll->reversedir->targettype != TARGET_OUT);

	spin_lock_bh(&connid_gen);
	for (i = 0; i < 16; i++) {
		conn_id = 0;
		get_random_bytes((char *) &conn_id, sizeof(conn_id));
		conn_id = (conn_id & ~(1 << 31));

		if (unlikely(conn_id == 0))
			continue;

		if (unlikely(connid_used(nb, conn_id)))
			continue;

		goto found;
	}
	spin_unlock_bh(&connid_gen);

	return 1;

found:
	src_in_ll->source.in.conn_id = conn_id;
	src_in_ll->reversedir->target.out.conn_id = (conn_id | (1 << 31));
	if (insert_connid(nb, src_in_ll) != 0) {
		BUG();
	}
	spin_unlock_bh(&connid_gen);
	return 0;
}

void _set_last_act(struct conn *src_in_l)
{
	unsigned long iflags;
	src_in_l->source.in.jiffies_last_act = jiffies;
	spin_lock_irqsave(&(src_in_l->source.in.nb->conn_list_lock), iflags);
	list_del(&(src_in_l->source.in.nb_list));
	list_add_tail(&(src_in_l->source.in.nb_list),
			&(src_in_l->source.in.nb->rcv_conn_list));
	spin_unlock_irqrestore(&(src_in_l->source.in.nb->conn_list_lock),
			iflags);
}

void free_conn(struct kref *ref)
{
	unsigned long iflags;
	struct conn *cn = container_of(ref, struct conn, ref);
	struct conn *reversedir = 0;

	spin_lock_irqsave(&conn_free, iflags);

	BUG_ON(cn->isreset == 0);

	if (cn->reversedir != 0)
		cn->reversedir->isreset = 3;

	if (cn->isreset != 3)
		goto out;

	if (cn->reversedir != 0) {
		cn->reversedir->reversedir = 0;
		reversedir = cn->reversedir;
		cn->reversedir = 0;
	}

	if (cn->sourcetype == SOURCE_IN) {
		WARN_ONCE(list_empty(&(cn->source.in.reorder_queue)) == 0,
				"cor free_conn(): cn->source.in.reorder_queue is not empty");
		WARN_ONCE(list_empty(&(cn->source.in.acks_pending)) == 0,
				"cor free_conn(): cn->source.in.acks_pending is not empty");

		WARN_ONCE(cn->source.in.conn_id != 0,
				"cor free_conn(): cn->source.in.conn_id is not 0");
		kref_put(&(cn->source.in.nb->ref), neighbor_free);
		cn->source.in.nb = 0;
	}

	if (cn->targettype == TARGET_OUT) {
		WARN_ONCE(list_empty(&(cn->target.out.retrans_list)) == 0,
				"cor free_conn(): cn->target.out.retrans_list is not empty");
		WARN_ONCE(cn->target.out.rb.in_queue != RB_INQUEUE_FALSE,
				"cor free_conn(): cn->target.out.rb.in_queue is not RB_INQUEUE_FALSE");
		WARN_ONCE(cn->target.out.conn_id != 0,
				"cor free_conn(): cn->target.out.conn_id is not 0");
		kref_put(&(cn->target.out.nb->ref), neighbor_free);
		cn->target.out.nb = 0;
	}

	WARN_ONCE(cn->data_buf.datasize != 0,
			"cor free_conn(): cn->data_buf.datasize is not 0");
	WARN_ONCE(cn->data_buf.overhead != 0,
			"cor free_conn(): cn->data_buf.overhead is not 0");
	WARN_ONCE(list_empty(&(cn->data_buf.items)) == 0,
			"cor free_conn(): cn->data_buf.items is not empty");
	WARN_ONCE(cn->data_buf.nextread != 0,
			"cor free_conn(): cn->data_buf.nextread is not 0");

	memset(cn, 9*16 + 10, sizeof(struct conn));
	kmem_cache_free(conn_slab, cn);

out:
	spin_unlock_irqrestore(&conn_free, iflags);

	if (reversedir != 0)
		free_conn(&(reversedir->ref));
}

/**
 * rc == 0 ==> ok
 * rc == 1 ==> connid_reuse or connid allocation failed
 */
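/* on error, both directions are rolled back to *_UNCONNECTED (see out_err) */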
int conn_init_out(struct conn *trgt_unconn_ll, struct neighbor *nb,
		__u32 rcvd_connid, int use_rcvd_connid)
{
	unsigned long iflags;
	int rc = 0;
	struct conn *src_unconn_ll = trgt_unconn_ll->reversedir;

	BUG_ON(trgt_unconn_ll->targettype != TARGET_UNCONNECTED);
	BUG_ON(src_unconn_ll == 0);
	BUG_ON(src_unconn_ll->sourcetype != SOURCE_UNCONNECTED);

	memset(&(trgt_unconn_ll->target.out), 0,
			sizeof(trgt_unconn_ll->target.out));
	memset(&(src_unconn_ll->source.in), 0,
			sizeof(src_unconn_ll->source.in));

	trgt_unconn_ll->targettype = TARGET_OUT;
	src_unconn_ll->sourcetype = SOURCE_IN;

	if (use_rcvd_connid) {
		BUG_ON((rcvd_connid & (1 << 31)) == 0);

		src_unconn_ll->source.in.conn_id = rcvd_connid;
		if (unlikely(insert_connid(nb, src_unconn_ll) != 0)) {
			src_unconn_ll->source.in.conn_id = 0;
			rc = 1;
			goto out_err;
		}
	} else {
		src_unconn_ll->source.in.cir = kmem_cache_alloc(
				connid_reuse_slab, GFP_ATOMIC);
		if (unlikely(src_unconn_ll->source.in.cir == 0)) {
			rc = 1;
			goto out_err;
		}
		memset(src_unconn_ll->source.in.cir, 0,
				sizeof(struct connid_reuse_item));

		if (unlikely(connid_alloc(nb, src_unconn_ll))) {
			rc = 1;
			goto out_freecir;
		}
	}

	trgt_unconn_ll->target.out.nb = nb;
	src_unconn_ll->source.in.nb = nb;

	/* neighbor pointer */
	kref_get(&(nb->ref));
	kref_get(&(nb->ref));

	INIT_LIST_HEAD(&(src_unconn_ll->source.in.reorder_queue));

	INIT_LIST_HEAD(&(src_unconn_ll->source.in.acks_pending));

	INIT_LIST_HEAD(&(trgt_unconn_ll->target.out.retrans_list));

	reset_seqno(trgt_unconn_ll, 0);
	if (use_rcvd_connid == 0) {
		get_random_bytes((char *)
				&(trgt_unconn_ll->target.out.seqno_nextsend),
				sizeof(
				trgt_unconn_ll->target.out.seqno_nextsend));
		trgt_unconn_ll->target.out.seqno_acked =
				trgt_unconn_ll->target.out.seqno_nextsend;
		trgt_unconn_ll->target.out.seqno_windowlimit =
				trgt_unconn_ll->target.out.seqno_nextsend;
		reset_seqno(trgt_unconn_ll,
				trgt_unconn_ll->target.out.seqno_nextsend);

		get_random_bytes((char *)
				&(src_unconn_ll->source.in.next_seqno),
				sizeof(src_unconn_ll->source.in.next_seqno));
		src_unconn_ll->source.in.window_seqnolimit =
				src_unconn_ll->source.in.next_seqno;
		src_unconn_ll->source.in.window_seqnolimit_remote =
				src_unconn_ll->source.in.next_seqno;
	}

	get_random_bytes((char *) &(trgt_unconn_ll->target.out.priority_seqno),
			sizeof(trgt_unconn_ll->target.out.priority_seqno));
	trgt_unconn_ll->source.in.priority_seqno = 0;

	src_unconn_ll->source.in.jiffies_last_act = jiffies;

	trgt_unconn_ll->target.out.jiffies_idle_since =
			jiffies << JIFFIES_LAST_IDLE_SHIFT;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	list_add_tail(&(src_unconn_ll->source.in.nb_list),
			&(nb->rcv_conn_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

	/* neighbor lists */
	kref_get(&(src_unconn_ll->ref));

	if (src_unconn_ll->is_client)
		atomic_inc(&num_conns);

	if (use_rcvd_connid == 0)
		update_windowlimit(src_unconn_ll);

	if (0) {
out_freecir:
		kmem_cache_free(connid_reuse_slab,
				src_unconn_ll->source.in.cir);
		src_unconn_ll->source.in.cir = 0;
out_err:
		trgt_unconn_ll->targettype = TARGET_UNCONNECTED;
		src_unconn_ll->sourcetype = SOURCE_UNCONNECTED;
	}
	return rc;
}

void conn_init_sock_source(struct conn *cn)
{
	BUG_ON(cn == 0);
	cn->sourcetype = SOURCE_SOCK;
	memset(&(cn->source.sock), 0, sizeof(cn->source.sock));
	cn->source.sock.priority = PRIORITY_MAX;
	cn->source.sock.snd_speed.jiffies_last_refresh = jiffies;
	cn->source.sock.snd_speed.flushed = 1;
}

void conn_init_sock_target(struct conn *cn)
{
	BUG_ON(cn == 0);
	cn->targettype = TARGET_SOCK;
	memset(&(cn->target.sock), 0, sizeof(cn->target.sock));
	reset_seqno(cn, 0);
}

struct conn *alloc_conn(gfp_t allocflags)
{
	struct conn *cn1 = 0;
	struct conn *cn2 = 0;

	cn1 = kmem_cache_alloc(conn_slab, allocflags);
	if (unlikely(cn1 == 0))
		goto out_err0;

	cn2 = kmem_cache_alloc(conn_slab, allocflags);
	if (unlikely(cn2 == 0))
		goto out_err1;

	memset(cn1, 0, sizeof(struct conn));
	memset(cn2, 0, sizeof(struct conn));

	cn1->reversedir = cn2;
	cn2->reversedir = cn1;

	kref_init(&(cn1->ref));
	kref_init(&(cn2->ref));

	cn1->sourcetype = SOURCE_UNCONNECTED;
	cn2->sourcetype = SOURCE_UNCONNECTED;
	cn1->targettype = TARGET_UNCONNECTED;
	cn2->targettype = TARGET_UNCONNECTED;

	cn1->isreset = 0;
	cn2->isreset = 0;

	spin_lock_init(&(cn1->rcv_lock));
	spin_lock_init(&(cn2->rcv_lock));

	databuf_init(cn1);
	databuf_init(cn2);

	bufsize_init(cn1, 0);
	bufsize_init(cn2, 0);

	cn1->bufsize.bufsize = (BUFSIZE_INITIAL_LOWLAT << BUFSIZE_SHIFT);
	cn2->bufsize.bufsize = (BUFSIZE_INITIAL_LOWLAT << BUFSIZE_SHIFT);
#warning todo set to BUFSIZE_INITIAL_HIGHLAT if switched to highlatency

	return cn1;

out_err1:
	kmem_cache_free(conn_slab, cn1);
out_err0:
	return 0;
}

static struct cor_sock *get_corsock_by_port(__be64 port)
{
	struct list_head *curr = openports.next;

	while (curr != &openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);
		if (cs->data.listener.port == port)
			return cs;

		curr = curr->next;
	}

	return 0;
}

__u32 list_services(char *buf, __u32 buflen)
{
	__u32 cnt = 0;

	__u32 buf_offset = 4;

	struct list_head *curr;
	int rc;

	/*
	 * The variable length header (the rowcount) needs to be generated
	 * after the data. This is done by reserving the maximum space it
	 * could take. If it ends up being smaller, the data is moved so that
	 * there is no gap.
	 */
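	/*
	 * Example: with two published ports and a one byte rowcount encoding,
	 * the port bytes are first written at offsets 4..7 and afterwards
	 * moved back by 3 bytes, giving [rowcount=2][port A (2 bytes)]
	 * [port B (2 bytes)] = 5 bytes returned.
	 */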

	BUG_ON(buf == 0);
	BUG_ON(buflen < buf_offset);

	spin_lock_bh(&cor_bindnodes);

	curr = openports.next;

	while (curr != &openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);

		if (cs->data.listener.publish_service == 0)
			goto cont;

		if (unlikely(buf_offset + 2 < buf_offset) ||
				buf_offset + 2 > buflen)
			break;

		buf[buf_offset] = ((char *) &(cs->data.listener.port))[0];
		buf[buf_offset+1] = ((char *) &(cs->data.listener.port))[1];
		buf_offset += 2;
		cnt++;

cont:
		curr = curr->next;
	}

	spin_unlock_bh(&cor_bindnodes);

	rc = encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);
	BUG_ON(rc > 4);

	if (likely(rc < 4))
		memmove(buf + ((__u32) rc), buf+4, buf_offset);

	return buf_offset - 4 + ((__u32) rc);
}

void set_publish_service(struct cor_sock *cs, __u8 value)
{
	BUG_ON(value != 0 && value != 1);

	mutex_lock(&(cs->lock));

	cs->publish_service = value;

	if (cs->type == CS_TYPE_LISTENER) {
		spin_lock_bh(&cor_bindnodes);
		cs->data.listener.publish_service = value;
		spin_unlock_bh(&cor_bindnodes);
	}

	mutex_unlock(&(cs->lock));
}

void close_port(struct cor_sock *cs)
{
	mutex_lock(&(cs->lock));
	if (unlikely(cs->type != CS_TYPE_LISTENER))
		goto out;

	spin_lock_bh(&cor_bindnodes);

	list_del(&(cs->data.listener.lh));

	while (list_empty(&(cs->data.listener.conn_queue)) == 0) {
		struct conn *src_sock_o = container_of(
				cs->data.listener.conn_queue.next,
				struct conn, source.sock.cl_list);
		list_del(&(src_sock_o->source.sock.cl_list));
		reset_conn(src_sock_o);
		kref_get(&(src_sock_o->reversedir->ref));
		kref_put(&(src_sock_o->ref), free_conn);
	}

	spin_unlock_bh(&cor_bindnodes);
out:
	mutex_unlock(&(cs->lock));
}

int open_port(struct cor_sock *cs_l, __be16 port)
{
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);
	if (get_corsock_by_port(port) != 0) {
		rc = -EADDRINUSE;
		goto out;
	}

	BUG_ON(cs_l->type != CS_TYPE_UNCONNECTED);

	cs_l->type = CS_TYPE_LISTENER;
	cs_l->data.listener.port = port;
	cs_l->data.listener.publish_service = cs_l->publish_service;

	/* kref is not used here */
	INIT_LIST_HEAD(&(cs_l->data.listener.conn_queue));

	list_add_tail((struct list_head *) &(cs_l->data.listener.lh),
			&openports);

out:
	spin_unlock_bh(&cor_bindnodes);

	return rc;
}

/**
 * rc == 0 connected
 * rc == 2 port not open
 * rc == 3 listener queue full
 */
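/*
 * a listener whose queue_maxlen is <= 0 never accepts connections, so it is
 * reported like a closed port (rc == 2) rather than a full queue
 */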
int connect_port(struct conn *trgt_unconn_ll, __be16 port)
{
	struct cor_sock *cs;
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);

	cs = get_corsock_by_port(port);
	if (cs == 0) {
		rc = 2;
		goto out;
	}

	if (unlikely(cs->data.listener.queue_len >=
			cs->data.listener.queue_maxlen)) {
		if (cs->data.listener.queue_maxlen <= 0)
			rc = 2;
		else
			rc = 3;

		goto out;
	}

	kref_get(&(trgt_unconn_ll->ref));
	kref_get(&(trgt_unconn_ll->reversedir->ref));

	BUG_ON(trgt_unconn_ll->is_client != 1);
	conn_init_sock_target(trgt_unconn_ll);
	conn_init_sock_source(trgt_unconn_ll->reversedir);

	list_add_tail(&(trgt_unconn_ll->reversedir->source.sock.cl_list),
			&(cs->data.listener.conn_queue));
	cs->data.listener.queue_len++;
	atomic_set(&(cs->ready_to_accept), 1);
	barrier();
	cs->sk.sk_state_change(&(cs->sk));

out:
	spin_unlock_bh(&cor_bindnodes);
	return rc;
}

/**
 * rc == 0 connected
 * rc == 3 addr not found
 * rc == 4 connid allocation or control msg allocation failed
 */
int connect_neigh(struct conn *trgt_unconn_ll, char *addr, __u16 addrlen)
{
	struct control_msg_out *cm;
	struct neighbor *nb = 0;

	nb = find_neigh(addr, addrlen);
	if (nb == 0)
		return 3;

	cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
	if (unlikely(cm == 0)) {
		kref_put(&(nb->ref), neighbor_free);
		return 4;
	}

	if (unlikely(conn_init_out(trgt_unconn_ll, nb, 0, 0))) {
		free_control_msg(cm);
		kref_put(&(nb->ref), neighbor_free);
		return 4;
	}

	send_connect_nb(cm, trgt_unconn_ll->target.out.conn_id,
			trgt_unconn_ll->target.out.seqno_nextsend,
			trgt_unconn_ll->reversedir->source.in.next_seqno,
			trgt_unconn_ll->reversedir);

	kref_put(&(nb->ref), neighbor_free);

	return 0;
}

static int _reset_conn(struct conn *cn_ll, int trgt_out_resetneeded)
{
	/*
	 * active conns have an additional ref to make sure that they are not
	 * freed when only one direction is referenced by the connid hashtable
	 */
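	/*
	 * The return value counts how many references the caller has to drop;
	 * reset_conn_locked() drops them only after both directions have been
	 * reset.
	 */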
	int krefput = 1;

	if (cn_ll->sourcetype == SOURCE_IN) {
		unsigned long iflags;
		struct neighbor *nb = cn_ll->source.in.nb;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);
		list_del(&(cn_ll->source.in.nb_list));
		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		krefput++;

		if (cn_ll->source.in.conn_id != 0 &&
				(cn_ll->source.in.conn_id & (1 << 31)) != 0) {
			BUG_ON(cn_ll->source.in.cir != 0);
		} else if (cn_ll->source.in.conn_id != 0 &&
				(cn_ll->source.in.conn_id & (1 << 31)) == 0) {
			BUG_ON(cn_ll->source.in.cir == 0);

			kref_init(&(cn_ll->source.in.cir->ref));
			cn_ll->source.in.cir->conn_id =
					cn_ll->source.in.conn_id;
			cn_ll->source.in.cir->pingcnt =
					nb->connid_reuse_pingcnt;

			spin_lock_bh(&(nb->connid_reuse_lock));
			insert_connid_reuse(nb, cn_ll->source.in.cir);
			list_add_tail(&(cn_ll->source.in.cir->lh),
					&(nb->connid_reuse_list));
			spin_unlock_bh(&(nb->connid_reuse_lock));

			cn_ll->source.in.cir = 0;
		}

		if (cn_ll->source.in.conn_id != 0) {
			spin_lock_bh(&(nb->connid_lock));
			rb_erase(&(cn_ll->source.in.rbn), &(nb->connid_rb));
			spin_unlock_bh(&(nb->connid_lock));
			krefput++;
		}
		cn_ll->source.in.conn_id = 0;

		free_ack_conns(cn_ll);

		if (cn_ll->is_client)
			atomic_dec(&num_conns);

		reset_ooo_queue(cn_ll);
	} else if (cn_ll->sourcetype == SOURCE_SOCK) {
		if (likely(cn_ll->source.sock.cs != 0)) {
			cor_sk_write_space(cn_ll->source.sock.cs);
			kref_put(&(cn_ll->source.sock.cs->ref), free_sock);
			cn_ll->source.sock.cs = 0;
		}
	}

	if (cn_ll->targettype == TARGET_UNCONNECTED) {
		if (cn_ll->target.unconnected.cmdparams != 0) {
			kfree(cn_ll->target.unconnected.cmdparams);
			cn_ll->target.unconnected.cmdparams = 0;
		}
	} else if (cn_ll->targettype == TARGET_OUT) {
		if (trgt_out_resetneeded && cn_ll->target.out.conn_id != 0) {
			send_reset_conn(cn_ll->target.out.nb,
					cn_ll->target.out.conn_id, 0);
		}

		cn_ll->target.out.conn_id = 0;

		cancel_all_conn_retrans(cn_ll);

		qos_remove_conn(cn_ll);

		spin_lock_bh(&(cn_ll->target.out.nb->stalledconn_lock));
		if (cn_ll->target.out.nbstalled_lh.prev != 0) {
			list_del(&(cn_ll->target.out.nbstalled_lh));
			cn_ll->target.out.nbstalled_lh.prev = 0;
			cn_ll->target.out.nbstalled_lh.next = 0;
			krefput++;
		}
		spin_unlock_bh(&(cn_ll->target.out.nb->stalledconn_lock));
	} else if (cn_ll->targettype == TARGET_SOCK) {
		if (likely(cn_ll->target.sock.cs != 0)) {
			if (cn_ll->target.sock.socktype == SOCKTYPE_RAW) {
				cor_sk_data_ready(cn_ll->target.sock.cs);
			} else {
				cor_mngdsocket_readfromconn_fromatomic(
						cn_ll->target.sock.cs);
			}
			kref_put(&(cn_ll->target.sock.cs->ref), free_sock);
			cn_ll->target.sock.cs = 0;
			cn_ll->target.sock.rcv_buf = 0;
		}
	}

	databuf_ackdiscard(cn_ll);

	account_bufspace(cn_ll);

	connreset_priority(cn_ll);

	return krefput;
}

/* warning: do not hold the rcv_lock while calling this! */
void reset_conn_locked(struct conn *cn_ll)
{
	int put1;
	int put2;

	int isreset1;
	int isreset2;

	BUG_ON(cn_ll->isreset <= 1 && cn_ll->reversedir->isreset >= 2);
	BUG_ON(cn_ll->isreset >= 2 && cn_ll->reversedir->isreset <= 1);

	isreset1 = cn_ll->isreset;
	if (cn_ll->isreset <= 1)
		cn_ll->isreset = 2;

	isreset2 = cn_ll->reversedir->isreset;
	if (cn_ll->reversedir->isreset <= 1)
		cn_ll->reversedir->isreset = 2;

	if (isreset1 >= 2) {
		put1 = 0;
		put2 = 0;
	} else {
		put1 = _reset_conn(cn_ll, isreset1 == 0);
		put2 = _reset_conn(cn_ll->reversedir, isreset2 == 0);
	}

	/* free_conn must not be called before both _reset_conn calls have finished */
	while (put1 > 0) {
		kref_put(&(cn_ll->ref), kreffree_bug);
		put1--;
	}

	while (put2 > 0) {
		kref_put(&(cn_ll->reversedir->ref), kreffree_bug);
		put2--;
	}
}

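/*
 * Take the rcv_locks of both directions in a fixed order (the client side
 * first) so that concurrent resets of the same conn pair acquire them in the
 * same order and cannot deadlock.
 */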
void reset_conn(struct conn *cn)
{
	kref_get(&(cn->ref));
	kref_get(&(cn->reversedir->ref));

	if (cn->is_client) {
		spin_lock_bh(&(cn->rcv_lock));
		spin_lock_bh(&(cn->reversedir->rcv_lock));
	} else {
		spin_lock_bh(&(cn->reversedir->rcv_lock));
		spin_lock_bh(&(cn->rcv_lock));
	}

	reset_conn_locked(cn);

	if (cn->is_client) {
		spin_unlock_bh(&(cn->rcv_lock));
		spin_unlock_bh(&(cn->reversedir->rcv_lock));
	} else {
		spin_unlock_bh(&(cn->reversedir->rcv_lock));
		spin_unlock_bh(&(cn->rcv_lock));
	}

	kref_put(&(cn->ref), free_conn);
	kref_put(&(cn->reversedir->ref), free_conn);
}

static int __init cor_init(void)
{
	int rc;

	struct conn c;

	printk(KERN_ERR "sizeof conn: %u", (__u32) sizeof(c));
	printk(KERN_ERR " conn.source: %u", (__u32) sizeof(c.source));
	printk(KERN_ERR " conn.target: %u", (__u32) sizeof(c.target));
	printk(KERN_ERR " conn.target.out: %u", (__u32) sizeof(c.target.out));
	printk(KERN_ERR " conn.buf: %u", (__u32) sizeof(c.data_buf));

	printk(KERN_ERR "sizeof neighbor: %u", (__u32) sizeof(struct neighbor));

	printk(KERN_ERR "sizeof mutex: %u", (__u32) sizeof(struct mutex));
	printk(KERN_ERR "sizeof spinlock: %u", (__u32) sizeof(spinlock_t));
	printk(KERN_ERR "sizeof kref: %u", (__u32) sizeof(struct kref));
	printk(KERN_ERR "sizeof list_head: %u",
			(__u32) sizeof(struct list_head));
	printk(KERN_ERR "sizeof rb_root: %u", (__u32) sizeof(struct rb_root));
	printk(KERN_ERR "sizeof rb_node: %u", (__u32) sizeof(struct rb_node));

	rc = cor_util_init();
	if (unlikely(rc != 0))
		return rc;

	conn_slab = kmem_cache_create("cor_conn", sizeof(struct conn), 8, 0, 0);
	if (unlikely(conn_slab == 0))
		return -ENOMEM;

	connid_reuse_slab = kmem_cache_create("cor_connid_reuse",
			sizeof(struct connid_reuse_item), 8, 0, 0);
	if (unlikely(connid_reuse_slab == 0))
		return -ENOMEM;

	atomic_set(&num_conns, 0);
	barrier();

	rc = forward_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_kgen_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_snd_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_neighbor_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rcv_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_managed_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_init2();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init2();
	if (unlikely(rc != 0))
		return rc;

	return 0;
}

static void __exit cor_exit(void)
{
	cor_rd_exit1();
	cor_sock_exit1();
	cor_sock_managed_exit1();
	cor_snd_exit1();

	cor_rcv_exit2();
	cor_neighbor_exit2();
	cor_snd_exit2();
	cor_rd_exit2();
	cor_kgen_exit2();
	forward_exit2();

	BUG_ON(atomic_read(&num_conns) != 0);

	kmem_cache_destroy(conn_slab);
	conn_slab = 0;

	kmem_cache_destroy(connid_reuse_slab);
	connid_reuse_slab = 0;
}

module_init(cor_init);
module_exit(cor_exit);
MODULE_LICENSE("GPL");