credit system replaced with priority system, init session endianness bugfix
[cor.git] / net / cor / common.c
blob 38cddc1dc7a1972aa44cde32c4bffa4cf560ec20
/**
 * Connection oriented routing
 * Copyright (C) 2007-2013 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/mutex.h>

#include "cor.h"

DEFINE_SPINLOCK(cor_bindnodes);
static DEFINE_SPINLOCK(conn_free);
static DEFINE_SPINLOCK(connid_gen);

static LIST_HEAD(openports);

static struct kmem_cache *conn_slab;
static struct kmem_cache *connid_reuse_slab;

atomic_t num_conns;

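/*
 * Look up the incoming connection with the given conn_id in the neighbor's
 * connid rbtree. Takes a reference on the returned conn; the caller must
 * drop it with kref_put(..., free_conn).
 */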
struct conn *get_conn(struct neighbor *nb, __u32 conn_id)
{
	struct rb_node *n = 0;
	struct conn *ret = 0;

	spin_lock_bh(&(nb->connid_lock));

	n = nb->connid_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct conn *src_in_o = container_of(n, struct conn,
				source.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		if (conn_id < src_in_o->source.in.conn_id)
			n = n->rb_left;
		else if (conn_id > src_in_o->source.in.conn_id)
			n = n->rb_right;
		else
			ret = src_in_o;
	}

	if (ret != 0)
		kref_get(&(ret->ref));

	spin_unlock_bh(&(nb->connid_lock));

	return ret;
}

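/*
 * Insert src_in_ll into the neighbor's connid rbtree and take a reference
 * on it. Returns 0 on success and 1 if the conn_id is already present.
 */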
static int insert_connid(struct neighbor *nb, struct conn *src_in_ll)
{
	int rc = 0;

	__u32 conn_id = src_in_ll->source.in.conn_id;

	struct rb_node **p = 0;
	struct rb_node *parent = 0;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);

	spin_lock_bh(&(nb->connid_lock));

	p = &(nb->connid_rb.rb_node);

	while ((*p) != 0) {
		struct conn *src_in_o = container_of(*p, struct conn,
				source.in.rbn);

		BUG_ON(src_in_o->sourcetype != SOURCE_IN);

		parent = *p;
		if (unlikely(conn_id == src_in_o->source.in.conn_id)) {
			goto duplicate;
		} else if (conn_id < src_in_o->source.in.conn_id) {
			p = &(*p)->rb_left;
		} else if (conn_id > src_in_o->source.in.conn_id) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	kref_get(&(src_in_ll->ref));
	rb_link_node(&(src_in_ll->source.in.rbn), parent, p);
	rb_insert_color(&(src_in_ll->source.in.rbn), &(nb->connid_rb));

	if (0) {
duplicate:
		rc = 1;
	}

	spin_unlock_bh(&(nb->connid_lock));

	return rc;
}

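/*
 * Look up a connid reuse entry for conn_id and take a reference on it if
 * found. Reuse entries keep a recently freed conn_id reserved (see
 * connid_used()) until enough pings have passed.
 */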
struct connid_reuse_item *get_connid_reuseitem(struct neighbor *nb,
		__u32 conn_id)
{
	struct rb_node *n = 0;
	struct connid_reuse_item *ret = 0;

	spin_lock_bh(&(nb->connid_reuse_lock));

	n = nb->connid_reuse_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct connid_reuse_item *cir = container_of(n,
				struct connid_reuse_item, rbn);

		BUG_ON(cir->conn_id == 0);

		if (conn_id < cir->conn_id)
			n = n->rb_left;
		else if (conn_id > cir->conn_id)
			n = n->rb_right;
		else
			ret = cir;
	}

	if (ret != 0)
		kref_get(&(ret->ref));

	spin_unlock_bh(&(nb->connid_reuse_lock));

	return ret;
}

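/*
 * Insert a reuse entry into the neighbor's connid_reuse rbtree. A duplicate
 * conn_id is a bug here, unlike in insert_connid().
 */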
static void insert_connid_reuse(struct neighbor *nb,
		struct connid_reuse_item *ins)
{
	struct rb_node **p = 0;
	struct rb_node *parent = 0;

	BUG_ON(ins->conn_id == 0);

	spin_lock_bh(&(nb->connid_reuse_lock));

	p = &(nb->connid_reuse_rb.rb_node);

	while ((*p) != 0) {
		struct connid_reuse_item *curr = container_of(*p,
				struct connid_reuse_item, rbn);

		BUG_ON(curr->conn_id == 0);

		parent = *p;
		if (unlikely(ins->conn_id == curr->conn_id)) {
			BUG();
		} else if (ins->conn_id < curr->conn_id) {
			p = &(*p)->rb_left;
		} else if (ins->conn_id > curr->conn_id) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	kref_get(&(ins->ref));
	rb_link_node(&(ins->rbn), parent, p);
	rb_insert_color(&(ins->rbn), &(nb->connid_reuse_rb));

	spin_unlock_bh(&(nb->connid_reuse_lock));
}

static void free_connid_reuse(struct kref *ref)
{
	struct connid_reuse_item *cir = container_of(ref,
			struct connid_reuse_item, ref);

	kmem_cache_free(connid_reuse_slab, cir);
}

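/*
 * Called after a successful ping: advance the reuse ping counter and drop
 * reuse entries that are older than CONNID_REUSE_RTTS pings, so their
 * conn_ids become available again.
 */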
void connid_used_pingsuccess(struct neighbor *nb)
{
	struct connid_reuse_item *cri;

	spin_lock_bh(&(nb->connid_reuse_lock));

	nb->connid_reuse_pingcnt++;
	while (list_empty(&(nb->connid_reuse_list)) == 0) {
		cri = container_of(nb->connid_reuse_list.next,
				struct connid_reuse_item, lh);
		if ((cri->pingcnt + CONNID_REUSE_RTTS -
				nb->connid_reuse_pingcnt) < 32768)
			break;

		rb_erase(&(cri->rbn), &(nb->connid_reuse_rb));
		kref_put(&(cri->ref), kreffree_bug);

		list_del(&(cri->lh));
		kref_put(&(cri->ref), free_connid_reuse);
	}

	spin_unlock_bh(&(nb->connid_reuse_lock));
}

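/*
 * A conn_id counts as used while a conn with that id exists or while a
 * reuse entry for it is still pending.
 */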
static int connid_used(struct neighbor *nb, __u32 conn_id)
{
	struct conn *cn;
	struct connid_reuse_item *cir;

	cn = get_conn(nb, conn_id);
	if (unlikely(cn != 0)) {
		kref_put(&(cn->ref), free_conn);
		return 1;
	}

	cir = get_connid_reuseitem(nb, conn_id);
	if (unlikely(cir != 0)) {
		kref_put(&(cir->ref), free_connid_reuse);
		return 1;
	}

	return 0;
}

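/*
 * Pick a random unused conn_id (high bit clear for the incoming direction,
 * high bit set for the outgoing one) and insert the conn into the connid
 * rbtree. Returns 0 on success, 1 if no free id was found in 16 tries.
 */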
static int connid_alloc(struct neighbor *nb, struct conn *src_in_ll)
{
	__u32 conn_id;
	int i;

	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);
	BUG_ON(src_in_ll->reversedir->targettype != TARGET_OUT);

	spin_lock_bh(&connid_gen);
	for (i = 0; i < 16; i++) {
		conn_id = 0;
		get_random_bytes((char *) &conn_id, sizeof(conn_id));
		conn_id = (conn_id & ~(1 << 31));

		if (unlikely(conn_id == 0))
			continue;

		if (unlikely(connid_used(nb, conn_id)))
			continue;

		goto found;
	}
	spin_unlock_bh(&connid_gen);

	return 1;

found:
	src_in_ll->source.in.conn_id = conn_id;
	src_in_ll->reversedir->target.out.conn_id = (conn_id | (1 << 31));
	if (insert_connid(nb, src_in_ll) != 0) {
		BUG();
	}
	spin_unlock_bh(&connid_gen);
	return 0;
}

void _set_last_act(struct conn *src_in_l)
{
	unsigned long iflags;
	src_in_l->source.in.jiffies_last_act = jiffies;
	spin_lock_irqsave(&(src_in_l->source.in.nb->conn_list_lock), iflags);
	list_del(&(src_in_l->source.in.nb_list));
	list_add_tail(&(src_in_l->source.in.nb_list),
			&(src_in_l->source.in.nb->rcv_conn_list));
	spin_unlock_irqrestore(&(src_in_l->source.in.nb->conn_list_lock),
			iflags);
}

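/*
 * kref release function for struct conn. A conn is only freed once its
 * reverse direction has been released as well (isreset == 3); the first
 * release of a pair only marks the other direction.
 */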
void free_conn(struct kref *ref)
{
	unsigned long iflags;
	struct conn *cn = container_of(ref, struct conn, ref);
	struct conn *reversedir = 0;

	spin_lock_irqsave(&conn_free, iflags);

	BUG_ON(cn->isreset == 0);

	if (cn->reversedir != 0)
		cn->reversedir->isreset = 3;

	if (cn->isreset != 3)
		goto out;

	if (cn->reversedir != 0) {
		cn->reversedir->reversedir = 0;
		reversedir = cn->reversedir;
		cn->reversedir = 0;
	}

	if (cn->sourcetype == SOURCE_IN) {
		BUG_ON(cn->source.in.conn_id != 0);
		kref_put(&(cn->source.in.nb->ref), neighbor_free);
		cn->source.in.nb = 0;
	}

	if (cn->targettype == TARGET_OUT) {
		BUG_ON(cn->target.out.conn_id != 0);
		kref_put(&(cn->target.out.nb->ref), neighbor_free);
		cn->target.out.nb = 0;
	}

	BUG_ON(cn->data_buf.datasize != 0);
	BUG_ON(cn->data_buf.overhead != 0);

	memset(cn, 9*16 + 10, sizeof(struct conn));
	kmem_cache_free(conn_slab, cn);

out:
	spin_unlock_irqrestore(&conn_free, iflags);

	if (reversedir != 0)
		free_conn(&(reversedir->ref));
}

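/*
 * conn_init_out turns an unconnected conn pair into a SOURCE_IN/TARGET_OUT
 * pair bound to neighbor nb. With use_rcvd_connid the conn_id received from
 * the peer is inserted directly; otherwise a fresh conn_id and random
 * sequence numbers are generated.
 */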
/**
 * rc == 0 ==> ok
 * rc == 1 ==> connid_reuse or connid allocation failed
 */
int conn_init_out(struct conn *trgt_unconn_ll, struct neighbor *nb,
		__u32 rcvd_connid, int use_rcvd_connid)
{
	unsigned long iflags;
	int rc = 0;
	struct conn *src_unconn_ll = trgt_unconn_ll->reversedir;

	BUG_ON(trgt_unconn_ll->targettype != TARGET_UNCONNECTED);
	BUG_ON(src_unconn_ll == 0);
	BUG_ON(src_unconn_ll->sourcetype != SOURCE_UNCONNECTED);

	memset(&(trgt_unconn_ll->target.out), 0,
			sizeof(trgt_unconn_ll->target.out));
	memset(&(src_unconn_ll->source.in), 0,
			sizeof(src_unconn_ll->source.in));

	trgt_unconn_ll->targettype = TARGET_OUT;
	src_unconn_ll->sourcetype = SOURCE_IN;

	if (use_rcvd_connid) {
		BUG_ON((rcvd_connid & (1 << 31)) == 0);

		src_unconn_ll->source.in.conn_id = rcvd_connid;
		if (unlikely(insert_connid(nb, src_unconn_ll) != 0)) {
			src_unconn_ll->source.in.conn_id = 0;
			rc = 1;
			goto out_err;
		}
	} else {
		src_unconn_ll->source.in.cir = kmem_cache_alloc(
				connid_reuse_slab, GFP_ATOMIC);
		if (unlikely(src_unconn_ll->source.in.cir == 0)) {
			rc = 1;
			goto out_err;
		}
		memset(src_unconn_ll->source.in.cir, 0,
				sizeof(struct connid_reuse_item));

		if (unlikely(connid_alloc(nb, src_unconn_ll))) {
			rc = 1;
			goto out_freecir;
		}
	}

	trgt_unconn_ll->target.out.nb = nb;
	src_unconn_ll->source.in.nb = nb;

	/* neighbor pointer */
	kref_get(&(nb->ref));
	kref_get(&(nb->ref));

	INIT_LIST_HEAD(&(src_unconn_ll->source.in.reorder_queue));

	INIT_LIST_HEAD(&(src_unconn_ll->source.in.acks_pending));

	INIT_LIST_HEAD(&(trgt_unconn_ll->target.out.retrans_list));

	reset_seqno(trgt_unconn_ll, 0);
	if (use_rcvd_connid == 0) {
		get_random_bytes((char *)
				&(trgt_unconn_ll->target.out.seqno_nextsend),
				sizeof(
				trgt_unconn_ll->target.out.seqno_nextsend));
		trgt_unconn_ll->target.out.seqno_acked =
				trgt_unconn_ll->target.out.seqno_nextsend;
		reset_seqno(trgt_unconn_ll,
				trgt_unconn_ll->target.out.seqno_nextsend);

		get_random_bytes((char *)
				&(src_unconn_ll->source.in.next_seqno),
				sizeof(src_unconn_ll->source.in.next_seqno));
		src_unconn_ll->source.in.window_seqnolimit =
				src_unconn_ll->source.in.next_seqno;
		src_unconn_ll->source.in.window_seqnolimit_remote =
				src_unconn_ll->source.in.next_seqno;
	}

	get_random_bytes((char *) &(trgt_unconn_ll->target.out.priority_seqno),
			sizeof(trgt_unconn_ll->target.out.priority_seqno));
	trgt_unconn_ll->source.in.priority_seqno = 0;

	src_unconn_ll->source.in.jiffies_last_act = jiffies;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	list_add_tail(&(src_unconn_ll->source.in.nb_list),
			&(nb->rcv_conn_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

	/* neighbor lists */
	kref_get(&(src_unconn_ll->ref));

	if (src_unconn_ll->is_client)
		atomic_inc(&num_conns);

	if (use_rcvd_connid == 0)
		update_windowlimit(src_unconn_ll);

	if (0) {
out_freecir:
		kmem_cache_free(connid_reuse_slab,
				src_unconn_ll->source.in.cir);
		src_unconn_ll->source.in.cir = 0;
out_err:
		trgt_unconn_ll->targettype = TARGET_UNCONNECTED;
		src_unconn_ll->sourcetype = SOURCE_UNCONNECTED;
	}

	return rc;
}

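/*
 * conn_init_sock_source/conn_init_sock_target attach one end of a conn to a
 * local socket; the source side starts at PRIORITY_MAX.
 */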
void conn_init_sock_source(struct conn *cn)
{
	BUG_ON(cn == 0);
	cn->sourcetype = SOURCE_SOCK;
	memset(&(cn->source.sock), 0, sizeof(cn->source.sock));
	cn->source.sock.priority = PRIORITY_MAX;
}

void conn_init_sock_target(struct conn *cn)
{
	BUG_ON(cn == 0);
	cn->targettype = TARGET_SOCK;
	memset(&(cn->target.sock), 0, sizeof(cn->target.sock));
	reset_seqno(cn, 0);
}

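/*
 * Allocate a pair of conns that reference each other via reversedir, both
 * unconnected and with one reference held each. Returns the first conn of
 * the pair, or 0 on allocation failure.
 */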
struct conn *alloc_conn(gfp_t allocflags)
{
	struct conn *cn1 = 0;
	struct conn *cn2 = 0;

	cn1 = kmem_cache_alloc(conn_slab, allocflags);
	if (unlikely(cn1 == 0))
		goto out_err0;

	cn2 = kmem_cache_alloc(conn_slab, allocflags);
	if (unlikely(cn2 == 0))
		goto out_err1;

	memset(cn1, 0, sizeof(struct conn));
	memset(cn2, 0, sizeof(struct conn));

	cn1->reversedir = cn2;
	cn2->reversedir = cn1;

	kref_init(&(cn1->ref));
	kref_init(&(cn2->ref));

	cn1->sourcetype = SOURCE_UNCONNECTED;
	cn2->sourcetype = SOURCE_UNCONNECTED;
	cn1->targettype = TARGET_UNCONNECTED;
	cn2->targettype = TARGET_UNCONNECTED;

	cn1->isreset = 0;
	cn2->isreset = 0;

	spin_lock_init(&(cn1->rcv_lock));
	spin_lock_init(&(cn2->rcv_lock));

	databuf_init(cn1);
	databuf_init(cn2);

	speedtracker_init(&(cn1->st));
	speedtracker_init(&(cn2->st));

	return cn1;

out_err1:
	kmem_cache_free(conn_slab, cn1);
out_err0:
	return 0;
}

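/* caller must hold cor_bindnodes */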
static struct cor_sock *get_corsock_by_port(__be64 port)
{
	struct list_head *curr = openports.next;

	while (curr != &openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);
		if (cs->data.listener.port == port)
			return cs;

		curr = curr->next;
	}

	return 0;
}

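/*
 * Write the list of published listener ports into buf, prefixed by a
 * variable length row count. Returns the number of bytes used.
 */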
__u32 list_services(char *buf, __u32 buflen)
{
	__u32 cnt = 0;

	__u32 buf_offset = 4;

	struct list_head *curr;
	int rc;

	/*
	 * The variable length header rowcount needs to be generated after the
	 * data. This is done by reserving the maximum space it could take. If
	 * it ends up being smaller, the data is moved so that there is no gap.
	 */

	BUG_ON(buf == 0);
	BUG_ON(buflen < buf_offset);

	spin_lock_bh(&cor_bindnodes);

	curr = openports.next;

	while (curr != &openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);

		if (cs->data.listener.publish_service == 0)
			goto cont;

		if (unlikely(buf_offset + 2 < buf_offset) ||
				buf_offset + 2 > buflen)
			break;

		buf[buf_offset] = ((char *) &(cs->data.listener.port))[0];
		buf[buf_offset+1] = ((char *) &(cs->data.listener.port))[1];
		buf_offset += 2;
		cnt++;

cont:
		curr = curr->next;
	}

	spin_unlock_bh(&cor_bindnodes);

	rc = encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);
	BUG_ON(rc > 4);

	if (likely(rc < 4))
		memmove(buf + ((__u32) rc), buf+4, buf_offset);

	return buf_offset - 4 + ((__u32) rc);
}

void set_publish_service(struct cor_sock *cs, __u8 value)
{
	BUG_ON(value != 0 && value != 1);

	mutex_lock(&(cs->lock));

	cs->publish_service = value;

	if (cs->type == CS_TYPE_LISTENER) {
		spin_lock_bh(&cor_bindnodes);
		cs->data.listener.publish_service = value;
		spin_unlock_bh(&cor_bindnodes);
	}

	mutex_unlock(&(cs->lock));
}

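/*
 * Remove a listener from openports and reset all connections still waiting
 * in its accept queue.
 */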
void close_port(struct cor_sock *cs)
{
	mutex_lock(&(cs->lock));
	if (unlikely(cs->type != CS_TYPE_LISTENER))
		goto out;

	spin_lock_bh(&cor_bindnodes);

	list_del(&(cs->data.listener.lh));

	while (list_empty(&(cs->data.listener.conn_queue)) == 0) {
		struct conn *src_sock_o = container_of(
				cs->data.listener.conn_queue.next,
				struct conn, source.sock.cl_list);
		list_del(&(src_sock_o->source.sock.cl_list));
		reset_conn(src_sock_o);
		kref_get(&(src_sock_o->reversedir->ref));
		kref_put(&(src_sock_o->ref), free_conn);
	}

	spin_unlock_bh(&cor_bindnodes);
out:
	mutex_unlock(&(cs->lock));
}

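/*
 * Turn an unconnected cor_sock into a listener on the given port. Returns
 * -EADDRINUSE if the port is already taken.
 */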
int open_port(struct cor_sock *cs_l, __be16 port)
{
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);
	if (get_corsock_by_port(port) != 0) {
		rc = -EADDRINUSE;
		goto out;
	}

	BUG_ON(cs_l->type != CS_TYPE_UNCONNECTED);

	cs_l->type = CS_TYPE_LISTENER;
	cs_l->data.listener.port = port;
	cs_l->data.listener.publish_service = cs_l->publish_service;

	/* kref is not used here */
	INIT_LIST_HEAD(&(cs_l->data.listener.conn_queue));

	list_add_tail((struct list_head *) &(cs_l->data.listener.lh),
			&openports);

out:
	spin_unlock_bh(&cor_bindnodes);

	return rc;
}

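/*
 * connect_port: connect a conn pair to a local listener socket and queue it
 * on the listener's accept queue.
 */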
/**
 * rc == 0 connected
 * rc == 2 port not open
 * rc == 3 listener queue full
 */
int connect_port(struct conn *trgt_unconn_l, __be16 port)
{
	struct cor_sock *cs;
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);

	cs = get_corsock_by_port(port);
	if (cs == 0) {
		rc = 2;
		goto out;
	}

	if (unlikely(cs->data.listener.queue_len >=
			cs->data.listener.queue_maxlen)) {
		if (cs->data.listener.queue_maxlen <= 0)
			rc = 2;
		else
			rc = 3;

		goto out;
	}

	kref_get(&(trgt_unconn_l->ref));
	kref_get(&(trgt_unconn_l->reversedir->ref));

	BUG_ON(trgt_unconn_l->is_client != 1);
	spin_lock_bh(&(trgt_unconn_l->reversedir->rcv_lock));
	conn_init_sock_target(trgt_unconn_l);
	conn_init_sock_source(trgt_unconn_l->reversedir);
	spin_unlock_bh(&(trgt_unconn_l->reversedir->rcv_lock));

	list_add_tail(&(trgt_unconn_l->reversedir->source.sock.cl_list),
			&(cs->data.listener.conn_queue));
	cs->data.listener.queue_len++;
	cs->sk.sk_state_change(&(cs->sk));

out:
	spin_unlock_bh(&cor_bindnodes);
	return rc;
}

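/*
 * connect_neigh: connect a conn pair to the neighbor with the given address
 * and send the connect control message.
 */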
/**
 * rc == 0 connected
 * rc == 3 addr not found
 * rc == 4 ==> connid allocation failed
 * rc == 4 ==> control msg alloc failed
 */
int connect_neigh(struct conn *trgt_unconn_l, char *addr, __u16 addrlen)
{
	int rc = 0;
	int ciorc;
	struct control_msg_out *cm;
	struct neighbor *nb = 0;
	__u64 seqno2;

	nb = find_neigh(addr, addrlen);
	if (nb == 0) {
		rc = 3;
		goto discard;
	}

	cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
	if (unlikely(cm == 0)) {
		rc = 4;
		goto discard;
	}

	spin_lock_bh(&(trgt_unconn_l->reversedir->rcv_lock));
	ciorc = conn_init_out(trgt_unconn_l, nb, 0, 0);
	seqno2 = trgt_unconn_l->reversedir->source.in.next_seqno;
	spin_unlock_bh(&(trgt_unconn_l->reversedir->rcv_lock));
	if (unlikely(ciorc)) {
		rc = 4;
		goto freecm;
	}

	send_connect_nb(cm, trgt_unconn_l->target.out.conn_id,
			trgt_unconn_l->target.out.seqno_nextsend, seqno2,
			trgt_unconn_l->reversedir);

	if (0) {
freecm:
		free_control_msg(cm);
discard:
		trgt_unconn_l->targettype = TARGET_DISCARD;
	}

	if (nb != 0)
		kref_put(&(nb->ref), neighbor_free);

	return rc;
}

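/*
 * Tear down one direction of a conn: unhash it from the connid rbtree,
 * queue a connid reuse entry if needed, notify the peer and detach any
 * attached socket. Returns the number of references the caller must drop.
 */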
static int _reset_conn(struct conn *cn, int trgt_out_resetneeded)
{
	/*
	 * active conns have an additional ref to make sure that they are not
	 * freed when only one direction is referenced by the connid hashtable
	 */
	int krefput = 1;

	/* lock sourcetype/targettype */
	spin_lock_bh(&(cn->rcv_lock));

	if (cn->sourcetype == SOURCE_IN) {
		unsigned long iflags;

		spin_lock_irqsave(&(cn->source.in.nb->conn_list_lock), iflags);
		list_del(&(cn->source.in.nb_list));
		spin_unlock_irqrestore(&(cn->source.in.nb->conn_list_lock),
				iflags);

		krefput++;

		if (cn->source.in.conn_id != 0 &&
				(cn->source.in.conn_id & (1 << 31)) != 0) {
			BUG_ON(cn->source.in.cir != 0);
		} else if (cn->source.in.conn_id != 0 &&
				(cn->source.in.conn_id & (1 << 31)) == 0) {
			BUG_ON(cn->source.in.cir == 0);

			kref_init(&(cn->source.in.cir->ref));
			cn->source.in.cir->conn_id = cn->source.in.conn_id;
			cn->source.in.cir->pingcnt =
					cn->source.in.nb->connid_reuse_pingcnt;

			spin_lock_bh(&(cn->source.in.nb->connid_reuse_lock));
			insert_connid_reuse(cn->source.in.nb,
					cn->source.in.cir);
			list_add_tail(&(cn->source.in.cir->lh),
					&(cn->source.in.nb->connid_reuse_list));
			spin_unlock_bh(&(cn->source.in.nb->connid_reuse_lock));

			cn->source.in.cir = 0;
		}

		if (cn->source.in.conn_id != 0) {
			spin_lock_bh(&(cn->source.in.nb->connid_lock));
			rb_erase(&(cn->source.in.rbn),
					&(cn->source.in.nb->connid_rb));
			spin_unlock_bh(&(cn->source.in.nb->connid_lock));
			krefput++;
		}
		cn->source.in.conn_id = 0;

		free_ack_conns(cn);

		if (cn->is_client)
			atomic_dec(&num_conns);

		reset_ooo_queue(cn);
	} else if (cn->sourcetype == SOURCE_SOCK) {
		if (likely(cn->source.sock.cs != 0)) {
			cor_sock_flushtoconn(cn->source.sock.cs);
			kref_put(&(cn->source.sock.cs->ref), free_sock);
			cn->source.sock.cs = 0;
		}
	}

	if (cn->targettype == TARGET_UNCONNECTED) {
		connreset_cpacket_buffer(cn);
	} else if (cn->targettype == TARGET_OUT) {
		if (trgt_out_resetneeded && cn->target.out.conn_id != 0) {
			send_reset_conn(cn->target.out.nb,
					cn->target.out.conn_id, 0);
		}

		cn->target.out.conn_id = 0;

		cancel_conn_all_retrans(cn);

		qos_remove_conn(cn);

		spin_lock_bh(&(cn->target.out.nb->stalledconn_lock));
		if (cn->target.out.nbstalled_lh.prev != 0) {
			list_del(&(cn->target.out.nbstalled_lh));
			cn->target.out.nbstalled_lh.prev = 0;
			cn->target.out.nbstalled_lh.next = 0;
			krefput++;
		}
		spin_unlock_bh(&(cn->target.out.nb->stalledconn_lock));
	} else if (cn->targettype == TARGET_SOCK) {
		if (likely(cn->target.sock.cs != 0)) {
			cor_sock_readfromconn(cn->target.sock.cs);
			kref_put(&(cn->target.sock.cs->ref), free_sock);
			cn->target.sock.cs = 0;
		}
	}

	databuf_ackdiscard(cn);

	spin_unlock_bh(&(cn->rcv_lock));

	reset_bufferusage(cn); /* source in only */
	connreset_priority(cn);

	return krefput;
}

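/*
 * Reset both directions of a conn pair. The rcv_locks are taken in a fixed
 * order (client side first) to avoid deadlocks between the two directions.
 */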
/* warning: do not hold the rcv_lock while calling this! */
void reset_conn(struct conn *cn)
{
	int put1;
	int put2;

	int isreset1;
	int isreset2;

	if (cn->is_client) {
		spin_lock_bh(&(cn->rcv_lock));
		spin_lock_bh(&(cn->reversedir->rcv_lock));
	} else {
		spin_lock_bh(&(cn->reversedir->rcv_lock));
		spin_lock_bh(&(cn->rcv_lock));
	}

	BUG_ON(cn->isreset <= 1 && cn->reversedir->isreset >= 2);
	BUG_ON(cn->isreset >= 2 && cn->reversedir->isreset <= 1);

	isreset1 = cn->isreset;
	if (cn->isreset <= 1)
		cn->isreset = 2;

	isreset2 = cn->reversedir->isreset;
	if (cn->reversedir->isreset <= 1)
		cn->reversedir->isreset = 2;

	if (cn->is_client) {
		spin_unlock_bh(&(cn->rcv_lock));
		spin_unlock_bh(&(cn->reversedir->rcv_lock));
	} else {
		spin_unlock_bh(&(cn->reversedir->rcv_lock));
		spin_unlock_bh(&(cn->rcv_lock));
	}

	if (isreset1 >= 2)
		return;

	put1 = _reset_conn(cn, isreset1 == 0);
	put2 = _reset_conn(cn->reversedir, isreset2 == 0);

	/* free_conn may not be called before both _reset_conn calls have finished */
	while (put1 > 0) {
		kref_put(&(cn->ref), free_conn);
		put1--;
	}

	while (put2 > 0) {
		kref_put(&(cn->reversedir->ref), free_conn);
		put2--;
	}
}

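/*
 * Module init: print struct size diagnostics, create the conn slab caches
 * and initialize the remaining cor subsystems; any failure aborts loading.
 */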
static int __init cor_common_init(void)
{
	int rc;

	struct conn c;

	printk(KERN_ERR "sizeof conn: %d", (__u32) sizeof(c));
	printk(KERN_ERR " conn.source: %d", (__u32) sizeof(c.source));
	printk(KERN_ERR " conn.target: %d", (__u32) sizeof(c.target));
	printk(KERN_ERR " conn.target.out: %d", (__u32) sizeof(c.target.out));
	printk(KERN_ERR " conn.buf: %d", (__u32) sizeof(c.data_buf));

	printk(KERN_ERR " mutex: %d", (__u32) sizeof(struct mutex));
	printk(KERN_ERR " spinlock: %d", (__u32) sizeof(spinlock_t));
	printk(KERN_ERR " kref: %d", (__u32) sizeof(struct kref));

	rc = cor_util_init();
	if (unlikely(rc != 0))
		return rc;

	conn_slab = kmem_cache_create("cor_conn", sizeof(struct conn), 8, 0, 0);
	if (unlikely(conn_slab == 0))
		return -ENOMEM;

	connid_reuse_slab = kmem_cache_create("cor_connid_reuse",
			sizeof(struct connid_reuse_item), 8, 0, 0);
	if (unlikely(connid_reuse_slab == 0))
		return -ENOMEM;

	atomic_set(&num_conns, 0);
	barrier();

	rc = credits_init();
	if (unlikely(rc != 0))
		return rc;

	rc = forward_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_kgen_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_cpacket_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_snd_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_neighbor_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rcv_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_init2();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init2();
	if (unlikely(rc != 0))
		return rc;

#warning todo add_random_ready_callback

	return 0;
}

module_init(cor_common_init);
MODULE_LICENSE("GPL");