/*
 * Connection oriented routing
 * Copyright (C) 2007-2013 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
21 #include <linux/mutex.h>
25 DEFINE_SPINLOCK(cor_bindnodes
);
26 static DEFINE_SPINLOCK(conn_free
);
27 static DEFINE_SPINLOCK(connid_gen
);
29 static LIST_HEAD(openports
);
31 static struct kmem_cache
*conn_slab
;
32 static struct kmem_cache
*connid_reuse_slab
;
37 struct conn
*get_conn(struct neighbor
*nb
, __u32 conn_id
)
39 struct rb_node
* n
= 0;
42 spin_lock_bh(&(nb
->connid_lock
));
44 n
= nb
->connid_rb
.rb_node
;
46 while (likely(n
!= 0) && ret
== 0) {
47 struct conn
*src_in_o
= container_of(n
, struct conn
,
50 BUG_ON(src_in_o
->sourcetype
!= SOURCE_IN
);
52 if (conn_id
< src_in_o
->source
.in
.conn_id
)
54 else if (conn_id
> src_in_o
->source
.in
.conn_id
)
61 kref_get(&(ret
->ref
));
63 spin_unlock_bh(&(nb
->connid_lock
));
68 static int insert_connid(struct neighbor
*nb
, struct conn
*src_in_ll
)
72 __u32 conn_id
= src_in_ll
->source
.in
.conn_id
;
74 struct rb_node
**p
= 0;
75 struct rb_node
*parent
= 0;
77 BUG_ON(src_in_ll
->sourcetype
!= SOURCE_IN
);
79 spin_lock_bh(&(nb
->connid_lock
));
81 p
= &(nb
->connid_rb
.rb_node
);
84 struct conn
*src_in_o
= container_of(*p
, struct conn
,
87 BUG_ON(src_in_o
->sourcetype
!= SOURCE_IN
);
90 if (unlikely(conn_id
== src_in_o
->source
.in
.conn_id
)) {
92 } else if (conn_id
< src_in_o
->source
.in
.conn_id
) {
94 } else if (conn_id
> src_in_o
->source
.in
.conn_id
) {
101 kref_get(&(src_in_ll
->ref
));
102 rb_link_node(&(src_in_ll
->source
.in
.rbn
), parent
, p
);
109 spin_unlock_bh(&(nb
->connid_lock
));
114 struct connid_reuse_item
*get_connid_reuseitem(struct neighbor
*nb
,
117 struct rb_node
*n
= 0;
118 struct connid_reuse_item
*ret
= 0;
120 spin_lock_bh(&(nb
->connid_reuse_lock
));
122 n
= nb
->connid_reuse_rb
.rb_node
;
124 while (likely(n
!= 0) && ret
== 0) {
125 struct connid_reuse_item
*cir
= container_of(n
,
126 struct connid_reuse_item
, rbn
);
128 BUG_ON(cir
->conn_id
== 0);
130 if (conn_id
< cir
->conn_id
)
132 else if (conn_id
> cir
->conn_id
)
139 kref_get(&(ret
->ref
));
141 spin_unlock_bh(&(nb
->connid_reuse_lock
));
146 static void insert_connid_reuse(struct neighbor
*nb
,
147 struct connid_reuse_item
*ins
)
149 struct rb_node
**p
= 0;
150 struct rb_node
*parent
= 0;
152 BUG_ON(ins
->conn_id
== 0);
154 spin_lock_bh(&(nb
->connid_reuse_lock
));
156 p
= &(nb
->connid_reuse_rb
.rb_node
);
159 struct connid_reuse_item
*curr
= container_of(*p
,
160 struct connid_reuse_item
, rbn
);
162 BUG_ON(curr
->conn_id
== 0);
165 if (unlikely(ins
->conn_id
== curr
->conn_id
)) {
167 } else if (ins
->conn_id
< curr
->conn_id
) {
169 } else if (ins
->conn_id
> curr
->conn_id
) {
176 kref_get(&(ins
->ref
));
177 rb_link_node(&(ins
->rbn
), parent
, p
);
179 spin_unlock_bh(&(nb
->connid_reuse_lock
));
182 static void free_connid_reuse(struct kref
*ref
)
184 struct connid_reuse_item
*cir
= container_of(ref
,
185 struct connid_reuse_item
, ref
);
187 kmem_cache_free(connid_reuse_slab
, cir
);
190 void connid_used_pingsuccess(struct neighbor
*nb
)
192 struct connid_reuse_item
*cri
;
194 spin_lock_bh(&(nb
->connid_reuse_lock
));
196 nb
->connid_reuse_pingcnt
++;
197 while (list_empty(&(nb
->connid_reuse_list
)) == 0) {
198 cri
= container_of(nb
->connid_reuse_list
.next
,
199 struct connid_reuse_item
, lh
);
200 if ((cri
->pingcnt
+ CONNID_REUSE_RTTS
-
201 nb
->connid_reuse_pingcnt
) < 32768)
204 rb_erase(&(cri
->rbn
), &(nb
->connid_reuse_rb
));
205 kref_put(&(cri
->ref
), kreffree_bug
);
207 list_del(&(cri
->lh
));
208 kref_put(&(cri
->ref
), free_connid_reuse
);
211 spin_unlock_bh(&(nb
->connid_reuse_lock
));
214 static int connid_used(struct neighbor
*nb
, __u32 conn_id
)
217 struct connid_reuse_item
*cir
;
219 cn
= get_conn(nb
, conn_id
);
220 if (unlikely(cn
!= 0)) {
221 kref_put(&(cn
->ref
), free_conn
);
225 cir
= get_connid_reuseitem(nb
, conn_id
);
226 if (unlikely(cir
!= 0)) {
227 kref_put(&(cir
->ref
), free_connid_reuse
);
234 static int connid_alloc(struct neighbor
*nb
, struct conn
*src_in_ll
)
239 BUG_ON(src_in_ll
->sourcetype
!= SOURCE_IN
);
240 BUG_ON(src_in_ll
->reversedir
->targettype
!= TARGET_OUT
);
242 spin_lock_bh(&connid_gen
);
245 get_random_bytes((char *) &conn_id
, sizeof(conn_id
));
246 conn_id
= (conn_id
& ~(1 << 31));
248 if (unlikely(conn_id
== 0))
251 if (unlikely(connid_used(nb
, conn_id
)))
256 spin_unlock_bh(&connid_gen
);
261 src_in_ll
->source
.in
.conn_id
= conn_id
;
262 src_in_ll
->reversedir
->target
.out
.conn_id
= (conn_id
| (1 << 31));
263 if (insert_connid(nb
, src_in_ll
) != 0) {
266 spin_unlock_bh(&connid_gen
);
270 void _set_last_act(struct conn
*src_in_l
)
272 unsigned long iflags
;
273 src_in_l
->source
.in
.jiffies_last_act
= jiffies
;
274 spin_lock_irqsave(&(src_in_l
->source
.in
.nb
->conn_list_lock
), iflags
);
275 list_del(&(src_in_l
->source
.in
.nb_list
));
276 list_add_tail(&(src_in_l
->source
.in
.nb_list
),
277 &(src_in_l
->source
.in
.nb
->rcv_conn_list
));
278 spin_unlock_irqrestore(&(src_in_l
->source
.in
.nb
->conn_list_lock
),
282 void free_conn(struct kref
*ref
)
284 unsigned long iflags
;
285 struct conn
*cn
= container_of(ref
, struct conn
, ref
);
286 struct conn
*reversedir
= 0;
288 spin_lock_irqsave(&conn_free
, iflags
);
290 BUG_ON(cn
->isreset
== 0);
292 if (cn
->reversedir
!= 0)
293 cn
->reversedir
->isreset
= 3;
295 if (cn
->isreset
!= 3)
298 if (cn
->reversedir
!= 0) {
299 cn
->reversedir
->reversedir
= 0;
300 reversedir
= cn
->reversedir
;
304 if (cn
->sourcetype
== SOURCE_IN
) {
305 BUG_ON(cn
->source
.in
.conn_id
!= 0);
306 kref_put(&(cn
->source
.in
.nb
->ref
), neighbor_free
);
307 cn
->source
.in
.nb
= 0;
310 if (cn
->targettype
== TARGET_OUT
) {
311 BUG_ON(cn
->target
.out
.conn_id
!= 0);
312 kref_put(&(cn
->target
.out
.nb
->ref
), neighbor_free
);
313 cn
->target
.out
.nb
= 0;
316 BUG_ON(cn
->data_buf
.datasize
!= 0);
317 BUG_ON(cn
->data_buf
.overhead
!= 0);
319 memset(cn
, 9*16 + 10, sizeof(struct conn
));
320 kmem_cache_free(conn_slab
, cn
);
323 spin_unlock_irqrestore(&conn_free
, iflags
);
326 free_conn(&(reversedir
->ref
));
/*
 * rc == 1 ==> connid_reuse or connid allocation failed
 */
333 int conn_init_out(struct conn
*trgt_unconn_ll
, struct neighbor
*nb
,
334 __u32 rcvd_connid
, int use_rcvd_connid
)
336 unsigned long iflags
;
338 struct conn
*src_unconn_ll
= trgt_unconn_ll
->reversedir
;
340 BUG_ON(trgt_unconn_ll
->targettype
!= TARGET_UNCONNECTED
);
341 BUG_ON(src_unconn_ll
== 0);
342 BUG_ON(src_unconn_ll
->sourcetype
!= SOURCE_UNCONNECTED
);
344 memset(&(trgt_unconn_ll
->target
.out
), 0,
345 sizeof(trgt_unconn_ll
->target
.out
));
346 memset(&(src_unconn_ll
->source
.in
), 0,
347 sizeof(src_unconn_ll
->source
.in
));
349 trgt_unconn_ll
->targettype
= TARGET_OUT
;
350 src_unconn_ll
->sourcetype
= SOURCE_IN
;
352 if (use_rcvd_connid
) {
353 BUG_ON((rcvd_connid
& (1 << 31)) == 0);
355 src_unconn_ll
->source
.in
.conn_id
= rcvd_connid
;
356 if (unlikely(insert_connid(nb
, src_unconn_ll
) != 0)) {
357 src_unconn_ll
->source
.in
.conn_id
= 0;
362 src_unconn_ll
->source
.in
.cir
= kmem_cache_alloc(
363 connid_reuse_slab
, GFP_ATOMIC
);
364 if (unlikely(src_unconn_ll
->source
.in
.cir
== 0)) {
368 memset(src_unconn_ll
->source
.in
.cir
, 0,
369 sizeof(struct connid_reuse_item
));
371 if (unlikely(connid_alloc(nb
, src_unconn_ll
))) {
377 trgt_unconn_ll
->target
.out
.nb
= nb
;
378 src_unconn_ll
->source
.in
.nb
= nb
;
380 /* neighbor pointer */
381 kref_get(&(nb
->ref
));
382 kref_get(&(nb
->ref
));
384 INIT_LIST_HEAD(&(src_unconn_ll
->source
.in
.reorder_queue
));
386 INIT_LIST_HEAD(&(src_unconn_ll
->source
.in
.acks_pending
));
388 INIT_LIST_HEAD(&(trgt_unconn_ll
->target
.out
.retrans_list
));
390 reset_seqno(trgt_unconn_ll
, 0);
391 if (use_rcvd_connid
== 0) {
392 get_random_bytes((char *)
393 &(trgt_unconn_ll
->target
.out
.seqno_nextsend
),
395 trgt_unconn_ll
->target
.out
.seqno_nextsend
));
396 trgt_unconn_ll
->target
.out
.seqno_acked
=
397 trgt_unconn_ll
->target
.out
.seqno_nextsend
;
398 reset_seqno(trgt_unconn_ll
,
399 trgt_unconn_ll
->target
.out
.seqno_nextsend
);
401 get_random_bytes((char *)
402 &(src_unconn_ll
->source
.in
.next_seqno
),
403 sizeof(src_unconn_ll
->source
.in
.next_seqno
));
404 src_unconn_ll
->source
.in
.window_seqnolimit
=
405 src_unconn_ll
->source
.in
.next_seqno
;
406 src_unconn_ll
->source
.in
.window_seqnolimit_remote
=
407 src_unconn_ll
->source
.in
.next_seqno
;
410 get_random_bytes((char *) &(trgt_unconn_ll
->target
.out
.priority_seqno
),
412 trgt_unconn_ll
->source
.in
.priority_seqno
= 0;
414 src_unconn_ll
->source
.in
.jiffies_last_act
= jiffies
;
416 spin_lock_irqsave(&(nb
->conn_list_lock
), iflags
);
417 list_add_tail(&(src_unconn_ll
->source
.in
.nb_list
),
418 &(nb
->rcv_conn_list
));
419 spin_unlock_irqrestore(&(nb
->conn_list_lock
), iflags
);
422 kref_get(&(src_unconn_ll
->ref
));
424 if (src_unconn_ll
->is_client
)
425 atomic_inc(&num_conns
);
427 if (use_rcvd_connid
== 0)
428 update_windowlimit(src_unconn_ll
);
432 kmem_cache_free(connid_reuse_slab
,
433 src_unconn_ll
->source
.in
.cir
);
434 src_unconn_ll
->source
.in
.cir
= 0;
436 trgt_unconn_ll
->targettype
= TARGET_UNCONNECTED
;
437 src_unconn_ll
->sourcetype
= SOURCE_UNCONNECTED
;
442 void conn_init_sock_source(struct conn
*cn
)
445 cn
->sourcetype
= SOURCE_SOCK
;
446 memset(&(cn
->source
.sock
), 0, sizeof(cn
->source
.sock
));
447 cn
->source
.sock
.priority
= PRIORITY_MAX
;
450 void conn_init_sock_target(struct conn
*cn
)
453 cn
->targettype
= TARGET_SOCK
;
454 memset(&(cn
->target
.sock
), 0, sizeof(cn
->target
.sock
));
458 struct conn
* alloc_conn(gfp_t allocflags
)
460 struct conn
*cn1
= 0;
461 struct conn
*cn2
= 0;
463 cn1
= kmem_cache_alloc(conn_slab
, allocflags
);
464 if (unlikely(cn1
== 0))
467 cn2
= kmem_cache_alloc(conn_slab
, allocflags
);
468 if (unlikely(cn2
== 0))
471 memset(cn1
, 0, sizeof(struct conn
));
472 memset(cn2
, 0, sizeof(struct conn
));
474 cn1
->reversedir
= cn2
;
475 cn2
->reversedir
= cn1
;
477 kref_init(&(cn1
->ref
));
478 kref_init(&(cn2
->ref
));
480 cn1
->sourcetype
= SOURCE_UNCONNECTED
;
481 cn2
->sourcetype
= SOURCE_UNCONNECTED
;
482 cn1
->targettype
= TARGET_UNCONNECTED
;
483 cn2
->targettype
= TARGET_UNCONNECTED
;
488 spin_lock_init(&(cn1
->rcv_lock
));
489 spin_lock_init(&(cn2
->rcv_lock
));
494 speedtracker_init(&(cn1
->st
));
495 speedtracker_init(&(cn2
->st
));
500 kmem_cache_free(conn_slab
, cn1
);
505 static struct cor_sock
*get_corsock_by_port(__be64 port
)
507 struct list_head
*curr
= openports
.next
;
509 while (curr
!= &openports
) {
510 struct cor_sock
*cs
= container_of(curr
, struct cor_sock
,
512 BUG_ON(cs
->type
!= CS_TYPE_LISTENER
);
513 if (cs
->data
.listener
.port
== port
)
522 __u32
list_services(char *buf
, __u32 buflen
)
526 __u32 buf_offset
= 4;
528 struct list_head
*curr
;
532 * The variable length header rowcount need to be generated after the
533 * data. This is done by reserving the maximum space they could take. If
534 * they end up being smaller, the data is moved so that there is no gap.
538 BUG_ON(buflen
< buf_offset
);
540 spin_lock_bh(&cor_bindnodes
);
542 curr
= openports
.next
;
544 while (curr
!= &openports
) {
545 struct cor_sock
*cs
= container_of(curr
, struct cor_sock
,
547 BUG_ON(cs
->type
!= CS_TYPE_LISTENER
);
549 if (cs
->data
.listener
.publish_service
== 0)
552 if (unlikely(buf_offset
+ 2 < buf_offset
) ||
553 buf_offset
+ 2 > buflen
)
556 buf
[buf_offset
] = ((char *) &(cs
->data
.listener
.port
))[0];
557 buf
[buf_offset
+1] = ((char *) &(cs
->data
.listener
.port
))[1];
565 spin_unlock_bh(&cor_bindnodes
);
567 rc
= encode_len(buf
, 4, cnt
);
572 memmove(buf
+ ((__u32
) rc
), buf
+4, buf_offset
);
574 return buf_offset
- 4 + ((__u32
) rc
);
578 void set_publish_service(struct cor_sock
*cs
, __u8 value
)
580 BUG_ON (value
!= 0 && value
!= 1);
582 mutex_lock(&(cs
->lock
));
584 cs
->publish_service
= value
;
586 if (cs
->type
== CS_TYPE_LISTENER
) {
587 spin_lock_bh(&cor_bindnodes
);
588 cs
->data
.listener
.publish_service
= value
;
589 spin_unlock_bh(&cor_bindnodes
);
592 mutex_unlock(&(cs
->lock
));
595 void close_port(struct cor_sock
*cs
)
597 mutex_lock(&(cs
->lock
));
598 if (unlikely(cs
->type
!= CS_TYPE_LISTENER
))
601 spin_lock_bh(&cor_bindnodes
);
603 list_del(&(cs
->data
.listener
.lh
));
605 while (list_empty(&(cs
->data
.listener
.conn_queue
)) == 0) {
606 struct conn
*src_sock_o
= container_of(
607 cs
->data
.listener
.conn_queue
.next
,
608 struct conn
, source
.sock
.cl_list
);
609 list_del(&(src_sock_o
->source
.sock
.cl_list
));
610 reset_conn(src_sock_o
);
611 kref_get(&(src_sock_o
->reversedir
->ref
));
612 kref_put(&(src_sock_o
->ref
), free_conn
);
615 spin_unlock_bh(&cor_bindnodes
);
617 mutex_unlock(&(cs
->lock
));
620 int open_port(struct cor_sock
*cs_l
, __be16 port
)
624 spin_lock_bh(&cor_bindnodes
);
625 if (get_corsock_by_port(port
) != 0) {
630 BUG_ON(cs_l
->type
!= CS_TYPE_UNCONNECTED
);
632 cs_l
->type
= CS_TYPE_LISTENER
;
633 cs_l
->data
.listener
.port
= port
;
634 cs_l
->data
.listener
.publish_service
= cs_l
->publish_service
;
636 /* kref is not used here */
637 INIT_LIST_HEAD(&(cs_l
->data
.listener
.conn_queue
));
639 list_add_tail((struct list_head
*) &(cs_l
->data
.listener
.lh
),
643 spin_unlock_bh(&cor_bindnodes
);
/*
 * rc == 2 ==> port not open
 * rc == 3 ==> listener queue full
 */
653 int connect_port(struct conn
*trgt_unconn_l
, __be16 port
)
658 spin_lock_bh(&cor_bindnodes
);
660 cs
= get_corsock_by_port(port
);
666 if (unlikely(cs
->data
.listener
.queue_len
>=
667 cs
->data
.listener
.queue_maxlen
)) {
668 if (cs
->data
.listener
.queue_maxlen
<= 0)
676 kref_get(&(trgt_unconn_l
->ref
));
677 kref_get(&(trgt_unconn_l
->reversedir
->ref
));
679 BUG_ON(trgt_unconn_l
->is_client
!= 1);
680 spin_lock_bh(&(trgt_unconn_l
->reversedir
->rcv_lock
));
681 conn_init_sock_target(trgt_unconn_l
);
682 conn_init_sock_source(trgt_unconn_l
->reversedir
);
683 spin_unlock_bh(&(trgt_unconn_l
->reversedir
->rcv_lock
));
685 list_add_tail(&(trgt_unconn_l
->reversedir
->source
.sock
.cl_list
),
686 &(cs
->data
.listener
.conn_queue
));
687 cs
->data
.listener
.queue_len
++;
688 cs
->sk
.sk_state_change(&(cs
->sk
));
691 spin_unlock_bh(&cor_bindnodes
);
/*
 * rc == 3 ==> addr not found
 * rc == 4 ==> connid allocation failed
 * rc == 4 ==> control msg alloc failed
 */
701 int connect_neigh(struct conn
*trgt_unconn_l
, char *addr
, __u16 addrlen
)
705 struct control_msg_out
*cm
;
706 struct neighbor
*nb
= 0;
709 nb
= find_neigh(addr
, addrlen
);
716 cm
= alloc_control_msg(nb
, ACM_PRIORITY_LOW
);
717 if (unlikely(cm
== 0)) {
722 spin_lock_bh(&(trgt_unconn_l
->reversedir
->rcv_lock
));
723 ciorc
= conn_init_out(trgt_unconn_l
, nb
, 0, 0);
724 seqno2
= trgt_unconn_l
->reversedir
->source
.in
.next_seqno
;
725 spin_unlock_bh(&(trgt_unconn_l
->reversedir
->rcv_lock
));
726 if (unlikely(ciorc
)) {
731 send_connect_nb(cm
, trgt_unconn_l
->target
.out
.conn_id
,
732 trgt_unconn_l
->target
.out
.seqno_nextsend
, seqno2
,
733 trgt_unconn_l
->reversedir
);
737 free_control_msg(cm
);
739 trgt_unconn_l
->targettype
= TARGET_DISCARD
;
743 kref_put(&(nb
->ref
), neighbor_free
);
748 static int _reset_conn(struct conn
*cn
, int trgt_out_resetneeded
)
751 * active conns have an additional ref to make sure that they are not
752 * freed when only one direction is referenced by the connid hashtable
756 /* lock sourcetype/targettype */
757 spin_lock_bh(&(cn
->rcv_lock
));
759 if (cn
->sourcetype
== SOURCE_IN
) {
760 unsigned long iflags
;
762 spin_lock_irqsave(&(cn
->source
.in
.nb
->conn_list_lock
), iflags
);
763 list_del(&(cn
->source
.in
.nb_list
));
764 spin_unlock_irqrestore(&(cn
->source
.in
.nb
->conn_list_lock
),
769 if (cn
->source
.in
.conn_id
!= 0 &&
770 (cn
->source
.in
.conn_id
& (1 << 31)) != 0) {
771 BUG_ON(cn
->source
.in
.cir
!= 0);
772 } else if (cn
->source
.in
.conn_id
!= 0 &&
773 (cn
->source
.in
.conn_id
& (1 << 31)) == 0) {
774 BUG_ON(cn
->source
.in
.cir
== 0);
776 kref_init(&(cn
->source
.in
.cir
->ref
));
777 cn
->source
.in
.cir
->conn_id
= cn
->source
.in
.conn_id
;
778 cn
->source
.in
.cir
->pingcnt
=
779 cn
->source
.in
.nb
->connid_reuse_pingcnt
;
781 spin_lock_bh(&(cn
->source
.in
.nb
->connid_reuse_lock
));
782 insert_connid_reuse(cn
->source
.in
.nb
,
784 list_add_tail(&(cn
->source
.in
.cir
->lh
),
785 &(cn
->source
.in
.nb
->connid_reuse_list
));
786 spin_unlock_bh(&(cn
->source
.in
.nb
->connid_reuse_lock
));
788 cn
->source
.in
.cir
= 0;
791 if (cn
->source
.in
.conn_id
!= 0) {
792 spin_lock_bh(&(cn
->source
.in
.nb
->connid_lock
));
793 rb_erase(&(cn
->source
.in
.rbn
),
794 &(cn
->source
.in
.nb
->connid_rb
));
795 spin_unlock_bh(&(cn
->source
.in
.nb
->connid_lock
));
798 cn
->source
.in
.conn_id
= 0;
804 atomic_dec(&num_conns
);
807 } else if (cn
->sourcetype
== SOURCE_SOCK
) {
808 if (likely(cn
->source
.sock
.cs
!= 0)) {
809 cor_sock_flushtoconn(cn
->source
.sock
.cs
);
810 kref_put(&(cn
->source
.sock
.cs
->ref
), free_sock
);
811 cn
->source
.sock
.cs
= 0;
815 if (cn
->targettype
== TARGET_UNCONNECTED
) {
816 connreset_cpacket_buffer(cn
);
817 } else if (cn
->targettype
== TARGET_OUT
) {
818 if (trgt_out_resetneeded
&& cn
->target
.out
.conn_id
!= 0) {
819 send_reset_conn(cn
->target
.out
.nb
,
820 cn
->target
.out
.conn_id
, 0);
823 cn
->target
.out
.conn_id
= 0;
825 cancel_conn_all_retrans(cn
);
829 spin_lock_bh(&(cn
->target
.out
.nb
->stalledconn_lock
));
830 if (cn
->target
.out
.nbstalled_lh
.prev
!= 0) {
831 list_del(&(cn
->target
.out
.nbstalled_lh
));
832 cn
->target
.out
.nbstalled_lh
.prev
= 0;
833 cn
->target
.out
.nbstalled_lh
.next
= 0;
836 spin_unlock_bh(&(cn
->target
.out
.nb
->stalledconn_lock
));
837 } else if (cn
->targettype
== TARGET_SOCK
) {
838 if (likely(cn
->target
.sock
.cs
!= 0)) {
839 cor_sock_readfromconn(cn
->target
.sock
.cs
);
840 kref_put(&(cn
->target
.sock
.cs
->ref
), free_sock
);
841 cn
->target
.sock
.cs
= 0;
845 databuf_ackdiscard(cn
);
847 spin_unlock_bh(&(cn
->rcv_lock
));
849 reset_bufferusage(cn
); /* source in only */
850 connreset_priority(cn
);
855 /* warning: do not hold the rcv_lock while calling this! */
856 void reset_conn(struct conn
*cn
)
865 spin_lock_bh(&(cn
->rcv_lock
));
866 spin_lock_bh(&(cn
->reversedir
->rcv_lock
));
868 spin_lock_bh(&(cn
->reversedir
->rcv_lock
));
869 spin_lock_bh(&(cn
->rcv_lock
));
872 BUG_ON(cn
->isreset
<= 1 && cn
->reversedir
->isreset
>= 2);
873 BUG_ON(cn
->isreset
>= 2 && cn
->reversedir
->isreset
<= 1);
875 isreset1
= cn
->isreset
;
876 if (cn
->isreset
<= 1)
879 isreset2
= cn
->reversedir
->isreset
;
880 if (cn
->reversedir
->isreset
<= 1)
881 cn
->reversedir
->isreset
= 2;
884 spin_unlock_bh(&(cn
->rcv_lock
));
885 spin_unlock_bh(&(cn
->reversedir
->rcv_lock
));
887 spin_unlock_bh(&(cn
->reversedir
->rcv_lock
));
888 spin_unlock_bh(&(cn
->rcv_lock
));
894 put1
= _reset_conn(cn
, isreset1
== 0);
895 put2
= _reset_conn(cn
->reversedir
, isreset2
== 0);
897 /* free_conn may not be called, before both _reset_conn have finished */
899 kref_put(&(cn
->ref
), free_conn
);
904 kref_put(&(cn
->reversedir
->ref
), free_conn
);
/*
 * Module init: print struct-size diagnostics, create the slab caches and
 * run each subsystem's init in order, aborting on the first failure.
 * NOTE(review): this block is a line-mangled extract — braces, return
 * statements, the declarations of rc / struct conn c and the names of
 * several "rc = ..._init()" calls (original lines 943, 947, 959, 963,
 * 971, 983) are missing; restore from upstream.  Comments below describe
 * only what is visible.
 */
909 static int __init
cor_common_init(void)
/* size diagnostics for struct conn and common kernel primitives;
 * NOTE(review): "%d" is paired with __u32 arguments — "%u" would match */
915 printk(KERN_ERR
"sizeof conn: %d", (__u32
) sizeof(c
));
916 printk(KERN_ERR
" conn.source: %d", (__u32
) sizeof(c
.source
));
917 printk(KERN_ERR
" conn.target: %d", (__u32
) sizeof(c
.target
));
918 printk(KERN_ERR
" conn.target.out: %d", (__u32
) sizeof(c
.target
.out
));
919 printk(KERN_ERR
" conn.buf: %d", (__u32
) sizeof(c
.data_buf
));
921 printk(KERN_ERR
" mutex: %d", (__u32
) sizeof(struct mutex
));
922 printk(KERN_ERR
" spinlock: %d", (__u32
) sizeof(spinlock_t
));
923 printk(KERN_ERR
" kref: %d", (__u32
) sizeof(struct kref
));
/* utility subsystem first */
926 rc
= cor_util_init();
927 if (unlikely(rc
!= 0))
/* slab caches for conns and connid reuse records (8-byte aligned) */
930 conn_slab
= kmem_cache_create("cor_conn", sizeof(struct conn
), 8, 0, 0);
931 if (unlikely(conn_slab
== 0))
934 connid_reuse_slab
= kmem_cache_create("cor_connid_reuse",
935 sizeof(struct connid_reuse_item
), 8, 0, 0);
936 if (unlikely(connid_reuse_slab
== 0))
/* no active conns yet */
940 atomic_set(&num_conns
, 0);
/* remaining subsystem inits; each rc is checked — the call names for
 * these two steps were lost in extraction */
944 if (unlikely(rc
!= 0))
948 if (unlikely(rc
!= 0))
951 rc
= cor_kgen_init();
952 if (unlikely(rc
!= 0))
955 rc
= cor_cpacket_init();
956 if (unlikely(rc
!= 0))
960 if (unlikely(rc
!= 0))
964 if (unlikely(rc
!= 0))
967 rc
= cor_neighbor_init();
968 if (unlikely(rc
!= 0))
972 if (unlikely(rc
!= 0))
975 rc
= cor_sock_init1();
976 if (unlikely(rc
!= 0))
979 rc
= cor_sock_init2();
980 if (unlikely(rc
!= 0))
984 if (unlikely(rc
!= 0))
987 #warning todo add_random_ready_callback
992 module_init(cor_common_init
);
993 MODULE_LICENSE("GPL");