/*
 * Connection oriented routing
 * Copyright (C) 2007-2019 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
21 #include <linux/mutex.h>
25 DEFINE_SPINLOCK(cor_bindnodes
);
26 static DEFINE_SPINLOCK(conn_free
);
27 static DEFINE_SPINLOCK(connid_gen
);
29 static LIST_HEAD(openports
);
31 static struct kmem_cache
*conn_slab
;
32 static struct kmem_cache
*connid_reuse_slab
;
37 struct conn
*get_conn(struct neighbor
*nb
, __u32 conn_id
)
39 struct rb_node
* n
= 0;
42 spin_lock_bh(&(nb
->connid_lock
));
44 n
= nb
->connid_rb
.rb_node
;
46 while (likely(n
!= 0) && ret
== 0) {
47 struct conn
*src_in_o
= container_of(n
, struct conn
,
50 BUG_ON(src_in_o
->sourcetype
!= SOURCE_IN
);
52 if (conn_id
< src_in_o
->source
.in
.conn_id
)
54 else if (conn_id
> src_in_o
->source
.in
.conn_id
)
61 kref_get(&(ret
->ref
));
63 spin_unlock_bh(&(nb
->connid_lock
));
68 static int insert_connid(struct neighbor
*nb
, struct conn
*src_in_ll
)
72 __u32 conn_id
= src_in_ll
->source
.in
.conn_id
;
76 struct rb_node
*parent
= 0;
78 BUG_ON(src_in_ll
->sourcetype
!= SOURCE_IN
);
80 spin_lock_bh(&(nb
->connid_lock
));
82 root
= &(nb
->connid_rb
);
86 struct conn
*src_in_o
= container_of(*p
, struct conn
,
89 BUG_ON(src_in_o
->sourcetype
!= SOURCE_IN
);
92 if (unlikely(conn_id
== src_in_o
->source
.in
.conn_id
)) {
94 } else if (conn_id
< src_in_o
->source
.in
.conn_id
) {
96 } else if (conn_id
> src_in_o
->source
.in
.conn_id
) {
103 kref_get(&(src_in_ll
->ref
));
104 rb_link_node(&(src_in_ll
->source
.in
.rbn
), parent
, p
);
105 rb_insert_color(&(src_in_ll
->source
.in
.rbn
), root
);
112 spin_unlock_bh(&(nb
->connid_lock
));
117 struct connid_reuse_item
*get_connid_reuseitem(struct neighbor
*nb
,
120 struct rb_node
*n
= 0;
121 struct connid_reuse_item
*ret
= 0;
123 spin_lock_bh(&(nb
->connid_reuse_lock
));
125 n
= nb
->connid_reuse_rb
.rb_node
;
127 while (likely(n
!= 0) && ret
== 0) {
128 struct connid_reuse_item
*cir
= container_of(n
,
129 struct connid_reuse_item
, rbn
);
131 BUG_ON(cir
->conn_id
== 0);
133 if (conn_id
< cir
->conn_id
)
135 else if (conn_id
> cir
->conn_id
)
142 kref_get(&(ret
->ref
));
144 spin_unlock_bh(&(nb
->connid_reuse_lock
));
149 /* nb->connid_reuse_lock must be held by the caller */
150 static void insert_connid_reuse(struct neighbor
*nb
,
151 struct connid_reuse_item
*ins
)
153 struct rb_root
*root
;
155 struct rb_node
*parent
= 0;
157 BUG_ON(ins
->conn_id
== 0);
159 root
= &(nb
->connid_reuse_rb
);
160 p
= &(root
->rb_node
);
163 struct connid_reuse_item
*curr
= container_of(*p
,
164 struct connid_reuse_item
, rbn
);
166 BUG_ON(curr
->conn_id
== 0);
169 if (unlikely(ins
->conn_id
== curr
->conn_id
)) {
171 } else if (ins
->conn_id
< curr
->conn_id
) {
173 } else if (ins
->conn_id
> curr
->conn_id
) {
180 kref_get(&(ins
->ref
));
181 rb_link_node(&(ins
->rbn
), parent
, p
);
182 rb_insert_color(&(ins
->rbn
), root
);
185 static void free_connid_reuse(struct kref
*ref
)
187 struct connid_reuse_item
*cir
= container_of(ref
,
188 struct connid_reuse_item
, ref
);
190 kmem_cache_free(connid_reuse_slab
, cir
);
193 void delete_connid_reuse_items(struct neighbor
*nb
)
195 struct connid_reuse_item
*cri
;
197 spin_lock_bh(&(nb
->connid_reuse_lock
));
199 while (list_empty(&(nb
->connid_reuse_list
)) == 0) {
200 cri
= container_of(nb
->connid_reuse_list
.next
,
201 struct connid_reuse_item
, lh
);
203 rb_erase(&(cri
->rbn
), &(nb
->connid_reuse_rb
));
204 kref_put(&(cri
->ref
), kreffree_bug
);
206 list_del(&(cri
->lh
));
207 kref_put(&(cri
->ref
), free_connid_reuse
);
210 spin_unlock_bh(&(nb
->connid_reuse_lock
));
213 void connid_used_pingsuccess(struct neighbor
*nb
)
215 struct connid_reuse_item
*cri
;
217 spin_lock_bh(&(nb
->connid_reuse_lock
));
219 nb
->connid_reuse_pingcnt
++;
220 while (list_empty(&(nb
->connid_reuse_list
)) == 0) {
221 cri
= container_of(nb
->connid_reuse_list
.next
,
222 struct connid_reuse_item
, lh
);
223 if ((cri
->pingcnt
+ CONNID_REUSE_RTTS
-
224 nb
->connid_reuse_pingcnt
) < 32768)
227 rb_erase(&(cri
->rbn
), &(nb
->connid_reuse_rb
));
228 kref_put(&(cri
->ref
), kreffree_bug
);
230 list_del(&(cri
->lh
));
231 kref_put(&(cri
->ref
), free_connid_reuse
);
234 spin_unlock_bh(&(nb
->connid_reuse_lock
));
237 static int connid_used(struct neighbor
*nb
, __u32 conn_id
)
240 struct connid_reuse_item
*cir
;
242 cn
= get_conn(nb
, conn_id
);
243 if (unlikely(cn
!= 0)) {
244 kref_put(&(cn
->ref
), free_conn
);
248 cir
= get_connid_reuseitem(nb
, conn_id
);
249 if (unlikely(cir
!= 0)) {
250 kref_put(&(cir
->ref
), free_connid_reuse
);
257 static int connid_alloc(struct neighbor
*nb
, struct conn
*src_in_ll
)
262 BUG_ON(src_in_ll
->sourcetype
!= SOURCE_IN
);
263 BUG_ON(src_in_ll
->reversedir
->targettype
!= TARGET_OUT
);
265 spin_lock_bh(&connid_gen
);
268 get_random_bytes((char *) &conn_id
, sizeof(conn_id
));
269 conn_id
= (conn_id
& ~(1 << 31));
271 if (unlikely(conn_id
== 0))
274 if (unlikely(connid_used(nb
, conn_id
)))
279 spin_unlock_bh(&connid_gen
);
284 src_in_ll
->source
.in
.conn_id
= conn_id
;
285 src_in_ll
->reversedir
->target
.out
.conn_id
= (conn_id
| (1 << 31));
286 if (insert_connid(nb
, src_in_ll
) != 0) {
289 spin_unlock_bh(&connid_gen
);
293 void _set_last_act(struct conn
*src_in_l
)
295 unsigned long iflags
;
296 src_in_l
->source
.in
.jiffies_last_act
= jiffies
;
297 spin_lock_irqsave(&(src_in_l
->source
.in
.nb
->conn_list_lock
), iflags
);
298 list_del(&(src_in_l
->source
.in
.nb_list
));
299 list_add_tail(&(src_in_l
->source
.in
.nb_list
),
300 &(src_in_l
->source
.in
.nb
->rcv_conn_list
));
301 spin_unlock_irqrestore(&(src_in_l
->source
.in
.nb
->conn_list_lock
),
305 void free_conn(struct kref
*ref
)
307 unsigned long iflags
;
308 struct conn
*cn
= container_of(ref
, struct conn
, ref
);
309 struct conn
*reversedir
= 0;
311 spin_lock_irqsave(&conn_free
, iflags
);
313 BUG_ON(cn
->isreset
== 0);
315 if (cn
->reversedir
!= 0)
316 cn
->reversedir
->isreset
= 3;
318 if (cn
->isreset
!= 3)
321 if (cn
->reversedir
!= 0) {
322 cn
->reversedir
->reversedir
= 0;
323 reversedir
= cn
->reversedir
;
327 if (cn
->sourcetype
== SOURCE_IN
) {
328 WARN_ONCE(list_empty(&(cn
->source
.in
.reorder_queue
)) == 0,
329 "cor free_conn(): cn->source.in.reorder_queue is not empty");
330 WARN_ONCE(list_empty(&(cn
->source
.in
.acks_pending
)) == 0,
331 "cor free_conn():cn->source.in.acks_pending is not empty");
333 WARN_ONCE(cn
->source
.in
.conn_id
!= 0,
334 "cor free_conn(): cn->source.in.conn_id is not 0");
335 kref_put(&(cn
->source
.in
.nb
->ref
), neighbor_free
);
336 cn
->source
.in
.nb
= 0;
339 if (cn
->targettype
== TARGET_OUT
) {
340 WARN_ONCE(list_empty(&(cn
->target
.out
.retrans_list
)) == 0,
341 "cor free_conn(): cn->target.out.retrans_list is not empty");
342 WARN_ONCE(cn
->target
.out
.rb
.in_queue
!= RB_INQUEUE_FALSE
,
343 "cor free_conn(): cn->target.out.rb.in_queue is not RB_INQUEUE_FALSE");
344 WARN_ONCE(cn
->target
.out
.conn_id
!= 0,
345 "cor free_conn(): cn->target.out.conn_id is not 0");
346 kref_put(&(cn
->target
.out
.nb
->ref
), neighbor_free
);
347 cn
->target
.out
.nb
= 0;
350 WARN_ONCE(cn
->data_buf
.datasize
!= 0,
351 "cor free_conn(): cn->data_buf.datasize is not 0");
352 WARN_ONCE(cn
->data_buf
.overhead
!= 0,
353 "cor free_conn(): cn->data_buf.overhead is not 0");
354 WARN_ONCE(list_empty(&(cn
->data_buf
.items
)) == 0,
355 "cor free_conn(): cn->data_buf.items is not empty");
356 WARN_ONCE(cn
->data_buf
.nextread
!= 0,
357 "cor free_conn(): cn->data_buf.nextread is not 0");
359 memset(cn
, 9*16 + 10, sizeof(struct conn
));
360 kmem_cache_free(conn_slab
, cn
);
363 spin_unlock_irqrestore(&conn_free
, iflags
);
366 free_conn(&(reversedir
->ref
));
371 * rc == 1 ==> connid_reuse or connid allocation failed
373 int conn_init_out(struct conn
*trgt_unconn_ll
, struct neighbor
*nb
,
374 __u32 rcvd_connid
, int use_rcvd_connid
)
376 unsigned long iflags
;
378 struct conn
*src_unconn_ll
= trgt_unconn_ll
->reversedir
;
380 BUG_ON(trgt_unconn_ll
->targettype
!= TARGET_UNCONNECTED
);
381 BUG_ON(src_unconn_ll
== 0);
382 BUG_ON(src_unconn_ll
->sourcetype
!= SOURCE_UNCONNECTED
);
384 memset(&(trgt_unconn_ll
->target
.out
), 0,
385 sizeof(trgt_unconn_ll
->target
.out
));
386 memset(&(src_unconn_ll
->source
.in
), 0,
387 sizeof(src_unconn_ll
->source
.in
));
389 trgt_unconn_ll
->targettype
= TARGET_OUT
;
390 src_unconn_ll
->sourcetype
= SOURCE_IN
;
392 if (use_rcvd_connid
) {
393 BUG_ON((rcvd_connid
& (1 << 31)) == 0);
395 src_unconn_ll
->source
.in
.conn_id
= rcvd_connid
;
396 if (unlikely(insert_connid(nb
, src_unconn_ll
) != 0)) {
397 src_unconn_ll
->source
.in
.conn_id
= 0;
402 src_unconn_ll
->source
.in
.cir
= kmem_cache_alloc(
403 connid_reuse_slab
, GFP_ATOMIC
);
404 if (unlikely(src_unconn_ll
->source
.in
.cir
== 0)) {
408 memset(src_unconn_ll
->source
.in
.cir
, 0,
409 sizeof(struct connid_reuse_item
));
411 if (unlikely(connid_alloc(nb
, src_unconn_ll
))) {
417 trgt_unconn_ll
->target
.out
.nb
= nb
;
418 src_unconn_ll
->source
.in
.nb
= nb
;
420 /* neighbor pointer */
421 kref_get(&(nb
->ref
));
422 kref_get(&(nb
->ref
));
424 INIT_LIST_HEAD(&(src_unconn_ll
->source
.in
.reorder_queue
));
426 INIT_LIST_HEAD(&(src_unconn_ll
->source
.in
.acks_pending
));
428 INIT_LIST_HEAD(&(trgt_unconn_ll
->target
.out
.retrans_list
));
430 reset_seqno(trgt_unconn_ll
, 0);
431 if (use_rcvd_connid
== 0) {
432 get_random_bytes((char *)
433 &(trgt_unconn_ll
->target
.out
.seqno_nextsend
),
435 trgt_unconn_ll
->target
.out
.seqno_nextsend
));
436 trgt_unconn_ll
->target
.out
.seqno_acked
=
437 trgt_unconn_ll
->target
.out
.seqno_nextsend
;
438 trgt_unconn_ll
->target
.out
.seqno_windowlimit
=
439 trgt_unconn_ll
->target
.out
.seqno_nextsend
;
440 reset_seqno(trgt_unconn_ll
,
441 trgt_unconn_ll
->target
.out
.seqno_nextsend
);
443 get_random_bytes((char *)
444 &(src_unconn_ll
->source
.in
.next_seqno
),
445 sizeof(src_unconn_ll
->source
.in
.next_seqno
));
446 src_unconn_ll
->source
.in
.window_seqnolimit
=
447 src_unconn_ll
->source
.in
.next_seqno
;
448 src_unconn_ll
->source
.in
.window_seqnolimit_remote
=
449 src_unconn_ll
->source
.in
.next_seqno
;
452 get_random_bytes((char *) &(trgt_unconn_ll
->target
.out
.priority_seqno
),
454 trgt_unconn_ll
->source
.in
.priority_seqno
= 0;
456 src_unconn_ll
->source
.in
.jiffies_last_act
= jiffies
;
458 trgt_unconn_ll
->target
.out
.jiffies_idle_since
=
459 jiffies
<< JIFFIES_LAST_IDLE_SHIFT
;
461 spin_lock_irqsave(&(nb
->conn_list_lock
), iflags
);
462 list_add_tail(&(src_unconn_ll
->source
.in
.nb_list
),
463 &(nb
->rcv_conn_list
));
464 spin_unlock_irqrestore(&(nb
->conn_list_lock
), iflags
);
467 kref_get(&(src_unconn_ll
->ref
));
469 if (src_unconn_ll
->is_client
)
470 atomic_inc(&num_conns
);
472 if (use_rcvd_connid
== 0)
473 update_windowlimit(src_unconn_ll
);
477 kmem_cache_free(connid_reuse_slab
,
478 src_unconn_ll
->source
.in
.cir
);
479 src_unconn_ll
->source
.in
.cir
= 0;
481 trgt_unconn_ll
->targettype
= TARGET_UNCONNECTED
;
482 src_unconn_ll
->sourcetype
= SOURCE_UNCONNECTED
;
487 void conn_init_sock_source(struct conn
*cn
)
490 cn
->sourcetype
= SOURCE_SOCK
;
491 memset(&(cn
->source
.sock
), 0, sizeof(cn
->source
.sock
));
492 cn
->source
.sock
.priority
= PRIORITY_MAX
;
493 cn
->source
.sock
.snd_speed
.jiffies_last_refresh
= jiffies
;
494 cn
->source
.sock
.snd_speed
.flushed
= 1;
497 void conn_init_sock_target(struct conn
*cn
)
500 cn
->targettype
= TARGET_SOCK
;
501 memset(&(cn
->target
.sock
), 0, sizeof(cn
->target
.sock
));
505 struct conn
* alloc_conn(gfp_t allocflags
)
507 struct conn
*cn1
= 0;
508 struct conn
*cn2
= 0;
510 cn1
= kmem_cache_alloc(conn_slab
, allocflags
);
511 if (unlikely(cn1
== 0))
514 cn2
= kmem_cache_alloc(conn_slab
, allocflags
);
515 if (unlikely(cn2
== 0))
518 memset(cn1
, 0, sizeof(struct conn
));
519 memset(cn2
, 0, sizeof(struct conn
));
521 cn1
->reversedir
= cn2
;
522 cn2
->reversedir
= cn1
;
524 kref_init(&(cn1
->ref
));
525 kref_init(&(cn2
->ref
));
527 cn1
->sourcetype
= SOURCE_UNCONNECTED
;
528 cn2
->sourcetype
= SOURCE_UNCONNECTED
;
529 cn1
->targettype
= TARGET_UNCONNECTED
;
530 cn2
->targettype
= TARGET_UNCONNECTED
;
535 spin_lock_init(&(cn1
->rcv_lock
));
536 spin_lock_init(&(cn2
->rcv_lock
));
541 bufsize_init(cn1
, 0);
542 bufsize_init(cn2
, 0);
544 cn1
->bufsize
.bufsize
= (BUFSIZE_INITIAL_LOWLAT
<< BUFSIZE_SHIFT
);
545 cn2
->bufsize
.bufsize
= (BUFSIZE_INITIAL_LOWLAT
<< BUFSIZE_SHIFT
);
546 #warning todo set to BUFSIZE_INITIAL_HIGHLAT if switched to highlatency
551 kmem_cache_free(conn_slab
, cn1
);
556 static struct cor_sock
*get_corsock_by_port(__be64 port
)
558 struct list_head
*curr
= openports
.next
;
560 while (curr
!= &openports
) {
561 struct cor_sock
*cs
= container_of(curr
, struct cor_sock
,
563 BUG_ON(cs
->type
!= CS_TYPE_LISTENER
);
564 if (cs
->data
.listener
.port
== port
)
573 __u32
list_services(char *buf
, __u32 buflen
)
577 __u32 buf_offset
= 4;
579 struct list_head
*curr
;
583 * The variable length header rowcount need to be generated after the
584 * data. This is done by reserving the maximum space they could take. If
585 * they end up being smaller, the data is moved so that there is no gap.
589 BUG_ON(buflen
< buf_offset
);
591 spin_lock_bh(&cor_bindnodes
);
593 curr
= openports
.next
;
595 while (curr
!= &openports
) {
596 struct cor_sock
*cs
= container_of(curr
, struct cor_sock
,
598 BUG_ON(cs
->type
!= CS_TYPE_LISTENER
);
600 if (cs
->data
.listener
.publish_service
== 0)
603 if (unlikely(buf_offset
+ 2 < buf_offset
) ||
604 buf_offset
+ 2 > buflen
)
607 buf
[buf_offset
] = ((char *) &(cs
->data
.listener
.port
))[0];
608 buf
[buf_offset
+1] = ((char *) &(cs
->data
.listener
.port
))[1];
616 spin_unlock_bh(&cor_bindnodes
);
618 rc
= encode_len(buf
, 4, cnt
);
623 memmove(buf
+ ((__u32
) rc
), buf
+4, buf_offset
);
625 return buf_offset
- 4 + ((__u32
) rc
);
629 void set_publish_service(struct cor_sock
*cs
, __u8 value
)
631 BUG_ON (value
!= 0 && value
!= 1);
633 mutex_lock(&(cs
->lock
));
635 cs
->publish_service
= value
;
637 if (cs
->type
== CS_TYPE_LISTENER
) {
638 spin_lock_bh(&cor_bindnodes
);
639 cs
->data
.listener
.publish_service
= value
;
640 spin_unlock_bh(&cor_bindnodes
);
643 mutex_unlock(&(cs
->lock
));
646 void close_port(struct cor_sock
*cs
)
648 mutex_lock(&(cs
->lock
));
649 if (unlikely(cs
->type
!= CS_TYPE_LISTENER
))
652 spin_lock_bh(&cor_bindnodes
);
654 list_del(&(cs
->data
.listener
.lh
));
656 while (list_empty(&(cs
->data
.listener
.conn_queue
)) == 0) {
657 struct conn
*src_sock_o
= container_of(
658 cs
->data
.listener
.conn_queue
.next
,
659 struct conn
, source
.sock
.cl_list
);
660 list_del(&(src_sock_o
->source
.sock
.cl_list
));
661 reset_conn(src_sock_o
);
662 kref_get(&(src_sock_o
->reversedir
->ref
));
663 kref_put(&(src_sock_o
->ref
), free_conn
);
666 spin_unlock_bh(&cor_bindnodes
);
668 mutex_unlock(&(cs
->lock
));
671 int open_port(struct cor_sock
*cs_l
, __be16 port
)
675 spin_lock_bh(&cor_bindnodes
);
676 if (get_corsock_by_port(port
) != 0) {
681 BUG_ON(cs_l
->type
!= CS_TYPE_UNCONNECTED
);
683 cs_l
->type
= CS_TYPE_LISTENER
;
684 cs_l
->data
.listener
.port
= port
;
685 cs_l
->data
.listener
.publish_service
= cs_l
->publish_service
;
687 /* kref is not used here */
688 INIT_LIST_HEAD(&(cs_l
->data
.listener
.conn_queue
));
690 list_add_tail((struct list_head
*) &(cs_l
->data
.listener
.lh
),
694 spin_unlock_bh(&cor_bindnodes
);
701 * rc == 2 port not open
702 * rc == 3 listener queue full
704 int connect_port(struct conn
*trgt_unconn_ll
, __be16 port
)
709 spin_lock_bh(&cor_bindnodes
);
711 cs
= get_corsock_by_port(port
);
717 if (unlikely(cs
->data
.listener
.queue_len
>=
718 cs
->data
.listener
.queue_maxlen
)) {
719 if (cs
->data
.listener
.queue_maxlen
<= 0)
727 kref_get(&(trgt_unconn_ll
->ref
));
728 kref_get(&(trgt_unconn_ll
->reversedir
->ref
));
730 BUG_ON(trgt_unconn_ll
->is_client
!= 1);
731 conn_init_sock_target(trgt_unconn_ll
);
732 conn_init_sock_source(trgt_unconn_ll
->reversedir
);
734 list_add_tail(&(trgt_unconn_ll
->reversedir
->source
.sock
.cl_list
),
735 &(cs
->data
.listener
.conn_queue
));
736 cs
->data
.listener
.queue_len
++;
737 atomic_set(&(cs
->ready_to_accept
), 1);
739 cs
->sk
.sk_state_change(&(cs
->sk
));
742 spin_unlock_bh(&cor_bindnodes
);
748 * rc == 3 addr not found
749 * rc == 4 ==> connid allocation failed
750 * rc == 4 ==> control msg alloc failed
752 int connect_neigh(struct conn
*trgt_unconn_ll
, char *addr
, __u16 addrlen
)
754 struct control_msg_out
*cm
;
755 struct neighbor
*nb
= 0;
757 nb
= find_neigh(addr
, addrlen
);
761 cm
= alloc_control_msg(nb
, ACM_PRIORITY_MED
);
762 if (unlikely(cm
== 0)) {
763 kref_put(&(nb
->ref
), neighbor_free
);
767 if (unlikely(conn_init_out(trgt_unconn_ll
, nb
, 0, 0))) {
768 free_control_msg(cm
);
769 kref_put(&(nb
->ref
), neighbor_free
);
773 send_connect_nb(cm
, trgt_unconn_ll
->target
.out
.conn_id
,
774 trgt_unconn_ll
->target
.out
.seqno_nextsend
,
775 trgt_unconn_ll
->reversedir
->source
.in
.next_seqno
,
776 trgt_unconn_ll
->reversedir
);
778 kref_put(&(nb
->ref
), neighbor_free
);
783 static int _reset_conn(struct conn
*cn_ll
, int trgt_out_resetneeded
)
786 * active conns have an additional ref to make sure that they are not
787 * freed when only one direction is referenced by the connid hashtable
791 if (cn_ll
->sourcetype
== SOURCE_IN
) {
792 unsigned long iflags
;
793 struct neighbor
*nb
= cn_ll
->source
.in
.nb
;
795 spin_lock_irqsave(&(nb
->conn_list_lock
), iflags
);
796 list_del(&(cn_ll
->source
.in
.nb_list
));
797 spin_unlock_irqrestore(&(nb
->conn_list_lock
), iflags
);
801 if (cn_ll
->source
.in
.conn_id
!= 0 &&
802 (cn_ll
->source
.in
.conn_id
& (1 << 31)) != 0) {
803 BUG_ON(cn_ll
->source
.in
.cir
!= 0);
804 } else if (cn_ll
->source
.in
.conn_id
!= 0 &&
805 (cn_ll
->source
.in
.conn_id
& (1 << 31)) == 0) {
806 BUG_ON(cn_ll
->source
.in
.cir
== 0);
808 kref_init(&(cn_ll
->source
.in
.cir
->ref
));
809 cn_ll
->source
.in
.cir
->conn_id
=
810 cn_ll
->source
.in
.conn_id
;
811 cn_ll
->source
.in
.cir
->pingcnt
=
812 nb
->connid_reuse_pingcnt
;
814 spin_lock_bh(&(nb
->connid_reuse_lock
));
815 insert_connid_reuse(nb
, cn_ll
->source
.in
.cir
);
816 list_add_tail(&(cn_ll
->source
.in
.cir
->lh
),
817 &(nb
->connid_reuse_list
));
818 spin_unlock_bh(&(nb
->connid_reuse_lock
));
820 cn_ll
->source
.in
.cir
= 0;
823 if (cn_ll
->source
.in
.conn_id
!= 0) {
824 spin_lock_bh(&(nb
->connid_lock
));
825 rb_erase(&(cn_ll
->source
.in
.rbn
), &(nb
->connid_rb
));
826 spin_unlock_bh(&(nb
->connid_lock
));
829 cn_ll
->source
.in
.conn_id
= 0;
831 free_ack_conns(cn_ll
);
834 if (cn_ll
->is_client
)
835 atomic_dec(&num_conns
);
837 reset_ooo_queue(cn_ll
);
838 } else if (cn_ll
->sourcetype
== SOURCE_SOCK
) {
839 if (likely(cn_ll
->source
.sock
.cs
!= 0)) {
840 cor_sk_write_space(cn_ll
->source
.sock
.cs
);
841 kref_put(&(cn_ll
->source
.sock
.cs
->ref
), free_sock
);
842 cn_ll
->source
.sock
.cs
= 0;
846 if (cn_ll
->targettype
== TARGET_UNCONNECTED
) {
847 if (cn_ll
->target
.unconnected
.cmdparams
!= 0) {
848 kfree(cn_ll
->target
.unconnected
.cmdparams
);
849 cn_ll
->target
.unconnected
.cmdparams
= 0;
851 } else if (cn_ll
->targettype
== TARGET_OUT
) {
852 if (trgt_out_resetneeded
&& cn_ll
->target
.out
.conn_id
!= 0) {
853 send_reset_conn(cn_ll
->target
.out
.nb
,
854 cn_ll
->target
.out
.conn_id
, 0);
857 cn_ll
->target
.out
.conn_id
= 0;
859 cancel_all_conn_retrans(cn_ll
);
861 qos_remove_conn(cn_ll
);
863 spin_lock_bh(&(cn_ll
->target
.out
.nb
->stalledconn_lock
));
864 if (cn_ll
->target
.out
.nbstalled_lh
.prev
!= 0) {
865 list_del(&(cn_ll
->target
.out
.nbstalled_lh
));
866 cn_ll
->target
.out
.nbstalled_lh
.prev
= 0;
867 cn_ll
->target
.out
.nbstalled_lh
.next
= 0;
870 spin_unlock_bh(&(cn_ll
->target
.out
.nb
->stalledconn_lock
));
871 } else if (cn_ll
->targettype
== TARGET_SOCK
) {
872 if (likely(cn_ll
->target
.sock
.cs
!= 0)) {
873 if (cn_ll
->target
.sock
.socktype
== SOCKTYPE_RAW
) {
874 cor_sk_data_ready(cn_ll
->target
.sock
.cs
);
876 cor_mngdsocket_readfromconn_fromatomic(
877 cn_ll
->target
.sock
.cs
);
879 kref_put(&(cn_ll
->target
.sock
.cs
->ref
), free_sock
);
880 cn_ll
->target
.sock
.cs
= 0;
881 cn_ll
->target
.sock
.rcv_buf
= 0;
885 databuf_ackdiscard(cn_ll
);
887 account_bufspace(cn_ll
);
889 connreset_priority(cn_ll
);
894 /* warning: do not hold the rcv_lock while calling this! */
895 void reset_conn_locked(struct conn
*cn_ll
)
903 BUG_ON(cn_ll
->isreset
<= 1 && cn_ll
->reversedir
->isreset
>= 2);
904 BUG_ON(cn_ll
->isreset
>= 2 && cn_ll
->reversedir
->isreset
<= 1);
906 isreset1
= cn_ll
->isreset
;
907 if (cn_ll
->isreset
<= 1)
910 isreset2
= cn_ll
->reversedir
->isreset
;
911 if (cn_ll
->reversedir
->isreset
<= 1)
912 cn_ll
->reversedir
->isreset
= 2;
918 put1
= _reset_conn(cn_ll
, isreset1
== 0);
919 put2
= _reset_conn(cn_ll
->reversedir
, isreset2
== 0);
922 /* free_conn may not be called, before both _reset_conn have finished */
924 kref_put(&(cn_ll
->ref
), kreffree_bug
);
929 kref_put(&(cn_ll
->reversedir
->ref
), kreffree_bug
);
934 void reset_conn(struct conn
*cn
)
936 kref_get(&(cn
->ref
));
937 kref_get(&(cn
->reversedir
->ref
));
940 spin_lock_bh(&(cn
->rcv_lock
));
941 spin_lock_bh(&(cn
->reversedir
->rcv_lock
));
943 spin_lock_bh(&(cn
->reversedir
->rcv_lock
));
944 spin_lock_bh(&(cn
->rcv_lock
));
947 reset_conn_locked(cn
);
950 spin_unlock_bh(&(cn
->rcv_lock
));
951 spin_unlock_bh(&(cn
->reversedir
->rcv_lock
));
953 spin_unlock_bh(&(cn
->reversedir
->rcv_lock
));
954 spin_unlock_bh(&(cn
->rcv_lock
));
957 kref_put(&(cn
->ref
), free_conn
);
958 kref_put(&(cn
->reversedir
->ref
), free_conn
);
961 static int __init
cor_init(void)
967 printk(KERN_ERR
"sizeof conn: %u", (__u32
) sizeof(c
));
968 printk(KERN_ERR
" conn.source: %u", (__u32
) sizeof(c
.source
));
969 printk(KERN_ERR
" conn.target: %u", (__u32
) sizeof(c
.target
));
970 printk(KERN_ERR
" conn.target.out: %u", (__u32
) sizeof(c
.target
.out
));
971 printk(KERN_ERR
" conn.buf: %u", (__u32
) sizeof(c
.data_buf
));
973 printk(KERN_ERR
"sizeof neighbor: %u", (__u32
) sizeof(struct neighbor
));
975 printk(KERN_ERR
"sizeof mutex: %u", (__u32
) sizeof(struct mutex
));
976 printk(KERN_ERR
"sizeof spinlock: %u", (__u32
) sizeof(spinlock_t
));
977 printk(KERN_ERR
"sizeof kref: %u", (__u32
) sizeof(struct kref
));
978 printk(KERN_ERR
"sizeof list_head: %u",
979 (__u32
) sizeof(struct list_head
));
980 printk(KERN_ERR
"sizeof rb_root: %u", (__u32
) sizeof(struct rb_root
));
981 printk(KERN_ERR
"sizeof rb_node: %u", (__u32
) sizeof(struct rb_node
));
984 rc
= cor_util_init();
985 if (unlikely(rc
!= 0))
988 conn_slab
= kmem_cache_create("cor_conn", sizeof(struct conn
), 8, 0, 0);
989 if (unlikely(conn_slab
== 0))
992 connid_reuse_slab
= kmem_cache_create("cor_connid_reuse",
993 sizeof(struct connid_reuse_item
), 8, 0, 0);
994 if (unlikely(connid_reuse_slab
== 0))
998 atomic_set(&num_conns
, 0);
1001 rc
= forward_init();
1002 if (unlikely(rc
!= 0))
1005 rc
= cor_kgen_init();
1006 if (unlikely(rc
!= 0))
1009 rc
= cor_rd_init1();
1010 if (unlikely(rc
!= 0))
1013 rc
= cor_snd_init();
1014 if (unlikely(rc
!= 0))
1017 rc
= cor_neighbor_init();
1018 if (unlikely(rc
!= 0))
1021 rc
= cor_rcv_init();
1022 if (unlikely(rc
!= 0))
1025 rc
= cor_sock_managed_init1();
1026 if (unlikely(rc
!= 0))
1029 rc
= cor_sock_init2();
1030 if (unlikely(rc
!= 0))
1033 rc
= cor_rd_init2();
1034 if (unlikely(rc
!= 0))
1040 static void __exit
cor_exit(void)
1044 cor_sock_managed_exit1();
1048 cor_neighbor_exit2();
1054 BUG_ON(atomic_read(&num_conns
) != 0);
1056 kmem_cache_destroy(conn_slab
);
1059 kmem_cache_destroy(connid_reuse_slab
);
1060 connid_reuse_slab
= 0;
1063 module_init(cor_init
);
1064 module_exit(cor_exit
);
1065 MODULE_LICENSE("GPL");