/* net/cor/conn.c */
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/mutex.h>

#include "cor.h"

DEFINE_SPINLOCK(cor_bindnodes);

static LIST_HEAD(cor_openports);

static struct kmem_cache *cor_conn_slab;
struct kmem_cache *cor_connid_reuse_slab;

atomic_t cor_num_conns;
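/*
 * cor_bindnodes protects cor_openports (the listeners bound to a port) and
 * the listener conn queues used below. cor_conn_slab backs cor_conn_bidir
 * allocations and cor_connid_reuse_slab backs connid reuse items.
 * cor_num_conns counts client-side conns set up via cor_conn_init_out() and
 * is checked against MAX_CONNS when new incoming conns are admitted.
 */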
int cor_new_incoming_conn_allowed(struct cor_neighbor *nb)
{
	/**
	 * MAX_CONNS is only loosely enforced for now
	 * (cor_num_conns is not checked and incremented at the same time)
	 */
	if (atomic_read(&cor_num_conns) >= MAX_CONNS)
		return 0;
	else
		return 1;
}
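/*
 * Scale an incoming priority by PRIORITY_IN_MULITPLIER_PERCENT. For values
 * large enough that the multiplication could overflow 64 bits, the division
 * by 100 is done first at the cost of a little precision.
 */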
static __u64 cor_get_prio_in(__u64 priority)
{
	if (PRIORITY_IN_MULITPLIER_PERCENT >= 100)
		return priority;

	if (unlikely(priority > U64_MAX/100)) {
		return (priority / 100) * PRIORITY_IN_MULITPLIER_PERCENT;
	} else {
		return (priority * PRIORITY_IN_MULITPLIER_PERCENT) / 100;
	}
}
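/*
 * Scale a single priority down to priority * PRIORITY_SUM_IN_MAX /
 * priority_sum when the per-neighbor priority sum exceeds
 * PRIORITY_SUM_IN_MAX. The constant and the divisor are shifted right by
 * the same amount so that the factor fits into 32 bits and the 64 bit
 * multiplication cannot overflow.
 */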
static __u32 cor_conn_prio_sum_limit(__u32 priority, __u64 priority_sum)
{
	__u32 shiftcnt = 0;

	while ((PRIORITY_SUM_IN_MAX >> shiftcnt) > U32_MAX) {
		shiftcnt++;
	}

	return div_u64(((__u64) priority) * (PRIORITY_SUM_IN_MAX >> shiftcnt),
			(priority_sum >> shiftcnt));
}
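/*
 * Compute the current priority of the client side of a conn: for SOURCE_IN
 * the value received from the neighbor is scaled with cor_get_prio_in() and
 * limited proportionally once the neighbor's priority sum exceeds
 * PRIORITY_SUM_IN_MAX; for SOURCE_SOCK the socket priority is used directly.
 * Must only be called for the client side (see the BUG_ON).
 */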
__u32 _cor_conn_refresh_priority(struct cor_conn *cn_lx)
{
	BUG_ON(cn_lx->is_client == 0);

	if (cn_lx->sourcetype == SOURCE_IN) {
		__u32 priority = (__u32)
				cor_get_prio_in(cn_lx->source.in.priority);
		__u64 priority_sum = cor_get_prio_in(atomic64_read(
				&(cn_lx->source.in.nb->priority_sum)));
		if (PRIORITY_SUM_IN_MAX != U64_MAX &&
				priority_sum > PRIORITY_SUM_IN_MAX) {
			return cor_conn_prio_sum_limit(priority, priority_sum);
		} else {
			return priority;
		}
	} else if (cn_lx->sourcetype == SOURCE_SOCK) {
		return cn_lx->source.sock.priority;
	} else {
		BUG();
		return 0;
	}
}
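/*
 * Recompute the priority of a conn and, if its target is TARGET_OUT, send
 * an updated priority to the neighbor when the encoded value changed and
 * priority_send_allowed is set. With locked == 0 both rcv_locks are taken
 * here (and the call is redirected to the client side if needed); with
 * locked != 0 the caller must already hold them.
 */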
__u32 cor_conn_refresh_priority(struct cor_conn *cn, int locked)
{
	struct cor_conn *cn_reversedir = cor_get_conn_reversedir(cn);
	__u32 priority = 0;

	if (likely(locked == 0)) {
		if (cn->is_client == 0)
			return cor_conn_refresh_priority(cn_reversedir, 0);

		spin_lock_bh(&(cn->rcv_lock));
		spin_lock_bh(&(cor_get_conn_reversedir(cn)->rcv_lock));
	} else {
		BUG_ON(cn->is_client == 0);
	}

	if (unlikely(cn->isreset != 0) || cn->targettype != TARGET_OUT)
		goto out;

	priority = _cor_conn_refresh_priority(cn);

	if (cn->target.out.priority_send_allowed != 0) {
		__u16 priority_enc = cor_enc_priority(priority);
		if (priority_enc != cn->target.out.priority_last)
			cor_send_priority(cn, priority_enc);
	}

out:
	if (likely(locked == 0)) {
		spin_unlock_bh(&(cor_get_conn_reversedir(cn)->rcv_lock));
		spin_unlock_bh(&(cn->rcv_lock));
	}

	return priority;
}
static void _cor_set_conn_in_priority(struct cor_conn *src_in_lx,
		__u32 newpriority)
{
	struct cor_neighbor *nb = src_in_lx->source.in.nb;

	__u32 oldpriority = src_in_lx->source.in.priority;

	cor_update_atomic_sum(&(nb->priority_sum),
			oldpriority, newpriority);

	src_in_lx->source.in.priority = newpriority;
}
void cor_set_conn_in_priority(struct cor_neighbor *nb, __u32 conn_id,
		struct cor_conn *src_in, __u8 priority_seqno, __u16 priority)
{
	__u32 newpriority;

	if (unlikely(src_in->is_client == 0))
		return;

	spin_lock_bh(&(src_in->rcv_lock));
	spin_lock_bh(&(cor_get_conn_reversedir(src_in)->rcv_lock));

	if (unlikely(cor_is_conn_in(src_in, nb, conn_id) == 0))
		goto out;

	if (src_in->source.in.priority_seqno != priority_seqno)
		goto out;
	src_in->source.in.priority_seqno =
			(src_in->source.in.priority_seqno + 1) & 15;

	newpriority = (cor_dec_priority(priority)*4)/5;
	_cor_set_conn_in_priority(src_in, newpriority);
	cor_conn_refresh_priority(src_in, 1);

out:
	spin_unlock_bh(&(cor_get_conn_reversedir(src_in)->rcv_lock));
	spin_unlock_bh(&(src_in->rcv_lock));
}
static void cor_connreset_priority(struct cor_conn *cn_lx)
{
	if (cn_lx->is_client == 0)
		return;

	if (cn_lx->sourcetype == SOURCE_IN)
		_cor_set_conn_in_priority(cn_lx, 0);
}
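/*
 * Record receive activity on a SOURCE_IN conn: refresh jiffies_last_act and
 * move the conn to the tail of the neighbor's rcv_conn_list, which keeps
 * that list roughly ordered by last activity. The _l suffix presumably
 * means the caller already holds the conn's rcv_lock; the neighbor's
 * conn_list_lock is taken here.
 */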
void _cor_set_last_act(struct cor_conn *src_in_l)
{
	unsigned long iflags;
	src_in_l->source.in.jiffies_last_act = jiffies;
	spin_lock_irqsave(&(src_in_l->source.in.nb->conn_list_lock), iflags);
	list_del(&(src_in_l->source.in.nb_list));
	list_add_tail(&(src_in_l->source.in.nb_list),
			&(src_in_l->source.in.nb->rcv_conn_list));
	spin_unlock_irqrestore(&(src_in_l->source.in.nb->conn_list_lock),
			iflags);
}
static void _cor_free_conn(struct cor_conn *cn)
{
	BUG_ON(cn->isreset == 0);

	if (cn->sourcetype == SOURCE_IN) {
		WARN_ONCE(list_empty(&(cn->source.in.reorder_queue)) == 0,
				"cor_free_conn(): cn->source.in.reorder_queue is not empty");
		WARN_ONCE(list_empty(&(cn->source.in.acks_pending)) == 0,
				"cor_free_conn(): cn->source.in.acks_pending is not empty");

		WARN_ONCE(cn->source.in.conn_id != 0,
				"cor_free_conn(): cn->source.in.conn_id is not 0");
		kref_put(&(cn->source.in.nb->ref), cor_neighbor_free);
		cn->source.in.nb = 0;
	}

	if (cn->targettype == TARGET_OUT) {
		WARN_ONCE(list_empty(&(cn->target.out.retrans_list)) == 0,
				"cor_free_conn(): cn->target.out.retrans_list is not empty");
		WARN_ONCE(cn->target.out.rb.in_queue != RB_INQUEUE_FALSE,
				"cor_free_conn(): cn->target.out.rb.in_queue is not RB_INQUEUE_FALSE");
		WARN_ONCE(cn->target.out.conn_id != 0,
				"cor_free_conn(): cn->target.out.conn_id is not 0");
		kref_put(&(cn->target.out.nb->ref), cor_neighbor_free);
		cn->target.out.nb = 0;
	}

	WARN_ONCE(cn->data_buf.datasize != 0,
			"cor_free_conn(): cn->data_buf.datasize is not 0");
	WARN_ONCE(cn->data_buf.overhead != 0,
			"cor_free_conn(): cn->data_buf.overhead is not 0");
	WARN_ONCE(list_empty(&(cn->data_buf.items)) == 0,
			"cor_free_conn(): cn->data_buf.items is not empty");
	WARN_ONCE(cn->data_buf.nextread != 0,
			"cor_free_conn(): cn->data_buf.nextread is not 0");
}
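/*
 * kref release function for cor_conn_bidir.ref: verifies via
 * _cor_free_conn() that both directions have been fully torn down, poisons
 * the memory (memset with 0x9a) and returns it to cor_conn_slab.
 */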
void cor_free_conn(struct kref *ref)
{
	struct cor_conn_bidir *cnb = container_of(ref, struct cor_conn_bidir,
			ref);

	_cor_free_conn(&(cnb->cli));
	_cor_free_conn(&(cnb->srv));

	memset(cnb, 9*16 + 10, sizeof(struct cor_conn_bidir));
	kmem_cache_free(cor_conn_slab, cnb);
}
/**
 * rc == 0 ==> ok
 * rc == 1 ==> connid_reuse or connid allocation failed
 */
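/*
 * Turn an unconnected conn pair into SOURCE_IN / TARGET_OUT towards nb.
 * With use_rcvd_connid the connid received from the neighbor is inserted
 * into the connid table; otherwise a new connid and a connid_reuse item are
 * allocated and the initial seqnos are randomized. Two neighbor references
 * are taken (one per direction), the conn is added to the neighbor's
 * rcv_conn_list and, for client-side conns, cor_num_conns is incremented.
 * On failure both sides are reverted to the unconnected state.
 */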
int cor_conn_init_out(struct cor_conn *trgt_unconn_ll, struct cor_neighbor *nb,
		__u32 rcvd_connid, int use_rcvd_connid)
{
	unsigned long iflags;
	int rc = 0;
	struct cor_conn *src_unconn_ll =
			cor_get_conn_reversedir(trgt_unconn_ll);
	__u8 tmp;

	BUG_ON(trgt_unconn_ll->targettype != TARGET_UNCONNECTED);
	BUG_ON(src_unconn_ll == 0);
	BUG_ON(src_unconn_ll->sourcetype != SOURCE_UNCONNECTED);

	memset(&(trgt_unconn_ll->target.out), 0,
			sizeof(trgt_unconn_ll->target.out));
	memset(&(src_unconn_ll->source.in), 0,
			sizeof(src_unconn_ll->source.in));

	trgt_unconn_ll->targettype = TARGET_OUT;
	src_unconn_ll->sourcetype = SOURCE_IN;

	if (use_rcvd_connid) {
		BUG_ON((rcvd_connid & (1 << 31)) == 0);

		src_unconn_ll->source.in.conn_id = rcvd_connid;
		if (unlikely(cor_insert_connid(nb, src_unconn_ll) != 0)) {
			src_unconn_ll->source.in.conn_id = 0;
			rc = 1;
			goto out_err;
		}
	} else {
		src_unconn_ll->source.in.cir = kmem_cache_alloc(
				cor_connid_reuse_slab, GFP_ATOMIC);
		if (unlikely(src_unconn_ll->source.in.cir == 0)) {
			rc = 1;
			goto out_err;
		}

		memset(src_unconn_ll->source.in.cir, 0,
				sizeof(struct cor_connid_reuse_item));

		if (unlikely(cor_connid_alloc(nb, src_unconn_ll))) {
			rc = 1;
			goto out_freecir;
		}
	}

	trgt_unconn_ll->target.out.nb = nb;
	src_unconn_ll->source.in.nb = nb;

	/* neighbor pointer */
	kref_get(&(nb->ref));
	kref_get(&(nb->ref));

	INIT_LIST_HEAD(&(src_unconn_ll->source.in.reorder_queue));

	INIT_LIST_HEAD(&(src_unconn_ll->source.in.acks_pending));

	INIT_LIST_HEAD(&(trgt_unconn_ll->target.out.retrans_list));

	cor_reset_seqno(trgt_unconn_ll, 0);
	if (use_rcvd_connid == 0) {
		get_random_bytes((char *)
				&(trgt_unconn_ll->target.out.seqno_nextsend),
				sizeof(
				trgt_unconn_ll->target.out.seqno_nextsend));
		trgt_unconn_ll->target.out.seqno_acked =
				trgt_unconn_ll->target.out.seqno_nextsend;
		trgt_unconn_ll->target.out.seqno_windowlimit =
				trgt_unconn_ll->target.out.seqno_nextsend;
		cor_reset_seqno(trgt_unconn_ll,
				trgt_unconn_ll->target.out.seqno_nextsend);

		get_random_bytes((char *)
				&(src_unconn_ll->source.in.next_seqno),
				sizeof(src_unconn_ll->source.in.next_seqno));
		src_unconn_ll->source.in.window_seqnolimit =
				src_unconn_ll->source.in.next_seqno;
		src_unconn_ll->source.in.window_seqnolimit_remote =
				src_unconn_ll->source.in.next_seqno;
	}

	get_random_bytes((char *) &tmp, 1);
	trgt_unconn_ll->target.out.priority_seqno = (tmp & 15);

	src_unconn_ll->source.in.priority_seqno = 0;

	src_unconn_ll->source.in.jiffies_last_act = jiffies;

	trgt_unconn_ll->target.out.jiffies_idle_since =
			jiffies << JIFFIES_LAST_IDLE_SHIFT;

	trgt_unconn_ll->target.out.remote_bufsize_changerate = 64;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	list_add_tail(&(src_unconn_ll->source.in.nb_list),
			&(nb->rcv_conn_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

	/* neighbor lists */
	cor_conn_kref_get(src_unconn_ll, "neighbor_list");

	if (src_unconn_ll->is_client)
		atomic_inc(&cor_num_conns);

	if (use_rcvd_connid == 0)
		cor_update_windowlimit(src_unconn_ll);

	if (0) {
out_freecir:
		kmem_cache_free(cor_connid_reuse_slab,
				src_unconn_ll->source.in.cir);
		src_unconn_ll->source.in.cir = 0;
out_err:
		trgt_unconn_ll->targettype = TARGET_UNCONNECTED;
		src_unconn_ll->sourcetype = SOURCE_UNCONNECTED;
	}

	return rc;
}
void cor_conn_init_sock_source(struct cor_conn *cn)
{
	BUG_ON(cn == 0);
	cn->sourcetype = SOURCE_SOCK;
	memset(&(cn->source.sock), 0, sizeof(cn->source.sock));
	cn->source.sock.priority = cor_priority_max();
	cn->source.sock.snd_speed.jiffies_last_refresh = jiffies;
	cn->source.sock.snd_speed.flushed = 1;
}
void cor_conn_init_sock_target(struct cor_conn *cn)
{
	BUG_ON(cn == 0);
	cn->targettype = TARGET_SOCK;
	memset(&(cn->target.sock), 0, sizeof(cn->target.sock));
	cor_reset_seqno(cn, 0);
}
static void _cor_alloc_conn(struct cor_conn *cn, __u8 is_highlatency)
{
	cn->sourcetype = SOURCE_UNCONNECTED;
	cn->targettype = TARGET_UNCONNECTED;

	cn->isreset = 0;

	spin_lock_init(&(cn->rcv_lock));

	cor_databuf_init(cn);

	cor_bufsize_init(cn, 0);

	if (is_highlatency == 0) {
		cn->is_highlatency = 0;
		cn->bufsize.bufsize =
				(BUFSIZE_INITIAL_LOWLAT << BUFSIZE_SHIFT);
	} else {
		cn->is_highlatency = 1;
		cn->bufsize.bufsize =
				(BUFSIZE_INITIAL_HIGHLAT << BUFSIZE_SHIFT);
	}
}
struct cor_conn_bidir* cor_alloc_conn(gfp_t allocflags, __u8 is_highlatency)
{
	struct cor_conn_bidir *cnb;

	cnb = kmem_cache_alloc(cor_conn_slab, allocflags);
	if (unlikely(cnb == 0))
		return 0;

	memset(cnb, 0, sizeof(struct cor_conn_bidir));

	cnb->cli.is_client = 1;
	kref_init(&(cnb->ref));

	_cor_alloc_conn(&(cnb->cli), is_highlatency);
	_cor_alloc_conn(&(cnb->srv), is_highlatency);

	return cnb;
}
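/*
 * Linear search of cor_openports for a listener bound to port. The caller
 * is expected to hold cor_bindnodes, as all users in this file do.
 */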
static struct cor_sock *cor_get_corsock_by_port(__be32 port)
{
	struct list_head *curr = cor_openports.next;

	while (curr != &cor_openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);
		if (cs->data.listener.port == port)
			return cs;

		curr = curr->next;
	}

	return 0;
}
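/*
 * Write the list of published ports into buf: up to 4 bytes of variable
 * length row count, followed by the 4 raw bytes of each published port.
 * The count is encoded last with cor_encode_len() and the port data is
 * moved down if the encoded count is shorter than the 4 reserved bytes.
 * Returns the number of bytes written.
 */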
__u32 cor_list_services(char *buf, __u32 buflen)
{
	__u32 cnt = 0;

	__u32 buf_offset = 4;

	struct list_head *curr;
	int rc;

	/**
	 * The variable length header (rowcount) needs to be generated after
	 * the data. This is done by reserving the maximum space it could
	 * take. If it ends up being smaller, the data is moved so that there
	 * is no gap.
	 */

	BUG_ON(buf == 0);
	BUG_ON(buflen < buf_offset);

	spin_lock_bh(&cor_bindnodes);

	curr = cor_openports.next;
	while (curr != &cor_openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);

		if (cs->data.listener.publish_service == 0)
			goto cont;

		if (unlikely(buf_offset + 4 < buf_offset) ||
				buf_offset + 4 > buflen)
			break;

		buf[buf_offset] = ((char *) &(cs->data.listener.port))[0];
		buf[buf_offset+1] = ((char *) &(cs->data.listener.port))[1];
		buf[buf_offset+2] = ((char *) &(cs->data.listener.port))[2];
		buf[buf_offset+3] = ((char *) &(cs->data.listener.port))[3];
		buf_offset += 4;
		cnt++;

cont:
		curr = curr->next;
	}

	spin_unlock_bh(&cor_bindnodes);

	rc = cor_encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);
	BUG_ON(rc > 4);

	if (likely(rc < 4))
		memmove(buf + ((__u32) rc), buf+4, buf_offset);

	return buf_offset - 4 + ((__u32) rc);
}
void cor_set_publish_service(struct cor_sock *cs, __u8 value)
{
	BUG_ON(value != 0 && value != 1);

	mutex_lock(&(cs->lock));

	cs->publish_service = value;

	if (cs->type == CS_TYPE_LISTENER) {
		spin_lock_bh(&cor_bindnodes);
		cs->data.listener.publish_service = value;
		spin_unlock_bh(&cor_bindnodes);
	}

	mutex_unlock(&(cs->lock));
}
void cor_close_port(struct cor_sock *cs)
{
	mutex_lock(&(cs->lock));
	if (unlikely(cs->type != CS_TYPE_LISTENER))
		goto out;

	spin_lock_bh(&cor_bindnodes);

	list_del(&(cs->data.listener.lh));

	while (list_empty(&(cs->data.listener.conn_queue)) == 0) {
		struct cor_conn *src_sock_o = container_of(
				cs->data.listener.conn_queue.next,
				struct cor_conn, source.sock.cl_list);
		BUG_ON(src_sock_o->source.sock.in_cl_list == 0);
		list_del(&(src_sock_o->source.sock.cl_list));
		src_sock_o->source.sock.in_cl_list = 0;
		spin_unlock_bh(&cor_bindnodes);

		cor_reset_conn(src_sock_o);

		spin_lock_bh(&cor_bindnodes);
		cor_conn_kref_put(src_sock_o, "conn_queue");
	}

	spin_unlock_bh(&cor_bindnodes);
out:
	mutex_unlock(&(cs->lock));
}
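/*
 * Bind a listening cor_sock to port. Fails with -EADDRINUSE if another
 * listener already owns the port; otherwise the socket becomes
 * CS_TYPE_LISTENER and is appended to cor_openports.
 */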
int cor_open_port(struct cor_sock *cs_l, __be32 port)
{
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);
	if (cor_get_corsock_by_port(port) != 0) {
		rc = -EADDRINUSE;
		goto out;
	}

	BUG_ON(cs_l->type != CS_TYPE_UNCONNECTED);

	cs_l->type = CS_TYPE_LISTENER;
	cs_l->data.listener.port = port;
	cs_l->data.listener.publish_service = cs_l->publish_service;

	/* kref is not used here */
	INIT_LIST_HEAD(&(cs_l->data.listener.conn_queue));

	list_add_tail((struct list_head *) &(cs_l->data.listener.lh),
			&cor_openports);

out:
	spin_unlock_bh(&cor_bindnodes);

	return rc;
}
/**
 * rc == 0 connected
 * rc == 2 port not open
 * rc == 3 listener queue full
 */
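/*
 * Connect a conn pair to a local listener: the source side is queued on the
 * listener's conn_queue and the listening socket is woken up via
 * sk_state_change() so that it can accept the new connection.
 */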
int cor_connect_port(struct cor_conn *trgt_unconn_ll, __be32 port)
{
	struct cor_conn *src_unconn_ll =
			cor_get_conn_reversedir(trgt_unconn_ll);
	struct cor_sock *cs;
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);

	cs = cor_get_corsock_by_port(port);
	if (cs == 0) {
		rc = 2;
		goto out;
	}

	if (unlikely(cs->data.listener.queue_len >=
			cs->data.listener.queue_maxlen)) {
		if (cs->data.listener.queue_maxlen <= 0)
			rc = 2;
		else
			rc = 3;

		goto out;
	}

	BUG_ON(trgt_unconn_ll->is_client != 1);
	cor_conn_init_sock_target(trgt_unconn_ll);
	cor_conn_init_sock_source(src_unconn_ll);

	list_add_tail(&(src_unconn_ll->source.sock.cl_list),
			&(cs->data.listener.conn_queue));
	src_unconn_ll->source.sock.in_cl_list = 1;
	cor_conn_kref_get(src_unconn_ll, "conn_queue");

	cs->data.listener.queue_len++;
	atomic_set(&(cs->ready_to_accept), 1);
	barrier();
	cs->sk.sk_state_change(&(cs->sk));

out:
	spin_unlock_bh(&cor_bindnodes);
	return rc;
}
/**
 * rc == 0 connected
 * rc == 3 addr not found
 * rc == 4 connid allocation failed
 * rc == 4 control msg alloc failed
 */
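/*
 * Connect a conn pair to the neighbor with the given address: look up the
 * neighbor, allocate a control message, initialize the outgoing direction
 * with cor_conn_init_out() and send the connect request with
 * cor_send_connect_nb().
 */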
int cor_connect_neigh(struct cor_conn *trgt_unconn_ll, char *addr,
		__u16 addrlen)
{
	struct cor_control_msg_out *cm;
	struct cor_neighbor *nb = 0;

	nb = cor_find_neigh(addr, addrlen);
	if (nb == 0)
		return 3;

	cm = cor_alloc_control_msg(nb, ACM_PRIORITY_MED);
	if (unlikely(cm == 0)) {
		kref_put(&(nb->ref), cor_neighbor_free);
		return 4;
	}

	if (unlikely(cor_conn_init_out(trgt_unconn_ll, nb, 0, 0))) {
		cor_free_control_msg(cm);
		kref_put(&(nb->ref), cor_neighbor_free);
		return 4;
	}

	trgt_unconn_ll->target.out.priority_last =
			_cor_conn_refresh_priority(trgt_unconn_ll);

	cor_send_connect_nb(cm, trgt_unconn_ll->target.out.conn_id,
			trgt_unconn_ll->target.out.seqno_nextsend,
			cor_get_conn_reversedir(trgt_unconn_ll)->
			source.in.next_seqno,
			cor_get_conn_reversedir(trgt_unconn_ll));

	kref_put(&(nb->ref), cor_neighbor_free);

	return 0;
}
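/*
 * Tear down one direction of a conn. For SOURCE_IN the conn is removed from
 * the neighbor's lists and connid table and, for locally allocated connids
 * (high bit clear), the connid is queued for delayed reuse. For TARGET_OUT
 * a reset_conn message is sent to the neighbor if one is still needed and
 * pending retransmits are cancelled. Socket references are dropped and any
 * buffered data is discarded.
 */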
static void _cor_reset_conn(struct cor_conn *cn_ll, int trgt_out_resetneeded)
{
	if (cn_ll->sourcetype == SOURCE_IN) {
		unsigned long iflags;
		struct cor_neighbor *nb = cn_ll->source.in.nb;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);
		list_del(&(cn_ll->source.in.nb_list));
		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
		cor_conn_kref_put_bug(cn_ll, "neighbor_list");

		if (cn_ll->source.in.conn_id != 0 &&
				(cn_ll->source.in.conn_id & (1 << 31)) != 0) {
			BUG_ON(cn_ll->source.in.cir != 0);
		} else if (cn_ll->source.in.conn_id != 0 &&
				(cn_ll->source.in.conn_id & (1 << 31)) == 0) {
			BUG_ON(cn_ll->source.in.cir == 0);

			kref_init(&(cn_ll->source.in.cir->ref));
			cn_ll->source.in.cir->conn_id =
					cn_ll->source.in.conn_id;
			cn_ll->source.in.cir->pingcnt =
					nb->connid_reuse_pingcnt;

			spin_lock_bh(&(nb->connid_reuse_lock));
			cor_insert_connid_reuse(nb, cn_ll->source.in.cir);
			list_add_tail(&(cn_ll->source.in.cir->lh),
					&(nb->connid_reuse_list));
			spin_unlock_bh(&(nb->connid_reuse_lock));

			cn_ll->source.in.cir = 0;
		}

		if (cn_ll->source.in.conn_id != 0) {
			spin_lock_bh(&(nb->connid_lock));
			rb_erase(&(cn_ll->source.in.rbn), &(nb->connid_rb));
			spin_unlock_bh(&(nb->connid_lock));
			cor_conn_kref_put_bug(cn_ll, "connid_table");
		}
		cn_ll->source.in.conn_id = 0;

		cor_free_ack_conns(cn_ll);

		if (cn_ll->is_client)
			atomic_dec(&cor_num_conns);

		cor_reset_ooo_queue(cn_ll);
	} else if (cn_ll->sourcetype == SOURCE_SOCK) {
		if (likely(cn_ll->source.sock.cs != 0)) {
			cor_sk_write_space(cn_ll->source.sock.cs);
			kref_put(&(cn_ll->source.sock.cs->ref), cor_free_sock);
			cn_ll->source.sock.cs = 0;
		}
		if (unlikely(cn_ll->source.sock.in_cl_list != 0)) {
			list_del(&(cn_ll->source.sock.cl_list));
			cn_ll->source.sock.in_cl_list = 0;
			cor_conn_kref_put_bug(cn_ll, "conn_queue");
		}
	}

	if (cn_ll->targettype == TARGET_UNCONNECTED) {
		if (cn_ll->target.unconnected.cmdparams != 0) {
			kfree(cn_ll->target.unconnected.cmdparams);
			cn_ll->target.unconnected.cmdparams = 0;
		}
	} else if (cn_ll->targettype == TARGET_OUT) {
		if (trgt_out_resetneeded && cn_ll->target.out.conn_id != 0) {
			cor_send_reset_conn(cn_ll->target.out.nb,
					cn_ll->target.out.conn_id, 0);
		}

		cn_ll->target.out.conn_id = 0;

		cor_cancel_all_conn_retrans(cn_ll);

		cor_qos_remove_conn(cn_ll);
	} else if (cn_ll->targettype == TARGET_SOCK) {
		if (likely(cn_ll->target.sock.cs != 0)) {
			if (cn_ll->target.sock.socktype == SOCKTYPE_RAW) {
				cor_sk_data_ready(cn_ll->target.sock.cs);
			} else {
				cor_mngdsocket_readfromconn_fromatomic(
						cn_ll->target.sock.cs);
			}
			kref_put(&(cn_ll->target.sock.cs->ref), cor_free_sock);
			cn_ll->target.sock.cs = 0;
			cn_ll->target.sock.rcv_buf = 0;
		}
	}

	cor_databuf_ackdiscard(cn_ll);

	cor_account_bufspace(cn_ll);

	cor_connreset_priority(cn_ll);
}
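/*
 * Reset both directions of a conn pair; both rcv_locks must be held. The
 * old isreset values are remembered so that _cor_reset_conn() only sends a
 * reset_conn message for a direction that had not been reset before
 * (old value 0).
 */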
void cor_reset_conn_locked(struct cor_conn_bidir *cnb_ll)
{
	BUG_ON(cnb_ll->cli.isreset <= 1 && cnb_ll->srv.isreset == 2);
	BUG_ON(cnb_ll->cli.isreset == 2 && cnb_ll->srv.isreset <= 1);

	if (cnb_ll->cli.isreset <= 1) {
		__u8 old_isreset_cli = cnb_ll->cli.isreset;
		__u8 old_isreset_srv = cnb_ll->srv.isreset;

		cnb_ll->cli.isreset = 2;
		cnb_ll->srv.isreset = 2;

		_cor_reset_conn(&(cnb_ll->cli), old_isreset_cli == 0);
		_cor_reset_conn(&(cnb_ll->srv), old_isreset_srv == 0);
	}
}
void cor_reset_conn(struct cor_conn *cn)
{
	struct cor_conn_bidir *cnb = cor_get_conn_bidir(cn);

	cor_conn_kref_get(&(cnb->cli), "stack");
	cor_conn_kref_get(&(cnb->srv), "stack");

	spin_lock_bh(&(cnb->cli.rcv_lock));
	spin_lock_bh(&(cnb->srv.rcv_lock));

	cor_reset_conn_locked(cnb);

	spin_unlock_bh(&(cnb->srv.rcv_lock));
	spin_unlock_bh(&(cnb->cli.rcv_lock));

	cor_conn_kref_put_bug(&(cnb->cli), "stack");
	cor_conn_kref_put(&(cnb->srv), "stack");
}
static int __init cor_init(void)
{
	int rc;

	struct cor_conn_bidir cb;
	struct cor_conn c;

	printk(KERN_ERR "sizeof cor_conn_bidir: %u", (__u32) sizeof(cb));
	printk(KERN_ERR "sizeof conn: %u", (__u32) sizeof(c));
	printk(KERN_ERR " conn.source: %u", (__u32) sizeof(c.source));
	printk(KERN_ERR " conn.source.in: %u", (__u32) sizeof(c.source.in));
	printk(KERN_ERR " conn.target: %u", (__u32) sizeof(c.target));
	printk(KERN_ERR " conn.target.out: %u", (__u32) sizeof(c.target.out));
	printk(KERN_ERR " conn.data_buf: %u", (__u32) sizeof(c.data_buf));
	printk(KERN_ERR " conn.bufsize: %u", (__u32) sizeof(c.bufsize));

	printk(KERN_ERR "sizeof cor_neighbor: %u",
			(__u32) sizeof(struct cor_neighbor));

	printk(KERN_ERR "sizeof mutex: %u", (__u32) sizeof(struct mutex));
	printk(KERN_ERR "sizeof spinlock: %u", (__u32) sizeof(spinlock_t));
	printk(KERN_ERR "sizeof kref: %u", (__u32) sizeof(struct kref));
	printk(KERN_ERR "sizeof list_head: %u",
			(__u32) sizeof(struct list_head));
	printk(KERN_ERR "sizeof rb_root: %u", (__u32) sizeof(struct rb_root));
	printk(KERN_ERR "sizeof rb_node: %u", (__u32) sizeof(struct rb_node));

	rc = cor_util_init();
	if (unlikely(rc != 0))
		return rc;

	cor_conn_slab = kmem_cache_create("cor_conn",
			sizeof(struct cor_conn_bidir), 8, 0, 0);
	if (unlikely(cor_conn_slab == 0))
		return -ENOMEM;

	cor_connid_reuse_slab = kmem_cache_create("cor_connid_reuse",
			sizeof(struct cor_connid_reuse_item), 8, 0, 0);
	if (unlikely(cor_connid_reuse_slab == 0))
		return -ENOMEM;

	atomic_set(&cor_num_conns, 0);
	barrier();

	rc = cor_forward_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_kgen_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_snd_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_neighbor_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_neigh_ann_rcv_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_dev_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rcv_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_managed_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_conn_src_sock_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_init2();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init2();
	if (unlikely(rc != 0))
		return rc;

	return 0;
}
static void __exit cor_exit(void)
{
	cor_rd_exit1();
	cor_sock_exit1();
	cor_conn_src_sock_exit1();
	cor_dev_exit1();

	flush_scheduled_work();

	cor_rcv_exit2();
	cor_neighbor_exit2();
	cor_neigh_ann_rcv_exit2();
	cor_snd_exit2();
	cor_rd_exit2();
	cor_kgen_exit2();
	cor_forward_exit2();

	BUG_ON(atomic_read(&cor_num_conns) != 0);

	kmem_cache_destroy(cor_conn_slab);
	cor_conn_slab = 0;

	kmem_cache_destroy(cor_connid_reuse_slab);
	cor_connid_reuse_slab = 0;
}
module_init(cor_init);
module_exit(cor_exit);
MODULE_LICENSE("GPL");