/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/mutex.h>

#include "cor.h"

DEFINE_SPINLOCK(cor_bindnodes);

static LIST_HEAD(cor_openports);

static struct kmem_cache *cor_conn_slab;
struct kmem_cache *cor_connid_reuse_slab;

atomic_t cor_num_conns;

int cor_new_incoming_conn_allowed(struct cor_neighbor *nb)
{
	/**
	 * MAX_CONNS is only loosely enforced for now
	 * (cor_num_conns is not checked and incremented at the same time)
	 */
	if (atomic_read(&cor_num_conns) >= MAX_CONNS)
		return 0;
	else
		return 1;
}

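/**
 * Scale an incoming (SOURCE_IN) priority by PRIORITY_IN_MULITPLIER_PERCENT.
 * For very large values the division is done first so that the
 * multiplication cannot overflow 64 bits, at the cost of some precision.
 */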
static __u64 cor_get_prio_in(__u64 priority)
{
	if (PRIORITY_IN_MULITPLIER_PERCENT >= 100)
		return priority;

	if (unlikely(priority > U64_MAX/100)) {
		return (priority / 100) * PRIORITY_IN_MULITPLIER_PERCENT;
	} else {
		return (priority * PRIORITY_IN_MULITPLIER_PERCENT) / 100;
	}
}

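/**
 * Limit a single conn priority when the per-neighbor priority_sum exceeds
 * PRIORITY_SUM_IN_MAX. The result is roughly
 * priority * PRIORITY_SUM_IN_MAX / priority_sum, with both large values
 * shifted right by the same amount so the divisor fits div_u64().
 */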
static __u32 cor_conn_prio_sum_limit(__u32 priority, __u64 priority_sum)
{
	__u32 shiftcnt = 0;

	while ((PRIORITY_SUM_IN_MAX >> shiftcnt) > U32_MAX) {
		shiftcnt++;
	}

	return div_u64(((__u64) priority) * (PRIORITY_SUM_IN_MAX >> shiftcnt),
			(priority_sum >> shiftcnt));
}

__u32 _cor_conn_refresh_priority(struct cor_conn *cn_lx)
{
	BUG_ON(cn_lx->is_client == 0);

	if (cn_lx->sourcetype == SOURCE_IN) {
		__u32 priority = (__u32)
				cor_get_prio_in(cn_lx->source.in.priority);
		__u64 priority_sum = cor_get_prio_in(atomic64_read(
				&(cn_lx->source.in.nb->priority_sum)));
		if (PRIORITY_SUM_IN_MAX != U64_MAX &&
				priority_sum > PRIORITY_SUM_IN_MAX) {
			return cor_conn_prio_sum_limit(priority, priority_sum);
		} else {
			return priority;
		}
	} else if (cn_lx->sourcetype == SOURCE_SOCK) {
		return cn_lx->source.sock.priority;
	} else {
		BUG();
		return 0;
	}
}

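/**
 * locked == 0: the rcv_locks of both conn directions are taken here (client
 *              side first); may be called on either direction.
 * locked != 0: the caller must already hold both rcv_locks and cn must be
 *              the client side conn.
 */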
__u32 cor_conn_refresh_priority(struct cor_conn *cn, int locked)
{
	struct cor_conn *cn_reversedir = cor_get_conn_reversedir(cn);
	__u32 priority = 0;

	if (likely(locked == 0)) {
		if (cn->is_client == 0)
			return cor_conn_refresh_priority(cn_reversedir, 0);

		spin_lock_bh(&(cn->rcv_lock));
		spin_lock_bh(&(cor_get_conn_reversedir(cn)->rcv_lock));
	} else {
		BUG_ON(cn->is_client == 0);
	}

	if (unlikely(cn->isreset != 0) || cn->targettype != TARGET_OUT)
		goto out;

	priority = _cor_conn_refresh_priority(cn);

	if (cn->target.out.priority_send_allowed != 0) {
		__u16 priority_enc = cor_enc_priority(priority);
		if (priority_enc != cn->target.out.priority_last)
			cor_send_priority(cn, priority_enc);
	}

out:
	if (likely(locked == 0)) {
		spin_unlock_bh(&(cor_get_conn_reversedir(cn)->rcv_lock));
		spin_unlock_bh(&(cn->rcv_lock));
	}

	return priority;
}

static void _cor_set_conn_in_priority(struct cor_conn *src_in_lx,
		__u32 newpriority)
{
	struct cor_neighbor *nb = src_in_lx->source.in.nb;

	__u32 oldpriority = src_in_lx->source.in.priority;

	cor_update_atomic_sum(&(nb->priority_sum),
			oldpriority, newpriority);

	src_in_lx->source.in.priority = newpriority;
}

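/**
 * Apply a remotely announced priority for src_in. The update is only
 * accepted if conn_id still belongs to nb and priority_seqno matches the
 * expected value (which is then advanced mod 16); the decoded priority is
 * reduced to 4/5 before it is stored.
 */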
void cor_set_conn_in_priority(struct cor_neighbor *nb, __u32 conn_id,
		struct cor_conn *src_in, __u8 priority_seqno, __u16 priority)
{
	__u32 newpriority;

	if (unlikely(src_in->is_client == 0))
		return;

	spin_lock_bh(&(src_in->rcv_lock));
	spin_lock_bh(&(cor_get_conn_reversedir(src_in)->rcv_lock));

	if (unlikely(cor_is_conn_in(src_in, nb, conn_id) == 0))
		goto out;

	if (src_in->source.in.priority_seqno != priority_seqno)
		goto out;
	src_in->source.in.priority_seqno =
			(src_in->source.in.priority_seqno + 1) & 15;

	newpriority = (cor_dec_priority(priority)*4)/5;
	_cor_set_conn_in_priority(src_in, newpriority);
	cor_conn_refresh_priority(src_in, 1);

out:
	spin_unlock_bh(&(cor_get_conn_reversedir(src_in)->rcv_lock));
	spin_unlock_bh(&(src_in->rcv_lock));
}

static void cor_connreset_priority(struct cor_conn *cn_lx)
{
	if (cn_lx->is_client == 0)
		return;

	if (cn_lx->sourcetype == SOURCE_IN)
		_cor_set_conn_in_priority(cn_lx, 0);
}

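/**
 * Record conn activity: update jiffies_last_act and move the conn to the
 * tail of nb->snd_conn_list, so the list stays ordered by last activity
 * (least recently active conns at the head).
 */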
void _cor_set_last_act(struct cor_conn *trgt_out_l)
{
	unsigned long iflags;
	struct cor_neighbor *nb = trgt_out_l->target.out.nb;

	trgt_out_l->target.out.jiffies_last_act = jiffies;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	list_del(&(trgt_out_l->target.out.nb_list));
	list_add_tail(&(trgt_out_l->target.out.nb_list), &(nb->snd_conn_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
}

static void _cor_free_conn(struct cor_conn *cn)
{
	BUG_ON(cn->isreset == 0);

	if (cn->sourcetype == SOURCE_IN) {
		WARN_ONCE(list_empty(&(cn->source.in.reorder_queue)) == 0,
				"cor_free_conn(): cn->source.in.reorder_queue is not empty");
		WARN_ONCE(list_empty(&(cn->source.in.acks_pending)) == 0,
				"cor_free_conn(): cn->source.in.acks_pending is not empty");

		WARN_ONCE(cn->source.in.conn_id != 0,
				"cor_free_conn(): cn->source.in.conn_id is not 0");
		cor_nb_kref_put(cn->source.in.nb, "conn");
		cn->source.in.nb = 0;
	}

	if (cn->targettype == TARGET_OUT) {
		WARN_ONCE(list_empty(&(cn->target.out.retrans_list)) == 0,
				"cor_free_conn(): cn->target.out.retrans_list is not empty");
		WARN_ONCE(cn->target.out.rb.in_queue != RB_INQUEUE_FALSE,
				"cor_free_conn(): cn->target.out.rb.in_queue is not RB_INQUEUE_FALSE");
		WARN_ONCE(cn->target.out.conn_id != 0,
				"cor_free_conn(): cn->target.out.conn_id is not 0");
		cor_nb_kref_put(cn->target.out.nb, "conn");
		cn->target.out.nb = 0;
	}

	WARN_ONCE(cn->data_buf.datasize != 0,
			"cor_free_conn(): cn->data_buf.datasize is not 0");
	WARN_ONCE(cn->data_buf.overhead != 0,
			"cor_free_conn(): cn->data_buf.overhead is not 0");
	WARN_ONCE(list_empty(&(cn->data_buf.items)) == 0,
			"cor_free_conn(): cn->data_buf.items is not empty");
	WARN_ONCE(cn->data_buf.nextread != 0,
			"cor_free_conn(): cn->data_buf.nextread is not 0");
}

void cor_free_conn(struct kref *ref)
{
	struct cor_conn_bidir *cnb = container_of(ref, struct cor_conn_bidir,
			ref);

	_cor_free_conn(&(cnb->cli));
	_cor_free_conn(&(cnb->srv));

	memset(cnb, 9*16 + 10, sizeof(struct cor_conn_bidir));
	kmem_cache_free(cor_conn_slab, cnb);
}

/**
 * rc == 0 ==> ok
 * rc == 1 ==> connid_reuse or connid allocation failed
 */
int cor_conn_init_out(struct cor_conn *trgt_unconn_ll, struct cor_neighbor *nb,
		__u32 rcvd_connid, int use_rcvd_connid)
{
	unsigned long iflags;
	struct cor_conn *src_unconn_ll =
			cor_get_conn_reversedir(trgt_unconn_ll);
	__u8 tmp;

	BUG_ON(trgt_unconn_ll->targettype != TARGET_UNCONNECTED);
	BUG_ON(src_unconn_ll == 0);
	BUG_ON(src_unconn_ll->sourcetype != SOURCE_UNCONNECTED);

	memset(&(trgt_unconn_ll->target.out), 0,
			sizeof(trgt_unconn_ll->target.out));
	memset(&(src_unconn_ll->source.in), 0,
			sizeof(src_unconn_ll->source.in));

	trgt_unconn_ll->targettype = TARGET_OUT;
	src_unconn_ll->sourcetype = SOURCE_IN;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	if (unlikely(cor_get_neigh_state(nb) == NEIGHBOR_STATE_KILLED))
		goto out_err;

	if (use_rcvd_connid) {
		BUG_ON((rcvd_connid & (1 << 31)) == 0);

		src_unconn_ll->source.in.conn_id = rcvd_connid;
		if (unlikely(cor_insert_connid(nb, src_unconn_ll) != 0)) {
			src_unconn_ll->source.in.conn_id = 0;
			goto out_err;
		}
	} else {
		src_unconn_ll->source.in.cir = kmem_cache_alloc(
				cor_connid_reuse_slab, GFP_ATOMIC);
		if (unlikely(src_unconn_ll->source.in.cir == 0))
			goto out_err;

		memset(src_unconn_ll->source.in.cir, 0,
				sizeof(struct cor_connid_reuse_item));

		if (unlikely(cor_connid_alloc(nb, src_unconn_ll))) {
			kmem_cache_free(cor_connid_reuse_slab,
					src_unconn_ll->source.in.cir);
			src_unconn_ll->source.in.cir = 0;
			goto out_err;
		}
	}

	list_add_tail(&(trgt_unconn_ll->target.out.nb_list),
			&(nb->snd_conn_list));
	cor_conn_kref_get(trgt_unconn_ll, "neighbor_list");

	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

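	/*
	 * The if (0) block below is only reachable through the out_err gotos
	 * above; it undoes the source/target type change and reports failure.
	 */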
	if (0) {
out_err:
		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
		trgt_unconn_ll->targettype = TARGET_UNCONNECTED;
		src_unconn_ll->sourcetype = SOURCE_UNCONNECTED;
		return 1;
	}

	trgt_unconn_ll->target.out.nb = nb;
	src_unconn_ll->source.in.nb = nb;
	cor_nb_kref_get(nb, "conn");
	cor_nb_kref_get(nb, "conn");

	INIT_LIST_HEAD(&(src_unconn_ll->source.in.reorder_queue));
	INIT_LIST_HEAD(&(src_unconn_ll->source.in.acks_pending));
	INIT_LIST_HEAD(&(trgt_unconn_ll->target.out.retrans_list));

	cor_reset_seqno(trgt_unconn_ll, 0);
	if (use_rcvd_connid == 0) {
		get_random_bytes((char *)
				&(trgt_unconn_ll->target.out.seqno_nextsend),
				sizeof(
				trgt_unconn_ll->target.out.seqno_nextsend));
		trgt_unconn_ll->target.out.seqno_acked =
				trgt_unconn_ll->target.out.seqno_nextsend;
		trgt_unconn_ll->target.out.seqno_windowlimit =
				trgt_unconn_ll->target.out.seqno_nextsend;
		cor_reset_seqno(trgt_unconn_ll,
				trgt_unconn_ll->target.out.seqno_nextsend);

		get_random_bytes((char *)
				&(src_unconn_ll->source.in.next_seqno),
				sizeof(src_unconn_ll->source.in.next_seqno));
		src_unconn_ll->source.in.window_seqnolimit =
				src_unconn_ll->source.in.next_seqno;
		src_unconn_ll->source.in.window_seqnolimit_remote =
				src_unconn_ll->source.in.next_seqno;
	}

	get_random_bytes((char *) &tmp, 1);
	trgt_unconn_ll->target.out.priority_seqno = (tmp & 15);

	src_unconn_ll->source.in.priority_seqno = 0;

	trgt_unconn_ll->target.out.jiffies_last_act = jiffies;

	if (src_unconn_ll->is_highlatency)
		trgt_unconn_ll->target.out.jiffies_idle_since =
				(jiffies -
				BURSTPRIO_MAXIDLETIME_HIGHLATENCY_SECS * HZ) <<
				JIFFIES_LAST_IDLE_SHIFT;
	else
		trgt_unconn_ll->target.out.jiffies_idle_since =
				jiffies << JIFFIES_LAST_IDLE_SHIFT;

	trgt_unconn_ll->target.out.remote_bufsize_changerate = 64;

	if (src_unconn_ll->is_client)
		atomic_inc(&cor_num_conns);

	if (use_rcvd_connid == 0)
		cor_update_windowlimit(src_unconn_ll);

	return 0;
}

void cor_conn_init_sock_source(struct cor_conn *cn)
{
	BUG_ON(cn == 0);
	cn->sourcetype = SOURCE_SOCK;
	memset(&(cn->source.sock), 0, sizeof(cn->source.sock));
	cn->source.sock.priority = cor_priority_max();
	cn->source.sock.snd_speed.jiffies_last_refresh = jiffies;
	cn->source.sock.snd_speed.flushed = 1;
}

void cor_conn_init_sock_target(struct cor_conn *cn)
{
	BUG_ON(cn == 0);
	cn->targettype = TARGET_SOCK;
	memset(&(cn->target.sock), 0, sizeof(cn->target.sock));
	cor_reset_seqno(cn, 0);
}

static void _cor_alloc_conn(struct cor_conn *cn, __u8 is_highlatency)
{
	cn->sourcetype = SOURCE_UNCONNECTED;
	cn->targettype = TARGET_UNCONNECTED;

	cn->isreset = 0;

	spin_lock_init(&(cn->rcv_lock));

	cor_databuf_init(cn);

	cor_bufsize_init(cn, 0);

	if (is_highlatency == 0) {
		cn->is_highlatency = 0;
		cn->bufsize.bufsize =
				(BUFSIZE_INITIAL_LOWLAT << BUFSIZE_SHIFT);
	} else {
		cn->is_highlatency = 1;
		cn->bufsize.bufsize =
				(BUFSIZE_INITIAL_HIGHLAT << BUFSIZE_SHIFT);
	}
}

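/**
 * Allocate a zeroed cor_conn_bidir (client + server direction) from
 * cor_conn_slab; only the cli direction gets is_client set.
 */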
struct cor_conn_bidir *cor_alloc_conn(gfp_t allocflags, __u8 is_highlatency)
{
	struct cor_conn_bidir *cnb;

	cnb = kmem_cache_alloc(cor_conn_slab, allocflags);
	if (unlikely(cnb == 0))
		return 0;

	memset(cnb, 0, sizeof(struct cor_conn_bidir));

	cnb->cli.is_client = 1;
	kref_init(&(cnb->ref));

	_cor_alloc_conn(&(cnb->cli), is_highlatency);
	_cor_alloc_conn(&(cnb->srv), is_highlatency);

	return cnb;
}

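/* cor_openports is protected by cor_bindnodes; callers hold that lock. */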
static struct cor_sock *cor_get_corsock_by_port(__be32 port)
{
	struct list_head *curr = cor_openports.next;

	while (curr != &cor_openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);
		if (cs->data.listener.port == port)
			return cs;

		curr = curr->next;
	}

	return 0;
}

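/**
 * Output format written to buf: a variable length row count encoded by
 * cor_encode_len() (reserved as 4 bytes, moved down if it ends up shorter),
 * followed by one 4 byte port per published listener.
 */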
__u32 cor_list_services(char *buf, __u32 buflen)
{
	__u32 cnt = 0;

	__u32 buf_offset = 4;

	struct list_head *curr;
	int rc;

	/**
	 * The variable length header (the row count) needs to be generated
	 * after the data. This is done by reserving the maximum space it
	 * could take. If it ends up being smaller, the data is moved down so
	 * that there is no gap.
	 */

	BUG_ON(buf == 0);
	BUG_ON(buflen < buf_offset);

	spin_lock_bh(&cor_bindnodes);

	curr = cor_openports.next;
	while (curr != &cor_openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);

		if (cs->data.listener.publish_service == 0)
			goto cont;

		if (unlikely(buf_offset + 4 < buf_offset) ||
				buf_offset + 4 > buflen)
			break;

		buf[buf_offset] = ((char *) &(cs->data.listener.port))[0];
		buf[buf_offset+1] = ((char *) &(cs->data.listener.port))[1];
		buf[buf_offset+2] = ((char *) &(cs->data.listener.port))[2];
		buf[buf_offset+3] = ((char *) &(cs->data.listener.port))[3];
		buf_offset += 4;
		cnt++;

cont:
		curr = curr->next;
	}

	spin_unlock_bh(&cor_bindnodes);

	rc = cor_encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);
	BUG_ON(rc > 4);

	if (likely(rc < 4))
		memmove(buf + ((__u32) rc), buf+4, buf_offset);

	return buf_offset - 4 + ((__u32) rc);
}

void cor_set_publish_service(struct cor_sock *cs, __u8 value)
{
	BUG_ON(value != 0 && value != 1);

	mutex_lock(&(cs->lock));

	cs->publish_service = value;

	if (cs->type == CS_TYPE_LISTENER) {
		spin_lock_bh(&cor_bindnodes);
		cs->data.listener.publish_service = value;
		spin_unlock_bh(&cor_bindnodes);
	}

	mutex_unlock(&(cs->lock));
}

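/**
 * Remove the listener from cor_openports and reset every conn still waiting
 * in its accept queue; cor_bindnodes is dropped while cor_reset_conn() runs.
 */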
void cor_close_port(struct cor_sock *cs)
{
	mutex_lock(&(cs->lock));
	if (unlikely(cs->type != CS_TYPE_LISTENER))
		goto out;

	spin_lock_bh(&cor_bindnodes);

	list_del(&(cs->data.listener.lh));

	while (list_empty(&(cs->data.listener.conn_queue)) == 0) {
		struct cor_conn *src_sock_o = container_of(
				cs->data.listener.conn_queue.next,
				struct cor_conn, source.sock.cl_list);
		BUG_ON(src_sock_o->source.sock.in_cl_list == 0);
		list_del(&(src_sock_o->source.sock.cl_list));
		src_sock_o->source.sock.in_cl_list = 0;
		spin_unlock_bh(&cor_bindnodes);

		cor_reset_conn(src_sock_o);

		spin_lock_bh(&cor_bindnodes);
		cor_conn_kref_put(src_sock_o, "conn_queue");
	}

	spin_unlock_bh(&cor_bindnodes);
out:
	mutex_unlock(&(cs->lock));
}

int cor_open_port(struct cor_sock *cs_l, __be32 port)
{
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);
	if (cor_get_corsock_by_port(port) != 0) {
		rc = -EADDRINUSE;
		goto out;
	}

	BUG_ON(cs_l->type != CS_TYPE_UNCONNECTED);

	cs_l->type = CS_TYPE_LISTENER;
	cs_l->data.listener.port = port;
	cs_l->data.listener.publish_service = cs_l->publish_service;

	/* kref is not used here */
	INIT_LIST_HEAD(&(cs_l->data.listener.conn_queue));

	list_add_tail((struct list_head *) &(cs_l->data.listener.lh),
			&cor_openports);

out:
	spin_unlock_bh(&cor_bindnodes);

	return rc;
}

/**
 * rc == 0 connected
 * rc == 2 port not open
 * rc == 3 listener queue full
 */
int cor_connect_port(struct cor_conn *trgt_unconn_ll, __be32 port)
{
	struct cor_conn *src_unconn_ll =
			cor_get_conn_reversedir(trgt_unconn_ll);
	struct cor_sock *cs;
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);

	cs = cor_get_corsock_by_port(port);
	if (cs == 0) {
		rc = 2;
		goto out;
	}

	if (unlikely(cs->data.listener.queue_len >=
			cs->data.listener.queue_maxlen)) {
		if (cs->data.listener.queue_maxlen <= 0)
			rc = 2;
		else
			rc = 3;

		goto out;
	}

	BUG_ON(trgt_unconn_ll->is_client != 1);
	cor_conn_init_sock_target(trgt_unconn_ll);
	cor_conn_init_sock_source(src_unconn_ll);

	list_add_tail(&(src_unconn_ll->source.sock.cl_list),
			&(cs->data.listener.conn_queue));
	src_unconn_ll->source.sock.in_cl_list = 1;
	cor_conn_kref_get(src_unconn_ll, "conn_queue");

	cs->data.listener.queue_len++;
	atomic_set(&(cs->ready_to_accept), 1);
	barrier();
	cs->sk.sk_state_change(&(cs->sk));

out:
	spin_unlock_bh(&cor_bindnodes);
	return rc;
}

/**
 * rc == 0 connected
 * rc == 3 addr not found
 * rc == 4 connid allocation failed
 * rc == 4 control msg alloc failed
 */
int cor_connect_neigh(struct cor_conn *trgt_unconn_ll, char *addr,
		__u16 addrlen)
{
	struct cor_control_msg_out *cm;
	struct cor_neighbor *nb = 0;

	nb = cor_find_neigh(addr, addrlen);
	if (nb == 0)
		return 3;

	cm = cor_alloc_control_msg(nb, ACM_PRIORITY_MED);
	if (unlikely(cm == 0)) {
		cor_nb_kref_put(nb, "stack");
		return 4;
	}

	if (unlikely(cor_conn_init_out(trgt_unconn_ll, nb, 0, 0))) {
		cor_free_control_msg(cm);
		cor_nb_kref_put(nb, "stack");
		return 4;
	}

	trgt_unconn_ll->target.out.priority_last =
			_cor_conn_refresh_priority(trgt_unconn_ll);

	cor_send_connect_nb(cm, trgt_unconn_ll->target.out.conn_id,
			trgt_unconn_ll->target.out.seqno_nextsend,
			cor_get_conn_reversedir(trgt_unconn_ll)->
			source.in.next_seqno,
			cor_get_conn_reversedir(trgt_unconn_ll));

	cor_nb_kref_put(nb, "stack");

	return 0;
}

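/**
 * Tear down one conn direction: release its conn_id (queueing a
 * connid_reuse_item for locally allocated ids), unlink it from the
 * neighbor/socket structures, send a reset_conn message for a TARGET_OUT
 * conn if trgt_out_resetneeded is set and discard any buffered data.
 */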
static void _cor_reset_conn(struct cor_conn *cn_ll, int trgt_out_resetneeded)
{
	unsigned long iflags;

	if (cn_ll->sourcetype == SOURCE_IN) {
		struct cor_neighbor *nb = cn_ll->source.in.nb;

		if (cn_ll->source.in.conn_id != 0 &&
				(cn_ll->source.in.conn_id & (1 << 31)) != 0) {
			BUG_ON(cn_ll->source.in.cir != 0);
		} else if (cn_ll->source.in.conn_id != 0 &&
				(cn_ll->source.in.conn_id & (1 << 31)) == 0) {
			BUG_ON(cn_ll->source.in.cir == 0);

			kref_init(&(cn_ll->source.in.cir->ref));
			cn_ll->source.in.cir->conn_id =
					cn_ll->source.in.conn_id;
			cn_ll->source.in.cir->pingcnt =
					nb->connid_reuse_pingcnt;

			spin_lock_irqsave(&(nb->connid_reuse_lock), iflags);
			cor_insert_connid_reuse(nb, cn_ll->source.in.cir);
			list_add_tail(&(cn_ll->source.in.cir->lh),
					&(nb->connid_reuse_list));
			spin_unlock_irqrestore(&(nb->connid_reuse_lock),
					iflags);

			cn_ll->source.in.cir = 0;
		}

		if (cn_ll->source.in.conn_id != 0) {
			spin_lock_irqsave(&(nb->connid_lock), iflags);
			rb_erase(&(cn_ll->source.in.rbn), &(nb->connid_rb));
			spin_unlock_irqrestore(&(nb->connid_lock), iflags);
			cor_conn_kref_put_bug(cn_ll, "connid_table");
		}
		cn_ll->source.in.conn_id = 0;

		cor_free_ack_conns(cn_ll);

		if (cn_ll->is_client)
			atomic_dec(&cor_num_conns);

		cor_reset_ooo_queue(cn_ll);
	} else if (cn_ll->sourcetype == SOURCE_SOCK) {
		if (likely(cn_ll->source.sock.cs != 0)) {
			cor_sk_write_space(cn_ll->source.sock.cs);
			kref_put(&(cn_ll->source.sock.cs->ref), cor_free_sock);
			cn_ll->source.sock.cs = 0;
		}
		if (unlikely(cn_ll->source.sock.in_cl_list != 0)) {
			list_del(&(cn_ll->source.sock.cl_list));
			cn_ll->source.sock.in_cl_list = 0;
			cor_conn_kref_put_bug(cn_ll, "conn_queue");
		}
	}

	if (cn_ll->targettype == TARGET_UNCONNECTED) {
		if (cn_ll->target.unconnected.cmdparams != 0) {
			kfree(cn_ll->target.unconnected.cmdparams);
			cn_ll->target.unconnected.cmdparams = 0;
		}
	} else if (cn_ll->targettype == TARGET_OUT) {
		struct cor_neighbor *nb = cn_ll->target.out.nb;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);
		list_del(&(cn_ll->target.out.nb_list));
		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
		cor_conn_kref_put_bug(cn_ll, "neighbor_list");

		if (trgt_out_resetneeded && cn_ll->target.out.conn_id != 0) {
			cor_send_reset_conn(cn_ll->target.out.nb,
					cn_ll->target.out.conn_id, 0);
		}

		cn_ll->target.out.conn_id = 0;

		cor_cancel_all_conn_retrans(cn_ll);

		cor_qos_remove_conn(cn_ll);
	} else if (cn_ll->targettype == TARGET_SOCK) {
		if (likely(cn_ll->target.sock.cs != 0)) {
			if (cn_ll->target.sock.socktype == SOCKTYPE_RAW) {
				cor_sk_data_ready(cn_ll->target.sock.cs);
			} else {
				cor_mngdsocket_readfromconn_fromatomic(
						cn_ll->target.sock.cs);
			}
			kref_put(&(cn_ll->target.sock.cs->ref), cor_free_sock);
			cn_ll->target.sock.cs = 0;
			cn_ll->target.sock.rcv_buf = 0;
		}
	}

	cor_databuf_ackdiscard(cn_ll);

	cor_account_bufspace(cn_ll);

	cor_connreset_priority(cn_ll);
}

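/**
 * Reset both directions of a conn pair. Both rcv_locks must be held.
 * isreset values as used here: 0 and 1 mean the direction has not been
 * reset yet (0 apparently also meaning a reset_conn message still has to
 * be sent), 2 means the reset has already been done.
 */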
void cor_reset_conn_locked(struct cor_conn_bidir *cnb_ll)
{
	BUG_ON(cnb_ll->cli.isreset <= 1 && cnb_ll->srv.isreset == 2);
	BUG_ON(cnb_ll->cli.isreset == 2 && cnb_ll->srv.isreset <= 1);

	if (cnb_ll->cli.isreset <= 1) {
		__u8 old_isreset_cli = cnb_ll->cli.isreset;
		__u8 old_isreset_srv = cnb_ll->srv.isreset;

		cnb_ll->cli.isreset = 2;
		cnb_ll->srv.isreset = 2;

		_cor_reset_conn(&(cnb_ll->cli), old_isreset_cli == 0);
		_cor_reset_conn(&(cnb_ll->srv), old_isreset_srv == 0);
	}
}

void cor_reset_conn(struct cor_conn *cn)
{
	struct cor_conn_bidir *cnb = cor_get_conn_bidir(cn);

	cor_conn_kref_get(&(cnb->cli), "stack");
	cor_conn_kref_get(&(cnb->srv), "stack");

	spin_lock_bh(&(cnb->cli.rcv_lock));
	spin_lock_bh(&(cnb->srv.rcv_lock));

	cor_reset_conn_locked(cnb);

	spin_unlock_bh(&(cnb->srv.rcv_lock));
	spin_unlock_bh(&(cnb->cli.rcv_lock));

	cor_conn_kref_put_bug(&(cnb->cli), "stack");
	cor_conn_kref_put(&(cnb->srv), "stack");
}

static int __init cor_init(void)
{
	int rc;

	struct cor_conn_bidir cb;
	struct cor_conn c;

	printk(KERN_ERR "sizeof cor_conn_bidir: %u\n", (__u32) sizeof(cb));
	printk(KERN_ERR "sizeof conn: %u\n", (__u32) sizeof(c));
	printk(KERN_ERR " conn.source: %u\n", (__u32) sizeof(c.source));
	printk(KERN_ERR " conn.source.in: %u\n", (__u32) sizeof(c.source.in));
	printk(KERN_ERR " conn.target: %u\n", (__u32) sizeof(c.target));
	printk(KERN_ERR " conn.target.out: %u\n",
			(__u32) sizeof(c.target.out));
	printk(KERN_ERR " conn.data_buf: %u\n", (__u32) sizeof(c.data_buf));
	printk(KERN_ERR " conn.bufsize: %u\n", (__u32) sizeof(c.bufsize));

	printk(KERN_ERR "sizeof cor_neighbor: %u\n",
			(__u32) sizeof(struct cor_neighbor));

	printk(KERN_ERR "sizeof mutex: %u\n", (__u32) sizeof(struct mutex));
	printk(KERN_ERR "sizeof spinlock: %u\n", (__u32) sizeof(spinlock_t));
	printk(KERN_ERR "sizeof kref: %u\n", (__u32) sizeof(struct kref));
	printk(KERN_ERR "sizeof list_head: %u\n",
			(__u32) sizeof(struct list_head));
	printk(KERN_ERR "sizeof rb_root: %u\n", (__u32) sizeof(struct rb_root));
	printk(KERN_ERR "sizeof rb_node: %u\n", (__u32) sizeof(struct rb_node));

	rc = cor_util_init();
	if (unlikely(rc != 0))
		return rc;

	cor_conn_slab = kmem_cache_create("cor_conn",
			sizeof(struct cor_conn_bidir), 8, 0, 0);
	if (unlikely(cor_conn_slab == 0))
		return -ENOMEM;

	cor_connid_reuse_slab = kmem_cache_create("cor_connid_reuse",
			sizeof(struct cor_connid_reuse_item), 8, 0, 0);
	if (unlikely(cor_connid_reuse_slab == 0))
		return -ENOMEM;

	atomic_set(&cor_num_conns, 0);
	barrier();

	rc = cor_forward_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_kgen_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_snd_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_neighbor_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_neigh_ann_rcv_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_dev_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rcv_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_managed_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_conn_src_sock_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_init2();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init2();
	if (unlikely(rc != 0))
		return rc;

	return 0;
}

static void __exit cor_exit(void)
{
	cor_rd_exit1();
	cor_sock_exit1();
	cor_conn_src_sock_exit1();
	cor_dev_exit1();

	flush_scheduled_work();

	cor_rcv_exit2();
	cor_neighbor_exit2();
	cor_neigh_ann_rcv_exit2();
	cor_snd_exit2();
	cor_rd_exit2();
	cor_kgen_exit2();
	cor_forward_exit2();

	BUG_ON(atomic_read(&cor_num_conns) != 0);

	kmem_cache_destroy(cor_conn_slab);
	cor_conn_slab = 0;

	kmem_cache_destroy(cor_connid_reuse_slab);
	cor_connid_reuse_slab = 0;
}

module_init(cor_init);
module_exit(cor_exit);
MODULE_LICENSE("GPL");