neigh_snd: race condition fix, shorter connect_neigh cmd, 32 bit ports
[cor.git] / net / cor / conn.c
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/mutex.h>

#include "cor.h"
DEFINE_SPINLOCK(cor_bindnodes);
static DEFINE_SPINLOCK(cor_conn_free);

static LIST_HEAD(cor_openports);

static struct kmem_cache *cor_conn_slab;
struct kmem_cache *cor_connid_reuse_slab;

atomic_t cor_num_conns;
int cor_newconn_checkpriority(struct cor_neighbor *nb, __u8 priority)
{
	// if (cor_num_conns >= MAX_CONNS) ...
#warning todo reset other conn
	return 0;
}
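
/*
 * Priority refresh: if the sum of the priorities of all conns coming in from
 * a neighbor exceeds PRIORITY_MAX, each conn's effective priority is scaled
 * down proportionally to priority * PRIORITY_MAX / priority_sum. Worked
 * example: with priority_sum == 4 * PRIORITY_MAX, a conn with priority
 * PRIORITY_MAX / 2 is advertised as PRIORITY_MAX / 8. The log-encoded value
 * is transmitted whenever it changes and sending is allowed; the "force"
 * flag is set when it jumps by more than one encoding step.
 */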
__u32 cor_refresh_conn_priority(struct cor_conn *cn, int locked)
{
	__u32 priority = 0;
	__u8 priority_last;
	__u8 priority_enc;

	if (likely(locked == 0)) {
		if (cn->is_client == 0)
			return cor_refresh_conn_priority(cn->reversedir, 0);

		spin_lock_bh(&(cn->rcv_lock));
		spin_lock_bh(&(cn->reversedir->rcv_lock));
	}

	if (unlikely(cn->isreset != 0))
		goto out;

	if (cn->targettype != TARGET_OUT) {
		priority = 0;
		goto out;
	}

	if (cn->sourcetype == SOURCE_IN) {
		__u64 priority_sum = atomic64_read(
				&(cn->source.in.nb->priority_sum));

		if (priority_sum > PRIORITY_MAX) {
			priority = div_u64(((__u64) cn->source.in.priority) *
					PRIORITY_MAX, priority_sum);
		} else {
			priority = cn->source.in.priority;
		}
	} else if (cn->sourcetype == SOURCE_SOCK) {
		priority = cn->source.sock.priority;
	} else {
		BUG();
	}

	if (cn->target.out.priority_send_allowed == 0)
		goto out;

	priority_enc = cor_enc_log_256_16(priority);

	priority_last = cn->target.out.priority_last;
	if (priority_enc != priority_last) {
		int force = (priority_enc != priority_last + 1 &&
				priority_enc != priority_last - 1);

		cor_send_priority(cn, force, priority_enc);
	}

out:
	if (likely(locked == 0)) {
		spin_unlock_bh(&(cn->reversedir->rcv_lock));
		spin_unlock_bh(&(cn->rcv_lock));
	}

	return priority;
}
static void _cor_set_conn_in_priority(struct cor_conn *src_in_lx,
		__u32 newpriority)
{
	struct cor_neighbor *nb = src_in_lx->source.in.nb;
	__u32 oldpriority = src_in_lx->source.in.priority;

	cor_update_atomic_sum(&(nb->priority_sum), oldpriority, newpriority);

	src_in_lx->source.in.priority = newpriority;
}
void cor_set_conn_in_priority(struct cor_neighbor *nb, __u32 conn_id,
		struct cor_conn *src_in, __u8 priority_seqno, __u8 priority)
{
	__u32 newpriority;

	if (unlikely(src_in->is_client == 0))
		return;

	spin_lock_bh(&(src_in->rcv_lock));
	spin_lock_bh(&(src_in->reversedir->rcv_lock));

	if (unlikely(cor_is_conn_in(src_in, nb, conn_id) == 0))
		goto out;

	if (src_in->source.in.priority_seqno != priority_seqno)
		goto out;
	src_in->source.in.priority_seqno++;

	newpriority = (cor_dec_log_256_16(priority) * 4) / 5;
	_cor_set_conn_in_priority(src_in, newpriority);
	cor_refresh_conn_priority(src_in, 1);

out:
	spin_unlock_bh(&(src_in->reversedir->rcv_lock));
	spin_unlock_bh(&(src_in->rcv_lock));
}
static void cor_connreset_priority(struct cor_conn *cn_lx)
{
	if (cn_lx->is_client == 0)
		return;

	if (cn_lx->sourcetype == SOURCE_IN)
		_cor_set_conn_in_priority(cn_lx, 0);
}
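
/*
 * Moves the conn to the tail of its neighbor's rcv_conn_list and stamps
 * jiffies_last_act, so the list stays ordered from least to most recently
 * active.
 */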
void _cor_set_last_act(struct cor_conn *src_in_l)
{
	unsigned long iflags;

	src_in_l->source.in.jiffies_last_act = jiffies;

	spin_lock_irqsave(&(src_in_l->source.in.nb->conn_list_lock), iflags);
	list_del(&(src_in_l->source.in.nb_list));
	list_add_tail(&(src_in_l->source.in.nb_list),
			&(src_in_l->source.in.nb->rcv_conn_list));
	spin_unlock_irqrestore(&(src_in_l->source.in.nb->conn_list_lock),
			iflags);
}
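
/*
 * Freeing protocol (as implemented below): whichever direction reaches
 * cor_free_conn first marks its partner with isreset == 3 and bails out;
 * only when the partner's refcount drops too are both conns unlinked and
 * freed. The freed struct is filled with the poison byte 0x9a (9*16 + 10)
 * so that use-after-free shows up as a recognizable pattern.
 */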
void cor_free_conn(struct kref *ref)
{
	unsigned long iflags;
	struct cor_conn *cn = container_of(ref, struct cor_conn, ref);
	struct cor_conn *reversedir = 0;

	spin_lock_irqsave(&cor_conn_free, iflags);

	BUG_ON(cn->isreset == 0);

	if (cn->reversedir != 0)
		cn->reversedir->isreset = 3;

	if (cn->isreset != 3)
		goto out;

	if (cn->reversedir != 0) {
		cn->reversedir->reversedir = 0;
		reversedir = cn->reversedir;
		cn->reversedir = 0;
	}

	if (cn->sourcetype == SOURCE_IN) {
		WARN_ONCE(list_empty(&(cn->source.in.reorder_queue)) == 0,
				"cor_free_conn(): cn->source.in.reorder_queue is not empty");
		WARN_ONCE(list_empty(&(cn->source.in.acks_pending)) == 0,
				"cor_free_conn(): cn->source.in.acks_pending is not empty");
		WARN_ONCE(cn->source.in.conn_id != 0,
				"cor_free_conn(): cn->source.in.conn_id is not 0");
		kref_put(&(cn->source.in.nb->ref), cor_neighbor_free);
		cn->source.in.nb = 0;
	}

	if (cn->targettype == TARGET_OUT) {
		WARN_ONCE(list_empty(&(cn->target.out.retrans_list)) == 0,
				"cor_free_conn(): cn->target.out.retrans_list is not empty");
		WARN_ONCE(cn->target.out.rb.in_queue != RB_INQUEUE_FALSE,
				"cor_free_conn(): cn->target.out.rb.in_queue is not RB_INQUEUE_FALSE");
		WARN_ONCE(cn->target.out.conn_id != 0,
				"cor_free_conn(): cn->target.out.conn_id is not 0");
		kref_put(&(cn->target.out.nb->ref), cor_neighbor_free);
		cn->target.out.nb = 0;
	}

	WARN_ONCE(cn->data_buf.datasize != 0,
			"cor_free_conn(): cn->data_buf.datasize is not 0");
	WARN_ONCE(cn->data_buf.overhead != 0,
			"cor_free_conn(): cn->data_buf.overhead is not 0");
	WARN_ONCE(list_empty(&(cn->data_buf.items)) == 0,
			"cor_free_conn(): cn->data_buf.items is not empty");
	WARN_ONCE(cn->data_buf.nextread != 0,
			"cor_free_conn(): cn->data_buf.nextread is not 0");

	memset(cn, 9*16 + 10, sizeof(struct cor_conn));
	kmem_cache_free(cor_conn_slab, cn);

out:
	spin_unlock_irqrestore(&cor_conn_free, iflags);

	if (reversedir != 0)
		cor_free_conn(&(reversedir->ref));
}
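
/*
 * conn_id convention visible below: connids received from the remote side
 * must have bit 31 set, while locally allocated connids (cor_connid_alloc())
 * have it cleared and carry a cor_connid_reuse_item so the id can be
 * quarantined after the conn dies (see _cor_reset_conn()).
 */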
/**
 * rc == 0 ==> ok
 * rc == 1 ==> connid_reuse or connid allocation failed
 */
int cor_conn_init_out(struct cor_conn *trgt_unconn_ll, struct cor_neighbor *nb,
		__u32 rcvd_connid, int use_rcvd_connid)
{
	unsigned long iflags;
	int rc = 0;
	struct cor_conn *src_unconn_ll = trgt_unconn_ll->reversedir;

	BUG_ON(trgt_unconn_ll->targettype != TARGET_UNCONNECTED);
	BUG_ON(src_unconn_ll == 0);
	BUG_ON(src_unconn_ll->sourcetype != SOURCE_UNCONNECTED);

	memset(&(trgt_unconn_ll->target.out), 0,
			sizeof(trgt_unconn_ll->target.out));
	memset(&(src_unconn_ll->source.in), 0,
			sizeof(src_unconn_ll->source.in));

	trgt_unconn_ll->targettype = TARGET_OUT;
	src_unconn_ll->sourcetype = SOURCE_IN;

	if (use_rcvd_connid) {
		BUG_ON((rcvd_connid & (1 << 31)) == 0);

		src_unconn_ll->source.in.conn_id = rcvd_connid;
		if (unlikely(cor_insert_connid(nb, src_unconn_ll) != 0)) {
			src_unconn_ll->source.in.conn_id = 0;
			rc = 1;
			goto out_err;
		}
	} else {
		src_unconn_ll->source.in.cir = kmem_cache_alloc(
				cor_connid_reuse_slab, GFP_ATOMIC);
		if (unlikely(src_unconn_ll->source.in.cir == 0)) {
			rc = 1;
			goto out_err;
		}
		memset(src_unconn_ll->source.in.cir, 0,
				sizeof(struct cor_connid_reuse_item));

		if (unlikely(cor_connid_alloc(nb, src_unconn_ll))) {
			rc = 1;
			goto out_freecir;
		}
	}

	trgt_unconn_ll->target.out.nb = nb;
	src_unconn_ll->source.in.nb = nb;

	/* neighbor pointer */
	kref_get(&(nb->ref));
	kref_get(&(nb->ref));

	INIT_LIST_HEAD(&(src_unconn_ll->source.in.reorder_queue));
	INIT_LIST_HEAD(&(src_unconn_ll->source.in.acks_pending));
	INIT_LIST_HEAD(&(trgt_unconn_ll->target.out.retrans_list));

	cor_reset_seqno(trgt_unconn_ll, 0);
	if (use_rcvd_connid == 0) {
		get_random_bytes((char *)
				&(trgt_unconn_ll->target.out.seqno_nextsend),
				sizeof(
				trgt_unconn_ll->target.out.seqno_nextsend));
		trgt_unconn_ll->target.out.seqno_acked =
				trgt_unconn_ll->target.out.seqno_nextsend;
		trgt_unconn_ll->target.out.seqno_windowlimit =
				trgt_unconn_ll->target.out.seqno_nextsend;
		cor_reset_seqno(trgt_unconn_ll,
				trgt_unconn_ll->target.out.seqno_nextsend);

		get_random_bytes((char *)
				&(src_unconn_ll->source.in.next_seqno),
				sizeof(src_unconn_ll->source.in.next_seqno));
		src_unconn_ll->source.in.window_seqnolimit =
				src_unconn_ll->source.in.next_seqno;
		src_unconn_ll->source.in.window_seqnolimit_remote =
				src_unconn_ll->source.in.next_seqno;
	}

	get_random_bytes((char *) &(trgt_unconn_ll->target.out.priority_seqno),
			sizeof(trgt_unconn_ll->target.out.priority_seqno));
	trgt_unconn_ll->source.in.priority_seqno = 0;

	src_unconn_ll->source.in.jiffies_last_act = jiffies;

	trgt_unconn_ll->target.out.jiffies_idle_since =
			jiffies << JIFFIES_LAST_IDLE_SHIFT;

	spin_lock_irqsave(&(nb->conn_list_lock), iflags);
	list_add_tail(&(src_unconn_ll->source.in.nb_list),
			&(nb->rcv_conn_list));
	spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

	/* neighbor lists */
	kref_get(&(src_unconn_ll->ref));

	if (src_unconn_ll->is_client)
		atomic_inc(&cor_num_conns);

	if (use_rcvd_connid == 0)
		cor_update_windowlimit(src_unconn_ll);
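	/*
	 * Error-path idiom: the labels below live inside an "if (0)" block,
	 * so normal execution falls through to the return while the labels
	 * stay reachable via the gotos above.
	 */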
	if (0) {
out_freecir:
		kmem_cache_free(cor_connid_reuse_slab,
				src_unconn_ll->source.in.cir);
		src_unconn_ll->source.in.cir = 0;
out_err:
		trgt_unconn_ll->targettype = TARGET_UNCONNECTED;
		src_unconn_ll->sourcetype = SOURCE_UNCONNECTED;
	}

	return rc;
}
void cor_conn_init_sock_source(struct cor_conn *cn)
{
	BUG_ON(cn == 0);
	cn->sourcetype = SOURCE_SOCK;
	memset(&(cn->source.sock), 0, sizeof(cn->source.sock));
	cn->source.sock.priority = PRIORITY_MAX;
	cn->source.sock.snd_speed.jiffies_last_refresh = jiffies;
	cn->source.sock.snd_speed.flushed = 1;
}
void cor_conn_init_sock_target(struct cor_conn *cn)
{
	BUG_ON(cn == 0);
	cn->targettype = TARGET_SOCK;
	memset(&(cn->target.sock), 0, sizeof(cn->target.sock));
	cor_reset_seqno(cn, 0);
}
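
/*
 * Conns are always allocated in pairs: cor_alloc_conn() returns one
 * direction and links the other via reversedir; both start out
 * SOURCE_UNCONNECTED/TARGET_UNCONNECTED.
 */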
struct cor_conn *cor_alloc_conn(gfp_t allocflags, __u8 is_highlatency)
{
	struct cor_conn *cn1 = 0;
	struct cor_conn *cn2 = 0;

	cn1 = kmem_cache_alloc(cor_conn_slab, allocflags);
	if (unlikely(cn1 == 0))
		goto out_err0;

	cn2 = kmem_cache_alloc(cor_conn_slab, allocflags);
	if (unlikely(cn2 == 0))
		goto out_err1;

	memset(cn1, 0, sizeof(struct cor_conn));
	memset(cn2, 0, sizeof(struct cor_conn));

	cn1->reversedir = cn2;
	cn2->reversedir = cn1;

	kref_init(&(cn1->ref));
	kref_init(&(cn2->ref));

	cn1->sourcetype = SOURCE_UNCONNECTED;
	cn2->sourcetype = SOURCE_UNCONNECTED;
	cn1->targettype = TARGET_UNCONNECTED;
	cn2->targettype = TARGET_UNCONNECTED;

	cn1->isreset = 0;
	cn2->isreset = 0;

	spin_lock_init(&(cn1->rcv_lock));
	spin_lock_init(&(cn2->rcv_lock));

	cor_databuf_init(cn1);
	cor_databuf_init(cn2);

	cor_bufsize_init(cn1, 0);
	cor_bufsize_init(cn2, 0);

	if (is_highlatency == 0) {
		cn1->is_highlatency = 0;
		cn2->is_highlatency = 0;
		cn1->bufsize.bufsize =
				(BUFSIZE_INITIAL_LOWLAT << BUFSIZE_SHIFT);
		cn2->bufsize.bufsize =
				(BUFSIZE_INITIAL_LOWLAT << BUFSIZE_SHIFT);
	} else {
		cn1->is_highlatency = 1;
		cn2->is_highlatency = 1;
		cn1->bufsize.bufsize =
				(BUFSIZE_INITIAL_HIGHLAT << BUFSIZE_SHIFT);
		cn2->bufsize.bufsize =
				(BUFSIZE_INITIAL_HIGHLAT << BUFSIZE_SHIFT);
	}

	return cn1;

out_err1:
	kmem_cache_free(cor_conn_slab, cn1);
out_err0:
	return 0;
}
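
/* All callers in this file hold cor_bindnodes while walking cor_openports. */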
static struct cor_sock *cor_get_corsock_by_port(__be32 port)
{
	struct list_head *curr = cor_openports.next;

	while (curr != &cor_openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);
		if (cs->data.listener.port == port)
			return cs;

		curr = curr->next;
	}

	return 0;
}
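
/*
 * Illustrative layout for cor_list_services(): with two published ports and
 * a one-byte encoded rowcount (rc == 1), the loop writes the 8 port bytes at
 * buf[4..11]; the memmove then shifts them down to buf[1..8] and the
 * function returns 9 (1 header byte + 8 payload bytes).
 */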
__u32 cor_list_services(char *buf, __u32 buflen)
{
	__u32 cnt = 0;

	__u32 buf_offset = 4;

	struct list_head *curr;
	int rc;

	/*
	 * The variable length header rowcount needs to be generated after the
	 * data. This is done by reserving the maximum space it could take. If
	 * it ends up being smaller, the data is moved so that there is no gap.
	 */

	BUG_ON(buf == 0);
	BUG_ON(buflen < buf_offset);

	spin_lock_bh(&cor_bindnodes);

	curr = cor_openports.next;
	while (curr != &cor_openports) {
		struct cor_sock *cs = container_of(curr, struct cor_sock,
				data.listener.lh);
		BUG_ON(cs->type != CS_TYPE_LISTENER);

		if (cs->data.listener.publish_service == 0)
			goto cont;

		if (unlikely(buf_offset + 4 < buf_offset) ||
				buf_offset + 4 > buflen)
			break;

		buf[buf_offset] = ((char *) &(cs->data.listener.port))[0];
		buf[buf_offset + 1] = ((char *) &(cs->data.listener.port))[1];
		buf[buf_offset + 2] = ((char *) &(cs->data.listener.port))[2];
		buf[buf_offset + 3] = ((char *) &(cs->data.listener.port))[3];
		buf_offset += 4;
		cnt++;

cont:
		curr = curr->next;
	}

	spin_unlock_bh(&cor_bindnodes);

	rc = cor_encode_len(buf, 4, cnt);
	BUG_ON(rc <= 0);
	BUG_ON(rc > 4);

	if (likely(rc < 4))
		memmove(buf + ((__u32) rc), buf + 4, buf_offset - 4);

	return buf_offset - 4 + ((__u32) rc);
}
void cor_set_publish_service(struct cor_sock *cs, __u8 value)
{
	BUG_ON(value != 0 && value != 1);

	mutex_lock(&(cs->lock));

	cs->publish_service = value;

	if (cs->type == CS_TYPE_LISTENER) {
		spin_lock_bh(&cor_bindnodes);
		cs->data.listener.publish_service = value;
		spin_unlock_bh(&cor_bindnodes);
	}

	mutex_unlock(&(cs->lock));
}
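
/*
 * Entries on a listener's conn_queue hold a reference on both conn
 * directions (taken in cor_connect_port()); draining the queue below drops
 * both again.
 */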
void cor_close_port(struct cor_sock *cs)
{
	mutex_lock(&(cs->lock));

	if (unlikely(cs->type != CS_TYPE_LISTENER))
		goto out;

	spin_lock_bh(&cor_bindnodes);

	list_del(&(cs->data.listener.lh));

	while (list_empty(&(cs->data.listener.conn_queue)) == 0) {
		struct cor_conn *src_sock_o = container_of(
				cs->data.listener.conn_queue.next,
				struct cor_conn, source.sock.cl_list);
		list_del(&(src_sock_o->source.sock.cl_list));
		cor_reset_conn(src_sock_o);
		kref_put(&(src_sock_o->reversedir->ref), cor_free_conn);
		kref_put(&(src_sock_o->ref), cor_free_conn);
	}

	spin_unlock_bh(&cor_bindnodes);

out:
	mutex_unlock(&(cs->lock));
}
int cor_open_port(struct cor_sock *cs_l, __be32 port)
{
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);

	if (cor_get_corsock_by_port(port) != 0) {
		rc = -EADDRINUSE;
		goto out;
	}

	BUG_ON(cs_l->type != CS_TYPE_UNCONNECTED);

	cs_l->type = CS_TYPE_LISTENER;
	cs_l->data.listener.port = port;
	cs_l->data.listener.publish_service = cs_l->publish_service;

	/* kref is not used here */
	INIT_LIST_HEAD(&(cs_l->data.listener.conn_queue));

	list_add_tail((struct list_head *) &(cs_l->data.listener.lh),
			&cor_openports);

out:
	spin_unlock_bh(&cor_bindnodes);

	return rc;
}
/**
 * rc == 0 connected
 * rc == 2 port not open
 * rc == 3 listener queue full
 */
int cor_connect_port(struct cor_conn *trgt_unconn_ll, __be32 port)
{
	struct cor_sock *cs;
	int rc = 0;

	spin_lock_bh(&cor_bindnodes);

	cs = cor_get_corsock_by_port(port);
	if (cs == 0) {
		rc = 2;
		goto out;
	}

	if (unlikely(cs->data.listener.queue_len >=
			cs->data.listener.queue_maxlen)) {
		if (cs->data.listener.queue_maxlen <= 0)
			rc = 2;
		else
			rc = 3;

		goto out;
	}

	kref_get(&(trgt_unconn_ll->ref));
	kref_get(&(trgt_unconn_ll->reversedir->ref));

	BUG_ON(trgt_unconn_ll->is_client != 1);
	cor_conn_init_sock_target(trgt_unconn_ll);
	cor_conn_init_sock_source(trgt_unconn_ll->reversedir);

	list_add_tail(&(trgt_unconn_ll->reversedir->source.sock.cl_list),
			&(cs->data.listener.conn_queue));
	cs->data.listener.queue_len++;
	atomic_set(&(cs->ready_to_accept), 1);
	barrier();
	cs->sk.sk_state_change(&(cs->sk));

out:
	spin_unlock_bh(&cor_bindnodes);
	return rc;
}
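
/*
 * Note: a listener with queue_maxlen <= 0 reports rc == 2 ("port not open")
 * instead of rc == 3 above, so a zero-length accept queue behaves like a
 * closed port.
 */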
/**
 * rc == 0 connected
 * rc == 3 addr not found
 * rc == 4 connid allocation or control msg allocation failed
 */
int cor_connect_neigh(struct cor_conn *trgt_unconn_ll, char *addr,
		__u16 addrlen)
{
	struct cor_control_msg_out *cm;
	struct cor_neighbor *nb = 0;

	nb = cor_find_neigh(addr, addrlen);
	if (nb == 0)
		return 3;

	cm = cor_alloc_control_msg(nb, ACM_PRIORITY_MED);
	if (unlikely(cm == 0)) {
		kref_put(&(nb->ref), cor_neighbor_free);
		return 4;
	}

	if (unlikely(cor_conn_init_out(trgt_unconn_ll, nb, 0, 0))) {
		cor_free_control_msg(cm);
		kref_put(&(nb->ref), cor_neighbor_free);
		return 4;
	}

	cor_send_connect_nb(cm, trgt_unconn_ll->target.out.conn_id,
			trgt_unconn_ll->target.out.seqno_nextsend,
			trgt_unconn_ll->reversedir->source.in.next_seqno,
			trgt_unconn_ll->reversedir);

	kref_put(&(nb->ref), cor_neighbor_free);

	return 0;
}
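
/*
 * Returns the number of references released by the teardown: the extra
 * "active conn" ref plus one per list/hashtable membership dropped here.
 * The matching kref_put() calls happen in cor_reset_conn_locked(), after
 * both directions have been reset, so neither conn is freed mid-reset.
 */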
static int _cor_reset_conn(struct cor_conn *cn_ll, int trgt_out_resetneeded)
{
	/*
	 * active conns have an additional ref to make sure that they are not
	 * freed when only one direction is referenced by the connid hashtable
	 */
	int krefput = 1;

	if (cn_ll->sourcetype == SOURCE_IN) {
		unsigned long iflags;
		struct cor_neighbor *nb = cn_ll->source.in.nb;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);
		list_del(&(cn_ll->source.in.nb_list));
		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		krefput++;

		if (cn_ll->source.in.conn_id != 0 &&
				(cn_ll->source.in.conn_id & (1 << 31)) != 0) {
			BUG_ON(cn_ll->source.in.cir != 0);
		} else if (cn_ll->source.in.conn_id != 0 &&
				(cn_ll->source.in.conn_id & (1 << 31)) == 0) {
			BUG_ON(cn_ll->source.in.cir == 0);

			kref_init(&(cn_ll->source.in.cir->ref));
			cn_ll->source.in.cir->conn_id =
					cn_ll->source.in.conn_id;
			cn_ll->source.in.cir->pingcnt =
					nb->connid_reuse_pingcnt;

			spin_lock_bh(&(nb->connid_reuse_lock));
			cor_insert_connid_reuse(nb, cn_ll->source.in.cir);
			list_add_tail(&(cn_ll->source.in.cir->lh),
					&(nb->connid_reuse_list));
			spin_unlock_bh(&(nb->connid_reuse_lock));

			cn_ll->source.in.cir = 0;
		}

		if (cn_ll->source.in.conn_id != 0) {
			spin_lock_bh(&(nb->connid_lock));
			rb_erase(&(cn_ll->source.in.rbn), &(nb->connid_rb));
			spin_unlock_bh(&(nb->connid_lock));
			krefput++;
		}
		cn_ll->source.in.conn_id = 0;

		cor_free_ack_conns(cn_ll);

		if (cn_ll->is_client)
			atomic_dec(&cor_num_conns);

		cor_reset_ooo_queue(cn_ll);
	} else if (cn_ll->sourcetype == SOURCE_SOCK) {
		if (likely(cn_ll->source.sock.cs != 0)) {
			cor_sk_write_space(cn_ll->source.sock.cs);
			kref_put(&(cn_ll->source.sock.cs->ref), cor_free_sock);
			cn_ll->source.sock.cs = 0;
		}
	}

	if (cn_ll->targettype == TARGET_UNCONNECTED) {
		if (cn_ll->target.unconnected.cmdparams != 0) {
			kfree(cn_ll->target.unconnected.cmdparams);
			cn_ll->target.unconnected.cmdparams = 0;
		}
	} else if (cn_ll->targettype == TARGET_OUT) {
		if (trgt_out_resetneeded && cn_ll->target.out.conn_id != 0) {
			cor_send_reset_conn(cn_ll->target.out.nb,
					cn_ll->target.out.conn_id, 0);
		}

		cn_ll->target.out.conn_id = 0;

		cor_cancel_all_conn_retrans(cn_ll);

		cor_qos_remove_conn(cn_ll);

		spin_lock_bh(&(cn_ll->target.out.nb->stalledconn_lock));
		if (cn_ll->target.out.nbstalled_lh.prev != 0) {
			list_del(&(cn_ll->target.out.nbstalled_lh));
			cn_ll->target.out.nbstalled_lh.prev = 0;
			cn_ll->target.out.nbstalled_lh.next = 0;
			krefput++;
		}
		spin_unlock_bh(&(cn_ll->target.out.nb->stalledconn_lock));
	} else if (cn_ll->targettype == TARGET_SOCK) {
		if (likely(cn_ll->target.sock.cs != 0)) {
			if (cn_ll->target.sock.socktype == SOCKTYPE_RAW) {
				cor_sk_data_ready(cn_ll->target.sock.cs);
			} else {
				cor_mngdsocket_readfromconn_fromatomic(
						cn_ll->target.sock.cs);
			}
			kref_put(&(cn_ll->target.sock.cs->ref), cor_free_sock);
			cn_ll->target.sock.cs = 0;
			cn_ll->target.sock.rcv_buf = 0;
		}
	}

	cor_databuf_ackdiscard(cn_ll);

	cor_account_bufspace(cn_ll);

	cor_connreset_priority(cn_ll);

	return krefput;
}
void cor_reset_conn_locked(struct cor_conn *cn_ll)
{
	int put1;
	int put2;

	int isreset1;
	int isreset2;

	BUG_ON(cn_ll->isreset <= 1 && cn_ll->reversedir->isreset >= 2);
	BUG_ON(cn_ll->isreset >= 2 && cn_ll->reversedir->isreset <= 1);

	isreset1 = cn_ll->isreset;
	if (cn_ll->isreset <= 1)
		cn_ll->isreset = 2;

	isreset2 = cn_ll->reversedir->isreset;
	if (cn_ll->reversedir->isreset <= 1)
		cn_ll->reversedir->isreset = 2;

	if (isreset1 >= 2) {
		put1 = 0;
		put2 = 0;
	} else {
		put1 = _cor_reset_conn(cn_ll, isreset1 == 0);
		put2 = _cor_reset_conn(cn_ll->reversedir, isreset2 == 0);
	}

	/*
	 * cor_free_conn must not be called before both _cor_reset_conn calls
	 * have finished
	 */
	while (put1 > 0) {
		kref_put(&(cn_ll->ref), cor_kreffree_bug);
		put1--;
	}

	while (put2 > 0) {
		kref_put(&(cn_ll->reversedir->ref), cor_kreffree_bug);
		put2--;
	}
}
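
/*
 * Lock ordering: the client side's rcv_lock is always taken first (one
 * direction of a pair acts as the client), so both branches below acquire
 * the pair's locks in a single global order and avoid ABBA deadlocks.
 */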
/* warning: do not hold the rcv_lock while calling this! */
void cor_reset_conn(struct cor_conn *cn)
{
	kref_get(&(cn->ref));
	kref_get(&(cn->reversedir->ref));

	if (cn->is_client) {
		spin_lock_bh(&(cn->rcv_lock));
		spin_lock_bh(&(cn->reversedir->rcv_lock));
	} else {
		spin_lock_bh(&(cn->reversedir->rcv_lock));
		spin_lock_bh(&(cn->rcv_lock));
	}

	cor_reset_conn_locked(cn);

	if (cn->is_client) {
		spin_unlock_bh(&(cn->rcv_lock));
		spin_unlock_bh(&(cn->reversedir->rcv_lock));
	} else {
		spin_unlock_bh(&(cn->reversedir->rcv_lock));
		spin_unlock_bh(&(cn->rcv_lock));
	}

	kref_put(&(cn->ref), cor_free_conn);
	kref_put(&(cn->reversedir->ref), cor_free_conn);
}
static int __init cor_init(void)
{
	int rc;

	struct cor_conn c;

	printk(KERN_ERR "sizeof conn: %u", (__u32) sizeof(c));
	printk(KERN_ERR " conn.source: %u", (__u32) sizeof(c.source));
	printk(KERN_ERR " conn.target: %u", (__u32) sizeof(c.target));
	printk(KERN_ERR " conn.target.out: %u", (__u32) sizeof(c.target.out));
	printk(KERN_ERR " conn.buf: %u", (__u32) sizeof(c.data_buf));

	printk(KERN_ERR "sizeof cor_neighbor: %u",
			(__u32) sizeof(struct cor_neighbor));

	printk(KERN_ERR "sizeof mutex: %u", (__u32) sizeof(struct mutex));
	printk(KERN_ERR "sizeof spinlock: %u", (__u32) sizeof(spinlock_t));
	printk(KERN_ERR "sizeof kref: %u", (__u32) sizeof(struct kref));
	printk(KERN_ERR "sizeof list_head: %u",
			(__u32) sizeof(struct list_head));
	printk(KERN_ERR "sizeof rb_root: %u", (__u32) sizeof(struct rb_root));
	printk(KERN_ERR "sizeof rb_node: %u", (__u32) sizeof(struct rb_node));

	rc = cor_util_init();
	if (unlikely(rc != 0))
		return rc;

	cor_conn_slab = kmem_cache_create("cor_conn", sizeof(struct cor_conn),
			8, 0, 0);
	if (unlikely(cor_conn_slab == 0))
		return -ENOMEM;

	cor_connid_reuse_slab = kmem_cache_create("cor_connid_reuse",
			sizeof(struct cor_connid_reuse_item), 8, 0, 0);
	if (unlikely(cor_connid_reuse_slab == 0))
		return -ENOMEM;

	atomic_set(&cor_num_conns, 0);
	barrier();

	rc = cor_forward_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_kgen_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_snd_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_neighbor_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_neigh_ann_rcv_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_dev_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rcv_init();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_managed_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_conn_src_sock_init1();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_sock_init2();
	if (unlikely(rc != 0))
		return rc;

	rc = cor_rd_init2();
	if (unlikely(rc != 0))
		return rc;

	return 0;
}
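
/*
 * Apparent teardown order: the *_exit1() calls stop producers of new work,
 * flush_scheduled_work() drains anything still queued, and the *_exit2()
 * calls release the remaining state.
 */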
static void __exit cor_exit(void)
{
	cor_rd_exit1();
	cor_sock_exit1();
	cor_conn_src_sock_exit1();
	cor_dev_exit1();

	flush_scheduled_work();

	cor_rcv_exit2();
	cor_neighbor_exit2();
	cor_neigh_ann_rcv_exit2();
	cor_snd_exit2();
	cor_rd_exit2();
	cor_kgen_exit2();
	cor_forward_exit2();

	BUG_ON(atomic_read(&cor_num_conns) != 0);

	kmem_cache_destroy(cor_conn_slab);
	cor_conn_slab = 0;

	kmem_cache_destroy(cor_connid_reuse_slab);
	cor_connid_reuse_slab = 0;
}

module_init(cor_init);
module_exit(cor_exit);
MODULE_LICENSE("GPL");