net/rxrpc/ar-connection.c
/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static unsigned long rxrpc_connection_timeout = 10 * 60;
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
/*
 * allocate a new client connection bundle
 */
static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle;

	_enter("");

	bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
	if (bundle) {
		INIT_LIST_HEAD(&bundle->unused_conns);
		INIT_LIST_HEAD(&bundle->avail_conns);
		INIT_LIST_HEAD(&bundle->busy_conns);
		init_waitqueue_head(&bundle->chanwait);
		atomic_set(&bundle->usage, 1);
	}

	_leave(" = %p", bundle);
	return bundle;
}
/*
 * compare bundle parameters with what we're looking for
 * - return -ve, 0 or +ve
 */
static inline
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
		     struct key *key, __be16 service_id)
{
	return (bundle->service_id - service_id) ?:
		((unsigned long) bundle->key - (unsigned long) key);
}
/*
 * get bundle of client connections that a client socket can make use of
 */
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
					   struct rxrpc_transport *trans,
					   struct key *key,
					   __be16 service_id,
					   gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p{%x},%x,%hx,",
	       rx, key_serial(key), trans->debug_id, ntohs(service_id));

	if (rx->trans == trans && rx->bundle) {
		atomic_inc(&rx->bundle->usage);
		return rx->bundle;
	}

	/* search the extant bundles first for one that matches the specified
	 * user ID */
	spin_lock(&trans->client_lock);

	p = trans->bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_conn_bundle, node);

		if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
			p = p->rb_left;
		else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
			p = p->rb_right;
		else
			goto found_extant_bundle;
	}

	spin_unlock(&trans->client_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_bundle(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->key = key_get(key);
	candidate->service_id = service_id;

	spin_lock(&trans->client_lock);

	pp = &trans->bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);

		if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
			pp = &(*pp)->rb_left;
		else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new bundle */
	bundle = candidate;
	candidate = NULL;

	rb_link_node(&bundle->node, parent, pp);
	rb_insert_color(&bundle->node, &trans->bundles);
	spin_unlock(&trans->client_lock);
	_net("BUNDLE new on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [new]", bundle);
	return bundle;

	/* we found the bundle in the list immediately */
found_extant_bundle:
	atomic_inc(&bundle->usage);
	spin_unlock(&trans->client_lock);
	_net("BUNDLE old on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
	return bundle;

	/* we found the bundle on the second time through the list */
found_extant_second:
	atomic_inc(&bundle->usage);
	spin_unlock(&trans->client_lock);
	kfree(candidate);
	_net("BUNDLE old2 on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
	return bundle;
}
/*
 * release a bundle
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));

	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}

	_leave("");
}
/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->bundle_link);
		conn->calls = RB_ROOT;
		skb_queue_head_init(&conn->rx_queue);
		rwlock_init(&conn->lock);
		spin_lock_init(&conn->state_lock);
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->avail_calls = RXRPC_MAXCALLS;
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}
/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	__be32 epoch;
	u32 real_conn_id;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	real_conn_id = conn->trans->conn_idcounter;

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (real_conn_id < xconn->real_conn_id)
			p = &(*p)->rb_left;
		else if (real_conn_id > xconn->real_conn_id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->real_conn_id = real_conn_id;
	conn->cid = htonl(real_conn_id);
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		real_conn_id += RXRPC_CID_INC;
		if (real_conn_id < RXRPC_CID_INC) {
			real_conn_id = RXRPC_CID_INC;
			conn->trans->conn_idcounter = real_conn_id;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    real_conn_id < xconn->real_conn_id)
			goto attempt_insertion;
	}
}
/*
 * add a call to a connection's call-by-ID tree
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
				      struct rxrpc_call *call)
{
	struct rxrpc_call *xcall;
	struct rb_node *parent, **p;
	__be32 call_id;

	write_lock_bh(&conn->lock);

	call_id = call->call_id;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xcall = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < xcall->call_id)
			p = &(*p)->rb_left;
		else if (call_id > xcall->call_id)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);

	write_unlock_bh(&conn->lock);
}
/*
 * connect a call on an exclusive connection
 */
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
				   struct rxrpc_transport *trans,
				   __be16 service_id,
				   struct rxrpc_call *call,
				   gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int chan, ret;

	_enter("");

	conn = rx->conn;
	if (!conn) {
		/* not yet present - create a candidate for a new connection
		 * and then redo the check */
		conn = rxrpc_alloc_connection(gfp);
		if (!conn) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		conn->trans = trans;
		conn->bundle = NULL;
		conn->service_id = service_id;
		conn->epoch = rxrpc_epoch;
		conn->in_clientflag = 0;
		conn->out_clientflag = RXRPC_CLIENT_INITIATED;
		conn->cid = 0;
		conn->state = RXRPC_CONN_CLIENT;
		conn->avail_calls = RXRPC_MAXCALLS - 1;
		conn->security_level = rx->min_sec_level;
		conn->key = key_get(rx->key);

		ret = rxrpc_init_client_conn_security(conn);
		if (ret < 0) {
			key_put(conn->key);
			kfree(conn);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&conn->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);
		atomic_inc(&trans->usage);

		_net("CONNECT EXCL new %d on TRANS %d",
		     conn->debug_id, conn->trans->debug_id);

		rxrpc_assign_connection_id(conn);
		rx->conn = conn;
	} else {
		spin_lock(&trans->client_lock);
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	goto no_free_channels;

found_channel:
	atomic_inc(&conn->usage);
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | htonl(chan);
	call->call_id = htonl(++conn->call_counter);

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, ntohl(call->call_id));

	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);
	_leave(" = 0");
	return 0;

no_free_channels:
	spin_unlock(&trans->client_lock);
	_leave(" = -ENOSR");
	return -ENOSR;
}
/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_transport *trans,
		       struct rxrpc_conn_bundle *bundle,
		       struct rxrpc_call *call,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate;
	int chan, ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,%lx,", rx, call->user_call_ID);

	if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
		return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
					       call, gfp);

	spin_lock(&trans->client_lock);
	for (;;) {
		/* see if the bundle has a call slot available */
		if (!list_empty(&bundle->avail_conns)) {
			_debug("avail");
			conn = list_entry(bundle->avail_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			if (--conn->avail_calls == 0)
				list_move(&conn->bundle_link,
					  &bundle->busy_conns);
			ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			break;
		}

		if (!list_empty(&bundle->unused_conns)) {
			_debug("unused");
			conn = list_entry(bundle->unused_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
			conn->avail_calls = RXRPC_MAXCALLS - 1;
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			list_move(&conn->bundle_link, &bundle->avail_conns);
			break;
		}

		/* need to allocate a new connection */
		_debug("get new conn [%d]", bundle->num_conns);

		spin_unlock(&trans->client_lock);

		if (signal_pending(current))
			goto interrupted;

		if (bundle->num_conns >= 20) {
			_debug("too many conns");

			if (!(gfp & __GFP_WAIT)) {
				_leave(" = -EAGAIN");
				return -EAGAIN;
			}

			add_wait_queue(&bundle->chanwait, &myself);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (bundle->num_conns < 20 ||
				    !list_empty(&bundle->unused_conns) ||
				    !list_empty(&bundle->avail_conns))
					break;
				if (signal_pending(current))
					goto interrupted_dequeue;
				schedule();
			}
			remove_wait_queue(&bundle->chanwait, &myself);
			__set_current_state(TASK_RUNNING);
			spin_lock(&trans->client_lock);
			continue;
		}

		/* not yet present - create a candidate for a new connection and then
		 * redo the check */
		candidate = rxrpc_alloc_connection(gfp);
		if (!candidate) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		candidate->trans = trans;
		candidate->bundle = bundle;
		candidate->service_id = bundle->service_id;
		candidate->epoch = rxrpc_epoch;
		candidate->in_clientflag = 0;
		candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->cid = 0;
		candidate->state = RXRPC_CONN_CLIENT;
		candidate->avail_calls = RXRPC_MAXCALLS;
		candidate->security_level = rx->min_sec_level;
		candidate->key = key_get(bundle->key);

		ret = rxrpc_init_client_conn_security(candidate);
		if (ret < 0) {
			key_put(candidate->key);
			kfree(candidate);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&candidate->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);

		list_add(&candidate->bundle_link, &bundle->unused_conns);
		bundle->num_conns++;
		atomic_inc(&bundle->usage);
		atomic_inc(&trans->usage);

		_net("CONNECT new %d on TRANS %d",
		     candidate->debug_id, candidate->trans->debug_id);

		rxrpc_assign_connection_id(candidate);
		if (candidate->security)
			candidate->security->prime_packet_security(candidate);

		/* leave the candidate lurking in zombie mode attached to the
		 * bundle until we're ready for it */
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 * - we're holding a reference on the bundle
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	ASSERT(conn->channels[0] == NULL ||
	       conn->channels[1] == NULL ||
	       conn->channels[2] == NULL ||
	       conn->channels[3] == NULL);
	BUG();

found_channel:
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | htonl(chan);
	call->call_id = htonl(++conn->call_counter);

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, ntohl(call->call_id));

	ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);

	_leave(" = 0");
	return 0;

interrupted_dequeue:
	remove_wait_queue(&bundle->chanwait, &myself);
	__set_current_state(TASK_RUNNING);
interrupted:
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}
/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_header *hdr,
			  gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 conn_id;

	_enter("");

	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	epoch = hdr->epoch;
	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
	candidate->real_conn_id = conn_id;
	candidate->state = RXRPC_CONN_SERVER;
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (conn_id < conn->real_conn_id)
			pp = &(*pp)->rb_left;
		else if (conn_id > conn->real_conn_id)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}
/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
					       struct rxrpc_header *hdr)
{
	struct rxrpc_connection *conn;
	struct rb_node *p;
	__be32 epoch;
	u32 conn_id;

	_enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);

	read_lock_bh(&trans->conn_lock);

	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
	epoch = hdr->epoch;

	if (hdr->flags & RXRPC_CLIENT_INITIATED)
		p = trans->server_conns.rb_node;
	else
		p = trans->client_conns.rb_node;

	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found;
	}

	read_unlock_bh(&trans->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	_leave(" = %p", conn);
	return conn;
}
/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

	conn->put_time = get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}
/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_clear_conn_security(conn);
	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}
/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = get_seconds();
	earliest = ULONG_MAX;

	write_lock_bh(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		spin_lock(&conn->trans->client_lock);
		write_lock(&conn->trans->conn_lock);
		reap_time = conn->put_time + rxrpc_connection_timeout;

		if (atomic_read(&conn->usage) > 0) {
			;
		} else if (reap_time <= now) {
			list_move_tail(&conn->link, &graveyard);
			if (conn->out_clientflag)
				rb_erase(&conn->node,
					 &conn->trans->client_conns);
			else
				rb_erase(&conn->node,
					 &conn->trans->server_conns);
			if (conn->bundle) {
				list_del_init(&conn->bundle_link);
				conn->bundle->num_conns--;
			}

		} else if (reap_time < earliest) {
			earliest = reap_time;
		}

		write_unlock(&conn->trans->conn_lock);
		spin_unlock(&conn->trans->client_lock);
	}
	write_unlock_bh(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}
/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

	rxrpc_connection_timeout = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}