/* connection.c: Rx connection routines
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include "internal.h"
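
/* rxrpc_connection_count is only maintained when allocation accounting
 * (__RXACCT) is enabled; rxrpc_conns is the global list of extant
 * connections, guarded by rxrpc_conns_sem; rxrpc_conn_timeout is the
 * graveyard dwell time in seconds (multiplied by HZ when the reap timer
 * is armed) */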

__RXACCT_DECL(atomic_t rxrpc_connection_count);

LIST_HEAD(rxrpc_conns);
DECLARE_RWSEM(rxrpc_conns_sem);
unsigned long rxrpc_conn_timeout = 60 * 60;
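
/* a connection is not destroyed as soon as its usage count reaches zero:
 * rxrpc_put_connection() moves it to its peer's graveyard and arms the timer
 * below, and the record is only reaped if the timer expires without the
 * connection having been resurrected by a fresh lookup in the meantime */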

static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
{
	struct rxrpc_connection *conn =
		list_entry(timer, struct rxrpc_connection, timeout);

	_debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage));

	rxrpc_conn_do_timeout(conn);
}

static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
	.timed_out	= __rxrpc_conn_timeout,
};

/*****************************************************************************/
/*
 * create a new connection record
 */
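/* (the record is returned with a usage count of one and its timeout timer
 *  initialised but not armed; the caller is responsible for linking it onto
 *  the peer's lists) */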
static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
					    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *conn;

	_enter("%p", peer);

	/* allocate and initialise a connection record */
	conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
	if (!conn) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	memset(conn, 0, sizeof(struct rxrpc_connection));
	atomic_set(&conn->usage, 1);

	INIT_LIST_HEAD(&conn->link);
	INIT_LIST_HEAD(&conn->id_link);
	init_waitqueue_head(&conn->chanwait);
	spin_lock_init(&conn->lock);
	rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops);

	do_gettimeofday(&conn->atime);
	conn->mtu_size = 1024;
	conn->peer = peer;
	conn->trans = peer->trans;

	__RXACCT(atomic_inc(&rxrpc_connection_count));
	*_conn = conn;
	_leave(" = 0 (%p)", conn);

	return 0;
} /* end __rxrpc_create_connection() */

/*****************************************************************************/
/*
 * create a new connection record for outgoing connections
 */
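/* (before a new record is committed, the peer's graveyard is checked for a
 *  buried outgoing connection with the same port, security index and service
 *  ID, and any such connection is resurrected and reused instead) */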
int rxrpc_create_connection(struct rxrpc_transport *trans,
			    __be16 port,
			    __be32 addr,
			    uint16_t service_id,
			    void *security,
			    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *candidate, *conn;
	struct rxrpc_peer *peer;
	struct list_head *_p;
	__be32 connid;
	int ret;

	_enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id);

	/* get a peer record */
	ret = rxrpc_peer_lookup(trans, addr, &peer);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	/* allocate and initialise a connection record */
	ret = __rxrpc_create_connection(peer, &candidate);
	if (ret < 0) {
		rxrpc_put_peer(peer);
		_leave(" = %d", ret);
		return ret;
	}

	/* fill in the specific bits */
	candidate->addr.sin_family = AF_INET;
	candidate->addr.sin_port = port;
	candidate->addr.sin_addr.s_addr = addr;

	candidate->in_epoch = rxrpc_epoch;
	candidate->out_epoch = rxrpc_epoch;
	candidate->in_clientflag = 0;
	candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->service_id = htons(service_id);

	/* invent a unique connection ID */
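	/* (the counter is advanced in steps of RXRPC_MAXCALLS so that the
	 *  bottom bits of the CID stay clear to carry the channel number,
	 *  and the peer's ID list is kept sorted so that a clash with an
	 *  existing connection can be spotted and the next ID tried) */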
	write_lock(&peer->conn_idlock);

 try_next_id:
	connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK);
	peer->conn_idcounter += RXRPC_MAXCALLS;

	list_for_each(_p, &peer->conn_idlist) {
		conn = list_entry(_p, struct rxrpc_connection, id_link);
		if (connid == conn->conn_id)
			goto try_next_id;
		if (connid > conn->conn_id)
			break;
	}

	_debug("selected candidate conn ID %x.%u",
	       ntohl(peer->addr.s_addr), ntohl(connid));

	candidate->conn_id = connid;
	list_add_tail(&candidate->id_link, _p);

	write_unlock(&peer->conn_idlock);

	/* attach to peer */
	candidate->peer = peer;

	write_lock(&peer->conn_lock);

	/* search the peer's transport graveyard list */
	spin_lock(&peer->conn_gylock);
	list_for_each(_p, &peer->conn_graveyard) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == candidate->addr.sin_port &&
		    conn->security_ix == candidate->security_ix &&
		    conn->service_id == candidate->service_id &&
		    conn->in_clientflag == 0)
			goto found_in_graveyard;
	}
	spin_unlock(&peer->conn_gylock);

	/* pick the new candidate */
	_debug("created connection: {%08x} [out]", ntohl(candidate->conn_id));
	atomic_inc(&peer->conn_count);
	conn = candidate;
	candidate = NULL;

 make_active:
	list_add_tail(&conn->link, &peer->conn_active);
	write_unlock(&peer->conn_lock);

	if (candidate) {
		write_lock(&peer->conn_idlock);
		list_del(&candidate->id_link);
		write_unlock(&peer->conn_idlock);

		__RXACCT(atomic_dec(&rxrpc_connection_count));
		kfree(candidate);
	}
	else {
		down_write(&rxrpc_conns_sem);
		list_add_tail(&conn->proc_link, &rxrpc_conns);
		up_write(&rxrpc_conns_sem);
	}

	*_conn = conn;
	_leave(" = 0 (%p)", conn);

	return 0;

	/* handle resurrecting a connection from the graveyard */
 found_in_graveyard:
	_debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id));
	rxrpc_get_connection(conn);
	rxrpc_krxtimod_del_timer(&conn->timeout);
	list_del_init(&conn->link);
	spin_unlock(&peer->conn_gylock);
	goto make_active;
} /* end rxrpc_create_connection() */

/*****************************************************************************/
/*
 * lookup the connection for an incoming packet
 * - create a new connection record for unrecorded incoming connections
 */
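/* (only client-initiated packets may create a new record here; packets for
 *  connections we initiated must match an active or buried record, otherwise
 *  the lookup fails with -ENOENT) */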
int rxrpc_connection_lookup(struct rxrpc_peer *peer,
			    struct rxrpc_message *msg,
			    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct list_head *_p;
	int ret, fresh = 0;
	__be32 x_epoch, x_connid;
	__be16 x_port, x_servid;
	__u32 x_secix;
	u8 x_clflag;

	_enter("%p{{%hu}},%u,%hu",
	       peer,
	       peer->trans->port,
	       ntohs(msg->pkt->h.uh->source),
	       ntohs(msg->hdr.serviceId));

	x_port = msg->pkt->h.uh->source;
	x_epoch = msg->hdr.epoch;
	x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
	x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
	x_servid = msg->hdr.serviceId;
	x_secix = msg->hdr.securityIndex;
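
	/* (a connection is matched on the tuple { source port, epoch,
	 *  connection ID, security index, service ID, client-initiated flag },
	 *  compared in wire format throughout) */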

	/* [common case] search the transport's active list first */
	read_lock(&peer->conn_lock);
	list_for_each(_p, &peer->conn_active) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_active;
	}
	read_unlock(&peer->conn_lock);

	/* [uncommon case] not active
	 * - create a candidate for a new record if an inbound connection
	 * - only examine the graveyard for an outbound connection
	 */
	if (x_clflag) {
		ret = __rxrpc_create_connection(peer, &candidate);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		/* fill in the specifics */
		candidate->addr.sin_family = AF_INET;
		candidate->addr.sin_port = x_port;
		candidate->addr.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
		candidate->in_epoch = x_epoch;
		candidate->out_epoch = x_epoch;
		candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->out_clientflag = 0;
		candidate->conn_id = x_connid;
		candidate->service_id = x_servid;
		candidate->security_ix = x_secix;
	}

	/* search the active list again, just in case it appeared whilst we
	 * were busy */
	write_lock(&peer->conn_lock);
	list_for_each(_p, &peer->conn_active) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_active_second_chance;
	}

	/* search the transport's graveyard list */
	spin_lock(&peer->conn_gylock);
	list_for_each(_p, &peer->conn_graveyard) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_in_graveyard;
	}
	spin_unlock(&peer->conn_gylock);

	/* outbound connections aren't created here */
	if (!x_clflag) {
		write_unlock(&peer->conn_lock);
		_leave(" = -ENOENT");
		return -ENOENT;
	}

	/* we can now add the new candidate to the list */
	_debug("created connection: {%08x} [in]", ntohl(candidate->conn_id));
	rxrpc_get_peer(peer);
	conn = candidate;
	candidate = NULL;
	atomic_inc(&peer->conn_count);
	fresh = 1;

 make_active:
	list_add_tail(&conn->link, &peer->conn_active);

 success_uwfree:
	write_unlock(&peer->conn_lock);

	if (candidate) {
		write_lock(&peer->conn_idlock);
		list_del(&candidate->id_link);
		write_unlock(&peer->conn_idlock);

		__RXACCT(atomic_dec(&rxrpc_connection_count));
		kfree(candidate);
	}

	if (fresh) {
		down_write(&rxrpc_conns_sem);
		list_add_tail(&conn->proc_link, &rxrpc_conns);
		up_write(&rxrpc_conns_sem);
	}

 success:
	*_conn = conn;
	_leave(" = 0 (%p)", conn);
	return 0;

	/* handle the connection being found in the active list straight off */
 found_active:
	rxrpc_get_connection(conn);
	read_unlock(&peer->conn_lock);
	goto success;

	/* handle resurrecting a connection from the graveyard */
 found_in_graveyard:
	_debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id));
	rxrpc_get_peer(peer);
	rxrpc_get_connection(conn);
	rxrpc_krxtimod_del_timer(&conn->timeout);
	list_del_init(&conn->link);
	spin_unlock(&peer->conn_gylock);
	goto make_active;

	/* handle finding the connection on the second time through the active
	 * list */
 found_active_second_chance:
	rxrpc_get_connection(conn);
	goto success_uwfree;

} /* end rxrpc_connection_lookup() */

/*****************************************************************************/
/*
 * finish using a connection record
 * - it will be transferred to the peer's connection graveyard when its
 *   refcount reaches 0
 */
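/* (the usage count is decremented whilst holding the peer's conn_gylock so
 *  that burial here cannot race with resurrection in rxrpc_connection_lookup()
 *  or rxrpc_create_connection()) */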
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	if (!conn)
		return;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;
	spin_lock(&peer->conn_gylock);

	/* sanity check */
	if (atomic_read(&conn->usage) <= 0)
		BUG();

	if (likely(!atomic_dec_and_test(&conn->usage))) {
		spin_unlock(&peer->conn_gylock);
		_leave("");
		return;
	}

	/* move to graveyard queue */
	_debug("burying connection: {%08x}", ntohl(conn->conn_id));
	list_del(&conn->link);
	list_add_tail(&conn->link, &peer->conn_graveyard);

	rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);

	spin_unlock(&peer->conn_gylock);

	rxrpc_put_peer(conn->peer);

	_leave(" [killed]");
} /* end rxrpc_put_connection() */

/*****************************************************************************/
/*
 * free a connection record
 */
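/* (called when the graveyard timer expires, and also directly from
 *  rxrpc_conn_clearall(); if the connection has been resurrected in the
 *  meantime - its usage count is no longer zero - it is left alone) */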
void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;

	if (atomic_read(&conn->usage) < 0)
		BUG();

	/* remove from graveyard if still dead */
	spin_lock(&peer->conn_gylock);
	if (atomic_read(&conn->usage) == 0) {
		list_del_init(&conn->link);
	}
	else {
		conn = NULL;
	}
	spin_unlock(&peer->conn_gylock);

	if (!conn) {
		_leave("");
		return; /* resurrected */
	}

	_debug("--- Destroying Connection %p{%08x} ---",
	       conn, ntohl(conn->conn_id));

	down_write(&rxrpc_conns_sem);
	list_del(&conn->proc_link);
	up_write(&rxrpc_conns_sem);

	write_lock(&peer->conn_idlock);
	list_del(&conn->id_link);
	write_unlock(&peer->conn_idlock);

	__RXACCT(atomic_dec(&rxrpc_connection_count));
	kfree(conn);

	/* if the graveyard is now empty, wake up anyone waiting for that */
	if (atomic_dec_and_test(&peer->conn_count))
		wake_up(&peer->conn_gy_waitq);

	_leave(" [destroyed]");
} /* end rxrpc_conn_do_timeout() */

/*****************************************************************************/
/*
 * clear all connection records from a peer endpoint
 */
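/* (the graveyard is flushed by hand - cancelling each buried connection's
 *  reap timer and timing it out directly - and we then sleep until the peer's
 *  connection count reaches zero) */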
void rxrpc_conn_clearall(struct rxrpc_peer *peer)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_connection *conn;
	int err;

	_enter("%p", peer);

	/* there shouldn't be any active conns remaining */
	if (!list_empty(&peer->conn_active))
		BUG();

	/* manually timeout all conns in the graveyard */
	spin_lock(&peer->conn_gylock);
	while (!list_empty(&peer->conn_graveyard)) {
		conn = list_entry(peer->conn_graveyard.next,
				  struct rxrpc_connection, link);
		err = rxrpc_krxtimod_del_timer(&conn->timeout);
		spin_unlock(&peer->conn_gylock);

		if (err == 0)
			rxrpc_conn_do_timeout(conn);

		spin_lock(&peer->conn_gylock);
	}
	spin_unlock(&peer->conn_gylock);

	/* wait for the conn graveyard to be completely cleared */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&peer->conn_gy_waitq, &myself);

	while (atomic_read(&peer->conn_count) != 0) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&peer->conn_gy_waitq, &myself);
	set_current_state(TASK_RUNNING);

	_leave("");
} /* end rxrpc_conn_clearall() */

/*****************************************************************************/
/*
 * allocate and prepare a message for sending out through the transport
 * endpoint
 */
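/* (the first kvec slot of the message is reserved for the RxRPC wire header,
 *  so callers may supply at most three further data segments; data packets
 *  are also assigned the call's next transmit sequence number here) */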
int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
		      struct rxrpc_call *call,
		      uint8_t type,
		      int dcount,
		      struct kvec diov[],
		      int alloc_flags,
		      struct rxrpc_message **_msg)
{
	struct rxrpc_message *msg;
	int loop;

	_enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);

	if (dcount > 3) {
		_leave(" = -EINVAL");
		return -EINVAL;
	}

	msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags);
	if (!msg) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	memset(msg, 0, sizeof(*msg));
	atomic_set(&msg->usage, 1);

	INIT_LIST_HEAD(&msg->link);

	msg->state = RXRPC_MSG_PREPARED;

	msg->hdr.epoch = conn->out_epoch;
	msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0);
	msg->hdr.callNumber = call ? call->call_id : 0;
	msg->hdr.type = type;
	msg->hdr.flags = conn->out_clientflag;
	msg->hdr.securityIndex = conn->security_ix;
	msg->hdr.serviceId = conn->service_id;

	/* generate sequence numbers for data packets */
	if (call) {
		switch (type) {
		case RXRPC_PACKET_TYPE_DATA:
			msg->seq = ++call->snd_seq_count;
			msg->hdr.seq = htonl(msg->seq);
			break;
		case RXRPC_PACKET_TYPE_ACK:
			/* ACK sequence numbers are complicated. The following
			 * may be wrong:
			 * - jumbo packet ACKs should have a seq number
			 * - normal ACKs should not
			 */
		default:
			break;
		}
	}

	msg->dcount = dcount + 1;
	msg->dsize = sizeof(msg->hdr);
	msg->data[0].iov_len = sizeof(msg->hdr);
	msg->data[0].iov_base = &msg->hdr;

	for (loop = 0; loop < dcount; loop++) {
		msg->dsize += diov[loop].iov_len;
		msg->data[loop + 1].iov_len = diov[loop].iov_len;
		msg->data[loop + 1].iov_base = diov[loop].iov_base;
	}

	__RXACCT(atomic_inc(&rxrpc_message_count));
	*_msg = msg;
	_leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
	return 0;
} /* end rxrpc_conn_newmsg() */

/*****************************************************************************/
/*
 * free a message
 */
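/* (only the data buffers whose bits are set in msg->dfree are owned by the
 *  message and freed with it; the attached socket buffer, if any, and the
 *  connection reference are dropped as well) */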
void __rxrpc_put_message(struct rxrpc_message *msg)
{
	int loop;

	_enter("%p #%d", msg, atomic_read(&rxrpc_message_count));

	if (msg->pkt)
		kfree_skb(msg->pkt);
	rxrpc_put_connection(msg->conn);

	for (loop = 0; loop < 8; loop++)
		if (test_bit(loop, &msg->dfree))
			kfree(msg->data[loop].iov_base);

	__RXACCT(atomic_dec(&rxrpc_message_count));
	kfree(msg);

	_leave("");
} /* end __rxrpc_put_message() */

/*****************************************************************************/
/*
 * send a message out through the transport endpoint
 */
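/* (the per-connection serial number is allocated under conn->lock, the send
 *  is non-blocking (MSG_DONTWAIT), and on success the connection's last
 *  activity time is stamped into both the connection and the message) */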
int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
		       struct rxrpc_message *msg)
{
	struct msghdr msghdr;
	int ret;

	_enter("%p{%d}", conn, ntohs(conn->addr.sin_port));

	/* fill in some fields in the header */
	spin_lock(&conn->lock);
	msg->hdr.serial = htonl(++conn->serial_counter);
	msg->rttdone = 0;
	spin_unlock(&conn->lock);

	/* set up the message to be transmitted */
	msghdr.msg_name = &conn->addr;
	msghdr.msg_namelen = sizeof(conn->addr);
	msghdr.msg_control = NULL;
	msghdr.msg_controllen = 0;
	msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;

	_net("Sending message type %d of %Zd bytes to %08x:%d",
	     msg->hdr.type,
	     msg->dsize,
	     ntohl(conn->addr.sin_addr.s_addr),
	     ntohs(conn->addr.sin_port));

	/* send the message */
	ret = kernel_sendmsg(conn->trans->socket, &msghdr,
			     msg->data, msg->dcount, msg->dsize);
	if (ret < 0) {
		msg->state = RXRPC_MSG_ERROR;
	} else {
		msg->state = RXRPC_MSG_SENT;
		ret = 0;

		spin_lock(&conn->lock);
		do_gettimeofday(&conn->atime);
		msg->stamp = conn->atime;
		spin_unlock(&conn->lock);
	}

	_leave(" = %d", ret);

	return ret;
} /* end rxrpc_conn_sendmsg() */

/*****************************************************************************/
/*
 * deal with a subsequent call packet
 */
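/* (if no call was supplied, the packet is demultiplexed onto one of the
 *  connection's four channels by the bottom bits of the CID and the call ID
 *  must match, otherwise an immediate abort is sent back; the packet is then
 *  queued on the call's receive queue in sequence order) */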
int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
				   struct rxrpc_call *call,
				   struct rxrpc_message *msg)
{
	struct rxrpc_message *pmsg;
	struct list_head *_p;
	unsigned cix, seq;
	int ret = 0;

	_enter("%p,%p,%p", conn, call, msg);

	if (!call) {
		cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;

		spin_lock(&conn->lock);
		call = conn->channels[cix];

		if (!call || call->call_id != msg->hdr.callNumber) {
			spin_unlock(&conn->lock);
			rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
			goto out;
		}
		else {
			rxrpc_get_call(call);
			spin_unlock(&conn->lock);
		}
	}
	else {
		rxrpc_get_call(call);
	}

	_proto("Received packet %%%u [%u] on call %hu:%u:%u",
	       ntohl(msg->hdr.serial),
	       ntohl(msg->hdr.seq),
	       ntohs(msg->hdr.serviceId),
	       ntohl(conn->conn_id),
	       ntohl(call->call_id));

	call->pkt_rcv_count++;

	if (msg->pkt->dst && msg->pkt->dst->dev)
		conn->peer->if_mtu =
			msg->pkt->dst->dev->mtu -
			msg->pkt->dst->dev->hard_header_len;

	/* queue on the call in seq order */
	rxrpc_get_message(msg);
	seq = msg->seq;

	spin_lock(&call->lock);
	list_for_each(_p, &call->rcv_receiveq) {
		pmsg = list_entry(_p, struct rxrpc_message, link);
		if (pmsg->seq > seq)
			break;
	}
	list_add_tail(&msg->link, _p);

	/* reset the activity timeout */
	call->flags |= RXRPC_CALL_RCV_PKT;
	mod_timer(&call->rcv_timeout, jiffies + rxrpc_call_rcv_timeout * HZ);

	spin_unlock(&call->lock);

	rxrpc_krxiod_queue_call(call);

	rxrpc_put_call(call);
 out:
	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_conn_receive_call_packet() */

/*****************************************************************************/
/*
 * handle an ICMP error being applied to a connection
 */
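/* (references to all the calls on the connection's channels are taken under
 *  conn->lock, then the error is propagated to each call outside the lock) */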
void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
			     int local, int errno)
{
	struct rxrpc_call *calls[4];
	int loop;

	_enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);

	/* get a ref to all my calls in one go */
	memset(calls, 0, sizeof(calls));
	spin_lock(&conn->lock);

	for (loop = 3; loop >= 0; loop--) {
		if (conn->channels[loop]) {
			calls[loop] = conn->channels[loop];
			rxrpc_get_call(calls[loop]);
		}
	}

	spin_unlock(&conn->lock);

	/* now kick them all */
	for (loop = 3; loop >= 0; loop--) {
		if (calls[loop]) {
			rxrpc_call_handle_error(calls[loop], local, errno);
			rxrpc_put_call(calls[loop]);
		}
	}

	_leave("");
} /* end rxrpc_conn_handle_error() */