/* Maintain an RxRPC server socket to do AFS communications through
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>
#include "internal.h"
#include "afs_cm.h"

static struct socket *afs_socket; /* my RxRPC socket */
static struct workqueue_struct *afs_async_calls;
static atomic_t afs_outstanding_calls;
static atomic_t afs_outstanding_skbs;
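/* counts of outstanding calls and socket buffers; afs_close_socket() asserts
 * that both have dropped back to zero, so they act as leak detectors */
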
static void afs_wake_up_call_waiter(struct afs_call *);
static int afs_wait_for_call_to_complete(struct afs_call *);
static void afs_wake_up_async_call(struct afs_call *);
static int afs_dont_wait_for_call_to_complete(struct afs_call *);
static void afs_process_async_call(struct work_struct *);
static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);

/* synchronous call management */
const struct afs_wait_mode afs_sync_call = {
        .rx_wakeup = afs_wake_up_call_waiter,
        .wait = afs_wait_for_call_to_complete,
};

/* asynchronous call management */
const struct afs_wait_mode afs_async_call = {
        .rx_wakeup = afs_wake_up_async_call,
        .wait = afs_dont_wait_for_call_to_complete,
};

/* asynchronous incoming call management */
static const struct afs_wait_mode afs_async_incoming_call = {
        .rx_wakeup = afs_wake_up_async_call,
};

/* asynchronous incoming call initial processing */
static const struct afs_call_type afs_RXCMxxxx = {
        .name = "CB.xxxx",
        .deliver = afs_deliver_cm_op_id,
        .abort_to_error = afs_abort_to_error,
};

static void afs_collect_incoming_call(struct work_struct *);

static struct sk_buff_head afs_incoming_calls;
static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
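/* incoming-call notifications are queued on afs_incoming_calls by the packet
 * interceptor and then handled by the work item declared above */
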
/*
 * open an RxRPC socket and bind it to be a server for callback notifications
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 */
int afs_open_socket(void)
{
        struct sockaddr_rxrpc srx;
        struct socket *socket;
        int ret;

        _enter("");

        skb_queue_head_init(&afs_incoming_calls);

        afs_async_calls = create_singlethread_workqueue("kafsd");
        if (!afs_async_calls) {
                _leave(" = -ENOMEM [wq]");
                return -ENOMEM;
        }

        ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
        if (ret < 0) {
                destroy_workqueue(afs_async_calls);
                _leave(" = %d [socket]", ret);
                return ret;
        }
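
        /* memory allocations on this socket mustn't recurse back into
         * filesystem reclaim, so use GFP_NOFS */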
        socket->sk->sk_allocation = GFP_NOFS;

        /* bind the callback manager's address to make this a server socket */
        srx.srx_family = AF_RXRPC;
        srx.srx_service = CM_SERVICE;
        srx.transport_type = SOCK_DGRAM;
        srx.transport_len = sizeof(srx.transport.sin);
        srx.transport.sin.sin_family = AF_INET;
        srx.transport.sin.sin_port = htons(AFS_CM_PORT);
        memset(&srx.transport.sin.sin_addr, 0,
               sizeof(srx.transport.sin.sin_addr));

        ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
        if (ret < 0) {
                sock_release(socket);
                _leave(" = %d [bind]", ret);
                return ret;
        }

        rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor);
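        /* from this point, af_rxrpc hands received packets to
         * afs_rx_interceptor() rather than queuing them for recvmsg() */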

        afs_socket = socket;
        _leave(" = 0");
        return 0;
}

/*
 * close the RxRPC socket AFS was using
 */
void afs_close_socket(void)
{
        _enter("");

        sock_release(afs_socket);

        _debug("dework");
        destroy_workqueue(afs_async_calls);

        ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
        ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
        _leave("");
}

/*
 * note that the data in a socket buffer is now delivered and that the buffer
 * should be freed
 */
static void afs_data_delivered(struct sk_buff *skb)
{
        if (!skb) {
                _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
                dump_stack();
        } else {
                _debug("DLVR %p{%u} [%d]",
                       skb, skb->mark, atomic_read(&afs_outstanding_skbs));
                if (atomic_dec_return(&afs_outstanding_skbs) == -1)
                        BUG();
                rxrpc_kernel_data_delivered(skb);
        }
}

/*
 * free a socket buffer
 */
static void afs_free_skb(struct sk_buff *skb)
{
        if (!skb) {
                _debug("FREE NULL [%d]", atomic_read(&afs_outstanding_skbs));
                dump_stack();
        } else {
                _debug("FREE %p{%u} [%d]",
                       skb, skb->mark, atomic_read(&afs_outstanding_skbs));
                if (atomic_dec_return(&afs_outstanding_skbs) == -1)
                        BUG();
                rxrpc_kernel_free_skb(skb);
        }
}

/*
 * free a call
 */
static void afs_free_call(struct afs_call *call)
{
        _debug("DONE %p{%s} [%d]",
               call, call->type->name, atomic_read(&afs_outstanding_calls));
        if (atomic_dec_return(&afs_outstanding_calls) == -1)
                BUG();

        ASSERTCMP(call->rxcall, ==, NULL);
        ASSERT(!work_pending(&call->async_work));
        ASSERT(skb_queue_empty(&call->rx_queue));
        ASSERT(call->type->name != NULL);

        kfree(call->request);
        kfree(call);
}

/*
 * allocate a call with flat request and reply buffers
 */
struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
                                     size_t request_size, size_t reply_size)
{
        struct afs_call *call;

        call = kzalloc(sizeof(*call), GFP_NOFS);
        if (!call)
                goto nomem_call;

        _debug("CALL %p{%s} [%d]",
               call, type->name, atomic_read(&afs_outstanding_calls));
        atomic_inc(&afs_outstanding_calls);

        call->type = type;
        call->request_size = request_size;
        call->reply_max = reply_size;

        if (request_size) {
                call->request = kmalloc(request_size, GFP_NOFS);
                if (!call->request)
                        goto nomem_free;
        }

        if (reply_size) {
                call->buffer = kmalloc(reply_size, GFP_NOFS);
                if (!call->buffer)
                        goto nomem_free;
        }

        init_waitqueue_head(&call->waitq);
        skb_queue_head_init(&call->rx_queue);
        return call;

nomem_free:
        afs_free_call(call);
nomem_call:
        return NULL;
}

/*
 * clean up a call with flat buffer
 */
void afs_flat_call_destructor(struct afs_call *call)
{
        _enter("");

        kfree(call->request);
        call->request = NULL;
        kfree(call->buffer);
        call->buffer = NULL;
}

/*
 * attach the data from a bunch of pages on an inode to a call
 */
static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
                          struct kvec *iov)
{
        struct page *pages[8];
        unsigned count, n, loop, offset, to;
        pgoff_t first = call->first, last = call->last;
        int ret;

        _enter("");

        offset = call->first_offset;
        call->first_offset = 0;
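
        /* transmit the pages in batches of up to eight, kmapping each page in
         * turn and sending its contents as one data segment */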
        do {
                _debug("attach %lx-%lx", first, last);

                count = last - first + 1;
                if (count > ARRAY_SIZE(pages))
                        count = ARRAY_SIZE(pages);
                n = find_get_pages_contig(call->mapping, first, count, pages);
                ASSERTCMP(n, ==, count);

                loop = 0;
                do {
                        msg->msg_flags = 0;
                        to = PAGE_SIZE;
                        if (first + loop >= last)
                                to = call->last_to;
                        else
                                msg->msg_flags = MSG_MORE;
                        iov->iov_base = kmap(pages[loop]) + offset;
                        iov->iov_len = to - offset;
                        offset = 0;

                        _debug("- range %u-%u%s",
                               offset, to, msg->msg_flags ? " [more]" : "");
                        msg->msg_iov = (struct iovec *) iov;
                        msg->msg_iovlen = 1;

                        /* have to change the state *before* sending the last
                         * packet as RxRPC might give us the reply before it
                         * returns from sending the request */
                        if (first + loop >= last)
                                call->state = AFS_CALL_AWAIT_REPLY;
                        ret = rxrpc_kernel_send_data(call->rxcall, msg,
                                                     to - offset);
                        kunmap(pages[loop]);
                        if (ret < 0)
                                break;
                } while (++loop < count);
                first += count;

                for (loop = 0; loop < count; loop++)
                        put_page(pages[loop]);
                if (ret < 0)
                        break;
        } while (first <= last);

        _leave(" = %d", ret);
        return ret;
}

/*
 * initiate a call
 */
int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
                  const struct afs_wait_mode *wait_mode)
{
        struct sockaddr_rxrpc srx;
        struct rxrpc_call *rxcall;
        struct msghdr msg;
        struct kvec iov[1];
        int ret;

        _enter("%x,{%d},", addr->s_addr, ntohs(call->port));

        ASSERT(call->type != NULL);
        ASSERT(call->type->name != NULL);

        _debug("____MAKE %p{%s,%x} [%d]____",
               call, call->type->name, key_serial(call->key),
               atomic_read(&afs_outstanding_calls));

        call->wait_mode = wait_mode;
        INIT_WORK(&call->async_work, afs_process_async_call);

        memset(&srx, 0, sizeof(srx));
        srx.srx_family = AF_RXRPC;
        srx.srx_service = call->service_id;
        srx.transport_type = SOCK_DGRAM;
        srx.transport_len = sizeof(srx.transport.sin);
        srx.transport.sin.sin_family = AF_INET;
        srx.transport.sin.sin_port = call->port;
        memcpy(&srx.transport.sin.sin_addr, addr, 4);

        /* create a call */
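        /* (the afs_call pointer doubles as rxrpc's user call ID so that the
         * interceptor can route incoming packets straight back to it) */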
        rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
                                         (unsigned long) call, gfp);
        call->key = NULL;
        if (IS_ERR(rxcall)) {
                ret = PTR_ERR(rxcall);
                goto error_kill_call;
        }

        call->rxcall = rxcall;

        /* send the request */
        iov[0].iov_base = call->request;
        iov[0].iov_len = call->request_size;

        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_iov = (struct iovec *) iov;
        msg.msg_iovlen = 1;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = (call->send_pages ? MSG_MORE : 0);

        /* have to change the state *before* sending the last packet as RxRPC
         * might give us the reply before it returns from sending the
         * request */
        if (!call->send_pages)
                call->state = AFS_CALL_AWAIT_REPLY;
        ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
        if (ret < 0)
                goto error_do_abort;

        if (call->send_pages) {
                ret = afs_send_pages(call, &msg, iov);
                if (ret < 0)
                        goto error_do_abort;
        }

        /* at this point, an async call may no longer exist as it may have
         * already completed */
        return wait_mode->wait(call);

error_do_abort:
        rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
        rxrpc_kernel_end_call(rxcall);
        call->rxcall = NULL;
error_kill_call:
        call->type->destructor(call);
        afs_free_call(call);
        _leave(" = %d", ret);
        return ret;
}

/*
 * handles intercepted messages that were arriving in the socket's Rx queue
 * - called with the socket receive queue lock held to ensure message ordering
 * - called with softirqs disabled
 */
static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
                               struct sk_buff *skb)
{
        struct afs_call *call = (struct afs_call *) user_call_ID;

        _enter("%p,,%u", call, skb->mark);

        _debug("ICPT %p{%u} [%d]",
               skb, skb->mark, atomic_read(&afs_outstanding_skbs));

        ASSERTCMP(sk, ==, afs_socket->sk);
        atomic_inc(&afs_outstanding_skbs);

        if (!call) {
                /* it's an incoming call for our callback service */
                skb_queue_tail(&afs_incoming_calls, skb);
                schedule_work(&afs_collect_incoming_call_work);
        } else {
                /* route the messages directly to the appropriate call */
                skb_queue_tail(&call->rx_queue, skb);
                call->wait_mode->rx_wakeup(call);
        }

        _leave("");
}

/*
 * deliver messages to a call
 */
static void afs_deliver_to_call(struct afs_call *call)
{
        struct sk_buff *skb;
        bool last;
        u32 abort_code;
        int ret;

        _enter("");
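
        /* drain the Rx queue for as long as the call is still expecting
         * further packets */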
        while ((call->state == AFS_CALL_AWAIT_REPLY ||
                call->state == AFS_CALL_AWAIT_OP_ID ||
                call->state == AFS_CALL_AWAIT_REQUEST ||
                call->state == AFS_CALL_AWAIT_ACK) &&
               (skb = skb_dequeue(&call->rx_queue))) {
                switch (skb->mark) {
                case RXRPC_SKB_MARK_DATA:
                        _debug("Rcv DATA");
                        last = rxrpc_kernel_is_data_last(skb);
                        ret = call->type->deliver(call, skb, last);
                        switch (ret) {
                        case 0:
                                if (last &&
                                    call->state == AFS_CALL_AWAIT_REPLY)
                                        call->state = AFS_CALL_COMPLETE;
                                break;
                        case -ENOTCONN:
                                abort_code = RX_CALL_DEAD;
                                goto do_abort;
                        case -ENOTSUPP:
                                abort_code = RX_INVALID_OPERATION;
                                goto do_abort;
                        default:
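                                /* unmarshalling failure: pick the client or
                                 * server side rxgen abort code depending on
                                 * whether we were awaiting a reply */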
                                abort_code = RXGEN_CC_UNMARSHAL;
                                if (call->state != AFS_CALL_AWAIT_REPLY)
                                        abort_code = RXGEN_SS_UNMARSHAL;
                        do_abort:
                                rxrpc_kernel_abort_call(call->rxcall,
                                                        abort_code);
                                call->error = ret;
                                call->state = AFS_CALL_ERROR;
                                break;
                        }
                        afs_data_delivered(skb);
                        skb = NULL;
                        continue;
                case RXRPC_SKB_MARK_FINAL_ACK:
                        _debug("Rcv ACK");
                        call->state = AFS_CALL_COMPLETE;
                        break;
                case RXRPC_SKB_MARK_BUSY:
                        _debug("Rcv BUSY");
                        call->error = -EBUSY;
                        call->state = AFS_CALL_BUSY;
                        break;
                case RXRPC_SKB_MARK_REMOTE_ABORT:
                        abort_code = rxrpc_kernel_get_abort_code(skb);
                        call->error = call->type->abort_to_error(abort_code);
                        call->state = AFS_CALL_ABORTED;
                        _debug("Rcv ABORT %u -> %d", abort_code, call->error);
                        break;
                case RXRPC_SKB_MARK_NET_ERROR:
                        call->error = -rxrpc_kernel_get_error_number(skb);
                        call->state = AFS_CALL_ERROR;
                        _debug("Rcv NET ERROR %d", call->error);
                        break;
                case RXRPC_SKB_MARK_LOCAL_ERROR:
                        call->error = -rxrpc_kernel_get_error_number(skb);
                        call->state = AFS_CALL_ERROR;
                        _debug("Rcv LOCAL ERROR %d", call->error);
                        break;
                default:
                        BUG();
                        break;
                }

                afs_free_skb(skb);
        }

        /* make sure the queue is empty if the call is done with (we might have
         * aborted the call early because of an unmarshalling error) */
        if (call->state >= AFS_CALL_COMPLETE) {
                while ((skb = skb_dequeue(&call->rx_queue)))
                        afs_free_skb(skb);
                if (call->incoming) {
                        rxrpc_kernel_end_call(call->rxcall);
                        call->rxcall = NULL;
                        call->type->destructor(call);
                        afs_free_call(call);
                }
        }

        _leave("");
}

/*
 * wait synchronously for a call to complete
 */
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
        struct sk_buff *skb;
        int ret;

        DECLARE_WAITQUEUE(myself, current);

        _enter("");

        add_wait_queue(&call->waitq, &myself);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                /* deliver any messages that are in the queue */
                if (!skb_queue_empty(&call->rx_queue)) {
                        __set_current_state(TASK_RUNNING);
                        afs_deliver_to_call(call);
                        continue;
                }

                ret = call->error;
                if (call->state >= AFS_CALL_COMPLETE)
                        break;
                ret = -EINTR;
                if (signal_pending(current))
                        break;
                schedule();
        }

        remove_wait_queue(&call->waitq, &myself);
        __set_current_state(TASK_RUNNING);

        /* kill the call */
        if (call->state < AFS_CALL_COMPLETE) {
                _debug("call incomplete");
                rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD);
                while ((skb = skb_dequeue(&call->rx_queue)))
                        afs_free_skb(skb);
        }

        _debug("call complete");
        rxrpc_kernel_end_call(call->rxcall);
        call->rxcall = NULL;
        call->type->destructor(call);
        afs_free_call(call);
        _leave(" = %d", ret);
        return ret;
}

/*
 * wake up a waiting call
 */
static void afs_wake_up_call_waiter(struct afs_call *call)
{
        wake_up(&call->waitq);
}

/*
 * wake up an asynchronous call
 */
static void afs_wake_up_async_call(struct afs_call *call)
{
        _enter("");
        queue_work(afs_async_calls, &call->async_work);
}

/*
 * put a call into asynchronous mode
 * - mustn't touch the call descriptor as the call may have completed by the
 *   time we get here
 */
static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
{
        _enter("");
        return -EINPROGRESS;
}

/*
 * delete an asynchronous call
 */
static void afs_delete_async_call(struct work_struct *work)
{
        struct afs_call *call =
                container_of(work, struct afs_call, async_work);

        _enter("");

        afs_free_call(call);

        _leave("");
}

/*
 * perform processing on an asynchronous call
 * - on a multiple-thread workqueue this work item may try to run on several
 *   CPUs at the same time
 */
static void afs_process_async_call(struct work_struct *work)
{
        struct afs_call *call =
                container_of(work, struct afs_call, async_work);

        _enter("");

        if (!skb_queue_empty(&call->rx_queue))
                afs_deliver_to_call(call);

        if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) {
                if (call->wait_mode->async_complete)
                        call->wait_mode->async_complete(call->reply,
                                                        call->error);
                call->reply = NULL;

                /* kill the call */
                rxrpc_kernel_end_call(call->rxcall);
                call->rxcall = NULL;
                if (call->type->destructor)
                        call->type->destructor(call);

                /* we can't just delete the call because the work item may be
                 * queued */
                PREPARE_WORK(&call->async_work, afs_delete_async_call);
                queue_work(afs_async_calls, &call->async_work);
        }

        _leave("");
}

/*
 * empty a socket buffer into a flat reply buffer
 */
void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
{
        size_t len = skb->len;
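
        /* note: there is no bounds check against call->reply_max here; the
         * caller is responsible for sizing the reply buffer appropriately */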
        if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0)
                BUG();
        call->reply_size += len;
}

/*
 * accept the backlog of incoming calls
 */
static void afs_collect_incoming_call(struct work_struct *work)
{
        struct rxrpc_call *rxcall;
        struct afs_call *call = NULL;
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&afs_incoming_calls))) {
                _debug("new call");

                /* don't need the notification */
                afs_free_skb(skb);

                if (!call) {
                        call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
                        if (!call) {
                                rxrpc_kernel_reject_call(afs_socket);
                                return;
                        }

                        INIT_WORK(&call->async_work, afs_process_async_call);
                        call->wait_mode = &afs_async_incoming_call;
                        call->type = &afs_RXCMxxxx;
                        init_waitqueue_head(&call->waitq);
                        skb_queue_head_init(&call->rx_queue);
                        call->state = AFS_CALL_AWAIT_OP_ID;

                        _debug("CALL %p{%s} [%d]",
                               call, call->type->name,
                               atomic_read(&afs_outstanding_calls));
                        atomic_inc(&afs_outstanding_calls);
                }

                rxcall = rxrpc_kernel_accept_call(afs_socket,
                                                  (unsigned long) call);
                if (!IS_ERR(rxcall)) {
                        call->rxcall = rxcall;
                        call = NULL;
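                        /* the accepted call now owns this afs_call; a fresh
                         * one will be allocated on the next iteration */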
                }
        }

        if (call)
                afs_free_call(call);
}

/*
 * grab the operation ID from an incoming cache manager call
 */
static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
                                bool last)
{
        size_t len = skb->len;
        void *oibuf = (void *) &call->operation_ID;

        _enter("{%u},{%zu},%d", call->offset, len, last);

        ASSERTCMP(call->offset, <, 4);

        /* the operation ID forms the first four bytes of the request data */
        len = min_t(size_t, len, 4 - call->offset);
        if (skb_copy_bits(skb, 0, oibuf + call->offset, len) < 0)
                BUG();
        if (!pskb_pull(skb, len))
                BUG();
        call->offset += len;

        if (call->offset < 4) {
                if (last) {
                        _leave(" = -EBADMSG [op ID short]");
                        return -EBADMSG;
                }
                _leave(" = 0 [incomplete]");
                return 0;
        }

        call->state = AFS_CALL_AWAIT_REQUEST;

        /* ask the cache manager to route the call (it'll change the call type
         * if successful) */
        if (!afs_cm_incoming_call(call))
                return -ENOTSUPP;

        /* pass responsibility for the remainder of this message off to the
         * cache manager op */
        return call->type->deliver(call, skb, last);
}

/*
 * send an empty reply
 */
void afs_send_empty_reply(struct afs_call *call)
{
        struct msghdr msg;
        struct iovec iov[1];

        _enter("");

        iov[0].iov_base = NULL;
        iov[0].iov_len = 0;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_iov = iov;
        msg.msg_iovlen = 0;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        call->state = AFS_CALL_AWAIT_ACK;
        switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) {
        case 0:
                _leave(" [replied]");
                return;

        case -ENOMEM:
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
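                /* fall through */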
        default:
                rxrpc_kernel_end_call(call->rxcall);
                call->rxcall = NULL;
                call->type->destructor(call);
                afs_free_call(call);
                _leave(" [error]");
                return;
        }
}

/*
 * send a simple reply
 */
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
        struct msghdr msg;
        struct iovec iov[1];
        int n;

        _enter("");

        iov[0].iov_base = (void *) buf;
        iov[0].iov_len = len;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_iov = iov;
        msg.msg_iovlen = 1;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        call->state = AFS_CALL_AWAIT_ACK;
        n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
        if (n >= 0) {
                _leave(" [replied]");
                return;
        }
        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
        }
        rxrpc_kernel_end_call(call->rxcall);
        call->rxcall = NULL;
        call->type->destructor(call);
        afs_free_call(call);
        _leave(" [error]");
}

/*
 * extract a piece of data from the received data socket buffers
 */
int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
                     bool last, void *buf, size_t count)
{
        size_t len = skb->len;

        _enter("{%u},{%zu},%d,,%zu", call->offset, len, last, count);

        ASSERTCMP(call->offset, <, count);

        len = min_t(size_t, len, count - call->offset);
        if (skb_copy_bits(skb, 0, buf + call->offset, len) < 0 ||
            !pskb_pull(skb, len))
                BUG();
        call->offset += len;

        if (call->offset < count) {
                if (last) {
                        _leave(" = -EBADMSG [%d < %zu]", call->offset, count);
                        return -EBADMSG;
                }
                _leave(" = -EAGAIN");
                return -EAGAIN;
        }
        return 0;
}