/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};
const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};
struct kmem_cache *rxrpc_call_jar;
static void rxrpc_call_timer_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *)_call;

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE)
		rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
}
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);
	setup_timer(&call->timer, rxrpc_call_timer_expired,
		    (unsigned long)call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->tx_total_len = -1;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;

	call->cong_cwnd = 2;
	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}
/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	call->tx_phase = true;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}
/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	ktime_t now = ktime_get_real(), expire_at;

	expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
	call->expire_at = expire_at;
	call->ack_at = expire_at;
	call->ping_at = expire_at;
	call->resend_at = expire_at;
	call->timer.expires = jiffies + LONG_MAX / 2;
	rxrpc_set_timer(call, rxrpc_timer_begin, now);
}
/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 s64 tx_total_len,
					 gfp_t gfp)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(srx, gfp);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->tx_total_len = tx_total_len;
	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	ret = -EEXIST;

error:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->service_id = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (sp->hdr.securityIndex > 0)
		call->state = RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp = skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}
/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}
/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}
/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
	}
}
/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}
/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;
	int i;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn)
		rxrpc_disconnect_call(call);

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));
		call->rxtx_buffer[i] = NULL;
	}

	_leave("");
}
/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}
/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet;
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			rxnet = rxrpc_net(sock_net(&call->socket->sk));
			write_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			write_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}
/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
}
/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	int i;

	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->conn, ==, NULL);

	/* Clean up the Rx/Tx buffer */
	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));

	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (list_empty(&rxnet->calls))
		return;

	write_lock(&rxnet->call_lock);

	while (!list_empty(&rxnet->calls)) {
		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);

		write_unlock(&rxnet->call_lock);
		cond_resched();
		write_lock(&rxnet->call_lock);
	}

	write_unlock(&rxnet->call_lock);
}