/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *      The connection is on the rxrpc_waiting_client_conns list which is kept
 *      in to-be-granted order.  Culled conns with waiters go to the back of
 *      the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *      The connection is on the rxnet->active_client_conns list which is kept
 *      in activation order for culling purposes.
 *
 *      rxrpc_nr_active_client_conns is held incremented also.
 *
 *  (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
 *      being used to probe for service upgrade.
 *
 *  (5) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (6) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *      The connection is on the rxnet->idle_client_conns list which is kept in
 *      order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call immediately we think it is done
 *      with.  It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */
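/* A rough summary of the transitions implied by the states above, assuming
 * the ordinary (non-exclusive) lifecycle; this is illustrative, derived from
 * the description rather than an authoritative diagram:
 *
 *	INACTIVE --new call, capacity free--> ACTIVE or UPGRADE
 *	INACTIVE --new call, at capacity----> WAITING
 *	WAITING  --capacity freed-----------> ACTIVE or UPGRADE
 *	ACTIVE   --culled, calls running----> CULLED
 *	ACTIVE/CULLED --last call done, EXPOSED set--> IDLE
 *	IDLE     --expiry, extra ref dropped--> INACTIVE
 */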
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"
__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
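/* For illustration, with the defaults above: at most 1000 client connections
 * may be in use at once; an idle connection is normally discarded 2 * 60 * HZ
 * jiffies (two minutes) after its last call completes, but once the total
 * rises above the reap threshold of 900 the fast expiry of 2 * HZ jiffies
 * (two seconds) is applied instead.
 */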
/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(struct rxrpc_net *);
/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}
/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}
/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);

	conn->params		= *cp;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= cp->service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
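/* For example, with rxrpc_max_client_connections left at its default of 1000,
 * the limit above evaluates to max(1000 * 4, 1024U) = 4000, so a connection
 * whose ID differs from the IDR allocation cursor by more than 4000 in either
 * direction is marked DONT_REUSE rather than kept hanging about in the tree.
 */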
/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static int rxrpc_get_client_conn(struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level) ?:
				cmp(upgrade));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		call->service_id = candidate->service_id;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find.  We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	call->service_id = candidate->service_id;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;
	list_add(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}
/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
				struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
		conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
	} else {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
		conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	}
	rxnet->nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}
/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
				      struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
	    conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
		goto out;

	spin_lock(&rxnet->client_conn_cache_lock);

	nr_conns = rxnet->nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxnet->nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_UPGRADE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxnet->client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(rxnet, conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
	goto out_unlock;
}
/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];

	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}
/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);

	write_lock_bh(&call->state_lock);
	if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
		call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	else
		call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt to call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id	= call_id;
	chan->call_debug_id = call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}
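/* For illustration, assuming the usual rx wire format of four call channels
 * per connection (RXRPC_CIDSHIFT == 2 and RXRPC_CHANNELMASK == 3): a
 * connection given IDR slot 5 has proto.cid 0x14, so the call activated on
 * channel 2 above is assigned cid 0x16, and the channel can be recovered
 * later as call->cid & RXRPC_CHANNELMASK.
 */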
/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	case RXRPC_CONN_CLIENT_UPGRADE:
		mask = 0x01;
		break;
	default:
		return;
	}

	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}
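/* A worked example of the channel selection above, assuming the four-channel
 * mask of 0xf: with channels 0 and 2 busy, active_chans is 0x5, so avail
 * masks down to 0xa and __ffs(avail) hands the next waiting call channel 1.
 */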
/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}
/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (call->call_id)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}
/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
	rxrpc_cull_active_client_conns(rxnet);

	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
	if (ret < 0)
		goto out;

	rxrpc_animate_client_conn(rxnet, call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0) {
		rxrpc_disconnect_client_call(call);
		goto out;
	}

	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

out:
	_leave(" = %d", ret);
	return ret;
}
/*
 * Note that a connection is about to be exposed to the world.  Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}
/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}
/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
	unsigned long now = jiffies;
	unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

	timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
}
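/* Note, for illustration: with the default idle expiry this asks the reaper
 * to run no later than jiffies + 2 * 60 * HZ; timer_reduce() only ever pulls
 * the timer's expiry earlier, so a reap already scheduled sooner is left
 * untouched.
 */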
/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
	call->conn = NULL;

	spin_lock(&conn->channel_lock);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxnet->client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	ASSERTCMP(rcu_access_pointer(chan->call), ==, call);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxnet->client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_UPGRADE:
		/* Deal with termination of a service upgrade probe. */
		if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
			clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
			rxrpc_activate_channels_locked(conn);
		}
		/* fall through */
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxnet->nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	rxrpc_put_connection(conn);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		if (rxnet->idle_client_conns.next == &conn->cache_link &&
		    !rxnet->kill_all_client_conns)
			rxrpc_set_client_reap_timer(rxnet);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}
/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxnet->client_conn_cache_lock);
		nr_conns = --rxnet->nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxnet->waiting_client_conns)) {
			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(rxnet, next);
		}

		spin_unlock(&rxnet->client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}
/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}
/*
 * Kill the longest-active client connections to make room for new ones.
 */
static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxnet->nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	while (nr_active > limit) {
		ASSERT(!list_empty(&rxnet->active_client_conns));
		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
			    conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxnet->waiting_client_conns);
		}

		nr_active--;
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}
/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxnet->nr_client_conns;

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that that had on the usage count.  We deal with that here.
	 * If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer,
			     conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}
/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out.
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}