// SPDX-License-Identifier: GPL-2.0-or-later
/* connection-level event handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
/*
 * Retransmit terminal ACK or ABORT of the previous call.
 */
static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
				       struct sk_buff *skb,
				       unsigned int channel)
{
	struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
	struct rxrpc_channel *chan;
	struct msghdr msg;
	struct kvec iov[3];
	struct {
		struct rxrpc_wire_header whdr;
		union {
			__be32 abort_code;
			struct rxrpc_ackpacket ack;
		};
	} __attribute__((packed)) pkt;
	struct rxrpc_ackinfo ack_info;
	size_t len;
	int ret, ioc;
	u32 serial, mtu, call_id, padding;

	_enter("%d", conn->debug_id);

	chan = &conn->channels[channel];
	/* If the last call got moved on whilst we were waiting to run, just
	 * ignore this packet.
	 */
	call_id = READ_ONCE(chan->last_call);
	/* Sync with __rxrpc_disconnect_call() */
	smp_rmb();
	if (skb && call_id != sp->hdr.callNumber)
		return;
	msg.msg_name = &conn->params.peer->srx.transport;
	msg.msg_namelen = conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt.whdr);
	iov[1].iov_base = &padding;
	iov[1].iov_len = 3;
	iov[2].iov_base = &ack_info;
	iov[2].iov_len = sizeof(ack_info);
	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(conn->proto.cid | channel);
	pkt.whdr.callNumber = htonl(call_id);
	pkt.whdr.seq = 0;
	pkt.whdr.type = chan->last_type;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = conn->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(conn->service_id);

	len = sizeof(pkt.whdr);
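	/* The body of the packet depends on how the call terminated: an ABORT
	 * carries just the abort code, while an ACK carries the ack header,
	 * three bytes of padding and the ackinfo trailer.  ioc records how
	 * many iovecs actually need to be transmitted.
	 */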
	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		pkt.abort_code = htonl(chan->last_abort);
		iov[0].iov_len += sizeof(pkt.abort_code);
		len += sizeof(pkt.abort_code);
		ioc = 1;
		break;

	case RXRPC_PACKET_TYPE_ACK:
		mtu = conn->params.peer->if_mtu;
		mtu -= conn->params.peer->hdrsize;
		pkt.ack.bufferSpace = 0;
		pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
		pkt.ack.firstPacket = htonl(chan->last_seq + 1);
		pkt.ack.previousPacket = htonl(chan->last_seq);
		pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
		pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
		pkt.ack.nAcks = 0;
		ack_info.rxMTU = htonl(rxrpc_rx_mtu);
		ack_info.maxMTU = htonl(mtu);
		ack_info.rwind = htonl(rxrpc_rx_window_size);
		ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
		pkt.whdr.flags |= RXRPC_SLOW_START_OK;
		padding = 0;
		iov[0].iov_len += sizeof(pkt.ack);
		len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
		ioc = 3;
		break;

	default:
		return;
	}
	/* Resync with __rxrpc_disconnect_call() and check that the last call
	 * didn't get advanced whilst we were filling out the packets.
	 */
	smp_rmb();
	if (READ_ONCE(chan->last_call) != call_id)
		return;

	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);
	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		_proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
		break;
	case RXRPC_PACKET_TYPE_ACK:
		trace_rxrpc_tx_ack(chan->call_debug_id, serial,
				   ntohl(pkt.ack.firstPacket),
				   ntohl(pkt.ack.serial),
				   pkt.ack.reason, 0);
		_proto("Tx ACK %%%u [re]", serial);
		break;
	}
	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
				    rxrpc_tx_point_call_final_resend);
	else
		trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_final_resend);

	_leave("");
}
/*
 * pass a connection-level abort onto all calls on that connection
 */
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
			      enum rxrpc_call_completion compl,
			      rxrpc_serial_t serial)
{
	struct rxrpc_call *call;
	int i;

	_enter("{%d},%x", conn->debug_id, conn->abort_code);

	spin_lock(&conn->channel_lock);
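	/* Walk every channel on the connection and complete whichever call is
	 * currently bound to it; holding channel_lock keeps the
	 * channel-to-call bindings stable while we do so.
	 */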
	for (i = 0; i < RXRPC_MAXCALLS; i++) {
		call = rcu_dereference_protected(
			conn->channels[i].call,
			lockdep_is_held(&conn->channel_lock));
		if (call) {
			if (compl == RXRPC_CALL_LOCALLY_ABORTED)
				trace_rxrpc_abort(call->debug_id,
						  "CON", call->cid,
						  call->call_id, 0,
						  conn->abort_code,
						  conn->error);
			else
				trace_rxrpc_rx_abort(call, serial,
						     conn->abort_code);
			if (rxrpc_set_call_completion(call, compl,
						      conn->abort_code,
						      conn->error))
				rxrpc_notify_socket(call);
		}
	}

	spin_unlock(&conn->channel_lock);
	_leave("");
}
/*
 * generate a connection-level abort
 */
static int rxrpc_abort_connection(struct rxrpc_connection *conn,
				  int error, u32 abort_code)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	__be32 word;
	size_t len;
	u32 serial;
	int ret;

	_enter("%d,,%u,%u", conn->debug_id, error, abort_code);
	/* generate a connection-level abort */
	spin_lock_bh(&conn->state_lock);
	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		spin_unlock_bh(&conn->state_lock);
		_leave(" = 0 [already dead]");
		return 0;
	}

	conn->error = error;
	conn->abort_code = abort_code;
	conn->state = RXRPC_CONN_LOCALLY_ABORTED;
	spin_unlock_bh(&conn->state_lock);
	msg.msg_name = &conn->params.peer->srx.transport;
	msg.msg_namelen = conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;
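	/* An ABORT packet is just the wire header followed by a single
	 * network-order word carrying the abort code.
	 */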
	whdr.epoch = htonl(conn->proto.epoch);
	whdr.cid = htonl(conn->proto.cid);
	whdr.callNumber = 0;
	whdr.seq = 0;
	whdr.type = RXRPC_PACKET_TYPE_ABORT;
	whdr.flags = conn->out_clientflag;
	whdr.userStatus = 0;
	whdr.securityIndex = conn->security_ix;
	whdr._rsvd = 0;
	whdr.serviceId = htons(conn->service_id);

	word = htonl(conn->abort_code);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &word;
	iov[1].iov_len = sizeof(word);

	len = iov[0].iov_len + iov[1].iov_len;
	serial = atomic_inc_return(&conn->serial);
	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
	whdr.serial = htonl(serial);
	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	if (ret < 0) {
		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
				    rxrpc_tx_point_conn_abort);
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);

	conn->params.peer->last_tx_at = ktime_get_seconds();

	_leave(" = 0");
	return 0;
}
/*
 * mark a call as being on a now-secured channel
 * - must be called with BH's disabled.
 */
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
	_enter("%p", call);
	if (call) {
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
			rxrpc_notify_socket(call);
		}
		write_unlock_bh(&call->state_lock);
	}
}
/*
 * connection-level Rx packet processor
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code;
	int loop, ret;

	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		_leave(" = -ECONNABORTED [%u]", conn->state);
		return -ECONNABORTED;
	}

	_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
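	/* Dispatch on the wire header type; anything we don't recognise is
	 * treated as a protocol error.
	 */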
	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_conn_retransmit_call(conn, skb,
					   sp->hdr.cid & RXRPC_CHANNELMASK);
		return 0;

	case RXRPC_PACKET_TYPE_BUSY:
		/* Just ignore BUSY packets for now. */
		return 0;
	case RXRPC_PACKET_TYPE_ABORT:
		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
				  &wtmp, sizeof(wtmp)) < 0) {
			trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
					      tracepoint_string("bad_abort"));
			return -EPROTO;
		}
		abort_code = ntohl(wtmp);
		_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);

		conn->error = -ECONNABORTED;
		conn->abort_code = abort_code;
		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
		return -ECONNABORTED;
	case RXRPC_PACKET_TYPE_CHALLENGE:
		return conn->security->respond_to_challenge(conn, skb,
							    _abort_code);
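	/* A RESPONSE packet completes the security handshake: verify it,
	 * derive the connection's packet security state and then let any
	 * calls that were waiting for the channel to become secure proceed.
	 */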
	case RXRPC_PACKET_TYPE_RESPONSE:
		ret = conn->security->verify_response(conn, skb, _abort_code);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(conn);
		if (ret < 0)
			return ret;

		ret = conn->security->prime_packet_security(conn);
		if (ret < 0)
			return ret;
		spin_lock(&conn->channel_lock);
		spin_lock(&conn->state_lock);

		if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
			conn->state = RXRPC_CONN_SERVICE;
			spin_unlock(&conn->state_lock);
			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(
					rcu_dereference_protected(
						conn->channels[loop].call,
						lockdep_is_held(&conn->channel_lock)));
		} else {
			spin_unlock(&conn->state_lock);
		}

		spin_unlock(&conn->channel_lock);
		return 0;
	default:
		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
				      tracepoint_string("bad_conn_pkt"));
		return -EPROTO;
	}
}
/*
 * set up security and issue a challenge
 */
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
	u32 abort_code;
	int ret;

	_enter("{%d}", conn->debug_id);

	ASSERT(conn->security_ix != 0);

	if (!conn->params.key) {
		_debug("set up security");
		ret = rxrpc_init_server_conn_security(conn);
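		/* Map a security setup failure to an abort code: a missing
		 * key aborts with RX_CALL_DEAD, anything else is treated as
		 * an authentication failure (RXKADNOAUTH).
		 */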
		switch (ret) {
		case 0:
			break;
		case -ENOENT:
			abort_code = RX_CALL_DEAD;
			goto abort;
		default:
			abort_code = RXKADNOAUTH;
			goto abort;
		}
	}

	if (conn->security->issue_challenge(conn) < 0) {
		abort_code = RX_CALL_DEAD;
		ret = -ENOMEM;
		goto abort;
	}

	_leave("");
	return;

abort:
	_debug("abort %d, %d", ret, abort_code);
	rxrpc_abort_connection(conn, ret, abort_code);
	_leave(" [aborted]");
}
/*
 * Process delayed final ACKs that we haven't subsumed into a subsequent call.
 */
static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
{
	unsigned long j = jiffies, next_j;
	unsigned int channel;
	bool set;

again:
	next_j = j + LONG_MAX;
	set = false;
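	/* Check each channel in turn: a set RXRPC_CONN_FINAL_ACK_n flag means
	 * that channel still owes the final ACK for its last call.  Send any
	 * ACKs that are due now and note the earliest deadline of those that
	 * aren't.
	 */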
	for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
		struct rxrpc_channel *chan = &conn->channels[channel];
		unsigned long ack_at;

		if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
			continue;

		smp_rmb(); /* vs rxrpc_disconnect_client_call */
		ack_at = READ_ONCE(chan->final_ack_at);

		if (time_before(j, ack_at)) {
			if (time_before(ack_at, next_j)) {
				next_j = ack_at;
				set = true;
			}
			continue;
		}

		if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
				       &conn->flags))
			rxrpc_conn_retransmit_call(conn, NULL, channel);
	}

	j = jiffies;
	if (time_before_eq(next_j, j))
		goto again;
	if (set)
		rxrpc_reduce_conn_timer(conn, next_j);
}
/*
 * connection-level event processor
 */
void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);
	struct sk_buff *skb;
	u32 abort_code = RX_PROTOCOL_ERROR;
	int ret;

	rxrpc_see_connection(conn);

	if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
		rxrpc_secure_connection(conn);

	/* Process delayed ACKs whose time has come. */
	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn);
	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		ret = rxrpc_process_event(conn, skb, &abort_code);
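		/* Dispose of the packet according to the outcome: protocol
		 * and key errors abort the connection, transient failures
		 * requeue the packet and anything else is simply freed.
		 */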
		switch (ret) {
		case -EPROTO:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
			goto protocol_error;
		case -ENOMEM:
		case -EAGAIN:
			goto requeue_and_leave;
		case -ECONNABORTED:
		default:
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			break;
		}
	}

out:
	rxrpc_put_connection(conn);
	_leave("");
	return;

requeue_and_leave:
	skb_queue_head(&conn->rx_queue, skb);
	goto out;

protocol_error:
	if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
		goto requeue_and_leave;
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	goto out;
}