/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, it installs a timer that
 *	is run after the packet's timeout has expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
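/*
 * For illustration only (not part of the original file): a caller-side
 * sketch of the flow described above, with error handling, locking and
 * the async case omitted.  The helpers are the ones defined below; the
 * surrounding sequence is hypothetical.
 *
 *	xprt_reserve(task);             // get a slot, or sleep on ->backlog
 *	// marshal the request into task->tk_rqstp
 *	if (xprt_prepare_transmit(task) == 0)
 *		xprt_transmit(task);    // send; sleep on ->pending for reply
 *	// data_ready matches the XID, wakes the task, cancels the timer
 *	xprt_release(task);             // return the slot, wake ->backlog
 */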
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
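/*
 * Worked example (added for clarity, not in the original): with
 * RPC_CWNDSHIFT = 8, one request's worth of window is RPC_CWNDSCALE =
 * 256 fixed-point units, and a transport starts at RPC_INITCWND = 256,
 * i.e. one slot.  Each qualifying reply grows cwnd by roughly
 * RPC_CWNDSCALE^2 / cwnd (256 -> 512 -> 640 -> ...), so the window
 * opens by about one slot per window of replies, while a timeout in
 * xprt_adjust_cwnd() halves it.
 */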
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}
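/*
 * Numeric note (illustrative, not in the original): each in-flight
 * request holds RPC_CWNDSCALE (256) units of congestion.  At the
 * initial cwnd of 256 only one request may be outstanding; a second
 * request fits once cwnd has grown past 256, a third past 512, and
 * so on, up to RPC_MAXCWND().
 */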
/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 *
 */
void xprt_wait_for_buffer_space(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, NULL, NULL);
}
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_task(xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
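/*
 * Illustrative numbers (hypothetical, added for clarity): if
 * rpc_calc_rto() estimates an RTO of HZ/10 and this request has
 * already backed off twice (rpc_ntimeo() + rq_retries == 2), the
 * next wait is HZ/10 << 2, i.e. four times the estimate, clamped
 * to the transport's to_maxval.
 */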
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	struct rpc_timeout *to = &req->rq_xprt->timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_timeout *to = &xprt->timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
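/*
 * Example walk-through (hedged, values are hypothetical): with
 * to_initval = to_increment = 5*HZ and to_exponential = 0, minor
 * timeouts step rq_timeout 5s -> 10s -> 15s until rq_majortimeo
 * passes; the major timeout then resets rq_timeout to to_initval
 * and returns -ETIMEDOUT so the caller can decide whether to keep
 * retrying.
 */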
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt_disconnect(xprt);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
}
/**
 * xprt_disconnect - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	if (xprt_connecting(xprt))
		xprt_release_write(xprt, NULL);
	else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EIO;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = xprt->connect_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
	return;
}
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	if (task->tk_status >= 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
		dprintk("RPC: %5u xprt_connect_status: server %s refused "
				"connection\n", task->tk_pid,
				task->tk_client->cl_server);
		break;
	case -ENOTCONN:
		dprintk("RPC: %5u xprt_connect_status: connection broken\n",
				task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct list_head *pos;

	list_for_each(pos, &xprt->recv) {
		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
		if (entry->rq_xid == xid)
			return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 *
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer,
					(long)jiffies - req->rq_xtime);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	task->tk_xprt->stat.recvs++;
	task->tk_rtt = (long)jiffies - req->rq_xtime;

	list_del_init(&req->rq_list);
	/* Ensure all writes are done before we update req->rq_received */
	smp_wmb();
	req->rq_received = req->rq_private_buf.len = copied;
	rpc_wake_up_task(task);
}
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock(&xprt->transport_lock);
	if (!req->rq_received) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
		task->tk_status = -ETIMEDOUT;
	}
	task->tk_timeout = 0;
	rpc_wake_up_task(task);
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(task)) {
		err = -EAGAIN;
		goto out_unlock;
	}

	if (!xprt_connected(xprt)) {
		err = -ENOTCONN;
		goto out_unlock;
	}
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}
void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_xprt, task);
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	status = xprt->ops->send_request(task);
	if (status == 0) {
		dprintk("RPC: %5u xmit complete\n", task->tk_pid);
		spin_lock_bh(&xprt->transport_lock);

		xprt->ops->set_retrans_timeout(task);

		xprt->stat.sends++;
		xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
		xprt->stat.bklog_u += xprt->backlog.qlen;

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (!req->rq_received)
			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
		spin_unlock_bh(&xprt->transport_lock);
		return;
	}

	/* Note: at this point, task->tk_sleeping has not yet been set,
	 *	 hence there is no danger of the waking up task being put on
	 *	 schedq, and being picked up by a parallel run of rpciod().
	 */
	task->tk_status = status;
	if (status == -ECONNREFUSED)
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
}
static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst	*req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC:       waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	do_xprt_reserve(task);
	spin_unlock(&xprt->reserve_lock);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_timeout = xprt->timeout.to_initval;
	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst	*req;

	if (!(req = task->tk_rqstp))
		return;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	xprt->ops->buf_free(req->rq_buffer);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);
	memset(req, 0, sizeof(*req));	/* mark unused */

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}
/**
 * xprt_set_timeout - set constant RPC timeout
 * @to: RPC timeout parameters to set up
 * @retr: number of retries
 * @incr: amount of increase after each retry
 *
 */
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
	to->to_initval   =
	to->to_increment = incr;
	to->to_maxval    = to->to_initval + (incr * retr);
	to->to_retries   = retr;
	to->to_exponential = 0;
}
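/*
 * Usage sketch (hypothetical values, added for illustration): a caller
 * wanting a flat 10 second retransmit interval with 3 retries would do
 *
 *	struct rpc_timeout to;
 *	xprt_set_timeout(&to, 3, 10 * HZ);
 *
 * leaving to_initval == to_increment == 10*HZ and to_maxval == 40*HZ.
 */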
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct rpc_xprtsock_create *args)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req;

	switch (args->proto) {
	case IPPROTO_UDP:
		xprt = xs_setup_udp(args);
		break;
	case IPPROTO_TCP:
		xprt = xs_setup_tcp(args);
		break;
	default:
		printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
				args->proto);
		return ERR_PTR(-EIO);
	}
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		return xprt;
	}

	kref_init(&xprt->kref);
	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	init_timer(&xprt->timer);
	xprt->timer.function = xprt_init_autodisconnect;
	xprt->timer.data = (unsigned long) xprt;
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 */
static void xprt_destroy(struct kref *kref)
{
	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

	dprintk("RPC:       destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	kref_put(&xprt->kref, xprt_destroy);
}
/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	kref_get(&xprt->kref);
	return xprt;
}