/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
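
/*
 * Illustrative sketch (not part of the kernel build): the congestion
 * window is kept in fixed point, scaled by RPC_CWNDSCALE, so one request
 * slot corresponds to 1 << RPC_CWNDSHIFT.  The standalone user-space
 * program below models a made-up event sequence -- a few replies while
 * the transport is fully loaded, then a retransmit timeout -- using the
 * same arithmetic as xprt_adjust_cwnd() further down in this file.  The
 * DEMO_* names, the slot count and the event sequence are assumptions
 * for the example only.
 */
#if 0
#include <stdio.h>

#define DEMO_CWNDSHIFT		(8U)
#define DEMO_CWNDSCALE		(1U << DEMO_CWNDSHIFT)
#define DEMO_MAXCWND(max_reqs)	((max_reqs) << DEMO_CWNDSHIFT)

int main(void)
{
	unsigned long cwnd = DEMO_CWNDSCALE;	/* RPC_INITCWND: one slot */
	unsigned long max = DEMO_MAXCWND(16);	/* assume 16 request slots */
	int i;

	/* Each reply under full load grows cwnd by roughly 1/cwnd of a slot. */
	for (i = 0; i < 5; i++) {
		cwnd += (DEMO_CWNDSCALE * DEMO_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > max)
			cwnd = max;
		printf("reply %d: cwnd = %lu (~%lu slots)\n",
		       i, cwnd, cwnd >> DEMO_CWNDSHIFT);
	}

	/* A retransmit timeout halves the window, but never below one slot. */
	cwnd >>= 1;
	if (cwnd < DEMO_CWNDSCALE)
		cwnd = DEMO_CWNDSCALE;
	printf("timeout: cwnd = %lu\n", cwnd);
	return 0;
}
#endif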
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	char module_name[sizeof t->name + 5];
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	strcpy(module_name, "xprt");
	strncat(module_name, transport_name, sizeof t->name);
	result = request_module(module_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
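
/*
 * Illustrative sketch (not part of the kernel build): the module name
 * requested above is simply "xprt" with the transport name appended, so
 * asking for the "rdma" transport requests the module "xprtrdma".  The
 * 32-byte name width below is an assumption for the example; the kernel
 * sizes the buffer from sizeof(t->name) plus room for "xprt" and a NUL.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char module_name[32 + 5];	/* assume a 32-byte xprt_class name field */
	const char *transport_name = "rdma";

	strcpy(module_name, "xprt");
	strncat(module_name, transport_name, 32);
	printf("request_module(\"%s\")\n", module_name);	/* "xprtrdma" */
	return 0;
}
#endif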
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}
/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);
/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
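
/*
 * Illustrative sketch (not part of the kernel build): how the per-request
 * timeout grows across minor timeouts until the major-timeout budget is
 * exhausted, mirroring xprt_reset_majortimeo() and the minor-timeout
 * branch of xprt_adjust_timeout() above.  The timeout parameters below
 * (initval, maxval, retries, linear increment) are made-up example values
 * expressed in seconds for readability; the kernel works in jiffies and
 * takes these from struct rpc_timeout.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long initval = 3, maxval = 60, increment = 5;
	unsigned int retries = 3;
	int exponential = 1;
	unsigned long timeout = initval, majortimeo;
	unsigned int i;

	/* Total budget before declaring a major timeout. */
	majortimeo = timeout;
	if (exponential)
		majortimeo <<= retries;
	else
		majortimeo += increment * retries;
	if (majortimeo > maxval || majortimeo == 0)
		majortimeo = maxval;
	printf("major timeout after %lu seconds\n", majortimeo);

	/* Per-retry growth of the retransmit timeout. */
	for (i = 0; i < retries; i++) {
		if (exponential)
			timeout <<= 1;
		else
			timeout += increment;
		if (maxval && timeout >= maxval)
			timeout = maxval;
		printf("retry %u: next timeout %lu seconds\n", i + 1, timeout);
	}
	return 0;
}
#endif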
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}
/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = xprt->connect_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
}
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct list_head *pos;

	list_for_each(pos, &xprt->recv) {
		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
		if (entry->rq_xid == xid)
			return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 *
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer,
					(long)jiffies - req->rq_xtime);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	task->tk_rtt = (long)jiffies - req->rq_xtime;

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}
static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
		err = req->rq_reply_bytes_recvd;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(task))
		err = -EAGAIN;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}
void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_connect_cookie = xprt->connect_cookie;
	req->rq_xtime = jiffies;
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC:       waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL);
}
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	do_xprt_reserve(task);
	spin_unlock(&xprt->reserve_lock);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int is_bc_request;

	if (!(req = task->tk_rqstp))
		return;

	/* Preallocated backchannel request? */
	is_bc_request = bc_prealloc(req);

	xprt = req->rq_xprt;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (!bc_prealloc(req))
		xprt->ops->buf_free(req->rq_buffer);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!is_bc_request)) {
		memset(req, 0, sizeof(*req));	/* mark unused */

		spin_lock(&xprt->reserve_lock);
		list_add(&req->rq_list, &xprt->free);
		rpc_wake_up_next(&xprt->backlog);
		spin_unlock(&xprt->reserve_lock);
	} else
		xprt_free_bc_request(req);
}
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		return xprt;
	}

	kref_init(&xprt->kref);
	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_NFS_V4_1)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_NFS_V4_1 */

	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
	return xprt;
}
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 */
static void xprt_destroy(struct kref *kref)
{
	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

	dprintk("RPC:       destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->resend);
	rpc_destroy_wait_queue(&xprt->backlog);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	kref_put(&xprt->kref, xprt_destroy);
}
/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	kref_get(&xprt->kref);
	return xprt;
}