/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, it installs a timer that
 *	is run after the packet's timeout has expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
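/*
 * In outline, a synchronous caller therefore moves through the following
 * sequence (a sketch of the flow described above, not an additional API):
 *
 *	xprt_reserve()	- obtain a request slot, or sleep on the backlog
 *	xprt_transmit()	- queue the request for receive, send it, and
 *			  arm the retransmit timer
 *	(wait on xprt->pending until xprt_complete_rqst() or xprt_timer())
 *	xprt_release()	- return the slot and wake the next backlog task
 */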
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
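/*
 * Worked example of the fixed-point units above, assuming a transport
 * with max_reqs = 16: one request slot corresponds to RPC_CWNDSCALE =
 * 256 congestion units, so the window starts at RPC_INITCWND = 256
 * (a single request in flight) and can grow to RPC_MAXCWND = 16 << 8 =
 * 4096 (all sixteen slots in flight). Since __xprt_get_cong() charges
 * RPC_CWNDSCALE to xprt->cong per request, RPCXPRT_CONGESTED() trips
 * once cwnd/256 requests hold congestion slots.
 */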
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	char module_name[sizeof t->name + 5];
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	strcpy(module_name, "xprt");
	strncat(module_name, transport_name, sizeof t->name);
	result = request_module(module_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}
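/*
 * Note that xprt_clear_locked() doubles as the hand-off point for
 * autoclose: if a close was requested while the transport was locked
 * (and the transport is not shutting down), XPRT_LOCKED is not cleared
 * but passed to the rpciod work item, so xprt_autoclose() runs with
 * write access already held and releases it via xprt_release_write().
 */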
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	xprt_clear_locked(xprt);
}
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}
/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
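/*
 * Worked example of the additive increase above: with cwnd = 1024
 * (four slots' worth of units), a successful reply adds
 * (256 * 256 + 512) / 1024 = 64 units, i.e. a quarter of a slot --
 * the 1/cwnd growth described at the top of this file. On -ETIMEDOUT
 * the window is halved instead, but never falls below RPC_CWNDSCALE
 * (one outstanding request).
 */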
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);
/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
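/*
 * Illustration, assuming rpc_calc_rto() currently yields HZ/4 for this
 * procedure's timer class: the first transmission gets tk_timeout =
 * HZ/4, and each backoff step recorded by rpc_ntimeo() plus each retry
 * counted in rq_retries doubles that value, capped at the client's
 * to_maxval.
 */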
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
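/*
 * Example, assuming a linear (non-exponential) timeout with
 * to_initval = 5 * HZ, to_increment = 5 * HZ and to_retries = 3:
 * minor timeouts bump rq_timeout from 5s to 10s, then 15s, then 20s;
 * once rq_majortimeo has passed, rq_timeout is reset to 5s, the RTT
 * estimator is reinitialized ("slow start"), and -ETIMEDOUT is
 * returned so the caller can treat it as a major timeout.
 */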
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}
/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = xprt->connect_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
}
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct list_head *pos;

	list_for_each(pos, &xprt->recv) {
		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
		if (entry->rq_xid == xid)
			return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer,
					(long)jiffies - req->rq_xtime);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	task->tk_rtt = (long)jiffies - req->rq_xtime;

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update req->rq_received */
	smp_wmb();
	req->rq_received = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_received) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(task))
		err = -EAGAIN;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}
void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_xprt, task);
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_connect_cookie = xprt->connect_cookie;
	req->rq_xtime = jiffies;
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_received)
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
	spin_unlock_bh(&xprt->transport_lock);
}
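/*
 * Note the ordering in xprt_transmit(): the request is hung on
 * xprt->recv under the transport lock *before* ->send_request() runs,
 * so a reply racing with the transmit path can still be matched by
 * xprt_lookup_rqst() from the transport's data_ready handler.
 */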
static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst	*req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC: waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL);
}
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	do_xprt_reserve(task);
	spin_unlock(&xprt->reserve_lock);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}
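/*
 * XIDs start at a random value and are then handed out sequentially by
 * xprt_alloc_xid(), so replies are matched to requests by a value that
 * is hard to predict across transports yet unique within one transport
 * until the 32-bit counter wraps.
 */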
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst	*req;

	if (!(req = task->tk_rqstp))
		return;
	rpc_count_iostats(task);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	xprt->ops->buf_free(req->rq_buffer);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);
	memset(req, 0, sizeof(*req));	/* mark unused */

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		return xprt;
	}

	kref_init(&xprt->kref);
	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	setup_timer(&xprt->timer, xprt_init_autodisconnect,
			(unsigned long)xprt);
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 */
static void xprt_destroy(struct kref *kref)
{
	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

	dprintk("RPC: destroying transport %p\n", xprt);
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->resend);
	rpc_destroy_wait_queue(&xprt->backlog);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 */
void xprt_put(struct rpc_xprt *xprt)
{
	kref_put(&xprt->kref, xprt_destroy);
}
/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	kref_get(&xprt->kref);
	return xprt;
}