/*
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>

#include "transport.h"

#define SCHED_TIMEOUT		10
#define MAXPOLLWADDR		2

enum {
        Rworksched = 1,		/* read work scheduled or running */
        Rpending = 2,		/* can read */
        Wworksched = 4,		/* write work scheduled or running */
        Wpending = 8,		/* can write */
};
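
/*
 * Added note (not from the original source): these flag bits live in
 * m->wsched and are manipulated with the atomic bit helpers, as the
 * scheduling code further down does, e.g.:
 *
 *      set_bit(Rpending, &m->wsched);
 *      if (!test_and_set_bit(Rworksched, &m->wsched))
 *              queue_work(v9fs_mux_wq, &m->rq);
 *      ...
 *      clear_bit(Rworksched, &m->wsched);
 */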

struct v9fs_mux_poll_task;

struct v9fs_req {
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        v9fs_mux_req_callback cb;
        struct list_head req_list;
};

struct v9fs_mux_data {
        struct list_head mux_list;
        struct v9fs_mux_poll_task *poll_task;
        unsigned char *extended;
        struct v9fs_transport *trans;
        struct v9fs_idpool tagpool;
        wait_queue_head_t equeue;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct v9fs_fcall *rcall;
        wait_queue_t poll_wait[MAXPOLLWADDR];
        wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
        struct work_struct rq;
        struct work_struct wq;
};

struct v9fs_mux_poll_task {
        struct task_struct *task;
        struct list_head mux_list;
};

struct v9fs_mux_rpc {
        struct v9fs_mux_data *m;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        wait_queue_head_t wqueue;
};

static int v9fs_poll_proc(void *);
static void v9fs_read_work(struct work_struct *work);
static void v9fs_write_work(struct work_struct *work);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DEFINE_MUTEX(v9fs_mux_task_lock);

static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];

int v9fs_mux_global_init(void)
{
        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
                v9fs_mux_poll_tasks[i].task = NULL;

        v9fs_mux_wq = create_workqueue("v9fs");

void v9fs_mux_global_exit(void)
{
        destroy_workqueue(v9fs_mux_wq);

/**
 * v9fs_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation returns the number of mounts divided by the
 * number of existing poll tasks, rounded up, capped at
 * ARRAY_SIZE(v9fs_mux_poll_tasks).
 */
static int v9fs_mux_calc_poll_procs(int muxnum)
{
        if (v9fs_mux_poll_task_num)
                n = muxnum / v9fs_mux_poll_task_num +
                        (muxnum % v9fs_mux_poll_task_num ? 1 : 0);

        if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
                n = ARRAY_SIZE(v9fs_mux_poll_tasks);
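
/*
 * Worked example (illustrative, not from the original file): with three
 * poll tasks already running (v9fs_mux_poll_task_num == 3) and muxnum == 10,
 * n = 10/3 + (10 % 3 ? 1 : 0) = 4, and the result is capped at
 * ARRAY_SIZE(v9fs_mux_poll_tasks).
 */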

static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
        struct v9fs_mux_poll_task *vpt, *vptlast;
        struct task_struct *pproc;

        dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
                v9fs_mux_poll_task_num);
        mutex_lock(&v9fs_mux_task_lock);

        n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
        if (n > v9fs_mux_poll_task_num) {
                for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                        if (v9fs_mux_poll_tasks[i].task == NULL) {
                                vpt = &v9fs_mux_poll_tasks[i];
                                dprintk(DEBUG_MUX, "create proc %p\n", vpt);
                                pproc = kthread_create(v9fs_poll_proc, vpt,

                                if (!IS_ERR(pproc)) {
                                        INIT_LIST_HEAD(&vpt->mux_list);
                                        v9fs_mux_poll_task_num++;
                                        wake_up_process(vpt->task);

                if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
                        dprintk(DEBUG_ERROR, "warning: no free poll slots\n");

        n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
                ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                vpt = &v9fs_mux_poll_tasks[i];
                if (vpt->task != NULL) {
                        if (vpt->muxnum < n) {
                                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                                list_add(&m->mux_list, &vpt->mux_list);

                                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                                init_poll_funcptr(&m->pt, v9fs_pollwait);

        if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                list_add(&m->mux_list, &vptlast->mux_list);

                m->poll_task = vptlast;
                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                init_poll_funcptr(&m->pt, v9fs_pollwait);

        mutex_unlock(&v9fs_mux_task_lock);

static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
        struct v9fs_mux_poll_task *vpt;

        mutex_lock(&v9fs_mux_task_lock);

        list_del(&m->mux_list);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (m->poll_waddr[i] != NULL) {
                        remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
                        m->poll_waddr[i] = NULL;

                dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
                send_sig(SIGKILL, vpt->task, 1);

        v9fs_mux_poll_task_num--;

        mutex_unlock(&v9fs_mux_task_lock);

/**
 * v9fs_mux_init - allocate and initialize the per-session mux data
 * Creates the polling task if this is the first session.
 *
 * @trans - transport structure
 * @msize - maximum message size
 * @extended - pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
                                    unsigned char *extended)
{
        struct v9fs_mux_data *m, *mtmp;

        dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
        m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&m->lock);
        INIT_LIST_HEAD(&m->mux_list);
        m->extended = extended;
        idr_init(&m->tagpool.pool);
        init_MUTEX(&m->tagpool.lock);
        init_waitqueue_head(&m->equeue);
        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        m->wpos = m->wsize = 0;
        INIT_WORK(&m->rq, v9fs_read_work);
        INIT_WORK(&m->wq, v9fs_write_work);
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        n = v9fs_mux_poll_start(m);

        n = trans->poll(trans, &m->pt);
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);

                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);

        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (IS_ERR(m->poll_waddr[i])) {
                        v9fs_mux_poll_stop(m);
                        mtmp = (void *)m->poll_waddr;	/* the error code */
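
/*
 * Illustrative usage sketch (not part of the original file): a transport
 * layer would create the per-session mux roughly like this; v9fs_mux_init()
 * reports failure through an ERR_PTR()-encoded pointer, so the result is
 * checked with IS_ERR()/PTR_ERR():
 *
 *      struct v9fs_mux_data *m;
 *
 *      m = v9fs_mux_init(trans, msize, &extended);
 *      if (IS_ERR(m))
 *              return PTR_ERR(m);
 */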

/**
 * v9fs_mux_destroy - cancels all pending requests and frees mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
        dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
                m->mux_list.prev, m->mux_list.next);
        v9fs_mux_cancel(m, -ECONNRESET);

        if (!list_empty(&m->req_list)) {
                /* wait until all processes waiting on this session exit */
                dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",

                wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
                dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
                        list_empty(&m->req_list));

        v9fs_mux_poll_stop(m);

/**
 * v9fs_pollwait - called by files poll operation to add v9fs-poll task
 *	to files wait queue
 */
v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,

        struct v9fs_mux_data *m;

        m = container_of(p, struct v9fs_mux_data, pt);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
                if (m->poll_waddr[i] == NULL)

        if (i >= ARRAY_SIZE(m->poll_waddr)) {
                dprintk(DEBUG_ERROR, "not enough wait_address slots\n");

        m->poll_waddr[i] = wait_address;

                dprintk(DEBUG_ERROR, "no wait_address\n");
                m->poll_waddr[i] = ERR_PTR(-EIO);

        init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
        add_wait_queue(wait_address, &m->poll_wait[i]);
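
/*
 * Added note (not from the original file): v9fs_pollwait() is not called
 * directly.  v9fs_mux_poll_start() registers it as the poll-table callback
 * and the transport's poll method then invokes it for each wait queue,
 * roughly:
 *
 *      init_poll_funcptr(&m->pt, v9fs_pollwait);
 *      n = trans->poll(trans, &m->pt);
 */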

/**
 * v9fs_poll_mux - polls a mux and schedules read or write works if necessary
 */
static void v9fs_poll_mux(struct v9fs_mux_data *m)
{
        n = m->trans->poll(m->trans, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
                v9fs_mux_cancel(m, n);

                set_bit(Rpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);

                set_bit(Wpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list))
                    && !test_and_set_bit(Wworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);

/**
 * v9fs_poll_proc - polls all v9fs transports for new events and queues
 *	the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
        struct v9fs_mux_data *m, *mtmp;
        struct v9fs_mux_poll_task *vpt;

        dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
        allow_signal(SIGKILL);
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))

                list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {

                dprintk(DEBUG_MUX, "sleeping...\n");
                schedule_timeout(SCHED_TIMEOUT * HZ);

        __set_current_state(TASK_RUNNING);
        dprintk(DEBUG_MUX, "finish\n");

/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(struct work_struct *work)
{
        struct v9fs_mux_data *m;
        struct v9fs_req *req;

        m = container_of(work, struct v9fs_mux_data, wq);

                clear_bit(Wworksched, &m->wsched);

        if (list_empty(&m->unsent_req_list)) {
                clear_bit(Wworksched, &m->wsched);

        req = list_entry(m->unsent_req_list.next, struct v9fs_req,
        list_move_tail(&req->req_list, &m->req_list);
        if (req->err == ERREQFLUSH)

        m->wbuf = req->tcall->sdata;
        m->wsize = req->tcall->size;
        dump_data(m->wbuf, m->wsize);
        spin_unlock(&m->lock);

        dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
        dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);

        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))

                        n = m->trans->poll(m->trans, NULL);

                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                clear_bit(Wworksched, &m->wsched);
        clear_bit(Wworksched, &m->wsched);

        v9fs_mux_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);

static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        struct v9fs_str *ename;

        if (!req->err && req->rcall->id == RERROR) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;

                dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

                        req->err = v9fs_errstr2errno(ename->str, ename->len);

                        if (!req->err) {	/* string match failed */
                                PRINT_FCALL_ERROR("unknown error", req->rcall);

                                req->err = -ESERVERFAULT;

        } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
                dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
                        req->tcall->id + 1, req->rcall->id);

/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(struct work_struct *work)
{
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr, *rreq;
        struct v9fs_fcall *rcall;

        m = container_of(work, struct v9fs_mux_data, rq);

        dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

                kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);

                m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);

        clear_bit(Rpending, &m->wsched);
        err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
        dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);

        while (m->rpos > 4) {
                n = le32_to_cpu(*(__le32 *) m->rbuf);
                                "requested packet size too big: %d\n", n);

                dump_data(m->rbuf, n);
                v9fs_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);

                if ((v9fs_debug_level & DEBUG_FCALL) == DEBUG_FCALL) {
                        v9fs_printfcall(buf, sizeof(buf), m->rcall,
                        printk(KERN_NOTICE ">>> %p %s\n", m, buf);

                m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
                m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                memmove(m->rbuf, rbuf + n, m->rpos - n);

                dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,

                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                if (req->flush != Flushing)
                                        list_del(&req->req_list);
                spin_unlock(&m->lock);
                        process_request(m, req);

                        if (req->flush != Flushing) {
                                        (*req->cb) (req, req->cba);

                if (err >= 0 && rcall->id != RFLUSH)
                                "unexpected response mux %p id %d tag %d\n",
                                m, rcall->id, rcall->tag);

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))

                        n = m->trans->poll(m->trans, NULL);

                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                clear_bit(Rworksched, &m->wsched);
        clear_bit(Rworksched, &m->wsched);

        v9fs_mux_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);

/**
 * v9fs_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully. It can return an
 * ERR_PTR()-encoded error that callers retrieve with PTR_ERR().
 *
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
                                          struct v9fs_fcall *tc,
                                          v9fs_mux_req_callback cb, void *cba)
{
        struct v9fs_req *req;

        dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,

                return ERR_PTR(m->err);

        req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        if (tc->id == TVERSION)

        n = v9fs_mux_get_tag(m);
                return ERR_PTR(-ENOMEM);

        if ((v9fs_debug_level & DEBUG_FCALL) == DEBUG_FCALL) {
                v9fs_printfcall(buf, sizeof(buf), tc, *m->extended);
                printk(KERN_NOTICE "<<< %p %s\n", m, buf);

        spin_lock_init(&req->lock);

        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))

                n = m->trans->poll(m->trans, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                queue_work(v9fs_mux_wq, &m->wq);
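
/*
 * Illustrative sketch (not from the original file): callers treat the
 * return value of v9fs_send_request() as ERR_PTR()-encoded, as
 * v9fs_mux_rpc() and v9fs_mux_rpcnb() below do:
 *
 *      req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 */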

static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        v9fs_mux_put_tag(m, req->tag);

static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
{
        v9fs_mux_req_callback cb;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rreq, *rptr;

        dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
                freq->tcall, freq->rcall, freq->err,
                freq->tcall->params.tflush.oldtag);

        tag = freq->tcall->params.tflush.oldtag;
        list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                if (rreq->tag == tag) {
                        list_del(&req->req_list);

        spin_unlock(&m->lock);

                spin_lock(&req->lock);
                req->flush = Flushed;
                spin_unlock(&req->lock);

                (*req->cb) (req, req->cba);

        v9fs_mux_free_request(m, freq);

v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        struct v9fs_fcall *fc;
        struct v9fs_req *rreq, *rptr;

        dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

        /* if a response was received for a request, do nothing */
        spin_lock(&req->lock);
        if (req->rcall || req->err) {
                spin_unlock(&req->lock);
                dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req);

        req->flush = Flushing;
        spin_unlock(&req->lock);

        /* if the request is not sent yet, just remove it from the list */
        list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
                if (rreq->tag == req->tag) {
                        dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req);
                        list_del(&rreq->req_list);
                        req->flush = Flushed;
                        spin_unlock(&m->lock);
                                (*req->cb) (req, req->cba);

        spin_unlock(&m->lock);

        clear_thread_flag(TIF_SIGPENDING);
        fc = v9fs_create_tflush(req->tag);
        v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);

v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
{
        struct v9fs_mux_rpc *r;

        dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
        r->rcall = req->rcall;

        if (req->flush != None && !req->err)
                r->err = -ERESTARTSYS;

/**
 * v9fs_mux_rpc - sends 9P request and waits until a response is available.
 *	The function can be interrupted.
 *
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
             struct v9fs_fcall **rc)
{
        struct v9fs_req *req;
        struct v9fs_mux_rpc r;

        init_waitqueue_head(&r.wqueue);

        if (signal_pending(current)) {
                clear_thread_flag(TIF_SIGPENDING);

        req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
                dprintk(DEBUG_MUX, "error %d\n", err);

        err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);

        if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
                if (v9fs_mux_flush_request(m, req)) {
                        /* wait until we get response of the flush message */
                        do {
                                clear_thread_flag(TIF_SIGPENDING);
                                err = wait_event_interruptible(r.wqueue,
                        } while (!r.rcall && !r.err && err == -ERESTARTSYS &&
                                 m->trans->status == Connected && !m->err);

        spin_lock_irqsave(&current->sighand->siglock, flags);
        spin_unlock_irqrestore(&current->sighand->siglock, flags);

        v9fs_mux_free_request(m, req);
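
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * synchronous caller issues a T-message and consumes the R-message returned
 * through @rc; tc and rc here are placeholders, and how the fcalls are
 * allocated and freed is outside this file:
 *
 *      struct v9fs_fcall *rc = NULL;
 *      int err;
 *
 *      err = v9fs_mux_rpc(m, tc, &rc);
 *      if (!err) {
 *              ... use rc ...
 *      }
 */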

/**
 * v9fs_mux_rpcnb - sends 9P request without waiting for response.
 *
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @cba: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
                   v9fs_mux_req_callback cb, void *a)
{
        struct v9fs_req *req;

        req = v9fs_send_request(m, tc, cb, a);
                dprintk(DEBUG_MUX, "error %d\n", err);

        dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
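
/*
 * Illustrative sketch (not from the original file): a callback passed to
 * v9fs_mux_rpcnb() uses the v9fs_mux_req_callback signature seen above;
 * when it runs, req->rcall holds the response and req->err any error, as
 * in v9fs_mux_rpc_cb().  example_cb is a hypothetical name:
 *
 *      static void example_cb(struct v9fs_req *req, void *a)
 *      {
 *              if (req->err)
 *                      dprintk(DEBUG_MUX, "request failed %d\n", req->err);
 *      }
 *
 *      err = v9fs_mux_rpcnb(m, tc, example_cb, a);
 */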

/**
 * v9fs_mux_cancel - cancel all pending requests with error
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
        struct v9fs_req *req, *rtmp;
        LIST_HEAD(cancel_list);

        dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);

        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        spin_unlock(&m->lock);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                list_del(&req->req_list);

                        (*req->cb) (req, req->cba);

        wake_up(&m->equeue);

static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
        tag = v9fs_get_idpool(&m->tagpool);

static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
        if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tagpool))
                v9fs_put_idpool(tag, &m->tagpool);
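
/*
 * Added note (illustrative, not from the original file): tags come from the
 * per-mux idpool and are always used as a get/put pair, taken in
 * v9fs_send_request() and returned in v9fs_mux_free_request():
 *
 *      n = v9fs_mux_get_tag(m);
 *      ...
 *      v9fs_mux_put_tag(m, req->tag);
 */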