2 * linux/fs/9p/trans_fd.c
4 * Fd transport layer. Includes deprecated socket layer.
6 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
7 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
8 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
9 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2
13 * as published by the Free Software Foundation.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to:
22 * Free Software Foundation
23 * 51 Franklin Street, Fifth Floor
24 * Boston, MA 02111-1301 USA
29 #include <linux/module.h>
30 #include <linux/net.h>
31 #include <linux/ipv6.h>
32 #include <linux/kthread.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
36 #include <linux/uaccess.h>
37 #include <linux/inet.h>
38 #include <linux/idr.h>
39 #include <linux/file.h>
40 #include <linux/parser.h>
41 #include <net/9p/9p.h>
42 #include <net/9p/transport.h>
45 #define MAX_SOCK_BUF (64*1024)
47 #define SCHED_TIMEOUT 10
48 #define MAXPOLLWADDR 2
51 * struct p9_fd_opts - per-transport options
52 * @rfd: file descriptor for reading (trans=fd)
53 * @wfd: file descriptor for writing (trans=fd)
54 * @port: port to connect to (trans=tcp)
66 * struct p9_trans_fd - transport state
67 * @rd: reference to file to read from
68 * @wr: reference of file to write to
69 * @conn: connection state reference
80 * Option Parsing (code inspired by NFS code)
81 * - a little lazy - parse all fd-transport options
85 /* Options that take integer arguments */
86 Opt_port
, Opt_rfdno
, Opt_wfdno
, Opt_err
,
89 static match_table_t tokens
= {
90 {Opt_port
, "port=%u"},
91 {Opt_rfdno
, "rfdno=%u"},
92 {Opt_wfdno
, "wfdno=%u"},
97 Rworksched
= 1, /* read work scheduled or running */
98 Rpending
= 2, /* can read */
99 Wworksched
= 4, /* write work scheduled or running */
100 Wpending
= 8, /* can write */
110 typedef void (*p9_conn_req_callback
)(struct p9_req
*req
, void *a
);
113 * struct p9_req - fd mux encoding of an rpc transaction
114 * @lock: protects req_list
115 * @tag: numeric tag for rpc transaction
116 * @tcall: request &p9_fcall structure
117 * @rcall: response &p9_fcall structure
119 * @cb: callback for when response is received
120 * @cba: argument to pass to callback
121 * @flush: flag to indicate RPC has been flushed
122 * @req_list: list link for higher level objects to chain requests
129 struct p9_fcall
*tcall
;
130 struct p9_fcall
*rcall
;
132 p9_conn_req_callback cb
;
135 struct list_head req_list
;
138 struct p9_mux_poll_task
{
139 struct task_struct
*task
;
140 struct list_head mux_list
;
145 * struct p9_conn - fd mux connection state information
146 * @lock: protects mux_list (?)
147 * @mux_list: list link for mux to manage multiple connections (?)
148 * @poll_task: task polling on this connection
149 * @msize: maximum size for connection (dup)
150 * @extended: 9p2000.u flag (dup)
151 * @trans: reference to transport instance for this connection
152 * @tagpool: id accounting for transactions
154 * @req_list: accounting for requests which have been sent
155 * @unsent_req_list: accounting for requests that haven't been sent
156 * @rcall: current response &p9_fcall structure
157 * @rpos: read position in current frame
158 * @rbuf: current read buffer
159 * @wpos: write position for current frame
160 * @wsize: amount of data to write for current frame
161 * @wbuf: current write buffer
162 * @poll_wait: array of wait_q's for various worker threads
165 * @rq: current read work
166 * @wq: current write work
172 spinlock_t lock
; /* protect lock structure */
173 struct list_head mux_list
;
174 struct p9_mux_poll_task
*poll_task
;
176 unsigned char extended
;
177 struct p9_trans
*trans
;
178 struct p9_idpool
*tagpool
;
180 struct list_head req_list
;
181 struct list_head unsent_req_list
;
182 struct p9_fcall
*rcall
;
188 wait_queue_t poll_wait
[MAXPOLLWADDR
];
189 wait_queue_head_t
*poll_waddr
[MAXPOLLWADDR
];
191 struct work_struct rq
;
192 struct work_struct wq
;
193 unsigned long wsched
;
197 * struct p9_mux_rpc - fd mux rpc accounting structure
198 * @m: connection this request was issued on
200 * @tcall: request &p9_fcall
201 * @rcall: response &p9_fcall
202 * @wqueue: wait queue that client is blocked on for this rpc
204 * Bug: isn't this information duplicated elsewhere like &p9_req
210 struct p9_fcall
*tcall
;
211 struct p9_fcall
*rcall
;
212 wait_queue_head_t wqueue
;
215 static int p9_poll_proc(void *);
216 static void p9_read_work(struct work_struct
*work
);
217 static void p9_write_work(struct work_struct
*work
);
218 static void p9_pollwait(struct file
*filp
, wait_queue_head_t
*wait_address
,
220 static int p9_fd_write(struct p9_trans
*trans
, void *v
, int len
);
221 static int p9_fd_read(struct p9_trans
*trans
, void *v
, int len
);
223 static DEFINE_MUTEX(p9_mux_task_lock
);
224 static struct workqueue_struct
*p9_mux_wq
;
226 static int p9_mux_num
;
227 static int p9_mux_poll_task_num
;
228 static struct p9_mux_poll_task p9_mux_poll_tasks
[100];
230 static void p9_conn_destroy(struct p9_conn
*);
231 static unsigned int p9_fd_poll(struct p9_trans
*trans
,
232 struct poll_table_struct
*pt
);
235 static int p9_conn_rpcnb(struct p9_conn
*m
, struct p9_fcall
*tc
,
236 p9_conn_req_callback cb
, void *a
);
237 #endif /* P9_NONBLOCK */
239 static void p9_conn_cancel(struct p9_conn
*m
, int err
);
241 static u16
p9_mux_get_tag(struct p9_conn
*m
)
245 tag
= p9_idpool_get(m
->tagpool
);
252 static void p9_mux_put_tag(struct p9_conn
*m
, u16 tag
)
254 if (tag
!= P9_NOTAG
&& p9_idpool_check(tag
, m
->tagpool
))
255 p9_idpool_put(tag
, m
->tagpool
);
259 * p9_mux_calc_poll_procs - calculates the number of polling procs
260 * @muxnum: number of mounts
262 * Calculation is based on the number of mounted v9fs filesystems.
263 * The current implementation returns sqrt of the number of mounts.
266 static int p9_mux_calc_poll_procs(int muxnum
)
270 if (p9_mux_poll_task_num
)
271 n
= muxnum
/ p9_mux_poll_task_num
+
272 (muxnum
% p9_mux_poll_task_num
? 1 : 0);
276 if (n
> ARRAY_SIZE(p9_mux_poll_tasks
))
277 n
= ARRAY_SIZE(p9_mux_poll_tasks
);
282 static int p9_mux_poll_start(struct p9_conn
*m
)
285 struct p9_mux_poll_task
*vpt
, *vptlast
;
286 struct task_struct
*pproc
;
288 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p muxnum %d procnum %d\n", m
, p9_mux_num
,
289 p9_mux_poll_task_num
);
290 mutex_lock(&p9_mux_task_lock
);
292 n
= p9_mux_calc_poll_procs(p9_mux_num
+ 1);
293 if (n
> p9_mux_poll_task_num
) {
294 for (i
= 0; i
< ARRAY_SIZE(p9_mux_poll_tasks
); i
++) {
295 if (p9_mux_poll_tasks
[i
].task
== NULL
) {
296 vpt
= &p9_mux_poll_tasks
[i
];
297 P9_DPRINTK(P9_DEBUG_MUX
, "create proc %p\n",
299 pproc
= kthread_create(p9_poll_proc
, vpt
,
302 if (!IS_ERR(pproc
)) {
304 INIT_LIST_HEAD(&vpt
->mux_list
);
306 p9_mux_poll_task_num
++;
307 wake_up_process(vpt
->task
);
313 if (i
>= ARRAY_SIZE(p9_mux_poll_tasks
))
314 P9_DPRINTK(P9_DEBUG_ERROR
,
315 "warning: no free poll slots\n");
318 n
= (p9_mux_num
+ 1) / p9_mux_poll_task_num
+
319 ((p9_mux_num
+ 1) % p9_mux_poll_task_num
? 1 : 0);
322 for (i
= 0; i
< ARRAY_SIZE(p9_mux_poll_tasks
); i
++) {
323 vpt
= &p9_mux_poll_tasks
[i
];
324 if (vpt
->task
!= NULL
) {
326 if (vpt
->muxnum
< n
) {
327 P9_DPRINTK(P9_DEBUG_MUX
, "put in proc %d\n", i
);
328 list_add(&m
->mux_list
, &vpt
->mux_list
);
331 memset(&m
->poll_waddr
, 0,
332 sizeof(m
->poll_waddr
));
333 init_poll_funcptr(&m
->pt
, p9_pollwait
);
339 if (i
>= ARRAY_SIZE(p9_mux_poll_tasks
)) {
340 if (vptlast
== NULL
) {
341 mutex_unlock(&p9_mux_task_lock
);
345 P9_DPRINTK(P9_DEBUG_MUX
, "put in proc %d\n", i
);
346 list_add(&m
->mux_list
, &vptlast
->mux_list
);
348 m
->poll_task
= vptlast
;
349 memset(&m
->poll_waddr
, 0, sizeof(m
->poll_waddr
));
350 init_poll_funcptr(&m
->pt
, p9_pollwait
);
354 mutex_unlock(&p9_mux_task_lock
);
359 static void p9_mux_poll_stop(struct p9_conn
*m
)
362 struct p9_mux_poll_task
*vpt
;
364 mutex_lock(&p9_mux_task_lock
);
366 list_del(&m
->mux_list
);
367 for (i
= 0; i
< ARRAY_SIZE(m
->poll_waddr
); i
++) {
368 if (m
->poll_waddr
[i
] != NULL
) {
369 remove_wait_queue(m
->poll_waddr
[i
], &m
->poll_wait
[i
]);
370 m
->poll_waddr
[i
] = NULL
;
375 P9_DPRINTK(P9_DEBUG_MUX
, "destroy proc %p\n", vpt
);
376 kthread_stop(vpt
->task
);
378 p9_mux_poll_task_num
--;
381 mutex_unlock(&p9_mux_task_lock
);
385 * p9_conn_create - allocate and initialize the per-session mux data
386 * @trans: transport structure
388 * Note: Creates the polling task if this is the first session.
391 static struct p9_conn
*p9_conn_create(struct p9_trans
*trans
)
396 P9_DPRINTK(P9_DEBUG_MUX
, "transport %p msize %d\n", trans
,
398 m
= kzalloc(sizeof(struct p9_conn
), GFP_KERNEL
);
400 return ERR_PTR(-ENOMEM
);
402 spin_lock_init(&m
->lock
);
403 INIT_LIST_HEAD(&m
->mux_list
);
404 m
->msize
= trans
->msize
;
405 m
->extended
= trans
->extended
;
407 m
->tagpool
= p9_idpool_create();
408 if (IS_ERR(m
->tagpool
)) {
410 return ERR_PTR(-ENOMEM
);
413 INIT_LIST_HEAD(&m
->req_list
);
414 INIT_LIST_HEAD(&m
->unsent_req_list
);
415 INIT_WORK(&m
->rq
, p9_read_work
);
416 INIT_WORK(&m
->wq
, p9_write_work
);
417 n
= p9_mux_poll_start(m
);
423 n
= p9_fd_poll(trans
, &m
->pt
);
425 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p can read\n", m
);
426 set_bit(Rpending
, &m
->wsched
);
430 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p can write\n", m
);
431 set_bit(Wpending
, &m
->wsched
);
434 for (i
= 0; i
< ARRAY_SIZE(m
->poll_waddr
); i
++) {
435 if (IS_ERR(m
->poll_waddr
[i
])) {
438 return (void *)m
->poll_waddr
; /* the error code */
446 * p9_mux_destroy - cancels all pending requests and frees mux resources
451 static void p9_conn_destroy(struct p9_conn
*m
)
453 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p prev %p next %p\n", m
,
454 m
->mux_list
.prev
, m
->mux_list
.next
);
457 cancel_work_sync(&m
->rq
);
458 cancel_work_sync(&m
->wq
);
460 p9_conn_cancel(m
, -ECONNRESET
);
463 p9_idpool_destroy(m
->tagpool
);
468 * p9_pollwait - add poll task to the wait queue
469 * @filp: file pointer being polled
470 * @wait_address: wait_q to block on
473 * called by files poll operation to add v9fs-poll task to files wait queue
477 p9_pollwait(struct file
*filp
, wait_queue_head_t
*wait_address
, poll_table
*p
)
482 m
= container_of(p
, struct p9_conn
, pt
);
483 for (i
= 0; i
< ARRAY_SIZE(m
->poll_waddr
); i
++)
484 if (m
->poll_waddr
[i
] == NULL
)
487 if (i
>= ARRAY_SIZE(m
->poll_waddr
)) {
488 P9_DPRINTK(P9_DEBUG_ERROR
, "not enough wait_address slots\n");
492 m
->poll_waddr
[i
] = wait_address
;
495 P9_DPRINTK(P9_DEBUG_ERROR
, "no wait_address\n");
496 m
->poll_waddr
[i
] = ERR_PTR(-EIO
);
500 init_waitqueue_entry(&m
->poll_wait
[i
], m
->poll_task
->task
);
501 add_wait_queue(wait_address
, &m
->poll_wait
[i
]);
505 * p9_poll_mux - polls a mux and schedules read or write works if necessary
506 * @m: connection to poll
510 static void p9_poll_mux(struct p9_conn
*m
)
517 n
= p9_fd_poll(m
->trans
, NULL
);
518 if (n
< 0 || n
& (POLLERR
| POLLHUP
| POLLNVAL
)) {
519 P9_DPRINTK(P9_DEBUG_MUX
, "error mux %p err %d\n", m
, n
);
522 p9_conn_cancel(m
, n
);
526 set_bit(Rpending
, &m
->wsched
);
527 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p can read\n", m
);
528 if (!test_and_set_bit(Rworksched
, &m
->wsched
)) {
529 P9_DPRINTK(P9_DEBUG_MUX
, "schedule read work %p\n", m
);
530 queue_work(p9_mux_wq
, &m
->rq
);
535 set_bit(Wpending
, &m
->wsched
);
536 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p can write\n", m
);
537 if ((m
->wsize
|| !list_empty(&m
->unsent_req_list
))
538 && !test_and_set_bit(Wworksched
, &m
->wsched
)) {
539 P9_DPRINTK(P9_DEBUG_MUX
, "schedule write work %p\n", m
);
540 queue_work(p9_mux_wq
, &m
->wq
);
546 * p9_poll_proc - poll worker thread
547 * @a: thread state and arguments
549 * polls all v9fs transports for new events and queues the appropriate
550 * work to the work queue
554 static int p9_poll_proc(void *a
)
556 struct p9_conn
*m
, *mtmp
;
557 struct p9_mux_poll_task
*vpt
;
560 P9_DPRINTK(P9_DEBUG_MUX
, "start %p %p\n", current
, vpt
);
561 while (!kthread_should_stop()) {
562 set_current_state(TASK_INTERRUPTIBLE
);
564 list_for_each_entry_safe(m
, mtmp
, &vpt
->mux_list
, mux_list
) {
568 P9_DPRINTK(P9_DEBUG_MUX
, "sleeping...\n");
569 schedule_timeout(SCHED_TIMEOUT
* HZ
);
572 __set_current_state(TASK_RUNNING
);
573 P9_DPRINTK(P9_DEBUG_MUX
, "finish\n");
578 * p9_write_work - called when a transport can send some data
579 * @work: container for work to be done
583 static void p9_write_work(struct work_struct
*work
)
589 m
= container_of(work
, struct p9_conn
, wq
);
592 clear_bit(Wworksched
, &m
->wsched
);
597 if (list_empty(&m
->unsent_req_list
)) {
598 clear_bit(Wworksched
, &m
->wsched
);
604 req
= list_entry(m
->unsent_req_list
.next
, struct p9_req
,
606 list_move_tail(&req
->req_list
, &m
->req_list
);
607 if (req
->err
== ERREQFLUSH
)
610 m
->wbuf
= req
->tcall
->sdata
;
611 m
->wsize
= req
->tcall
->size
;
613 spin_unlock(&m
->lock
);
616 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p pos %d size %d\n", m
, m
->wpos
,
618 clear_bit(Wpending
, &m
->wsched
);
619 err
= p9_fd_write(m
->trans
, m
->wbuf
+ m
->wpos
, m
->wsize
- m
->wpos
);
620 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p sent %d bytes\n", m
, err
);
621 if (err
== -EAGAIN
) {
622 clear_bit(Wworksched
, &m
->wsched
);
634 if (m
->wpos
== m
->wsize
)
635 m
->wpos
= m
->wsize
= 0;
637 if (m
->wsize
== 0 && !list_empty(&m
->unsent_req_list
)) {
638 if (test_and_clear_bit(Wpending
, &m
->wsched
))
641 n
= p9_fd_poll(m
->trans
, NULL
);
644 P9_DPRINTK(P9_DEBUG_MUX
, "schedule write work %p\n", m
);
645 queue_work(p9_mux_wq
, &m
->wq
);
647 clear_bit(Wworksched
, &m
->wsched
);
649 clear_bit(Wworksched
, &m
->wsched
);
654 p9_conn_cancel(m
, err
);
655 clear_bit(Wworksched
, &m
->wsched
);
658 static void process_request(struct p9_conn
*m
, struct p9_req
*req
)
661 struct p9_str
*ename
;
663 if (!req
->err
&& req
->rcall
->id
== P9_RERROR
) {
664 ecode
= req
->rcall
->params
.rerror
.errno
;
665 ename
= &req
->rcall
->params
.rerror
.error
;
667 P9_DPRINTK(P9_DEBUG_MUX
, "Rerror %.*s\n", ename
->len
,
674 req
->err
= p9_errstr2errno(ename
->str
, ename
->len
);
676 /* string match failed */
678 PRINT_FCALL_ERROR("unknown error", req
->rcall
);
679 req
->err
= -ESERVERFAULT
;
682 } else if (req
->tcall
&& req
->rcall
->id
!= req
->tcall
->id
+ 1) {
683 P9_DPRINTK(P9_DEBUG_ERROR
,
684 "fcall mismatch: expected %d, got %d\n",
685 req
->tcall
->id
+ 1, req
->rcall
->id
);
692 * p9_read_work - called when there is some data to be read from a transport
693 * @work: container of work to be done
697 static void p9_read_work(struct work_struct
*work
)
701 struct p9_req
*req
, *rptr
, *rreq
;
702 struct p9_fcall
*rcall
;
705 m
= container_of(work
, struct p9_conn
, rq
);
711 P9_DPRINTK(P9_DEBUG_MUX
, "start mux %p pos %d\n", m
, m
->rpos
);
715 kmalloc(sizeof(struct p9_fcall
) + m
->msize
, GFP_KERNEL
);
721 m
->rbuf
= (char *)m
->rcall
+ sizeof(struct p9_fcall
);
725 clear_bit(Rpending
, &m
->wsched
);
726 err
= p9_fd_read(m
->trans
, m
->rbuf
+ m
->rpos
, m
->msize
- m
->rpos
);
727 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p got %d bytes\n", m
, err
);
728 if (err
== -EAGAIN
) {
729 clear_bit(Rworksched
, &m
->wsched
);
737 while (m
->rpos
> 4) {
738 n
= le32_to_cpu(*(__le32
*) m
->rbuf
);
740 P9_DPRINTK(P9_DEBUG_ERROR
,
741 "requested packet size too big: %d\n", n
);
750 p9_deserialize_fcall(m
->rbuf
, n
, m
->rcall
, m
->extended
);
754 #ifdef CONFIG_NET_9P_DEBUG
755 if ((p9_debug_level
&P9_DEBUG_FCALL
) == P9_DEBUG_FCALL
) {
758 p9_printfcall(buf
, sizeof(buf
), m
->rcall
,
760 printk(KERN_NOTICE
">>> %p %s\n", m
, buf
);
767 m
->rcall
= kmalloc(sizeof(struct p9_fcall
) + m
->msize
,
774 m
->rbuf
= (char *)m
->rcall
+ sizeof(struct p9_fcall
);
775 memmove(m
->rbuf
, rbuf
+ n
, m
->rpos
- n
);
783 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p fcall id %d tag %d\n", m
,
784 rcall
->id
, rcall
->tag
);
788 list_for_each_entry_safe(rreq
, rptr
, &m
->req_list
, req_list
) {
789 if (rreq
->tag
== rcall
->tag
) {
791 if (req
->flush
!= Flushing
)
792 list_del(&req
->req_list
);
796 spin_unlock(&m
->lock
);
800 process_request(m
, req
);
802 if (req
->flush
!= Flushing
) {
804 (*req
->cb
) (req
, req
->cba
);
809 if (err
>= 0 && rcall
->id
!= P9_RFLUSH
)
810 P9_DPRINTK(P9_DEBUG_ERROR
,
811 "unexpected response mux %p id %d tag %d\n",
812 m
, rcall
->id
, rcall
->tag
);
817 if (!list_empty(&m
->req_list
)) {
818 if (test_and_clear_bit(Rpending
, &m
->wsched
))
821 n
= p9_fd_poll(m
->trans
, NULL
);
824 P9_DPRINTK(P9_DEBUG_MUX
, "schedule read work %p\n", m
);
825 queue_work(p9_mux_wq
, &m
->rq
);
827 clear_bit(Rworksched
, &m
->wsched
);
829 clear_bit(Rworksched
, &m
->wsched
);
834 p9_conn_cancel(m
, err
);
835 clear_bit(Rworksched
, &m
->wsched
);
839 * p9_send_request - send 9P request
840 * The function can sleep until the request is scheduled for sending.
841 * The function can be interrupted. Return from the function is not
842 * a guarantee that the request is sent successfully. Can return errors
843 * that can be retrieved by PTR_ERR macros.
846 * @tc: request to be sent
847 * @cb: callback function to call when response is received
848 * @cba: parameter to pass to the callback function
852 static struct p9_req
*p9_send_request(struct p9_conn
*m
,
854 p9_conn_req_callback cb
, void *cba
)
859 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p task %p tcall %p id %d\n", m
, current
,
862 return ERR_PTR(m
->err
);
864 req
= kmalloc(sizeof(struct p9_req
), GFP_KERNEL
);
866 return ERR_PTR(-ENOMEM
);
868 if (tc
->id
== P9_TVERSION
)
871 n
= p9_mux_get_tag(m
);
874 return ERR_PTR(-ENOMEM
);
878 #ifdef CONFIG_NET_9P_DEBUG
879 if ((p9_debug_level
&P9_DEBUG_FCALL
) == P9_DEBUG_FCALL
) {
882 p9_printfcall(buf
, sizeof(buf
), tc
, m
->extended
);
883 printk(KERN_NOTICE
"<<< %p %s\n", m
, buf
);
887 spin_lock_init(&req
->lock
);
897 list_add_tail(&req
->req_list
, &m
->unsent_req_list
);
898 spin_unlock(&m
->lock
);
900 if (test_and_clear_bit(Wpending
, &m
->wsched
))
903 n
= p9_fd_poll(m
->trans
, NULL
);
905 if (n
& POLLOUT
&& !test_and_set_bit(Wworksched
, &m
->wsched
))
906 queue_work(p9_mux_wq
, &m
->wq
);
911 static void p9_mux_free_request(struct p9_conn
*m
, struct p9_req
*req
)
913 p9_mux_put_tag(m
, req
->tag
);
917 static void p9_mux_flush_cb(struct p9_req
*freq
, void *a
)
921 struct p9_req
*req
, *rreq
, *rptr
;
924 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p tc %p rc %p err %d oldtag %d\n", m
,
925 freq
->tcall
, freq
->rcall
, freq
->err
,
926 freq
->tcall
->params
.tflush
.oldtag
);
929 tag
= freq
->tcall
->params
.tflush
.oldtag
;
931 list_for_each_entry_safe(rreq
, rptr
, &m
->req_list
, req_list
) {
932 if (rreq
->tag
== tag
) {
934 list_del(&req
->req_list
);
938 spin_unlock(&m
->lock
);
941 spin_lock(&req
->lock
);
942 req
->flush
= Flushed
;
943 spin_unlock(&req
->lock
);
946 (*req
->cb
) (req
, req
->cba
);
953 p9_mux_free_request(m
, freq
);
957 p9_mux_flush_request(struct p9_conn
*m
, struct p9_req
*req
)
960 struct p9_req
*rreq
, *rptr
;
962 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p req %p tag %d\n", m
, req
, req
->tag
);
964 /* if a response was received for a request, do nothing */
965 spin_lock(&req
->lock
);
966 if (req
->rcall
|| req
->err
) {
967 spin_unlock(&req
->lock
);
968 P9_DPRINTK(P9_DEBUG_MUX
,
969 "mux %p req %p response already received\n", m
, req
);
973 req
->flush
= Flushing
;
974 spin_unlock(&req
->lock
);
977 /* if the request is not sent yet, just remove it from the list */
978 list_for_each_entry_safe(rreq
, rptr
, &m
->unsent_req_list
, req_list
) {
979 if (rreq
->tag
== req
->tag
) {
980 P9_DPRINTK(P9_DEBUG_MUX
,
981 "mux %p req %p request is not sent yet\n", m
, req
);
982 list_del(&rreq
->req_list
);
983 req
->flush
= Flushed
;
984 spin_unlock(&m
->lock
);
986 (*req
->cb
) (req
, req
->cba
);
990 spin_unlock(&m
->lock
);
992 clear_thread_flag(TIF_SIGPENDING
);
993 fc
= p9_create_tflush(req
->tag
);
994 p9_send_request(m
, fc
, p9_mux_flush_cb
, m
);
999 p9_conn_rpc_cb(struct p9_req
*req
, void *a
)
1001 struct p9_mux_rpc
*r
;
1003 P9_DPRINTK(P9_DEBUG_MUX
, "req %p r %p\n", req
, a
);
1005 r
->rcall
= req
->rcall
;
1008 if (req
->flush
!= None
&& !req
->err
)
1009 r
->err
= -ERESTARTSYS
;
1011 wake_up(&r
->wqueue
);
1015 * p9_fd_rpc- sends 9P request and waits until a response is available.
1016 * The function can be interrupted.
1017 * @t: transport data
1018 * @tc: request to be sent
1019 * @rc: pointer where a pointer to the response is stored
1024 p9_fd_rpc(struct p9_trans
*t
, struct p9_fcall
*tc
, struct p9_fcall
**rc
)
1026 struct p9_trans_fd
*p
= t
->priv
;
1027 struct p9_conn
*m
= p
->conn
;
1028 int err
, sigpending
;
1029 unsigned long flags
;
1031 struct p9_mux_rpc r
;
1037 init_waitqueue_head(&r
.wqueue
);
1043 if (signal_pending(current
)) {
1045 clear_thread_flag(TIF_SIGPENDING
);
1048 req
= p9_send_request(m
, tc
, p9_conn_rpc_cb
, &r
);
1051 P9_DPRINTK(P9_DEBUG_MUX
, "error %d\n", err
);
1055 err
= wait_event_interruptible(r
.wqueue
, r
.rcall
!= NULL
|| r
.err
< 0);
1059 if (err
== -ERESTARTSYS
&& m
->trans
->status
== Connected
1061 if (p9_mux_flush_request(m
, req
)) {
1062 /* wait until we get response of the flush message */
1064 clear_thread_flag(TIF_SIGPENDING
);
1065 err
= wait_event_interruptible(r
.wqueue
,
1067 } while (!r
.rcall
&& !r
.err
&& err
== -ERESTARTSYS
&&
1068 m
->trans
->status
== Connected
&& !m
->err
);
1076 spin_lock_irqsave(¤t
->sighand
->siglock
, flags
);
1077 recalc_sigpending();
1078 spin_unlock_irqrestore(¤t
->sighand
->siglock
, flags
);
1086 p9_mux_free_request(m
, req
);
1095 * p9_conn_rpcnb - sends 9P request without waiting for response.
1097 * @tc: request to be sent
1098 * @cb: callback function to be called when response arrives
1099 * @a: value to pass to the callback function
1103 int p9_conn_rpcnb(struct p9_conn
*m
, struct p9_fcall
*tc
,
1104 p9_conn_req_callback cb
, void *a
)
1109 req
= p9_send_request(m
, tc
, cb
, a
);
1112 P9_DPRINTK(P9_DEBUG_MUX
, "error %d\n", err
);
1113 return PTR_ERR(req
);
1116 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p tc %p tag %d\n", m
, tc
, req
->tag
);
1119 #endif /* P9_NONBLOCK */
1122 * p9_conn_cancel - cancel all pending requests with error
1128 void p9_conn_cancel(struct p9_conn
*m
, int err
)
1130 struct p9_req
*req
, *rtmp
;
1131 LIST_HEAD(cancel_list
);
1133 P9_DPRINTK(P9_DEBUG_ERROR
, "mux %p err %d\n", m
, err
);
1135 spin_lock(&m
->lock
);
1136 list_for_each_entry_safe(req
, rtmp
, &m
->req_list
, req_list
) {
1137 list_move(&req
->req_list
, &cancel_list
);
1139 list_for_each_entry_safe(req
, rtmp
, &m
->unsent_req_list
, req_list
) {
1140 list_move(&req
->req_list
, &cancel_list
);
1142 spin_unlock(&m
->lock
);
1144 list_for_each_entry_safe(req
, rtmp
, &cancel_list
, req_list
) {
1145 list_del(&req
->req_list
);
1150 (*req
->cb
) (req
, req
->cba
);
1157 * parse_options - parse mount options into session structure
1158 * @options: options string passed from mount
1159 * @opts: transport-specific structure to parse options into
1161 * Returns 0 upon success, -ERRNO upon failure
1164 static int parse_opts(char *params
, struct p9_fd_opts
*opts
)
1167 substring_t args
[MAX_OPT_ARGS
];
1172 opts
->port
= P9_PORT
;
1179 options
= kstrdup(params
, GFP_KERNEL
);
1181 P9_DPRINTK(P9_DEBUG_ERROR
,
1182 "failed to allocate copy of option string\n");
1186 while ((p
= strsep(&options
, ",")) != NULL
) {
1191 token
= match_token(p
, tokens
, args
);
1192 r
= match_int(&args
[0], &option
);
1194 P9_DPRINTK(P9_DEBUG_ERROR
,
1195 "integer field, but no integer?\n");
1201 opts
->port
= option
;
1217 static int p9_fd_open(struct p9_trans
*trans
, int rfd
, int wfd
)
1219 struct p9_trans_fd
*ts
= kmalloc(sizeof(struct p9_trans_fd
),
1226 if (!ts
->rd
|| !ts
->wr
) {
1236 trans
->status
= Connected
;
1241 static int p9_socket_open(struct p9_trans
*trans
, struct socket
*csocket
)
1245 csocket
->sk
->sk_allocation
= GFP_NOIO
;
1246 fd
= sock_map_fd(csocket
, 0);
1248 P9_EPRINTK(KERN_ERR
, "p9_socket_open: failed to map fd\n");
1252 ret
= p9_fd_open(trans
, fd
, fd
);
1254 P9_EPRINTK(KERN_ERR
, "p9_socket_open: failed to open fd\n");
1255 sockfd_put(csocket
);
1259 ((struct p9_trans_fd
*)trans
->priv
)->rd
->f_flags
|= O_NONBLOCK
;
1265 * p9_fd_read- read from a fd
1266 * @trans: transport instance state
1267 * @v: buffer to receive data into
1268 * @len: size of receive buffer
1272 static int p9_fd_read(struct p9_trans
*trans
, void *v
, int len
)
1275 struct p9_trans_fd
*ts
= NULL
;
1277 if (trans
&& trans
->status
!= Disconnected
)
1283 if (!(ts
->rd
->f_flags
& O_NONBLOCK
))
1284 P9_DPRINTK(P9_DEBUG_ERROR
, "blocking read ...\n");
1286 ret
= kernel_read(ts
->rd
, ts
->rd
->f_pos
, v
, len
);
1287 if (ret
<= 0 && ret
!= -ERESTARTSYS
&& ret
!= -EAGAIN
)
1288 trans
->status
= Disconnected
;
1293 * p9_fd_write - write to a socket
1294 * @trans: transport instance state
1295 * @v: buffer to send data from
1296 * @len: size of send buffer
1300 static int p9_fd_write(struct p9_trans
*trans
, void *v
, int len
)
1304 struct p9_trans_fd
*ts
= NULL
;
1306 if (trans
&& trans
->status
!= Disconnected
)
1312 if (!(ts
->wr
->f_flags
& O_NONBLOCK
))
1313 P9_DPRINTK(P9_DEBUG_ERROR
, "blocking write ...\n");
1317 /* The cast to a user pointer is valid due to the set_fs() */
1318 ret
= vfs_write(ts
->wr
, (void __user
*)v
, len
, &ts
->wr
->f_pos
);
1321 if (ret
<= 0 && ret
!= -ERESTARTSYS
&& ret
!= -EAGAIN
)
1322 trans
->status
= Disconnected
;
1327 p9_fd_poll(struct p9_trans
*trans
, struct poll_table_struct
*pt
)
1330 struct p9_trans_fd
*ts
= NULL
;
1332 if (trans
&& trans
->status
== Connected
)
1338 if (!ts
->rd
->f_op
|| !ts
->rd
->f_op
->poll
)
1341 if (!ts
->wr
->f_op
|| !ts
->wr
->f_op
->poll
)
1344 ret
= ts
->rd
->f_op
->poll(ts
->rd
, pt
);
1348 if (ts
->rd
!= ts
->wr
) {
1349 n
= ts
->wr
->f_op
->poll(ts
->wr
, pt
);
1352 ret
= (ret
& ~POLLOUT
) | (n
& ~POLLIN
);
1359 * p9_fd_close - shutdown socket
1360 * @trans: private socket structure
1364 static void p9_fd_close(struct p9_trans
*trans
)
1366 struct p9_trans_fd
*ts
;
1371 ts
= xchg(&trans
->priv
, NULL
);
1376 p9_conn_destroy(ts
->conn
);
1378 trans
->status
= Disconnected
;
1387 * stolen from NFS - maybe should be made a generic function?
/**
 * valid_ipaddr4 - validate a dotted-quad IPv4 address string
 * @buf: NUL-terminated address string, e.g. "192.168.0.1"
 *
 * Stolen from NFS - maybe should be made a generic function?
 *
 * Returns 0 when @buf parses as four octets each in [0, 255],
 * -EINVAL otherwise. Fix: the original only checked the upper bound,
 * so negative octets such as "-1.2.3.4" were accepted; reject any
 * octet outside 0..255.
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] < 0 || in[count] > 255)
			return -EINVAL;
	}
	return 0;
}
1403 static struct p9_trans
*
1404 p9_trans_create_tcp(const char *addr
, char *args
, int msize
, unsigned char dotu
)
1407 struct p9_trans
*trans
;
1408 struct socket
*csocket
;
1409 struct sockaddr_in sin_server
;
1410 struct p9_fd_opts opts
;
1411 struct p9_trans_fd
*p
;
1413 err
= parse_opts(args
, &opts
);
1415 return ERR_PTR(err
);
1417 if (valid_ipaddr4(addr
) < 0)
1418 return ERR_PTR(-EINVAL
);
1421 trans
= kmalloc(sizeof(struct p9_trans
), GFP_KERNEL
);
1423 return ERR_PTR(-ENOMEM
);
1424 trans
->msize
= msize
;
1425 trans
->extended
= dotu
;
1426 trans
->rpc
= p9_fd_rpc
;
1427 trans
->close
= p9_fd_close
;
1429 sin_server
.sin_family
= AF_INET
;
1430 sin_server
.sin_addr
.s_addr
= in_aton(addr
);
1431 sin_server
.sin_port
= htons(opts
.port
);
1432 sock_create_kern(PF_INET
, SOCK_STREAM
, IPPROTO_TCP
, &csocket
);
1435 P9_EPRINTK(KERN_ERR
, "p9_trans_tcp: problem creating socket\n");
1440 err
= csocket
->ops
->connect(csocket
,
1441 (struct sockaddr
*)&sin_server
,
1442 sizeof(struct sockaddr_in
), 0);
1444 P9_EPRINTK(KERN_ERR
,
1445 "p9_trans_tcp: problem connecting socket to %s\n",
1450 err
= p9_socket_open(trans
, csocket
);
1454 p
= (struct p9_trans_fd
*) trans
->priv
;
1455 p
->conn
= p9_conn_create(trans
);
1456 if (IS_ERR(p
->conn
)) {
1457 err
= PTR_ERR(p
->conn
);
1466 sock_release(csocket
);
1469 return ERR_PTR(err
);
1472 static struct p9_trans
*
1473 p9_trans_create_unix(const char *addr
, char *args
, int msize
,
1477 struct socket
*csocket
;
1478 struct sockaddr_un sun_server
;
1479 struct p9_trans
*trans
;
1480 struct p9_trans_fd
*p
;
1483 trans
= kmalloc(sizeof(struct p9_trans
), GFP_KERNEL
);
1485 return ERR_PTR(-ENOMEM
);
1487 trans
->rpc
= p9_fd_rpc
;
1488 trans
->close
= p9_fd_close
;
1490 if (strlen(addr
) > UNIX_PATH_MAX
) {
1491 P9_EPRINTK(KERN_ERR
, "p9_trans_unix: address too long: %s\n",
1493 err
= -ENAMETOOLONG
;
1497 sun_server
.sun_family
= PF_UNIX
;
1498 strcpy(sun_server
.sun_path
, addr
);
1499 sock_create_kern(PF_UNIX
, SOCK_STREAM
, 0, &csocket
);
1500 err
= csocket
->ops
->connect(csocket
, (struct sockaddr
*)&sun_server
,
1501 sizeof(struct sockaddr_un
) - 1, 0);
1503 P9_EPRINTK(KERN_ERR
,
1504 "p9_trans_unix: problem connecting socket: %s: %d\n",
1509 err
= p9_socket_open(trans
, csocket
);
1513 trans
->msize
= msize
;
1514 trans
->extended
= dotu
;
1515 p
= (struct p9_trans_fd
*) trans
->priv
;
1516 p
->conn
= p9_conn_create(trans
);
1517 if (IS_ERR(p
->conn
)) {
1518 err
= PTR_ERR(p
->conn
);
1527 sock_release(csocket
);
1530 return ERR_PTR(err
);
1533 static struct p9_trans
*
1534 p9_trans_create_fd(const char *name
, char *args
, int msize
,
1535 unsigned char extended
)
1538 struct p9_trans
*trans
;
1539 struct p9_fd_opts opts
;
1540 struct p9_trans_fd
*p
;
1542 parse_opts(args
, &opts
);
1544 if (opts
.rfd
== ~0 || opts
.wfd
== ~0) {
1545 printk(KERN_ERR
"v9fs: Insufficient options for proto=fd\n");
1546 return ERR_PTR(-ENOPROTOOPT
);
1549 trans
= kmalloc(sizeof(struct p9_trans
), GFP_KERNEL
);
1551 return ERR_PTR(-ENOMEM
);
1553 trans
->rpc
= p9_fd_rpc
;
1554 trans
->close
= p9_fd_close
;
1556 err
= p9_fd_open(trans
, opts
.rfd
, opts
.wfd
);
1560 trans
->msize
= msize
;
1561 trans
->extended
= extended
;
1562 p
= (struct p9_trans_fd
*) trans
->priv
;
1563 p
->conn
= p9_conn_create(trans
);
1564 if (IS_ERR(p
->conn
)) {
1565 err
= PTR_ERR(p
->conn
);
1574 return ERR_PTR(err
);
1577 static struct p9_trans_module p9_tcp_trans
= {
1579 .maxsize
= MAX_SOCK_BUF
,
1581 .create
= p9_trans_create_tcp
,
1582 .owner
= THIS_MODULE
,
1585 static struct p9_trans_module p9_unix_trans
= {
1587 .maxsize
= MAX_SOCK_BUF
,
1589 .create
= p9_trans_create_unix
,
1590 .owner
= THIS_MODULE
,
1593 static struct p9_trans_module p9_fd_trans
= {
1595 .maxsize
= MAX_SOCK_BUF
,
1597 .create
= p9_trans_create_fd
,
1598 .owner
= THIS_MODULE
,
1601 int p9_trans_fd_init(void)
1605 for (i
= 0; i
< ARRAY_SIZE(p9_mux_poll_tasks
); i
++)
1606 p9_mux_poll_tasks
[i
].task
= NULL
;
1608 p9_mux_wq
= create_workqueue("v9fs");
1610 printk(KERN_WARNING
"v9fs: mux: creating workqueue failed\n");
1614 v9fs_register_trans(&p9_tcp_trans
);
1615 v9fs_register_trans(&p9_unix_trans
);
1616 v9fs_register_trans(&p9_fd_trans
);
1621 void p9_trans_fd_exit(void)
1623 v9fs_unregister_trans(&p9_tcp_trans
);
1624 v9fs_unregister_trans(&p9_unix_trans
);
1625 v9fs_unregister_trans(&p9_fd_trans
);
1627 destroy_workqueue(p9_mux_wq
);