/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;

static struct kmem_cache *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT	(30*HZ)
static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del_init(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}
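/*
 * Flush messages that have sat on the upcall queue too long without a
 * userspace daemon reading them: drop them with -ETIMEDOUT.  Runs from
 * rpciod as the delayed work armed in rpc_queue_upcall() below.
 */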
static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci =
		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}
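/*
 * rpc_queue_upcall - queue an upcall message for delivery to userspace
 *
 * The message is appended to the pipe queue and any reader blocked in
 * poll/read on the pipe is woken.  If no reader has the pipe open yet and
 * the pipe was created with RPC_PIPE_WAIT_FOR_OPEN, the message is still
 * queued, but a timeout is armed so it is discarded with -ETIMEDOUT if no
 * daemon shows up.  Illustrative caller sketch (the payload setup below is
 * an assumption, not code from this file):
 *
 *	struct rpc_pipe_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
 *	msg->data = payload;
 *	msg->len = payload_len;
 *	err = rpc_queue_upcall(pipe_dentry->d_inode, msg);
 */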
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}
static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}
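/*
 * Shut a pipe down: discard all queued and in-flight messages with -EPIPE,
 * detach the ops and owner so later opens/reads fail, and cancel any
 * pending upcall timeout work.
 */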
static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work_sync(&rpci->queue_timeout);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}
static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}
static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}
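/*
 * File operations for the pipe itself.  open/release keep per-pipe reader
 * and writer counts; once the last reader goes away, anything still queued
 * for userspace is discarded with -EAGAIN.
 */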
static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}
static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}
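/*
 * Read side of the pipe: hand the next queued upcall message to the pipe's
 * ->upcall() callback, which copies it to the user buffer.  A partially
 * copied message is parked in filp->private_data (and on the in_upcall
 * list) so a subsequent read can finish it; once fully copied, or on error,
 * the message is destroyed.
 */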
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}
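/*
 * Write side of the pipe: a downcall from the userspace daemon is passed
 * straight to the pipe's ->downcall() callback, or fails with -EPIPE if
 * the pipe has already been closed.
 */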
static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}
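/*
 * Poll support: the pipe is always writable; it is readable when a message
 * is queued (or partially read), and reports POLLERR|POLLHUP once closed.
 */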
static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_path.dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (filp->private_data || !list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}
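/*
 * FIONREAD reports how many bytes of upcall data are still waiting to be
 * read: everything queued on the pipe plus the unread remainder of any
 * partially copied message.
 */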
static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}
static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};
static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
	return 0;
}
static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			kref_get(&clnt->cl_kref);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}
static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}
static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};
/*
 * We have a single directory with 1 node in it.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};
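/*
 * rpc_get_mount()/rpc_put_mount() pin and release the internal rpc_pipefs
 * mount so in-kernel users can create entries without userspace having
 * mounted the filesystem itself.
 */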
struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}
static int rpc_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations rpc_dentry_operations = {
	.d_delete = rpc_delete_dentry,
};
static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	struct vfsmount *mnt;

	if (path[0] == '\0')
		return -ENOENT;

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
			       "pseudofilesystem \n", __FILE__, __FUNCTION__);
		return PTR_ERR(mnt);
	}

	if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}
static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_fop = &simple_dir_operations;
		inode->i_op = &simple_dir_inode_operations;
		inc_nlink(inode);
	default:
		break;
	}
	return inode;
}
/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent, int start, int eof)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		if (!dentry->d_inode ||
				dentry->d_inode->i_ino < start ||
				dentry->d_inode->i_ino >= eof)
			continue;
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (S_ISREG(dentry->d_inode->i_mode))
				simple_unlink(dir, dentry);
			else if (S_ISDIR(dentry->d_inode->i_mode))
				simple_rmdir(dir, dentry);
			d_delete(dentry);
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}
static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		dentry->d_op = &rpc_dentry_operations;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			inc_nlink(dir);
		d_add(dentry, inode);
		fsnotify_create(dir, dentry);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}
static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	inc_nlink(dir);
	fsnotify_mkdir(dir, dentry);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}
static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	error = simple_rmdir(dir, dentry);
	if (!error)
		d_delete(dentry);
	return error;
}
static struct dentry *
rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive)
{
	struct inode *dir = parent->d_inode;
	struct dentry *dentry;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name, parent, len);
	if (IS_ERR(dentry))
		goto out_err;
	if (!dentry->d_inode)
		dentry->d_op = &rpc_dentry_operations;
	else if (exclusive) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	return dentry;
}
static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
	if (IS_ERR(dentry))
		rpc_release_path(nd);
	return dentry;
}
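/*
 * rpc_mkdir - create a directory in rpc_pipefs for an RPC client and
 * populate it with the "info" file.  Returns the new dentry, with the
 * rpc_clnt stashed as the directory's private owner, or an ERR_PTR.
 */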
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}
int
rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
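/*
 * rpc_mkpipe - create a named upcall pipe under @parent and bind it to the
 * given pipe ops and owner.  If an identical pipe already exists it is
 * shared and its in-kernel user count is bumped; a mismatched existing
 * entry yields -EBUSY.  Illustrative caller sketch (the ops structure and
 * directory shown are assumptions, not code from this file):
 *
 *	static struct rpc_pipe_ops my_pipe_ops = {
 *		.upcall		= my_upcall,
 *		.downcall	= my_downcall,
 *		.destroy_msg	= my_destroy_msg,
 *	};
 *	dentry = rpc_mkpipe(clnt_dir, "my_pipe", ctx, &my_pipe_ops, 0);
 *	...
 *	rpc_unlink(dentry);
 */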
struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_create(parent, name, strlen(name), 0);
	if (IS_ERR(dentry))
		return dentry;
	dir = parent->d_inode;
	if (dentry->d_inode) {
		rpci = RPC_I(dentry->d_inode);
		if (rpci->private != private ||
				rpci->ops != ops ||
				rpci->flags != flags) {
			dput(dentry);
			dentry = ERR_PTR(-EBUSY);
		}
		rpci->nkern_readwriters++;
		goto out;
	}
	inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	rpci->nkern_readwriters = 1;
	fsnotify_create(dir, dentry);
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __FUNCTION__, parent->d_name.name, name,
			-ENOMEM);
	goto out;
}
int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error = 0;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
		if (!error)
			d_delete(dentry);
	}
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969
static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}
static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}
static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};
static void
init_once(struct kmem_cache *cachep, void *foo)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	inode_init_once(&rpci->vfs_inode);
	rpci->private = NULL;
	rpci->nreaders = 0;
	rpci->nwriters = 0;
	INIT_LIST_HEAD(&rpci->in_upcall);
	INIT_LIST_HEAD(&rpci->in_downcall);
	INIT_LIST_HEAD(&rpci->pipe);
	rpci->pipelen = 0;
	init_waitqueue_head(&rpci->waitq);
	INIT_DELAYED_WORK(&rpci->queue_timeout,
			rpc_timeout_upcall_queue);
	rpci->ops = NULL;
}
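/*
 * Called from the sunrpc module init/exit paths to set up the rpc_inode
 * slab cache and register (or tear down) the "rpc_pipefs" filesystem type.
 */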
int register_rpc_pipefs(void)
{
	int err;

	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	err = register_filesystem(&rpc_pipe_fs_type);
	if (err) {
		kmem_cache_destroy(rpc_inode_cachep);
		return err;
	}

	return 0;
}

void unregister_rpc_pipefs(void)
{
	kmem_cache_destroy(rpc_inode_cachep);
	unregister_filesystem(&rpc_pipe_fs_type);
}