2 * net/sunrpc/rpc_pipe.c
4 * Userland/kernel interface for rpcauth_gss.
5 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
8 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
11 #include <linux/config.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/string.h>
15 #include <linux/pagemap.h>
16 #include <linux/mount.h>
17 #include <linux/namei.h>
18 #include <linux/dnotify.h>
19 #include <linux/kernel.h>
21 #include <asm/ioctls.h>
23 #include <linux/poll.h>
24 #include <linux/wait.h>
25 #include <linux/seq_file.h>
27 #include <linux/sunrpc/clnt.h>
28 #include <linux/workqueue.h>
29 #include <linux/sunrpc/rpc_pipe_fs.h>
/* Mount point and pin count for the kernel-internal "rpc_pipefs" mount. */
static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;

/* Slab cache backing struct rpc_inode allocations (see rpc_alloc_inode). */
static kmem_cache_t *rpc_inode_cachep __read_mostly;

/* How long a queued upcall may sit with no reader before it is purged
 * with -ETIMEDOUT (see rpc_timeout_upcall_queue). */
#define RPC_UPCALL_TIMEOUT	(30*HZ)
41 static void rpc_purge_list(struct rpc_inode
*rpci
, struct list_head
*head
,
42 void (*destroy_msg
)(struct rpc_pipe_msg
*), int err
)
44 struct rpc_pipe_msg
*msg
;
49 msg
= list_entry(head
->next
, struct rpc_pipe_msg
, list
);
53 } while (!list_empty(head
));
54 wake_up(&rpci
->waitq
);
58 rpc_timeout_upcall_queue(void *data
)
61 struct rpc_inode
*rpci
= (struct rpc_inode
*)data
;
62 struct inode
*inode
= &rpci
->vfs_inode
;
63 void (*destroy_msg
)(struct rpc_pipe_msg
*);
65 spin_lock(&inode
->i_lock
);
66 if (rpci
->ops
== NULL
) {
67 spin_unlock(&inode
->i_lock
);
70 destroy_msg
= rpci
->ops
->destroy_msg
;
71 if (rpci
->nreaders
== 0) {
72 list_splice_init(&rpci
->pipe
, &free_list
);
75 spin_unlock(&inode
->i_lock
);
76 rpc_purge_list(rpci
, &free_list
, destroy_msg
, -ETIMEDOUT
);
80 rpc_queue_upcall(struct inode
*inode
, struct rpc_pipe_msg
*msg
)
82 struct rpc_inode
*rpci
= RPC_I(inode
);
85 spin_lock(&inode
->i_lock
);
86 if (rpci
->ops
== NULL
)
89 list_add_tail(&msg
->list
, &rpci
->pipe
);
90 rpci
->pipelen
+= msg
->len
;
92 } else if (rpci
->flags
& RPC_PIPE_WAIT_FOR_OPEN
) {
93 if (list_empty(&rpci
->pipe
))
94 schedule_delayed_work(&rpci
->queue_timeout
,
96 list_add_tail(&msg
->list
, &rpci
->pipe
);
97 rpci
->pipelen
+= msg
->len
;
101 spin_unlock(&inode
->i_lock
);
102 wake_up(&rpci
->waitq
);
107 rpc_inode_setowner(struct inode
*inode
, void *private)
109 RPC_I(inode
)->private = private;
113 rpc_close_pipes(struct inode
*inode
)
115 struct rpc_inode
*rpci
= RPC_I(inode
);
116 struct rpc_pipe_ops
*ops
;
118 mutex_lock(&inode
->i_mutex
);
121 LIST_HEAD(free_list
);
123 spin_lock(&inode
->i_lock
);
125 list_splice_init(&rpci
->in_upcall
, &free_list
);
126 list_splice_init(&rpci
->pipe
, &free_list
);
129 spin_unlock(&inode
->i_lock
);
130 rpc_purge_list(rpci
, &free_list
, ops
->destroy_msg
, -EPIPE
);
132 if (ops
->release_pipe
)
133 ops
->release_pipe(inode
);
134 cancel_delayed_work(&rpci
->queue_timeout
);
135 flush_scheduled_work();
137 rpc_inode_setowner(inode
, NULL
);
138 mutex_unlock(&inode
->i_mutex
);
141 static struct inode
*
142 rpc_alloc_inode(struct super_block
*sb
)
144 struct rpc_inode
*rpci
;
145 rpci
= (struct rpc_inode
*)kmem_cache_alloc(rpc_inode_cachep
, SLAB_KERNEL
);
148 return &rpci
->vfs_inode
;
152 rpc_destroy_inode(struct inode
*inode
)
154 kmem_cache_free(rpc_inode_cachep
, RPC_I(inode
));
158 rpc_pipe_open(struct inode
*inode
, struct file
*filp
)
160 struct rpc_inode
*rpci
= RPC_I(inode
);
163 mutex_lock(&inode
->i_mutex
);
164 if (rpci
->ops
!= NULL
) {
165 if (filp
->f_mode
& FMODE_READ
)
167 if (filp
->f_mode
& FMODE_WRITE
)
171 mutex_unlock(&inode
->i_mutex
);
176 rpc_pipe_release(struct inode
*inode
, struct file
*filp
)
178 struct rpc_inode
*rpci
= RPC_I(inode
);
179 struct rpc_pipe_msg
*msg
;
181 mutex_lock(&inode
->i_mutex
);
182 if (rpci
->ops
== NULL
)
184 msg
= (struct rpc_pipe_msg
*)filp
->private_data
;
186 spin_lock(&inode
->i_lock
);
187 msg
->errno
= -EAGAIN
;
188 list_del(&msg
->list
);
189 spin_unlock(&inode
->i_lock
);
190 rpci
->ops
->destroy_msg(msg
);
192 if (filp
->f_mode
& FMODE_WRITE
)
194 if (filp
->f_mode
& FMODE_READ
) {
196 if (rpci
->nreaders
== 0) {
197 LIST_HEAD(free_list
);
198 spin_lock(&inode
->i_lock
);
199 list_splice_init(&rpci
->pipe
, &free_list
);
201 spin_unlock(&inode
->i_lock
);
202 rpc_purge_list(rpci
, &free_list
,
203 rpci
->ops
->destroy_msg
, -EAGAIN
);
206 if (rpci
->ops
->release_pipe
)
207 rpci
->ops
->release_pipe(inode
);
209 mutex_unlock(&inode
->i_mutex
);
214 rpc_pipe_read(struct file
*filp
, char __user
*buf
, size_t len
, loff_t
*offset
)
216 struct inode
*inode
= filp
->f_dentry
->d_inode
;
217 struct rpc_inode
*rpci
= RPC_I(inode
);
218 struct rpc_pipe_msg
*msg
;
221 mutex_lock(&inode
->i_mutex
);
222 if (rpci
->ops
== NULL
) {
226 msg
= filp
->private_data
;
228 spin_lock(&inode
->i_lock
);
229 if (!list_empty(&rpci
->pipe
)) {
230 msg
= list_entry(rpci
->pipe
.next
,
233 list_move(&msg
->list
, &rpci
->in_upcall
);
234 rpci
->pipelen
-= msg
->len
;
235 filp
->private_data
= msg
;
238 spin_unlock(&inode
->i_lock
);
242 /* NOTE: it is up to the callback to update msg->copied */
243 res
= rpci
->ops
->upcall(filp
, msg
, buf
, len
);
244 if (res
< 0 || msg
->len
== msg
->copied
) {
245 filp
->private_data
= NULL
;
246 spin_lock(&inode
->i_lock
);
247 list_del(&msg
->list
);
248 spin_unlock(&inode
->i_lock
);
249 rpci
->ops
->destroy_msg(msg
);
252 mutex_unlock(&inode
->i_mutex
);
257 rpc_pipe_write(struct file
*filp
, const char __user
*buf
, size_t len
, loff_t
*offset
)
259 struct inode
*inode
= filp
->f_dentry
->d_inode
;
260 struct rpc_inode
*rpci
= RPC_I(inode
);
263 mutex_lock(&inode
->i_mutex
);
265 if (rpci
->ops
!= NULL
)
266 res
= rpci
->ops
->downcall(filp
, buf
, len
);
267 mutex_unlock(&inode
->i_mutex
);
272 rpc_pipe_poll(struct file
*filp
, struct poll_table_struct
*wait
)
274 struct rpc_inode
*rpci
;
275 unsigned int mask
= 0;
277 rpci
= RPC_I(filp
->f_dentry
->d_inode
);
278 poll_wait(filp
, &rpci
->waitq
, wait
);
280 mask
= POLLOUT
| POLLWRNORM
;
281 if (rpci
->ops
== NULL
)
282 mask
|= POLLERR
| POLLHUP
;
283 if (!list_empty(&rpci
->pipe
))
284 mask
|= POLLIN
| POLLRDNORM
;
289 rpc_pipe_ioctl(struct inode
*ino
, struct file
*filp
,
290 unsigned int cmd
, unsigned long arg
)
292 struct rpc_inode
*rpci
= RPC_I(filp
->f_dentry
->d_inode
);
297 if (rpci
->ops
== NULL
)
300 if (filp
->private_data
) {
301 struct rpc_pipe_msg
*msg
;
302 msg
= (struct rpc_pipe_msg
*)filp
->private_data
;
303 len
+= msg
->len
- msg
->copied
;
305 return put_user(len
, (int __user
*)arg
);
311 static struct file_operations rpc_pipe_fops
= {
312 .owner
= THIS_MODULE
,
314 .read
= rpc_pipe_read
,
315 .write
= rpc_pipe_write
,
316 .poll
= rpc_pipe_poll
,
317 .ioctl
= rpc_pipe_ioctl
,
318 .open
= rpc_pipe_open
,
319 .release
= rpc_pipe_release
,
323 rpc_show_info(struct seq_file
*m
, void *v
)
325 struct rpc_clnt
*clnt
= m
->private;
327 seq_printf(m
, "RPC server: %s\n", clnt
->cl_server
);
328 seq_printf(m
, "service: %s (%d) version %d\n", clnt
->cl_protname
,
329 clnt
->cl_prog
, clnt
->cl_vers
);
330 seq_printf(m
, "address: %u.%u.%u.%u\n",
331 NIPQUAD(clnt
->cl_xprt
->addr
.sin_addr
.s_addr
));
332 seq_printf(m
, "protocol: %s\n",
333 clnt
->cl_xprt
->prot
== IPPROTO_UDP
? "udp" : "tcp");
338 rpc_info_open(struct inode
*inode
, struct file
*file
)
340 struct rpc_clnt
*clnt
;
341 int ret
= single_open(file
, rpc_show_info
, NULL
);
344 struct seq_file
*m
= file
->private_data
;
345 mutex_lock(&inode
->i_mutex
);
346 clnt
= RPC_I(inode
)->private;
348 atomic_inc(&clnt
->cl_users
);
351 single_release(inode
, file
);
354 mutex_unlock(&inode
->i_mutex
);
360 rpc_info_release(struct inode
*inode
, struct file
*file
)
362 struct seq_file
*m
= file
->private_data
;
363 struct rpc_clnt
*clnt
= (struct rpc_clnt
*)m
->private;
366 rpc_release_client(clnt
);
367 return single_release(inode
, file
);
370 static struct file_operations rpc_info_operations
= {
371 .owner
= THIS_MODULE
,
372 .open
= rpc_info_open
,
375 .release
= rpc_info_release
,
/*
 * We have a single directory with 1 node in it.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;		/* entry name within the parent dir */
	struct file_operations *i_fop;	/* NULL for directories */
	int mode;		/* S_IF* type plus permission bits */
};
401 static struct rpc_filelist files
[] = {
404 .mode
= S_IFDIR
| S_IRUGO
| S_IXUGO
,
408 .mode
= S_IFDIR
| S_IRUGO
| S_IXUGO
,
412 .mode
= S_IFDIR
| S_IRUGO
| S_IXUGO
,
414 [RPCAUTH_portmap
] = {
416 .mode
= S_IFDIR
| S_IRUGO
| S_IXUGO
,
420 .mode
= S_IFDIR
| S_IRUGO
| S_IXUGO
,
429 static struct rpc_filelist authfiles
[] = {
432 .i_fop
= &rpc_info_operations
,
433 .mode
= S_IFREG
| S_IRUSR
,
440 return simple_pin_fs("rpc_pipefs", &rpc_mount
, &rpc_mount_count
);
446 simple_release_fs(&rpc_mount
, &rpc_mount_count
);
450 rpc_lookup_parent(char *path
, struct nameidata
*nd
)
454 if (rpc_get_mount()) {
455 printk(KERN_WARNING
"%s: %s failed to mount "
456 "pseudofilesystem \n", __FILE__
, __FUNCTION__
);
459 nd
->mnt
= mntget(rpc_mount
);
460 nd
->dentry
= dget(rpc_mount
->mnt_root
);
461 nd
->last_type
= LAST_ROOT
;
462 nd
->flags
= LOOKUP_PARENT
;
465 if (path_walk(path
, nd
)) {
466 printk(KERN_WARNING
"%s: %s failed to find path %s\n",
467 __FILE__
, __FUNCTION__
, path
);
/* Undo rpc_lookup_parent(): release the nameidata and mount pin. */
static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}
481 static struct inode
*
482 rpc_get_inode(struct super_block
*sb
, int mode
)
484 struct inode
*inode
= new_inode(sb
);
487 inode
->i_mode
= mode
;
488 inode
->i_uid
= inode
->i_gid
= 0;
489 inode
->i_blksize
= PAGE_CACHE_SIZE
;
491 inode
->i_atime
= inode
->i_mtime
= inode
->i_ctime
= CURRENT_TIME
;
492 switch(mode
& S_IFMT
) {
494 inode
->i_fop
= &simple_dir_operations
;
495 inode
->i_op
= &simple_dir_inode_operations
;
504 * FIXME: This probably has races.
507 rpc_depopulate(struct dentry
*parent
)
509 struct inode
*dir
= parent
->d_inode
;
510 struct list_head
*pos
, *next
;
511 struct dentry
*dentry
, *dvec
[10];
514 mutex_lock(&dir
->i_mutex
);
516 spin_lock(&dcache_lock
);
517 list_for_each_safe(pos
, next
, &parent
->d_subdirs
) {
518 dentry
= list_entry(pos
, struct dentry
, d_u
.d_child
);
519 spin_lock(&dentry
->d_lock
);
520 if (!d_unhashed(dentry
)) {
523 spin_unlock(&dentry
->d_lock
);
525 if (n
== ARRAY_SIZE(dvec
))
528 spin_unlock(&dentry
->d_lock
);
530 spin_unlock(&dcache_lock
);
534 if (dentry
->d_inode
) {
535 rpc_close_pipes(dentry
->d_inode
);
536 simple_unlink(dir
, dentry
);
542 mutex_unlock(&dir
->i_mutex
);
546 rpc_populate(struct dentry
*parent
,
547 struct rpc_filelist
*files
,
550 struct inode
*inode
, *dir
= parent
->d_inode
;
551 void *private = RPC_I(dir
)->private;
552 struct dentry
*dentry
;
555 mutex_lock(&dir
->i_mutex
);
556 for (i
= start
; i
< eof
; i
++) {
557 dentry
= d_alloc_name(parent
, files
[i
].name
);
560 mode
= files
[i
].mode
;
561 inode
= rpc_get_inode(dir
->i_sb
, mode
);
568 inode
->i_fop
= files
[i
].i_fop
;
570 rpc_inode_setowner(inode
, private);
573 d_add(dentry
, inode
);
575 mutex_unlock(&dir
->i_mutex
);
578 mutex_unlock(&dir
->i_mutex
);
579 printk(KERN_WARNING
"%s: %s failed to populate directory %s\n",
580 __FILE__
, __FUNCTION__
, parent
->d_name
.name
);
585 __rpc_mkdir(struct inode
*dir
, struct dentry
*dentry
)
589 inode
= rpc_get_inode(dir
->i_sb
, S_IFDIR
| S_IRUSR
| S_IXUSR
);
592 inode
->i_ino
= iunique(dir
->i_sb
, 100);
593 d_instantiate(dentry
, inode
);
595 inode_dir_notify(dir
, DN_CREATE
);
599 printk(KERN_WARNING
"%s: %s failed to allocate inode for dentry %s\n",
600 __FILE__
, __FUNCTION__
, dentry
->d_name
.name
);
605 __rpc_rmdir(struct inode
*dir
, struct dentry
*dentry
)
609 shrink_dcache_parent(dentry
);
611 rpc_close_pipes(dentry
->d_inode
);
612 if ((error
= simple_rmdir(dir
, dentry
)) != 0)
615 inode_dir_notify(dir
, DN_DELETE
);
622 static struct dentry
*
623 rpc_lookup_negative(char *path
, struct nameidata
*nd
)
625 struct dentry
*dentry
;
629 if ((error
= rpc_lookup_parent(path
, nd
)) != 0)
630 return ERR_PTR(error
);
631 dir
= nd
->dentry
->d_inode
;
632 mutex_lock(&dir
->i_mutex
);
633 dentry
= lookup_one_len(nd
->last
.name
, nd
->dentry
, nd
->last
.len
);
636 if (dentry
->d_inode
) {
638 dentry
= ERR_PTR(-EEXIST
);
643 mutex_unlock(&dir
->i_mutex
);
644 rpc_release_path(nd
);
650 rpc_mkdir(char *path
, struct rpc_clnt
*rpc_client
)
653 struct dentry
*dentry
;
657 dentry
= rpc_lookup_negative(path
, &nd
);
660 dir
= nd
.dentry
->d_inode
;
661 if ((error
= __rpc_mkdir(dir
, dentry
)) != 0)
663 RPC_I(dentry
->d_inode
)->private = rpc_client
;
664 error
= rpc_populate(dentry
, authfiles
,
665 RPCAUTH_info
, RPCAUTH_EOF
);
669 mutex_unlock(&dir
->i_mutex
);
670 rpc_release_path(&nd
);
673 rpc_depopulate(dentry
);
674 __rpc_rmdir(dir
, dentry
);
677 printk(KERN_WARNING
"%s: %s() failed to create directory %s (errno = %d)\n",
678 __FILE__
, __FUNCTION__
, path
, error
);
679 dentry
= ERR_PTR(error
);
684 rpc_rmdir(char *path
)
687 struct dentry
*dentry
;
691 if ((error
= rpc_lookup_parent(path
, &nd
)) != 0)
693 dir
= nd
.dentry
->d_inode
;
694 mutex_lock(&dir
->i_mutex
);
695 dentry
= lookup_one_len(nd
.last
.name
, nd
.dentry
, nd
.last
.len
);
696 if (IS_ERR(dentry
)) {
697 error
= PTR_ERR(dentry
);
700 rpc_depopulate(dentry
);
701 error
= __rpc_rmdir(dir
, dentry
);
704 mutex_unlock(&dir
->i_mutex
);
705 rpc_release_path(&nd
);
710 rpc_mkpipe(char *path
, void *private, struct rpc_pipe_ops
*ops
, int flags
)
713 struct dentry
*dentry
;
714 struct inode
*dir
, *inode
;
715 struct rpc_inode
*rpci
;
717 dentry
= rpc_lookup_negative(path
, &nd
);
720 dir
= nd
.dentry
->d_inode
;
721 inode
= rpc_get_inode(dir
->i_sb
, S_IFSOCK
| S_IRUSR
| S_IWUSR
);
724 inode
->i_ino
= iunique(dir
->i_sb
, 100);
725 inode
->i_fop
= &rpc_pipe_fops
;
726 d_instantiate(dentry
, inode
);
728 rpci
->private = private;
731 inode_dir_notify(dir
, DN_CREATE
);
733 mutex_unlock(&dir
->i_mutex
);
734 rpc_release_path(&nd
);
738 dentry
= ERR_PTR(-ENOMEM
);
739 printk(KERN_WARNING
"%s: %s() failed to create pipe %s (errno = %d)\n",
740 __FILE__
, __FUNCTION__
, path
, -ENOMEM
);
745 rpc_unlink(char *path
)
748 struct dentry
*dentry
;
752 if ((error
= rpc_lookup_parent(path
, &nd
)) != 0)
754 dir
= nd
.dentry
->d_inode
;
755 mutex_lock(&dir
->i_mutex
);
756 dentry
= lookup_one_len(nd
.last
.name
, nd
.dentry
, nd
.last
.len
);
757 if (IS_ERR(dentry
)) {
758 error
= PTR_ERR(dentry
);
762 if (dentry
->d_inode
) {
763 rpc_close_pipes(dentry
->d_inode
);
764 error
= simple_unlink(dir
, dentry
);
767 inode_dir_notify(dir
, DN_DELETE
);
769 mutex_unlock(&dir
->i_mutex
);
770 rpc_release_path(&nd
);
775 * populate the filesystem
777 static struct super_operations s_ops
= {
778 .alloc_inode
= rpc_alloc_inode
,
779 .destroy_inode
= rpc_destroy_inode
,
780 .statfs
= simple_statfs
,
783 #define RPCAUTH_GSSMAGIC 0x67596969
786 rpc_fill_super(struct super_block
*sb
, void *data
, int silent
)
791 sb
->s_blocksize
= PAGE_CACHE_SIZE
;
792 sb
->s_blocksize_bits
= PAGE_CACHE_SHIFT
;
793 sb
->s_magic
= RPCAUTH_GSSMAGIC
;
797 inode
= rpc_get_inode(sb
, S_IFDIR
| 0755);
800 root
= d_alloc_root(inode
);
805 if (rpc_populate(root
, files
, RPCAUTH_Root
+ 1, RPCAUTH_RootEOF
))
815 static struct super_block
*
816 rpc_get_sb(struct file_system_type
*fs_type
,
817 int flags
, const char *dev_name
, void *data
)
819 return get_sb_single(fs_type
, flags
, data
, rpc_fill_super
);
822 static struct file_system_type rpc_pipe_fs_type
= {
823 .owner
= THIS_MODULE
,
824 .name
= "rpc_pipefs",
825 .get_sb
= rpc_get_sb
,
826 .kill_sb
= kill_litter_super
,
830 init_once(void * foo
, kmem_cache_t
* cachep
, unsigned long flags
)
832 struct rpc_inode
*rpci
= (struct rpc_inode
*) foo
;
834 if ((flags
& (SLAB_CTOR_VERIFY
|SLAB_CTOR_CONSTRUCTOR
)) ==
835 SLAB_CTOR_CONSTRUCTOR
) {
836 inode_init_once(&rpci
->vfs_inode
);
837 rpci
->private = NULL
;
840 INIT_LIST_HEAD(&rpci
->in_upcall
);
841 INIT_LIST_HEAD(&rpci
->pipe
);
843 init_waitqueue_head(&rpci
->waitq
);
844 INIT_WORK(&rpci
->queue_timeout
, rpc_timeout_upcall_queue
, rpci
);
849 int register_rpc_pipefs(void)
851 rpc_inode_cachep
= kmem_cache_create("rpc_inode_cache",
852 sizeof(struct rpc_inode
),
853 0, SLAB_HWCACHE_ALIGN
|SLAB_RECLAIM_ACCOUNT
,
855 if (!rpc_inode_cachep
)
857 register_filesystem(&rpc_pipe_fs_type
);
861 void unregister_rpc_pipefs(void)
863 if (kmem_cache_destroy(rpc_inode_cachep
))
864 printk(KERN_WARNING
"RPC: unable to free inode cache\n");
865 unregister_filesystem(&rpc_pipe_fs_type
);