nfs: add server port to rpc_pipe info file

/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;

static struct kmem_cache *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT	(30*HZ)
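
/*
 * Drain every message on @head, marking each with @err and handing it
 * to @destroy_msg, then wake any process sleeping on the pipe's waitq.
 */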
static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
                void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
        struct rpc_pipe_msg *msg;

        if (list_empty(head))
                return;
        do {
                msg = list_entry(head->next, struct rpc_pipe_msg, list);
                list_del(&msg->list);
                msg->errno = err;
                destroy_msg(msg);
        } while (!list_empty(head));
        wake_up(&rpci->waitq);
}
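
/*
 * Delayed-work handler: if the pipe still has no readers when
 * RPC_UPCALL_TIMEOUT expires, discard everything queued on it
 * with -ETIMEDOUT.
 */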
static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
        LIST_HEAD(free_list);
        struct rpc_inode *rpci =
                container_of(work, struct rpc_inode, queue_timeout.work);
        struct inode *inode = &rpci->vfs_inode;
        void (*destroy_msg)(struct rpc_pipe_msg *);

        spin_lock(&inode->i_lock);
        if (rpci->ops == NULL) {
                spin_unlock(&inode->i_lock);
                return;
        }
        destroy_msg = rpci->ops->destroy_msg;
        if (rpci->nreaders == 0) {
                list_splice_init(&rpci->pipe, &free_list);
                rpci->pipelen = 0;
        }
        spin_unlock(&inode->i_lock);
        rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}
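
/*
 * Queue an upcall message for userspace to read.  Succeeds if a reader
 * has the pipe open; otherwise, when RPC_PIPE_WAIT_FOR_OPEN is set, the
 * message is held and a delayed work item is armed to purge it with
 * -ETIMEDOUT if nobody opens the pipe in time.  Returns -EPIPE if the
 * pipe is closed or cannot accept the message.
 */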
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
        struct rpc_inode *rpci = RPC_I(inode);
        int res = -EPIPE;

        spin_lock(&inode->i_lock);
        if (rpci->ops == NULL)
                goto out;
        if (rpci->nreaders) {
                list_add_tail(&msg->list, &rpci->pipe);
                rpci->pipelen += msg->len;
                res = 0;
        } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
                if (list_empty(&rpci->pipe))
                        queue_delayed_work(rpciod_workqueue,
                                        &rpci->queue_timeout,
                                        RPC_UPCALL_TIMEOUT);
                list_add_tail(&msg->list, &rpci->pipe);
                rpci->pipelen += msg->len;
                res = 0;
        }
out:
        spin_unlock(&inode->i_lock);
        wake_up(&rpci->waitq);
        return res;
}

static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
        RPC_I(inode)->private = private;
}
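
/*
 * Shut down a pipe: detach its ops, flush all queued and in-progress
 * messages with -EPIPE, cancel the upcall timeout, and clear the
 * inode's private owner pointer.
 */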
static void
rpc_close_pipes(struct inode *inode)
{
        struct rpc_inode *rpci = RPC_I(inode);
        struct rpc_pipe_ops *ops;

        mutex_lock(&inode->i_mutex);
        ops = rpci->ops;
        if (ops != NULL) {
                LIST_HEAD(free_list);

                spin_lock(&inode->i_lock);
                rpci->nreaders = 0;
                list_splice_init(&rpci->in_upcall, &free_list);
                list_splice_init(&rpci->pipe, &free_list);
                rpci->pipelen = 0;
                rpci->ops = NULL;
                spin_unlock(&inode->i_lock);
                rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
                rpci->nwriters = 0;
                if (ops->release_pipe)
                        ops->release_pipe(inode);
                cancel_delayed_work_sync(&rpci->queue_timeout);
        }
        rpc_inode_setowner(inode, NULL);
        mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
        struct rpc_inode *rpci;
        rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
        if (!rpci)
                return NULL;
        return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
        kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
        struct rpc_inode *rpci = RPC_I(inode);
        int res = -ENXIO;

        mutex_lock(&inode->i_mutex);
        if (rpci->ops != NULL) {
                if (filp->f_mode & FMODE_READ)
                        rpci->nreaders++;
                if (filp->f_mode & FMODE_WRITE)
                        rpci->nwriters++;
                res = 0;
        }
        mutex_unlock(&inode->i_mutex);
        return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
        struct rpc_inode *rpci = RPC_I(inode);
        struct rpc_pipe_msg *msg;

        mutex_lock(&inode->i_mutex);
        if (rpci->ops == NULL)
                goto out;
        msg = (struct rpc_pipe_msg *)filp->private_data;
        if (msg != NULL) {
                spin_lock(&inode->i_lock);
                msg->errno = -EAGAIN;
                list_del(&msg->list);
                spin_unlock(&inode->i_lock);
                rpci->ops->destroy_msg(msg);
        }
        if (filp->f_mode & FMODE_WRITE)
                rpci->nwriters--;
        if (filp->f_mode & FMODE_READ) {
                rpci->nreaders--;
                if (rpci->nreaders == 0) {
                        LIST_HEAD(free_list);
                        spin_lock(&inode->i_lock);
                        list_splice_init(&rpci->pipe, &free_list);
                        rpci->pipelen = 0;
                        spin_unlock(&inode->i_lock);
                        rpc_purge_list(rpci, &free_list,
                                        rpci->ops->destroy_msg, -EAGAIN);
                }
        }
        if (rpci->ops->release_pipe)
                rpci->ops->release_pipe(inode);
out:
        mutex_unlock(&inode->i_mutex);
        return 0;
}
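
/*
 * read() hands the next queued message to the pipe's ->upcall() method.
 * A partially copied message is parked on filp->private_data and kept on
 * the in_upcall list; it is destroyed once fully copied or on error.
 */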
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct rpc_inode *rpci = RPC_I(inode);
        struct rpc_pipe_msg *msg;
        int res = 0;

        mutex_lock(&inode->i_mutex);
        if (rpci->ops == NULL) {
                res = -EPIPE;
                goto out_unlock;
        }
        msg = filp->private_data;
        if (msg == NULL) {
                spin_lock(&inode->i_lock);
                if (!list_empty(&rpci->pipe)) {
                        msg = list_entry(rpci->pipe.next,
                                        struct rpc_pipe_msg,
                                        list);
                        list_move(&msg->list, &rpci->in_upcall);
                        rpci->pipelen -= msg->len;
                        filp->private_data = msg;
                        msg->copied = 0;
                }
                spin_unlock(&inode->i_lock);
                if (msg == NULL)
                        goto out_unlock;
        }
        /* NOTE: it is up to the callback to update msg->copied */
        res = rpci->ops->upcall(filp, msg, buf, len);
        if (res < 0 || msg->len == msg->copied) {
                filp->private_data = NULL;
                spin_lock(&inode->i_lock);
                list_del(&msg->list);
                spin_unlock(&inode->i_lock);
                rpci->ops->destroy_msg(msg);
        }
out_unlock:
        mutex_unlock(&inode->i_mutex);
        return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct rpc_inode *rpci = RPC_I(inode);
        int res;

        mutex_lock(&inode->i_mutex);
        res = -EPIPE;
        if (rpci->ops != NULL)
                res = rpci->ops->downcall(filp, buf, len);
        mutex_unlock(&inode->i_mutex);
        return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct rpc_inode *rpci;
        unsigned int mask = 0;

        rpci = RPC_I(filp->f_path.dentry->d_inode);
        poll_wait(filp, &rpci->waitq, wait);

        mask = POLLOUT | POLLWRNORM;
        if (rpci->ops == NULL)
                mask |= POLLERR | POLLHUP;
        if (!list_empty(&rpci->pipe))
                mask |= POLLIN | POLLRDNORM;
        return mask;
}

static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
        int len;

        switch (cmd) {
        case FIONREAD:
                if (rpci->ops == NULL)
                        return -EPIPE;
                len = rpci->pipelen;
                if (filp->private_data) {
                        struct rpc_pipe_msg *msg;
                        msg = (struct rpc_pipe_msg *)filp->private_data;
                        len += msg->len - msg->copied;
                }
                return put_user(len, (int __user *)arg);
        default:
                return -EINVAL;
        }
}

static const struct file_operations rpc_pipe_fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = rpc_pipe_read,
        .write          = rpc_pipe_write,
        .poll           = rpc_pipe_poll,
        .ioctl          = rpc_pipe_ioctl,
        .open           = rpc_pipe_open,
        .release        = rpc_pipe_release,
};
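
/*
 * Body of the per-client "info" file: server name, program name, number
 * and version, plus the transport's address, protocol and port.
 */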
static int
rpc_show_info(struct seq_file *m, void *v)
{
        struct rpc_clnt *clnt = m->private;

        seq_printf(m, "RPC server: %s\n", clnt->cl_server);
        seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
                        clnt->cl_prog, clnt->cl_vers);
        seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
        seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
        seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
        return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
        struct rpc_clnt *clnt;
        int ret = single_open(file, rpc_show_info, NULL);

        if (!ret) {
                struct seq_file *m = file->private_data;
                mutex_lock(&inode->i_mutex);
                clnt = RPC_I(inode)->private;
                if (clnt) {
                        kref_get(&clnt->cl_kref);
                        m->private = clnt;
                } else {
                        single_release(inode, file);
                        ret = -EINVAL;
                }
                mutex_unlock(&inode->i_mutex);
        }
        return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = file->private_data;
        struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

        if (clnt)
                rpc_release_client(clnt);
        return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
        .owner          = THIS_MODULE,
        .open           = rpc_info_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = rpc_info_release,
};


/*
 * We have a single directory with 1 node in it.
 */
enum {
        RPCAUTH_Root = 1,
        RPCAUTH_lockd,
        RPCAUTH_mount,
        RPCAUTH_nfs,
        RPCAUTH_portmap,
        RPCAUTH_statd,
        RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
        char *name;
        const struct file_operations *i_fop;
        int mode;
};

static struct rpc_filelist files[] = {
        [RPCAUTH_lockd] = {
                .name = "lockd",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_mount] = {
                .name = "mount",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_nfs] = {
                .name = "nfs",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_portmap] = {
                .name = "portmap",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
        [RPCAUTH_statd] = {
                .name = "statd",
                .mode = S_IFDIR | S_IRUGO | S_IXUGO,
        },
};

enum {
        RPCAUTH_info = 2,
        RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
        [RPCAUTH_info] = {
                .name = "info",
                .i_fop = &rpc_info_operations,
                .mode = S_IFREG | S_IRUSR,
        },
};

struct vfsmount *rpc_get_mount(void)
{
        int err;

        err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
        if (err != 0)
                return ERR_PTR(err);
        return rpc_mount;
}

void rpc_put_mount(void)
{
        simple_release_fs(&rpc_mount, &rpc_mount_count);
}

static int rpc_delete_dentry(struct dentry *dentry)
{
        return 1;
}

static struct dentry_operations rpc_dentry_operations = {
        .d_delete = rpc_delete_dentry,
};

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
        struct vfsmount *mnt;

        if (path[0] == '\0')
                return -ENOENT;

        mnt = rpc_get_mount();
        if (IS_ERR(mnt)) {
                printk(KERN_WARNING "%s: %s failed to mount "
                               "pseudofilesystem \n", __FILE__, __FUNCTION__);
                return PTR_ERR(mnt);
        }

        if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
                printk(KERN_WARNING "%s: %s failed to find path %s\n",
                                __FILE__, __FUNCTION__, path);
                rpc_put_mount();
                return -ENOENT;
        }
        return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
        path_release(nd);
        rpc_put_mount();
}

static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
        struct inode *inode = new_inode(sb);
        if (!inode)
                return NULL;
        inode->i_mode = mode;
        inode->i_uid = inode->i_gid = 0;
        inode->i_blocks = 0;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        switch (mode & S_IFMT) {
        case S_IFDIR:
                inode->i_fop = &simple_dir_operations;
                inode->i_op = &simple_dir_inode_operations;
                inc_nlink(inode);
        default:
                break;
        }
        return inode;
}

/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent, int start, int eof)
{
        struct inode *dir = parent->d_inode;
        struct list_head *pos, *next;
        struct dentry *dentry, *dvec[10];
        int n = 0;

        mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
        spin_lock(&dcache_lock);
        list_for_each_safe(pos, next, &parent->d_subdirs) {
                dentry = list_entry(pos, struct dentry, d_u.d_child);
                if (!dentry->d_inode ||
                                dentry->d_inode->i_ino < start ||
                                dentry->d_inode->i_ino >= eof)
                        continue;
                spin_lock(&dentry->d_lock);
                if (!d_unhashed(dentry)) {
                        dget_locked(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dentry->d_lock);
                        dvec[n++] = dentry;
                        if (n == ARRAY_SIZE(dvec))
                                break;
                } else
                        spin_unlock(&dentry->d_lock);
        }
        spin_unlock(&dcache_lock);
        if (n) {
                do {
                        dentry = dvec[--n];
                        if (S_ISREG(dentry->d_inode->i_mode))
                                simple_unlink(dir, dentry);
                        else if (S_ISDIR(dentry->d_inode->i_mode))
                                simple_rmdir(dir, dentry);
                        d_delete(dentry);
                        dput(dentry);
                } while (n);
                goto repeat;
        }
        mutex_unlock(&dir->i_mutex);
}
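
/*
 * Create the entries described by @files (indices [@start, @eof)) under
 * @parent.  Each new inode takes its i_ino from the table index and, if
 * the parent has one, inherits the parent's private owner pointer.
 */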
static int
rpc_populate(struct dentry *parent,
                struct rpc_filelist *files,
                int start, int eof)
{
        struct inode *inode, *dir = parent->d_inode;
        void *private = RPC_I(dir)->private;
        struct dentry *dentry;
        int mode, i;

        mutex_lock(&dir->i_mutex);
        for (i = start; i < eof; i++) {
                dentry = d_alloc_name(parent, files[i].name);
                if (!dentry)
                        goto out_bad;
                dentry->d_op = &rpc_dentry_operations;
                mode = files[i].mode;
                inode = rpc_get_inode(dir->i_sb, mode);
                if (!inode) {
                        dput(dentry);
                        goto out_bad;
                }
                inode->i_ino = i;
                if (files[i].i_fop)
                        inode->i_fop = files[i].i_fop;
                if (private)
                        rpc_inode_setowner(inode, private);
                if (S_ISDIR(mode))
                        inc_nlink(dir);
                d_add(dentry, inode);
                fsnotify_create(dir, dentry);
        }
        mutex_unlock(&dir->i_mutex);
        return 0;
out_bad:
        mutex_unlock(&dir->i_mutex);
        printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
                        __FILE__, __FUNCTION__, parent->d_name.name);
        return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode;

        inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
        if (!inode)
                goto out_err;
        inode->i_ino = iunique(dir->i_sb, 100);
        d_instantiate(dentry, inode);
        inc_nlink(dir);
        fsnotify_mkdir(dir, dentry);
        return 0;
out_err:
        printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
                        __FILE__, __FUNCTION__, dentry->d_name.name);
        return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
        int error;
        error = simple_rmdir(dir, dentry);
        if (!error)
                d_delete(dentry);
        return error;
}

static struct dentry *
rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive)
{
        struct inode *dir = parent->d_inode;
        struct dentry *dentry;

        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        dentry = lookup_one_len(name, parent, len);
        if (IS_ERR(dentry))
                goto out_err;
        if (!dentry->d_inode)
                dentry->d_op = &rpc_dentry_operations;
        else if (exclusive) {
                dput(dentry);
                dentry = ERR_PTR(-EEXIST);
                goto out_err;
        }
        return dentry;
out_err:
        mutex_unlock(&dir->i_mutex);
        return dentry;
}

static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
        struct dentry *dentry;
        int error;

        if ((error = rpc_lookup_parent(path, nd)) != 0)
                return ERR_PTR(error);
        dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
        if (IS_ERR(dentry))
                rpc_release_path(nd);
        return dentry;
}
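
/*
 * Create a per-client directory at @path in rpc_pipefs and populate it
 * with the "info" file describing the RPC client.  Returns the new
 * dentry on success or an ERR_PTR on failure.
 */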
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
        struct nameidata nd;
        struct dentry *dentry;
        struct inode *dir;
        int error;

        dentry = rpc_lookup_negative(path, &nd);
        if (IS_ERR(dentry))
                return dentry;
        dir = nd.dentry->d_inode;
        if ((error = __rpc_mkdir(dir, dentry)) != 0)
                goto err_dput;
        RPC_I(dentry->d_inode)->private = rpc_client;
        error = rpc_populate(dentry, authfiles,
                        RPCAUTH_info, RPCAUTH_EOF);
        if (error)
                goto err_depopulate;
        dget(dentry);
out:
        mutex_unlock(&dir->i_mutex);
        rpc_release_path(&nd);
        return dentry;
err_depopulate:
        rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
        __rpc_rmdir(dir, dentry);
err_dput:
        dput(dentry);
        printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
                        __FILE__, __FUNCTION__, path, error);
        dentry = ERR_PTR(error);
        goto out;
}

int
rpc_rmdir(struct dentry *dentry)
{
        struct dentry *parent;
        struct inode *dir;
        int error;

        parent = dget_parent(dentry);
        dir = parent->d_inode;
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
        error = __rpc_rmdir(dir, dentry);
        dput(dentry);
        mutex_unlock(&dir->i_mutex);
        dput(parent);
        return error;
}
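
/*
 * Create a named pipe under @parent for kernel<->userspace upcalls.
 * If a pipe with the same name already exists, it is reused only when
 * its private data, ops and flags match; otherwise ERR_PTR(-EBUSY) is
 * returned.
 */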
struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
        struct dentry *dentry;
        struct inode *dir, *inode;
        struct rpc_inode *rpci;

        dentry = rpc_lookup_create(parent, name, strlen(name), 0);
        if (IS_ERR(dentry))
                return dentry;
        dir = parent->d_inode;
        if (dentry->d_inode) {
                rpci = RPC_I(dentry->d_inode);
                if (rpci->private != private ||
                                rpci->ops != ops ||
                                rpci->flags != flags) {
                        dput(dentry);
                        dentry = ERR_PTR(-EBUSY);
                }
                rpci->nkern_readwriters++;
                goto out;
        }
        inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
        if (!inode)
                goto err_dput;
        inode->i_ino = iunique(dir->i_sb, 100);
        inode->i_fop = &rpc_pipe_fops;
        d_instantiate(dentry, inode);
        rpci = RPC_I(inode);
        rpci->private = private;
        rpci->flags = flags;
        rpci->ops = ops;
        rpci->nkern_readwriters = 1;
        fsnotify_create(dir, dentry);
        dget(dentry);
out:
        mutex_unlock(&dir->i_mutex);
        return dentry;
err_dput:
        dput(dentry);
        dentry = ERR_PTR(-ENOMEM);
        printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
                        __FILE__, __FUNCTION__, parent->d_name.name, name,
                        -ENOMEM);
        goto out;
}
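
/*
 * Drop one kernel reference on a pipe created by rpc_mkpipe().  When the
 * last kernel user goes away the pipe is shut down and removed from its
 * parent directory.
 */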
int
rpc_unlink(struct dentry *dentry)
{
        struct dentry *parent;
        struct inode *dir;
        int error = 0;

        parent = dget_parent(dentry);
        dir = parent->d_inode;
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) {
                rpc_close_pipes(dentry->d_inode);
                error = simple_unlink(dir, dentry);
                if (!error)
                        d_delete(dentry);
        }
        dput(dentry);
        mutex_unlock(&dir->i_mutex);
        dput(parent);
        return error;
}

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
        .alloc_inode    = rpc_alloc_inode,
        .destroy_inode  = rpc_destroy_inode,
        .statfs         = simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969
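
/*
 * Set up the rpc_pipefs superblock: create the root directory and
 * populate it with the standard lockd/mount/nfs/portmap/statd
 * subdirectories.
 */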
static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct dentry *root;

        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = RPCAUTH_GSSMAGIC;
        sb->s_op = &s_ops;
        sb->s_time_gran = 1;

        inode = rpc_get_inode(sb, S_IFDIR | 0755);
        if (!inode)
                return -ENOMEM;
        root = d_alloc_root(inode);
        if (!root) {
                iput(inode);
                return -ENOMEM;
        }
        if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
                goto out;
        sb->s_root = root;
        return 0;
out:
        d_genocide(root);
        dput(root);
        return -ENOMEM;
}

static int
rpc_get_sb(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}

static struct file_system_type rpc_pipe_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "rpc_pipefs",
        .get_sb         = rpc_get_sb,
        .kill_sb        = kill_litter_super,
};

static void
init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
        struct rpc_inode *rpci = (struct rpc_inode *) foo;

        inode_init_once(&rpci->vfs_inode);
        rpci->private = NULL;
        rpci->nreaders = 0;
        rpci->nwriters = 0;
        INIT_LIST_HEAD(&rpci->in_upcall);
        INIT_LIST_HEAD(&rpci->in_downcall);
        INIT_LIST_HEAD(&rpci->pipe);
        rpci->pipelen = 0;
        init_waitqueue_head(&rpci->waitq);
        INIT_DELAYED_WORK(&rpci->queue_timeout,
                        rpc_timeout_upcall_queue);
        rpci->ops = NULL;
}
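
/*
 * Module init/exit helpers: create the rpc_inode slab cache and
 * register (or unregister) the "rpc_pipefs" filesystem type.
 */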
int register_rpc_pipefs(void)
{
        int err;

        rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
                                sizeof(struct rpc_inode),
                                0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                                SLAB_MEM_SPREAD),
                                init_once);
        if (!rpc_inode_cachep)
                return -ENOMEM;
        err = register_filesystem(&rpc_pipe_fs_type);
        if (err) {
                kmem_cache_destroy(rpc_inode_cachep);
                return err;
        }

        return 0;
}

void unregister_rpc_pipefs(void)
{
        kmem_cache_destroy(rpc_inode_cachep);
        unregister_filesystem(&rpc_pipe_fs_type);
}