/* fs/fuse/inode.c */

/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>

MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

#define FUSE_SUPER_MAGIC 0x65735546

#define FUSE_DEFAULT_BLKSIZE 512

struct fuse_mount_data {
        int fd;
        unsigned rootmode;
        unsigned user_id;
        unsigned group_id;
        unsigned fd_present : 1;
        unsigned rootmode_present : 1;
        unsigned user_id_present : 1;
        unsigned group_id_present : 1;
        unsigned flags;
        unsigned max_read;
        unsigned blksize;
};

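/*
 * Allocate a fuse inode from the slab cache.  The FORGET request is
 * allocated up front so that fuse_clear_inode() never has to allocate
 * memory when the inode is finally dropped.
 */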
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
        struct inode *inode;
        struct fuse_inode *fi;

        inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
        if (!inode)
                return NULL;

        fi = get_fuse_inode(inode);
        fi->i_time = 0;
        fi->nodeid = 0;
        fi->nlookup = 0;
        fi->attr_version = 0;
        fi->writectr = 0;
        INIT_LIST_HEAD(&fi->write_files);
        INIT_LIST_HEAD(&fi->queued_writes);
        INIT_LIST_HEAD(&fi->writepages);
        init_waitqueue_head(&fi->page_waitq);
        fi->forget_req = fuse_request_alloc();
        if (!fi->forget_req) {
                kmem_cache_free(fuse_inode_cachep, inode);
                return NULL;
        }

        return inode;
}

static void fuse_destroy_inode(struct inode *inode)
{
        struct fuse_inode *fi = get_fuse_inode(inode);
        BUG_ON(!list_empty(&fi->write_files));
        BUG_ON(!list_empty(&fi->queued_writes));
        if (fi->forget_req)
                fuse_request_free(fi->forget_req);
        kmem_cache_free(fuse_inode_cachep, inode);
}

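/*
 * Tell the userspace filesystem that the kernel has dropped nlookup
 * references to the given node.  FUSE_FORGET expects no reply, hence
 * request_send_noreply().
 */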
void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
                      u64 nodeid, u64 nlookup)
{
        struct fuse_forget_in *inarg = &req->misc.forget_in;
        inarg->nlookup = nlookup;
        req->in.h.opcode = FUSE_FORGET;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_forget_in);
        req->in.args[0].value = inarg;
        request_send_noreply(fc, req);
}

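/*
 * Send the pre-allocated FORGET request when an inode is evicted.  The
 * MS_ACTIVE check skips this during unmount, when the whole connection
 * is being torn down anyway.
 */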
static void fuse_clear_inode(struct inode *inode)
{
        if (inode->i_sb->s_flags & MS_ACTIVE) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                fuse_send_forget(fc, fi->forget_req, fi->nodeid, fi->nlookup);
                fi->forget_req = NULL;
        }
}

static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
{
        if (*flags & MS_MANDLOCK)
                return -EINVAL;

        return 0;
}

void fuse_truncate(struct address_space *mapping, loff_t offset)
{
        /* See vmtruncate() */
        unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
        truncate_inode_pages(mapping, offset);
        unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
}

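/*
 * Copy the attributes returned by the userspace filesystem into the
 * inode.  i_size is not touched here; fuse_change_attributes() handles
 * it under fc->lock.
 */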
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
                                   u64 attr_valid)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);

        fi->attr_version = ++fc->attr_version;
        fi->i_time = attr_valid;

        inode->i_ino = attr->ino;
        inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
        inode->i_nlink = attr->nlink;
        inode->i_uid = attr->uid;
        inode->i_gid = attr->gid;
        inode->i_blocks = attr->blocks;
        inode->i_atime.tv_sec = attr->atime;
        inode->i_atime.tv_nsec = attr->atimensec;
        inode->i_mtime.tv_sec = attr->mtime;
        inode->i_mtime.tv_nsec = attr->mtimensec;
        inode->i_ctime.tv_sec = attr->ctime;
        inode->i_ctime.tv_nsec = attr->ctimensec;

        if (attr->blksize != 0)
                inode->i_blkbits = ilog2(attr->blksize);
        else
                inode->i_blkbits = inode->i_sb->s_blocksize_bits;

        /*
         * Don't set the sticky bit in i_mode, unless we want the VFS
         * to check permissions.  This prevents failures due to the
         * check in may_delete().
         */
        fi->orig_i_mode = inode->i_mode;
        if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
                inode->i_mode &= ~S_ISVTX;
}

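/*
 * Update inode attributes including i_size.  Replies carrying an
 * attr_version older than what is already cached are ignored.  If the
 * size of a regular file changed, stale pages are truncated and/or
 * invalidated.
 */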
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
                            u64 attr_valid, u64 attr_version)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        loff_t oldsize;

        spin_lock(&fc->lock);
        if (attr_version != 0 && fi->attr_version > attr_version) {
                spin_unlock(&fc->lock);
                return;
        }

        fuse_change_attributes_common(inode, attr, attr_valid);

        oldsize = inode->i_size;
        i_size_write(inode, attr->size);
        spin_unlock(&fc->lock);

        if (S_ISREG(inode->i_mode) && oldsize != attr->size) {
                if (attr->size < oldsize)
                        fuse_truncate(inode->i_mapping, attr->size);
                invalidate_inode_pages2(inode->i_mapping);
        }
}

static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
{
        inode->i_mode = attr->mode & S_IFMT;
        inode->i_size = attr->size;
        if (S_ISREG(inode->i_mode)) {
                fuse_init_common(inode);
                fuse_init_file_inode(inode);
        } else if (S_ISDIR(inode->i_mode))
                fuse_init_dir(inode);
        else if (S_ISLNK(inode->i_mode))
                fuse_init_symlink(inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                fuse_init_common(inode);
                init_special_inode(inode, inode->i_mode,
                                   new_decode_dev(attr->rdev));
        } else
                BUG();
}

static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
        u64 nodeid = *(u64 *) _nodeidp;
        if (get_node_id(inode) == nodeid)
                return 1;
        else
                return 0;
}

static int fuse_inode_set(struct inode *inode, void *_nodeidp)
{
        u64 nodeid = *(u64 *) _nodeidp;
        get_fuse_inode(inode)->nodeid = nodeid;
        return 0;
}

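/*
 * Look up or create the inode for @nodeid.  Every successful lookup
 * increments fi->nlookup, which is later given back to userspace in a
 * FORGET.  If an existing inode changed its file type, it is marked bad
 * and a fresh inode is created instead.
 */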
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
                        int generation, struct fuse_attr *attr,
                        u64 attr_valid, u64 attr_version)
{
        struct inode *inode;
        struct fuse_inode *fi;
        struct fuse_conn *fc = get_fuse_conn_super(sb);

retry:
        inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
        if (!inode)
                return NULL;

        if ((inode->i_state & I_NEW)) {
                inode->i_flags |= S_NOATIME|S_NOCMTIME;
                inode->i_generation = generation;
                inode->i_data.backing_dev_info = &fc->bdi;
                fuse_init_inode(inode, attr);
                unlock_new_inode(inode);
        } else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
                /* Inode has changed type, any I/O on the old should fail */
                make_bad_inode(inode);
                iput(inode);
                goto retry;
        }

        fi = get_fuse_inode(inode);
        spin_lock(&fc->lock);
        fi->nlookup++;
        spin_unlock(&fc->lock);
        fuse_change_attributes(inode, attr, attr_valid, attr_version);

        return inode;
}

static void fuse_umount_begin(struct super_block *sb)
{
        fuse_abort_conn(get_fuse_conn_super(sb));
}

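/*
 * Send FUSE_DESTROY at unmount.  fc->destroy_req is pre-allocated in
 * fuse_fill_super() (fuseblk mounts only), so this path needs no memory
 * allocation.
 */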
static void fuse_send_destroy(struct fuse_conn *fc)
{
        struct fuse_req *req = fc->destroy_req;
        if (req && fc->conn_init) {
                fc->destroy_req = NULL;
                req->in.h.opcode = FUSE_DESTROY;
                req->force = 1;
                request_send(fc, req);
                fuse_put_request(fc, req);
        }
}

static void fuse_put_super(struct super_block *sb)
{
        struct fuse_conn *fc = get_fuse_conn_super(sb);

        fuse_send_destroy(fc);
        spin_lock(&fc->lock);
        fc->connected = 0;
        fc->blocked = 0;
        spin_unlock(&fc->lock);
        /* Flush all readers on this fs */
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        wake_up_all(&fc->waitq);
        wake_up_all(&fc->blocked_waitq);
        wake_up_all(&fc->reserved_req_waitq);
        mutex_lock(&fuse_mutex);
        list_del(&fc->entry);
        fuse_ctl_remove_conn(fc);
        mutex_unlock(&fuse_mutex);
        fuse_conn_put(fc);
}

static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
        stbuf->f_type = FUSE_SUPER_MAGIC;
        stbuf->f_bsize = attr->bsize;
        stbuf->f_frsize = attr->frsize;
        stbuf->f_blocks = attr->blocks;
        stbuf->f_bfree = attr->bfree;
        stbuf->f_bavail = attr->bavail;
        stbuf->f_files = attr->files;
        stbuf->f_ffree = attr->ffree;
        stbuf->f_namelen = attr->namelen;
        /* fsid is left zero */
}

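/*
 * statfs(2) is forwarded to userspace as FUSE_STATFS.  Servers speaking
 * a protocol minor version older than 4 return a shorter reply, hence
 * the FUSE_COMPAT_STATFS_SIZE special case.
 */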
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct fuse_conn *fc = get_fuse_conn_super(sb);
        struct fuse_req *req;
        struct fuse_statfs_out outarg;
        int err;

        if (!fuse_allow_task(fc, current)) {
                buf->f_type = FUSE_SUPER_MAGIC;
                return 0;
        }

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&outarg, 0, sizeof(outarg));
        req->in.numargs = 0;
        req->in.h.opcode = FUSE_STATFS;
        req->in.h.nodeid = get_node_id(dentry->d_inode);
        req->out.numargs = 1;
        req->out.args[0].size =
                fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        err = req->out.h.error;
        if (!err)
                convert_fuse_statfs(buf, &outarg.st);
        fuse_put_request(fc, req);
        return err;
}

enum {
        OPT_FD,
        OPT_ROOTMODE,
        OPT_USER_ID,
        OPT_GROUP_ID,
        OPT_DEFAULT_PERMISSIONS,
        OPT_ALLOW_OTHER,
        OPT_MAX_READ,
        OPT_BLKSIZE,
        OPT_ERR
};

static match_table_t tokens = {
        {OPT_FD, "fd=%u"},
        {OPT_ROOTMODE, "rootmode=%o"},
        {OPT_USER_ID, "user_id=%u"},
        {OPT_GROUP_ID, "group_id=%u"},
        {OPT_DEFAULT_PERMISSIONS, "default_permissions"},
        {OPT_ALLOW_OTHER, "allow_other"},
        {OPT_MAX_READ, "max_read=%u"},
        {OPT_BLKSIZE, "blksize=%u"},
        {OPT_ERR, NULL}
};

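/*
 * Parse the mount options passed in from userspace.  fd=, rootmode=,
 * user_id= and group_id= are mandatory; blksize= is only accepted for
 * block device based (fuseblk) mounts.  Returns 1 on success, 0 on any
 * parse error.  An illustrative option string (values are examples only):
 *
 *      fd=4,rootmode=40000,user_id=1000,group_id=1000,max_read=65536
 */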
static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
{
        char *p;
        memset(d, 0, sizeof(struct fuse_mount_data));
        d->max_read = ~0;
        d->blksize = FUSE_DEFAULT_BLKSIZE;

        while ((p = strsep(&opt, ",")) != NULL) {
                int token;
                int value;
                substring_t args[MAX_OPT_ARGS];
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case OPT_FD:
                        if (match_int(&args[0], &value))
                                return 0;
                        d->fd = value;
                        d->fd_present = 1;
                        break;

                case OPT_ROOTMODE:
                        if (match_octal(&args[0], &value))
                                return 0;
                        if (!fuse_valid_type(value))
                                return 0;
                        d->rootmode = value;
                        d->rootmode_present = 1;
                        break;

                case OPT_USER_ID:
                        if (match_int(&args[0], &value))
                                return 0;
                        d->user_id = value;
                        d->user_id_present = 1;
                        break;

                case OPT_GROUP_ID:
                        if (match_int(&args[0], &value))
                                return 0;
                        d->group_id = value;
                        d->group_id_present = 1;
                        break;

                case OPT_DEFAULT_PERMISSIONS:
                        d->flags |= FUSE_DEFAULT_PERMISSIONS;
                        break;

                case OPT_ALLOW_OTHER:
                        d->flags |= FUSE_ALLOW_OTHER;
                        break;

                case OPT_MAX_READ:
                        if (match_int(&args[0], &value))
                                return 0;
                        d->max_read = value;
                        break;

                case OPT_BLKSIZE:
                        if (!is_bdev || match_int(&args[0], &value))
                                return 0;
                        d->blksize = value;
                        break;

                default:
                        return 0;
                }
        }

        if (!d->fd_present || !d->rootmode_present ||
            !d->user_id_present || !d->group_id_present)
                return 0;

        return 1;
}

static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
{
        struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb);

        seq_printf(m, ",user_id=%u", fc->user_id);
        seq_printf(m, ",group_id=%u", fc->group_id);
        if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
                seq_puts(m, ",default_permissions");
        if (fc->flags & FUSE_ALLOW_OTHER)
                seq_puts(m, ",allow_other");
        if (fc->max_read != ~0)
                seq_printf(m, ",max_read=%u", fc->max_read);
        if (mnt->mnt_sb->s_bdev &&
            mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
                seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize);
        return 0;
}

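/*
 * Allocate and initialize a fuse_conn and register its backing_dev_info.
 * New requests stay blocked (fc->blocked) until the INIT reply arrives.
 */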
static struct fuse_conn *new_conn(struct super_block *sb)
{
        struct fuse_conn *fc;
        int err;

        fc = kzalloc(sizeof(*fc), GFP_KERNEL);
        if (fc) {
                spin_lock_init(&fc->lock);
                mutex_init(&fc->inst_mutex);
                atomic_set(&fc->count, 1);
                init_waitqueue_head(&fc->waitq);
                init_waitqueue_head(&fc->blocked_waitq);
                init_waitqueue_head(&fc->reserved_req_waitq);
                INIT_LIST_HEAD(&fc->pending);
                INIT_LIST_HEAD(&fc->processing);
                INIT_LIST_HEAD(&fc->io);
                INIT_LIST_HEAD(&fc->interrupts);
                INIT_LIST_HEAD(&fc->bg_queue);
                atomic_set(&fc->num_waiting, 0);
                fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
                fc->bdi.unplug_io_fn = default_unplug_io_fn;
                /* fuse does its own writeback accounting */
                fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
                fc->dev = sb->s_dev;
                err = bdi_init(&fc->bdi);
                if (err)
                        goto error_kfree;
                err = bdi_register_dev(&fc->bdi, fc->dev);
                if (err)
                        goto error_bdi_destroy;
                /*
                 * For a single fuse filesystem use max 1% of dirty +
                 * writeback threshold.
                 *
                 * This gives about 1M of write buffer for memory maps on a
                 * machine with 1G and 10% dirty_ratio, which should be more
                 * than enough.
                 *
                 * Privileged users can raise it by writing to
                 *
                 *    /sys/class/bdi/<bdi>/max_ratio
                 */
                bdi_set_max_ratio(&fc->bdi, 1);
                fc->reqctr = 0;
                fc->blocked = 1;
                fc->attr_version = 1;
                get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
        }
        return fc;

error_bdi_destroy:
        bdi_destroy(&fc->bdi);
error_kfree:
        mutex_destroy(&fc->inst_mutex);
        kfree(fc);
        return NULL;
}

void fuse_conn_put(struct fuse_conn *fc)
{
        if (atomic_dec_and_test(&fc->count)) {
                if (fc->destroy_req)
                        fuse_request_free(fc->destroy_req);
                mutex_destroy(&fc->inst_mutex);
                bdi_destroy(&fc->bdi);
                kfree(fc);
        }
}

struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
        atomic_inc(&fc->count);
        return fc;
}

static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
{
        struct fuse_attr attr;
        memset(&attr, 0, sizeof(attr));

        attr.mode = mode;
        attr.ino = FUSE_ROOT_ID;
        attr.nlink = 1;
        return fuse_iget(sb, 1, 0, &attr, 0, 0);
}

static const struct super_operations fuse_super_operations = {
        .alloc_inode = fuse_alloc_inode,
        .destroy_inode = fuse_destroy_inode,
        .clear_inode = fuse_clear_inode,
        .drop_inode = generic_delete_inode,
        .remount_fs = fuse_remount_fs,
        .put_super = fuse_put_super,
        .umount_begin = fuse_umount_begin,
        .statfs = fuse_statfs,
        .show_options = fuse_show_options,
};

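/*
 * Completion handler for the FUSE_INIT request: record the server's
 * protocol minor version, readahead and max_write limits and optional
 * features, then unblock queued requests.
 */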
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_init_out *arg = &req->misc.init_out;

        if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
                fc->conn_error = 1;
        else {
                unsigned long ra_pages;

                if (arg->minor >= 6) {
                        ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
                        if (arg->flags & FUSE_ASYNC_READ)
                                fc->async_read = 1;
                        if (!(arg->flags & FUSE_POSIX_LOCKS))
                                fc->no_lock = 1;
                        if (arg->flags & FUSE_ATOMIC_O_TRUNC)
                                fc->atomic_o_trunc = 1;
                        if (arg->flags & FUSE_BIG_WRITES)
                                fc->big_writes = 1;
                } else {
                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                        fc->no_lock = 1;
                }

                fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
                fc->max_write = max_t(unsigned, 4096, fc->max_write);
                fc->conn_init = 1;
        }
        fuse_put_request(fc, req);
        fc->blocked = 0;
        wake_up_all(&fc->blocked_waitq);
}

static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_init_in *arg = &req->misc.init_in;

        arg->major = FUSE_KERNEL_VERSION;
        arg->minor = FUSE_KERNEL_MINOR_VERSION;
        arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
        arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
                FUSE_BIG_WRITES;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
        req->out.numargs = 1;
        /* Variable length argument used for backward compatibility
           with interface version < 7.5.  Rest of init_out is zeroed
           by do_get_request(), so a short reply is not a problem */
        req->out.argvar = 1;
        req->out.args[0].size = sizeof(struct fuse_init_out);
        req->out.args[0].value = &req->misc.init_out;
        req->end = process_init_reply;
        request_send_background(fc, req);
}

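/*
 * Set up the superblock at mount time: parse the options, grab the
 * /dev/fuse file given by fd=, create the connection and the root inode,
 * and finally send FUSE_INIT in the background.
 */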
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
        struct fuse_conn *fc;
        struct inode *root;
        struct fuse_mount_data d;
        struct file *file;
        struct dentry *root_dentry;
        struct fuse_req *init_req;
        int err;
        int is_bdev = sb->s_bdev != NULL;

        if (sb->s_flags & MS_MANDLOCK)
                return -EINVAL;

        if (!parse_fuse_opt((char *) data, &d, is_bdev))
                return -EINVAL;

        if (is_bdev) {
#ifdef CONFIG_BLOCK
                if (!sb_set_blocksize(sb, d.blksize))
                        return -EINVAL;
#endif
        } else {
                sb->s_blocksize = PAGE_CACHE_SIZE;
                sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        }
        sb->s_magic = FUSE_SUPER_MAGIC;
        sb->s_op = &fuse_super_operations;
        sb->s_maxbytes = MAX_LFS_FILESIZE;

        file = fget(d.fd);
        if (!file)
                return -EINVAL;

        if (file->f_op != &fuse_dev_operations)
                return -EINVAL;

        fc = new_conn(sb);
        if (!fc)
                return -ENOMEM;

        fc->flags = d.flags;
        fc->user_id = d.user_id;
        fc->group_id = d.group_id;
        fc->max_read = max_t(unsigned, 4096, d.max_read);

        /* Used by get_root_inode() */
        sb->s_fs_info = fc;

        err = -ENOMEM;
        root = get_root_inode(sb, d.rootmode);
        if (!root)
                goto err;

        root_dentry = d_alloc_root(root);
        if (!root_dentry) {
                iput(root);
                goto err;
        }

        init_req = fuse_request_alloc();
        if (!init_req)
                goto err_put_root;

        if (is_bdev) {
                fc->destroy_req = fuse_request_alloc();
                if (!fc->destroy_req)
                        goto err_put_root;
        }

        mutex_lock(&fuse_mutex);
        err = -EINVAL;
        if (file->private_data)
                goto err_unlock;

        err = fuse_ctl_add_conn(fc);
        if (err)
                goto err_unlock;

        list_add_tail(&fc->entry, &fuse_conn_list);
        sb->s_root = root_dentry;
        fc->connected = 1;
        file->private_data = fuse_conn_get(fc);
        mutex_unlock(&fuse_mutex);
        /*
         * atomic_dec_and_test() in fput() provides the necessary
         * memory barrier for file->private_data to be visible on all
         * CPUs after this
         */
        fput(file);

        fuse_send_init(fc, init_req);

        return 0;

err_unlock:
        mutex_unlock(&fuse_mutex);
        fuse_request_free(init_req);
err_put_root:
        dput(root_dentry);
err:
        fput(file);
        fuse_conn_put(fc);
        return err;
}

static int fuse_get_sb(struct file_system_type *fs_type,
                       int flags, const char *dev_name,
                       void *raw_data, struct vfsmount *mnt)
{
        return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
}

static struct file_system_type fuse_fs_type = {
        .owner = THIS_MODULE,
        .name = "fuse",
        .fs_flags = FS_HAS_SUBTYPE,
        .get_sb = fuse_get_sb,
        .kill_sb = kill_anon_super,
};

#ifdef CONFIG_BLOCK
static int fuse_get_sb_blk(struct file_system_type *fs_type,
                           int flags, const char *dev_name,
                           void *raw_data, struct vfsmount *mnt)
{
        return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super,
                           mnt);
}

static struct file_system_type fuseblk_fs_type = {
        .owner = THIS_MODULE,
        .name = "fuseblk",
        .get_sb = fuse_get_sb_blk,
        .kill_sb = kill_block_super,
        .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};

static inline int register_fuseblk(void)
{
        return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
        unregister_filesystem(&fuseblk_fs_type);
}
#else
static inline int register_fuseblk(void)
{
        return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif

static void fuse_inode_init_once(struct kmem_cache *cachep, void *foo)
{
        struct inode *inode = foo;

        inode_init_once(inode);
}

static int __init fuse_fs_init(void)
{
        int err;

        err = register_filesystem(&fuse_fs_type);
        if (err)
                goto out;

        err = register_fuseblk();
        if (err)
                goto out_unreg;

        fuse_inode_cachep = kmem_cache_create("fuse_inode",
                                              sizeof(struct fuse_inode),
                                              0, SLAB_HWCACHE_ALIGN,
                                              fuse_inode_init_once);
        err = -ENOMEM;
        if (!fuse_inode_cachep)
                goto out_unreg2;

        return 0;

out_unreg2:
        unregister_fuseblk();
out_unreg:
        unregister_filesystem(&fuse_fs_type);
out:
        return err;
}

static void fuse_fs_cleanup(void)
{
        unregister_filesystem(&fuse_fs_type);
        unregister_fuseblk();
        kmem_cache_destroy(fuse_inode_cachep);
}

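/*
 * Kobjects for /sys/fs/fuse and /sys/fs/fuse/connections; the latter is
 * where the fuse control filesystem is normally mounted.
 */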
static struct kobject *fuse_kobj;
static struct kobject *connections_kobj;

static int fuse_sysfs_init(void)
{
        int err;

        fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
        if (!fuse_kobj) {
                err = -ENOMEM;
                goto out_err;
        }

        connections_kobj = kobject_create_and_add("connections", fuse_kobj);
        if (!connections_kobj) {
                err = -ENOMEM;
                goto out_fuse_unregister;
        }

        return 0;

out_fuse_unregister:
        kobject_put(fuse_kobj);
out_err:
        return err;
}

static void fuse_sysfs_cleanup(void)
{
        kobject_put(connections_kobj);
        kobject_put(fuse_kobj);
}

static int __init fuse_init(void)
{
        int res;

        printk("fuse init (API version %i.%i)\n",
               FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

        INIT_LIST_HEAD(&fuse_conn_list);
        res = fuse_fs_init();
        if (res)
                goto err;

        res = fuse_dev_init();
        if (res)
                goto err_fs_cleanup;

        res = fuse_sysfs_init();
        if (res)
                goto err_dev_cleanup;

        res = fuse_ctl_init();
        if (res)
                goto err_sysfs_cleanup;

        return 0;

err_sysfs_cleanup:
        fuse_sysfs_cleanup();
err_dev_cleanup:
        fuse_dev_cleanup();
err_fs_cleanup:
        fuse_fs_cleanup();
err:
        return res;
}

static void __exit fuse_exit(void)
{
        printk(KERN_DEBUG "fuse exit\n");

        fuse_ctl_cleanup();
        fuse_sysfs_cleanup();
        fuse_fs_cleanup();
        fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);