/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;
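/* Return the connection for a device file, or NULL if the filesystem
   is not (or no longer) mounted */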
static struct fuse_conn *fuse_get_conn(struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc && !fc->mounted)
                fc = NULL;
        spin_unlock(&fuse_lock);
        return fc;
}
static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}
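/* Block all signals except SIGKILL for the current task, saving the
   old signal mask in oldset */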
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}
void fuse_reset_request(struct fuse_req *req)
{
        int preallocated = req->preallocated;
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
        req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}
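/* Take a preallocated request off the unused list and fill in the
   credentials of the current task */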
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
        struct fuse_req *req;

        spin_lock(&fuse_lock);
        BUG_ON(list_empty(&fc->unused_list));
        req = list_entry(fc->unused_list.next, struct fuse_req, list);
        list_del_init(&req->list);
        spin_unlock(&fuse_lock);
        fuse_request_init(req);
        req->preallocated = 1;
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}
/* This can return NULL, but only in case it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
        int intr;
        sigset_t oldset;

        block_sigs(&oldset);
        intr = down_interruptible(&fc->outstanding_sem);
        restore_sigs(&oldset);
        return intr ? NULL : do_get_request(fc);
}
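/* Put a finished request back on the unused list (or free it if it
   wasn't preallocated) and give back the slot taken on
   outstanding_sem, or reduce the debt instead */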
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (req->preallocated)
                list_add(&req->list, &fc->unused_list);
        else
                fuse_request_free(req);

        /* If we are in debt decrease that first */
        if (fc->outstanding_debt)
                fc->outstanding_debt--;
        else
                up(&fc->outstanding_sem);
        spin_unlock(&fuse_lock);
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count))
                fuse_putback_request(fc, req);
}
void fuse_release_background(struct fuse_req *req)
{
        /* Drop the object references taken by background_request() */
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fuse_lock);
        list_del(&req->bg_entry);
        spin_unlock(&fuse_lock);
}
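/* Process the reply to the INIT request: record the negotiated
   protocol parameters and let other requests proceed */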
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;
        struct fuse_init_out *arg = &req->misc.init_out;

        if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
                fc->conn_error = 1;
        else {
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
        }

        /* After INIT reply is received other requests can go out.  So
           do (FUSE_MAX_OUTSTANDING - 1) number of up()s on
           outstanding_sem.  The last up() is done in
           fuse_putback_request() */
        for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
                up(&fc->outstanding_sem);
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), and finally the reference to the request is
 * released.
 *
 * Called with fuse_lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
        spin_unlock(&fuse_lock);
        if (req->background) {
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(req);
                up_read(&fc->sbput_sem);
        }
        wake_up(&req->waitq);
        if (req->in.h.opcode == FUSE_INIT)
                process_init_reply(fc, req);
        else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
                /* Special case for failed iget in CREATE */
                u64 nodeid = req->in.h.nodeid;
                fuse_reset_request(req);
                fuse_send_forget(fc, req, nodeid, 1);
                return;
        }
        fuse_put_request(fc, req);
}
/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except introduce additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and the references are released as if a reply
 * had been received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * anything.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}
/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fuse_lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
        spin_lock(&fuse_lock);
        if (req->state == FUSE_REQ_FINISHED)
                return;

        req->out.h.error = -EINTR;
        req->interrupted = 1;
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fuse_lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fuse_lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (req->state == FUSE_REQ_SENT)
                background_request(fc, req);
}
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}
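/* Assign a unique ID to the request, fill in the total length of the
   input arguments and add the request to the pending list */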
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        if (!req->preallocated) {
                /* If request is not preallocated (either FORGET or
                   RELEASE), then still decrease outstanding_sem, so
                   user can't open an infinite number of files while
                   not processing the RELEASE requests.  However for
                   efficiency do it without blocking, so if down()
                   would block, just increase the debt instead */
                if (down_trylock(&fc->outstanding_sem))
                        fc->outstanding_debt++;
        }
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        wake_up(&fc->waitq);
}
/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fuse_lock);
}
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fuse_lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}
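/* Send a request in the background: the reply is not waited for, and
   the request is put on fc->background so its object references are
   only released when the reply arrives or the filesystem is
   unmounted */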
void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        background_request(fc, req);
        spin_unlock(&fuse_lock);
        request_send_nowait(fc, req);
}
void fuse_send_init(struct fuse_conn *fc)
{
        /* This is called from fuse_read_super() so there's guaranteed
           to be exactly one request available */
        struct fuse_req *req = fuse_get_request(fc);
        struct fuse_init_in *arg = &req->misc.init_in;
        arg->major = FUSE_KERNEL_VERSION;
        arg->minor = FUSE_KERNEL_MINOR_VERSION;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
        req->out.numargs = 1;
        /* Variable length argument used for backward compatibility
           with interface version < 7.5.  Rest of init_out is zeroed
           by do_get_request(), so a short reply is not a problem */
        req->out.argvar = 1;
        req->out.args[0].size = sizeof(struct fuse_init_out);
        req->out.args[0].value = &req->misc.init_out;
        request_send_background(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted bail out.
 */
static int lock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fuse_lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fuse_lock);
        }
        return err;
}
/*
 * Unlock request.  If it was interrupted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_req *req)
{
        if (req) {
                spin_lock(&fuse_lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fuse_lock);
        }
}
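/* State kept while copying a request to/from the userspace buffer,
   one iovec segment and one mapped page at a time */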
struct fuse_copy_state {
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct fuse_req *req, const struct iovec *iov,
                           unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->mounted && list_empty(&fc->pending)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fuse_lock);
                schedule();
                spin_lock(&fuse_lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.
 * If no reply is needed (FORGET) or the request has been interrupted
 * or there was an error during the copying then it's finished by
 * calling request_end().  Otherwise add it to the processing list,
 * and set the 'sent' state.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
{
        int err;
        struct fuse_conn *fc;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;

 restart:
        spin_lock(&fuse_lock);
        fc = file->private_data;
        err = -EPERM;
        if (!fc)
                goto err_unlock;
        request_wait(fc);
        err = -ENODEV;
        if (!fc->mounted)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (list_empty(&fc->pending))
                goto err_unlock;

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fuse_lock);
        fuse_copy_init(&cs, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
        if (err) {
                if (!req->interrupted)
                        req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                spin_unlock(&fuse_lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fuse_lock);
        return err;
}
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
                             size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = buf;
        return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique)
                        return req;
        }
        return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *off)
{
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
        err = -EINVAL;
        if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
            oh.len != nbytes)
                goto err_finish;

        spin_lock(&fuse_lock);
        req = request_find(fc, oh.unique);
        err = -ENOENT;
        if (!req)
                goto err_unlock;

        if (req->interrupted) {
                spin_unlock(&fuse_lock);
                fuse_copy_finish(&cs);
                spin_lock(&fuse_lock);
                request_end(fc, req);
                return -ENOENT;
        }
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fuse_lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
                        err = -ENOENT;
        } else if (!req->interrupted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fuse_lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}
static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
                              size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = (char __user *) buf;
        return fuse_dev_writev(file, &iov, 1, off);
}
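/* The device is always writable; it is readable when there are
   requests waiting on the pending list */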
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        unsigned mask = POLLOUT | POLLWRNORM;

        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fuse_lock);
        if (!list_empty(&fc->pending))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fuse_lock);

        return mask;
}
/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fuse_lock);
        }
}
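/* Called when the last reference to the device file is dropped: abort
   all pending and processing requests and release the connection */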
static int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc) {
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                fuse_release_conn(fc);
        }
        spin_unlock(&fuse_lock);
        return 0;
}
struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .read           = fuse_dev_read,
        .readv          = fuse_dev_readv,
        .write          = fuse_dev_write,
        .writev         = fuse_dev_writev,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
};

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}