/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/uio.h>
15 #include <linux/miscdevice.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/slab.h>
/* NOTE(review): this chunk is extraction-garbled -- upstream line numbers are
   fused into the text and statements are split across lines; kept verbatim. */
/* Let the module autoload when the FUSE misc-device minor is opened. */
20 MODULE_ALIAS_MISCDEV(FUSE_MINOR
);
/* Slab cache backing struct fuse_req allocations (created in fuse_dev_init,
   destroyed in fuse_dev_cleanup). */
22 static kmem_cache_t
*fuse_req_cachep
;
/* Look up the fuse_conn stored in the device file's private_data, under
   fuse_lock.  From the visible test, a connection that is present but no
   longer ->connected is treated specially (the branch body is missing from
   this extraction-garbled fragment -- presumably it rejects the conn; TODO
   confirm against upstream).  Fragment kept verbatim. */
24 static struct fuse_conn
*fuse_get_conn(struct file
*file
)
27 spin_lock(&fuse_lock
);
28 fc
= file
->private_data
;
29 if (fc
&& !fc
->connected
)
31 spin_unlock(&fuse_lock
);
/* Reset a request to a pristine state: zero the whole struct, re-init its
   list linkage and wait queue, and start the refcount at 1.
   (Extraction-garbled fragment; code kept verbatim.) */
37 memset(req
, 0, sizeof(*req
));
38 INIT_LIST_HEAD(&req
->list
);
39 init_waitqueue_head(&req
->waitq
);
40 atomic_set(&req
->count
, 1);
/* Allocate a request from the slab cache and initialise it.  The visible
   code calls fuse_request_init() on the result; the NULL-check that must
   guard that call is missing from this extraction-garbled fragment --
   TODO confirm against upstream.  Code kept verbatim. */
43 struct fuse_req
*fuse_request_alloc(void)
45 struct fuse_req
*req
= kmem_cache_alloc(fuse_req_cachep
, SLAB_KERNEL
);
47 fuse_request_init(req
);
/* Return a request to the slab cache. */
51 void fuse_request_free(struct fuse_req
*req
)
53 kmem_cache_free(fuse_req_cachep
, req
);
/* Block every signal except SIGKILL, saving the previous mask in *oldset,
   so a following interruptible sleep can only be woken by SIGKILL.
   (Extraction-garbled fragment; the local sigset declaration is missing;
   code kept verbatim.) */
56 static void block_sigs(sigset_t
*oldset
)
60 siginitsetinv(&mask
, sigmask(SIGKILL
));
61 sigprocmask(SIG_BLOCK
, &mask
, oldset
);
/* Restore the signal mask saved by block_sigs(). */
64 static void restore_sigs(sigset_t
*oldset
)
66 sigprocmask(SIG_SETMASK
, oldset
, NULL
);
/* Re-initialise a request for reuse while preserving its ->preallocated
   flag.  Only legal when the caller holds the sole reference (BUG_ON
   enforces refcount == 1).  (Extraction-garbled fragment; kept verbatim.) */
69 void fuse_reset_request(struct fuse_req
*req
)
71 int preallocated
= req
->preallocated
;
72 BUG_ON(atomic_read(&req
->count
) != 1);
73 fuse_request_init(req
);
74 req
->preallocated
= preallocated
;
/* Take an extra reference on the request. */
77 static void __fuse_get_request(struct fuse_req
*req
)
79 atomic_inc(&req
->count
);
/* Drop a reference without triggering putback -- hence the >1 precondition. */
82 /* Must be called with > 1 refcount */
83 static void __fuse_put_request(struct fuse_req
*req
)
85 BUG_ON(atomic_read(&req
->count
) < 2);
86 atomic_dec(&req
->count
);
/* Take a preallocated request off fc->unused_list (which must be non-empty,
   see BUG_ON), re-initialise it, mark it preallocated, and stamp the request
   header with the caller's fsuid/fsgid/pid.
   (Extraction-garbled fragment: the local 'req' declaration and the final
   return are missing; code kept verbatim.) */
89 static struct fuse_req
*do_get_request(struct fuse_conn
*fc
)
93 spin_lock(&fuse_lock
);
94 BUG_ON(list_empty(&fc
->unused_list
));
95 req
= list_entry(fc
->unused_list
.next
, struct fuse_req
, list
);
96 list_del_init(&req
->list
);
97 spin_unlock(&fuse_lock
);
98 fuse_request_init(req
);
99 req
->preallocated
= 1;
100 req
->in
.h
.uid
= current
->fsuid
;
101 req
->in
.h
.gid
= current
->fsgid
;
102 req
->in
.h
.pid
= current
->pid
;
/* Wait (killable -- all signals but SIGKILL blocked around the down) for a
   free slot on outstanding_sem, accounting the waiter in fc->num_waiting,
   then hand out a preallocated request.  The early-return path taken when
   down_interruptible() is interrupted is missing from this fragment. */
106 /* This can return NULL, but only in case it's interrupted by a SIGKILL */
107 struct fuse_req
*fuse_get_request(struct fuse_conn
*fc
)
112 atomic_inc(&fc
->num_waiting
);
114 intr
= down_interruptible(&fc
->outstanding_sem
);
115 restore_sigs(&oldset
);
117 atomic_dec(&fc
->num_waiting
);
120 return do_get_request(fc
);
/* Return a request to the connection when its last reference is dropped:
   preallocated requests go back on fc->unused_list (and the waiter count is
   decremented); others are freed outright.  The semaphore release is skipped
   while outstanding_debt is positive -- the debt is paid down first.
   (Extraction-garbled fragment; kept verbatim.) */
123 static void fuse_putback_request(struct fuse_conn
*fc
, struct fuse_req
*req
)
125 spin_lock(&fuse_lock
);
126 if (req
->preallocated
) {
127 atomic_dec(&fc
->num_waiting
);
128 list_add(&req
->list
, &fc
->unused_list
);
130 fuse_request_free(req
);
132 /* If we are in debt decrease that first */
133 if (fc
->outstanding_debt
)
134 fc
->outstanding_debt
--;
136 up(&fc
->outstanding_sem
);
137 spin_unlock(&fuse_lock
);
/* Drop a reference; on the final put, hand the request back to the conn. */
140 void fuse_put_request(struct fuse_conn
*fc
, struct fuse_req
*req
)
142 if (atomic_dec_and_test(&req
->count
))
143 fuse_putback_request(fc
, req
);
/* Unlink a background request from the background list under fuse_lock.
   The iput/fput of the objects the request pinned are missing from this
   fragment -- TODO confirm against upstream. */
146 void fuse_release_background(struct fuse_req
*req
)
152 spin_lock(&fuse_lock
);
153 list_del(&req
->bg_entry
);
154 spin_unlock(&fuse_lock
);
158 * This function is called when a request is finished. Either a reply
159 * has arrived or it was interrupted (and not yet sent) or some error
160 * occurred during communication with userspace, or the device file
161 * was closed. In case of a background request the reference to the
162 * stored objects are released. The requester thread is woken up (if
163 * still waiting), the 'end' callback is called if given, else the
164 * reference to the request is released
166 * Called with fuse_lock, unlocks it
/* NOTE(review): extraction-garbled fragment -- the branch that invokes the
   saved 'end' callback instead of fuse_put_request() is partly missing;
   code kept verbatim. */
168 static void request_end(struct fuse_conn
*fc
, struct fuse_req
*req
)
170 void (*end
) (struct fuse_conn
*, struct fuse_req
*) = req
->end
;
172 list_del(&req
->list
);
173 req
->state
= FUSE_REQ_FINISHED
;
174 spin_unlock(&fuse_lock
);
175 if (req
->background
) {
176 down_read(&fc
->sbput_sem
);
178 fuse_release_background(req
);
179 up_read(&fc
->sbput_sem
);
181 wake_up(&req
->waitq
);
185 fuse_put_request(fc
, req
);
189 * Unfortunately request interruption not just solves the deadlock
190 * problem, it causes problems too. These stem from the fact, that an
191 * interrupted request is continued to be processed in userspace,
192 * while all the locks and object references (inode and file) held
193 * during the operation are released.
195 * To release the locks is exactly why there's a need to interrupt the
196 * request, so there's not a lot that can be done about this, except
197 * introduce additional locking in userspace.
199 * More important is to keep inode and file references until userspace
200 * has replied, otherwise FORGET and RELEASE could be sent while the
201 * inode/file is still used by the filesystem.
203 * For this reason the concept of "background" request is introduced.
204 * An interrupted request is backgrounded if it has been already sent
205 * to userspace. Backgrounding involves getting an extra reference to
206 * inode(s) or file used in the request, and adding the request to
207 * fc->background list. When a reply is received for a background
208 * request, the object references are released, and the request is
209 * removed from the list. If the filesystem is unmounted while there
210 * are still background requests, the list is walked and references
211 * are released as if a reply was received.
213 * There's one more use for a background request. The RELEASE message is
214 * always sent as background, since it doesn't return an error or
/* Mark the request as background: link it onto fc->background and pin its
   inode(s) with igrab() so FORGET/RELEASE cannot race their destruction.
   (Extraction-garbled fragment: the req->background assignment and the
   get_file() on req->file are missing -- TODO confirm; kept verbatim.) */
217 static void background_request(struct fuse_conn
*fc
, struct fuse_req
*req
)
220 list_add(&req
->bg_entry
, &fc
->background
);
222 req
->inode
= igrab(req
->inode
);
224 req
->inode2
= igrab(req
->inode2
);
229 /* Called with fuse_lock held. Releases, and then reacquires it. */
/* Sleep (SIGKILL-only interruptible, via the block_sigs window) until the
   request reaches FUSE_REQ_FINISHED.  If the sleep was interrupted instead:
   mark the request interrupted with -EINTR, wait uninterruptibly for any
   in-flight copy to unlock it, then either unlink a still-pending request
   (dropping the extra ref) or background one already sent to userspace.
   (Extraction-garbled fragment; several lines missing; kept verbatim.) */
230 static void request_wait_answer(struct fuse_conn
*fc
, struct fuse_req
*req
)
234 spin_unlock(&fuse_lock
);
236 wait_event_interruptible(req
->waitq
, req
->state
== FUSE_REQ_FINISHED
);
237 restore_sigs(&oldset
);
238 spin_lock(&fuse_lock
);
239 if (req
->state
== FUSE_REQ_FINISHED
&& !req
->interrupted
)
242 if (!req
->interrupted
) {
243 req
->out
.h
.error
= -EINTR
;
244 req
->interrupted
= 1;
247 /* This is uninterruptible sleep, because data is
248 being copied to/from the buffers of req. During
249 locked state, there mustn't be any filesystem
250 operation (e.g. page fault), since that could lead
252 spin_unlock(&fuse_lock
);
253 wait_event(req
->waitq
, !req
->locked
);
254 spin_lock(&fuse_lock
);
256 if (req
->state
== FUSE_REQ_PENDING
) {
257 list_del(&req
->list
);
258 __fuse_put_request(req
);
259 } else if (req
->state
== FUSE_REQ_SENT
)
260 background_request(fc
, req
);
/* Sum the sizes of an argument array -- total payload bytes for a message.
   (Extraction-garbled fragment: 'nbytes'/'i' declarations and the return
   are missing; code kept verbatim.) */
263 static unsigned len_args(unsigned numargs
, struct fuse_arg
*args
)
268 for (i
= 0; i
< numargs
; i
++)
269 nbytes
+= args
[i
].size
;
/* Assign the request a unique id from fc->reqctr, compute in.h.len as
   header size plus all input argument sizes, charge non-preallocated
   requests (FORGET/RELEASE) against outstanding_sem without blocking
   (falling back to outstanding_debt), then append to fc->pending in state
   FUSE_REQ_PENDING.  The wakeup of the reader is missing from this
   extraction-garbled fragment; code kept verbatim. */
274 static void queue_request(struct fuse_conn
*fc
, struct fuse_req
*req
)
277 /* zero is special */
280 req
->in
.h
.unique
= fc
->reqctr
;
281 req
->in
.h
.len
= sizeof(struct fuse_in_header
) +
282 len_args(req
->in
.numargs
, (struct fuse_arg
*) req
->in
.args
);
283 if (!req
->preallocated
) {
284 /* If request is not preallocated (either FORGET or
285 RELEASE), then still decrease outstanding_sem, so
286 user can't open infinite number of files while not
287 processing the RELEASE requests. However for
288 efficiency do it without blocking, so if down()
289 would block, just increase the debt instead */
290 if (down_trylock(&fc
->outstanding_sem
))
291 fc
->outstanding_debt
++;
293 list_add_tail(&req
->list
, &fc
->pending
);
294 req
->state
= FUSE_REQ_PENDING
;
299 * This can only be interrupted by a SIGKILL
/* Send a request and wait for the answer.  Under fuse_lock: fail fast with
   -ENOTCONN / -ECONNREFUSED when the connection is down or errored
   (the 'if (!fc->connected)' guard itself is missing from this
   extraction-garbled fragment); otherwise queue it, take an extra reference
   so the request survives request_end(), and block in request_wait_answer().
   Code kept verbatim. */
301 void request_send(struct fuse_conn
*fc
, struct fuse_req
*req
)
304 spin_lock(&fuse_lock
);
306 req
->out
.h
.error
= -ENOTCONN
;
307 else if (fc
->conn_error
)
308 req
->out
.h
.error
= -ECONNREFUSED
;
310 queue_request(fc
, req
);
311 /* acquire extra reference, since request is still needed
312 after request_end() */
313 __fuse_get_request(req
);
315 request_wait_answer(fc
, req
);
317 spin_unlock(&fuse_lock
);
/* Queue a request without waiting for a reply; if the connection is gone,
   finish it immediately with -ENOTCONN via request_end().  (Extraction-
   garbled fragment: the connected-check is missing; kept verbatim.) */
320 static void request_send_nowait(struct fuse_conn
*fc
, struct fuse_req
*req
)
322 spin_lock(&fuse_lock
);
324 queue_request(fc
, req
);
325 spin_unlock(&fuse_lock
);
327 req
->out
.h
.error
= -ENOTCONN
;
328 request_end(fc
, req
);
/* Fire-and-forget send (e.g. FORGET): no reply is expected. */
332 void request_send_noreply(struct fuse_conn
*fc
, struct fuse_req
*req
)
335 request_send_nowait(fc
, req
);
/* Background send (e.g. RELEASE): register on fc->background first so the
   pinned objects outlive the request, then queue without waiting. */
338 void request_send_background(struct fuse_conn
*fc
, struct fuse_req
*req
)
341 spin_lock(&fuse_lock
);
342 background_request(fc
, req
);
343 spin_unlock(&fuse_lock
);
344 request_send_nowait(fc
, req
);
348 * Lock the request. Up to the next unlock_request() there mustn't be
349 * anything that could cause a page-fault. If the request was already
350 * interrupted bail out.
/* (Extraction-garbled fragment: the req->locked assignment and error
   return are missing; code kept verbatim.) */
352 static int lock_request(struct fuse_req
*req
)
356 spin_lock(&fuse_lock
);
357 if (req
->interrupted
)
361 spin_unlock(&fuse_lock
);
367 * Unlock request. If it was interrupted during being locked, the
368 * requester thread is currently waiting for it to be unlocked, so
/* Clear the locked state (the assignment is missing from this fragment)
   and wake the interrupted requester waiting in request_wait_answer(). */
371 static void unlock_request(struct fuse_req
*req
)
374 spin_lock(&fuse_lock
);
376 if (req
->interrupted
)
377 wake_up(&req
->waitq
);
378 spin_unlock(&fuse_lock
);
/* Cursor state for copying between a fuse_req and a userspace iovec:
   the request, the iovec array and remaining segment count, and the
   progress within the current segment.  Further members (page/buffer
   pointers, write flag) are missing from this extraction-garbled
   fragment; kept verbatim. */
382 struct fuse_copy_state
{
384 struct fuse_req
*req
;
385 const struct iovec
*iov
;
386 unsigned long nr_segs
;
387 unsigned long seglen
;
/* Initialise a copy cursor: zero everything, then record the direction,
   request, and iovec (the write/req/iov assignments are missing from this
   fragment -- only nr_segs is visible). */
395 static void fuse_copy_init(struct fuse_copy_state
*cs
, int write
,
396 struct fuse_req
*req
, const struct iovec
*iov
,
397 unsigned long nr_segs
)
399 memset(cs
, 0, sizeof(*cs
));
403 cs
->nr_segs
= nr_segs
;
406 /* Unmap and put previous page of userspace buffer */
/* kunmap the atomic mapping; when data was written into the user page,
   flush the dcache and dirty it.  (Extraction-garbled fragment: the guards
   and page_cache_release are missing; kept verbatim.) */
407 static void fuse_copy_finish(struct fuse_copy_state
*cs
)
410 kunmap_atomic(cs
->mapaddr
, KM_USER0
);
412 flush_dcache_page(cs
->pg
);
413 set_page_dirty_lock(cs
->pg
);
421 * Get another pagefull of userspace buffer, and map it to kernel
422 * address space, and lock request
/* NOTE(review): '¤t' below is mojibake for '&current' (the '&curren;'
   HTML entity) introduced by the extraction -- fix the encoding when
   restoring this file.  Fragment kept verbatim otherwise.
   Releases the request lock and the previous page, pins the next user page
   with get_user_pages() under mmap_sem, maps it with kmap_atomic, sets up
   buf/len for the coming copy, and re-locks the request. */
424 static int fuse_copy_fill(struct fuse_copy_state
*cs
)
426 unsigned long offset
;
429 unlock_request(cs
->req
);
430 fuse_copy_finish(cs
);
432 BUG_ON(!cs
->nr_segs
);
433 cs
->seglen
= cs
->iov
[0].iov_len
;
434 cs
->addr
= (unsigned long) cs
->iov
[0].iov_base
;
438 down_read(¤t
->mm
->mmap_sem
);
439 err
= get_user_pages(current
, current
->mm
, cs
->addr
, 1, cs
->write
, 0,
441 up_read(¤t
->mm
->mmap_sem
);
445 offset
= cs
->addr
% PAGE_SIZE
;
446 cs
->mapaddr
= kmap_atomic(cs
->pg
, KM_USER0
);
447 cs
->buf
= cs
->mapaddr
+ offset
;
448 cs
->len
= min(PAGE_SIZE
- offset
, cs
->seglen
);
449 cs
->seglen
-= cs
->len
;
452 return lock_request(cs
->req
);
455 /* Do as much copy to/from userspace buffer as we can */
/* Copy min(*size, cs->len) bytes: into the user buffer when cs->write is
   set, out of it otherwise (the if/else around the two memcpys, and the
   cursor/size bookkeeping and return of ncpy, are missing from this
   extraction-garbled fragment).  Code kept verbatim. */
456 static int fuse_copy_do(struct fuse_copy_state
*cs
, void **val
, unsigned *size
)
458 unsigned ncpy
= min(*size
, cs
->len
);
461 memcpy(cs
->buf
, *val
, ncpy
);
463 memcpy(*val
, cs
->buf
, ncpy
);
473 * Copy a page in the request to/from the userspace buffer. Must be
/* Copy one request page: pre-zero a partially-filled page when 'zeroing'
   is requested, then loop refilling the user-buffer window (fuse_copy_fill)
   and copying via a KM_USER1 atomic mapping; a NULL page just skips bytes.
   Finally flush the dcache when data was read from userspace into the page.
   (Extraction-garbled fragment: the surrounding while loop and returns are
   missing; code kept verbatim.) */
476 static int fuse_copy_page(struct fuse_copy_state
*cs
, struct page
*page
,
477 unsigned offset
, unsigned count
, int zeroing
)
479 if (page
&& zeroing
&& count
< PAGE_SIZE
) {
480 void *mapaddr
= kmap_atomic(page
, KM_USER1
);
481 memset(mapaddr
, 0, PAGE_SIZE
);
482 kunmap_atomic(mapaddr
, KM_USER1
);
486 if (!cs
->len
&& (err
= fuse_copy_fill(cs
)))
489 void *mapaddr
= kmap_atomic(page
, KM_USER1
);
490 void *buf
= mapaddr
+ offset
;
491 offset
+= fuse_copy_do(cs
, &buf
, &count
);
492 kunmap_atomic(mapaddr
, KM_USER1
);
494 offset
+= fuse_copy_do(cs
, NULL
, &count
);
496 if (page
&& !cs
->write
)
497 flush_dcache_page(page
);
501 /* Copy pages in the request to/from userspace buffer */
/* Walk req->pages, copying up to nbytes total; the first page starts at
   req->page_offset, subsequent pages at 0 with a full-page count.
   (Extraction-garbled fragment: the 'zeroing'/'nbytes' parameters' tail,
   the nbytes decrement, error check and returns are missing; kept
   verbatim.) */
502 static int fuse_copy_pages(struct fuse_copy_state
*cs
, unsigned nbytes
,
506 struct fuse_req
*req
= cs
->req
;
507 unsigned offset
= req
->page_offset
;
508 unsigned count
= min(nbytes
, (unsigned) PAGE_SIZE
- offset
);
510 for (i
= 0; i
< req
->num_pages
&& (nbytes
|| zeroing
); i
++) {
511 struct page
*page
= req
->pages
[i
];
512 int err
= fuse_copy_page(cs
, page
, offset
, count
, zeroing
);
517 count
= min(nbytes
, (unsigned) PAGE_SIZE
);
523 /* Copy a single argument in the request to/from userspace buffer */
/* Loop (loop header missing from this extraction-garbled fragment):
   refill the user-buffer window when exhausted, then copy as much of the
   value as fits.  Code kept verbatim. */
524 static int fuse_copy_one(struct fuse_copy_state
*cs
, void *val
, unsigned size
)
528 if (!cs
->len
&& (err
= fuse_copy_fill(cs
)))
530 fuse_copy_do(cs
, &val
, &size
);
535 /* Copy request arguments to/from userspace buffer */
/* Copy each argument in turn; when the last argument is page-backed
   (argpages), route it through fuse_copy_pages() instead of fuse_copy_one().
   Stops at the first error.  (Extraction-garbled fragment: the trailing
   'zeroing' parameter, declarations and return are missing; kept
   verbatim.) */
536 static int fuse_copy_args(struct fuse_copy_state
*cs
, unsigned numargs
,
537 unsigned argpages
, struct fuse_arg
*args
,
543 for (i
= 0; !err
&& i
< numargs
; i
++) {
544 struct fuse_arg
*arg
= &args
[i
];
545 if (i
== numargs
- 1 && argpages
)
546 err
= fuse_copy_pages(cs
, arg
->size
, zeroing
);
548 err
= fuse_copy_one(cs
, arg
->value
, arg
->size
);
553 /* Wait until a request is available on the pending list */
/* Classic exclusive-waitqueue sleep loop: while connected and fc->pending
   is empty, sleep TASK_INTERRUPTIBLE with fuse_lock dropped around the
   schedule() (the schedule() call itself is missing from this extraction-
   garbled fragment), bailing out on a pending signal.  Called with
   fuse_lock held; code kept verbatim. */
554 static void request_wait(struct fuse_conn
*fc
)
556 DECLARE_WAITQUEUE(wait
, current
);
558 add_wait_queue_exclusive(&fc
->waitq
, &wait
);
559 while (fc
->connected
&& list_empty(&fc
->pending
)) {
560 set_current_state(TASK_INTERRUPTIBLE
);
561 if (signal_pending(current
))
564 spin_unlock(&fuse_lock
);
566 spin_lock(&fuse_lock
);
568 set_current_state(TASK_RUNNING
);
569 remove_wait_queue(&fc
->waitq
, &wait
);
573 * Read a single request into the userspace filesystem's buffer. This
574 * function waits until a request is available, then removes it from
575 * the pending list and copies request data to userspace buffer. If
576 * no reply is needed (FORGET) or request has been interrupted or
577 * there was an error during the copying then it's finished by calling
578 * request_end(). Otherwise add it to the processing list, and set
/* NOTE(review): extraction-garbled fragment -- the request_wait() call,
   'in'/'reqsize'/'err' declarations, restart/error gotos and returns are
   missing; code kept verbatim.  Visible flow: pop the head of fc->pending
   into FUSE_REQ_READING on fc->io, size-check the user buffer (-EIO, or
   -E2BIG for oversized SETXATTR), copy header then args to userspace
   outside fuse_lock, then either finish the request (interrupted / copy
   error) or move it to fc->processing in state FUSE_REQ_SENT. */
581 static ssize_t
fuse_dev_readv(struct file
*file
, const struct iovec
*iov
,
582 unsigned long nr_segs
, loff_t
*off
)
585 struct fuse_conn
*fc
;
586 struct fuse_req
*req
;
588 struct fuse_copy_state cs
;
592 spin_lock(&fuse_lock
);
593 fc
= file
->private_data
;
602 if (list_empty(&fc
->pending
))
605 req
= list_entry(fc
->pending
.next
, struct fuse_req
, list
);
606 req
->state
= FUSE_REQ_READING
;
607 list_move(&req
->list
, &fc
->io
);
611 /* If request is too large, reply with an error and restart the read */
612 if (iov_length(iov
, nr_segs
) < reqsize
) {
613 req
->out
.h
.error
= -EIO
;
614 /* SETXATTR is special, since it may contain too large data */
615 if (in
->h
.opcode
== FUSE_SETXATTR
)
616 req
->out
.h
.error
= -E2BIG
;
617 request_end(fc
, req
);
620 spin_unlock(&fuse_lock
);
621 fuse_copy_init(&cs
, 1, req
, iov
, nr_segs
);
622 err
= fuse_copy_one(&cs
, &in
->h
, sizeof(in
->h
));
624 err
= fuse_copy_args(&cs
, in
->numargs
, in
->argpages
,
625 (struct fuse_arg
*) in
->args
, 0);
626 fuse_copy_finish(&cs
);
627 spin_lock(&fuse_lock
);
629 if (!err
&& req
->interrupted
)
632 if (!req
->interrupted
)
633 req
->out
.h
.error
= -EIO
;
634 request_end(fc
, req
);
638 request_end(fc
, req
);
640 req
->state
= FUSE_REQ_SENT
;
641 list_move_tail(&req
->list
, &fc
->processing
);
642 spin_unlock(&fuse_lock
);
647 spin_unlock(&fuse_lock
);
/* read() entry point: wrap the single user buffer in a one-element iovec
   and delegate to fuse_dev_readv().  (The iovec declaration and iov_base
   assignment are missing from this extraction-garbled fragment.) */
651 static ssize_t
fuse_dev_read(struct file
*file
, char __user
*buf
,
652 size_t nbytes
, loff_t
*off
)
655 iov
.iov_len
= nbytes
;
657 return fuse_dev_readv(file
, &iov
, 1, off
);
660 /* Look up request on processing list by unique ID */
/* Linear scan of fc->processing comparing in.h.unique.  (Extraction-garbled
   fragment: the matching 'return req' and the NULL fall-through are
   missing; code kept verbatim.) */
661 static struct fuse_req
*request_find(struct fuse_conn
*fc
, u64 unique
)
663 struct list_head
*entry
;
665 list_for_each(entry
, &fc
->processing
) {
666 struct fuse_req
*req
;
667 req
= list_entry(entry
, struct fuse_req
, list
);
668 if (req
->in
.h
.unique
== unique
)
/* Validate a reply's size against the expected output arguments, then copy
   them from the user buffer.  An error reply must be exactly header-sized.
   A short reply is tolerated only when the last argument is variable-length
   (out->argvar): its size is shrunk by the deficit.  Over-long replies, or
   short ones without argvar, are -EINVAL (the return is missing from this
   extraction-garbled fragment, as are the nbytes parameter tail and the
   error-reply guard).  Code kept verbatim. */
674 static int copy_out_args(struct fuse_copy_state
*cs
, struct fuse_out
*out
,
677 unsigned reqsize
= sizeof(struct fuse_out_header
);
680 return nbytes
!= reqsize
? -EINVAL
: 0;
682 reqsize
+= len_args(out
->numargs
, out
->args
);
684 if (reqsize
< nbytes
|| (reqsize
> nbytes
&& !out
->argvar
))
686 else if (reqsize
> nbytes
) {
687 struct fuse_arg
*lastarg
= &out
->args
[out
->numargs
-1];
688 unsigned diffsize
= reqsize
- nbytes
;
689 if (diffsize
> lastarg
->size
)
691 lastarg
->size
-= diffsize
;
693 return fuse_copy_args(cs
, out
->numargs
, out
->argpages
, out
->args
,
698 * Write a single reply to a request. First the header is copied from
699 * the write buffer. The request is then searched on the processing
700 * list by the unique ID found in the header. If found, then remove
701 * it from the list and copy the rest of the buffer to the request.
702 * The request is finished by calling request_end()
/* NOTE(review): extraction-garbled fragment -- the 'err' declaration,
   several error-path gotos/labels and the 'err_unlock'/'err_finish'
   cleanup targets are missing; code kept verbatim.  Visible flow: copy and
   sanity-check the out header (unique must be set, error in (-1000, 0]),
   find the request by unique id, drop an already-interrupted request with
   a dummy finish, otherwise move it to fc->io, copy the out args outside
   fuse_lock (req->locked window), and finish it -- an interruption during
   the copy yields -ENOENT/-EIO handling on re-check. */
704 static ssize_t
fuse_dev_writev(struct file
*file
, const struct iovec
*iov
,
705 unsigned long nr_segs
, loff_t
*off
)
708 unsigned nbytes
= iov_length(iov
, nr_segs
);
709 struct fuse_req
*req
;
710 struct fuse_out_header oh
;
711 struct fuse_copy_state cs
;
712 struct fuse_conn
*fc
= fuse_get_conn(file
);
716 fuse_copy_init(&cs
, 0, NULL
, iov
, nr_segs
);
717 if (nbytes
< sizeof(struct fuse_out_header
))
720 err
= fuse_copy_one(&cs
, &oh
, sizeof(oh
));
724 if (!oh
.unique
|| oh
.error
<= -1000 || oh
.error
> 0 ||
728 spin_lock(&fuse_lock
);
733 req
= request_find(fc
, oh
.unique
);
738 if (req
->interrupted
) {
739 spin_unlock(&fuse_lock
);
740 fuse_copy_finish(&cs
);
741 spin_lock(&fuse_lock
);
742 request_end(fc
, req
);
745 list_move(&req
->list
, &fc
->io
);
749 spin_unlock(&fuse_lock
);
751 err
= copy_out_args(&cs
, &req
->out
, nbytes
);
752 fuse_copy_finish(&cs
);
754 spin_lock(&fuse_lock
);
757 if (req
->interrupted
)
759 } else if (!req
->interrupted
)
760 req
->out
.h
.error
= -EIO
;
761 request_end(fc
, req
);
763 return err
? err
: nbytes
;
766 spin_unlock(&fuse_lock
);
768 fuse_copy_finish(&cs
);
/* write() entry point: wrap the single user buffer in a one-element iovec
   (casting away const for the shared iovec type) and delegate to
   fuse_dev_writev().  (The iovec declaration is missing from this
   extraction-garbled fragment.) */
772 static ssize_t
fuse_dev_write(struct file
*file
, const char __user
*buf
,
773 size_t nbytes
, loff_t
*off
)
776 iov
.iov_len
= nbytes
;
777 iov
.iov_base
= (char __user
*) buf
;
778 return fuse_dev_writev(file
, &iov
, 1, off
);
/* poll() entry point: the device is always writable (POLLOUT|POLLWRNORM);
   it becomes readable (POLLIN|POLLRDNORM) when fc->pending is non-empty.
   Registers on fc->waitq via poll_wait().  (Extraction-garbled fragment:
   the NULL-fc guard and the return of mask are missing; kept verbatim.) */
781 static unsigned fuse_dev_poll(struct file
*file
, poll_table
*wait
)
783 struct fuse_conn
*fc
= fuse_get_conn(file
);
784 unsigned mask
= POLLOUT
| POLLWRNORM
;
789 poll_wait(file
, &fc
->waitq
, wait
);
791 spin_lock(&fuse_lock
);
792 if (!list_empty(&fc
->pending
))
793 mask
|= POLLIN
| POLLRDNORM
;
794 spin_unlock(&fuse_lock
);
800 * Abort all requests on the given list (pending or processing)
802 * This function releases and reacquires fuse_lock
/* Finish every request on 'head' with -ECONNABORTED; request_end() drops
   fuse_lock, so it is retaken each iteration.  (Extraction-garbled
   fragment; kept verbatim.) */
804 static void end_requests(struct fuse_conn
*fc
, struct list_head
*head
)
806 while (!list_empty(head
)) {
807 struct fuse_req
*req
;
808 req
= list_entry(head
->next
, struct fuse_req
, list
);
809 req
->out
.h
.error
= -ECONNABORTED
;
810 request_end(fc
, req
);
811 spin_lock(&fuse_lock
);
816 * Abort requests under I/O
818 * The requests are set to interrupted and finished, and the request
819 * waiter is woken up. This will make request_wait_answer() wait
820 * until the request is unlocked and then return.
822 * If the request is asynchronous, then the end function needs to be
823 * called after waiting for the request to be unlocked (if it was
/* NOTE(review): extraction-garbled fragment -- the 'if (end)' branch body
   invoking end(fc, req) and the matching put are partly missing; code kept
   verbatim.  Visible flow: for each req on fc->io, mark it interrupted and
   finished with -ECONNABORTED, unlink it, wake the waiter; for requests
   with an 'end' callback, take an extra ref and wait outside fuse_lock for
   the copy to unlock the request before proceeding. */
826 static void end_io_requests(struct fuse_conn
*fc
)
828 while (!list_empty(&fc
->io
)) {
829 struct fuse_req
*req
=
830 list_entry(fc
->io
.next
, struct fuse_req
, list
);
831 void (*end
) (struct fuse_conn
*, struct fuse_req
*) = req
->end
;
833 req
->interrupted
= 1;
834 req
->out
.h
.error
= -ECONNABORTED
;
835 req
->state
= FUSE_REQ_FINISHED
;
836 list_del_init(&req
->list
);
837 wake_up(&req
->waitq
);
840 /* The end function will consume this reference */
841 __fuse_get_request(req
);
842 spin_unlock(&fuse_lock
);
843 wait_event(req
->waitq
, !req
->locked
);
845 spin_lock(&fuse_lock
);
851 * Abort all requests.
853 * Emergency exit in case of a malicious or accidental deadlock, or
854 * just a hung filesystem.
856 * The same effect is usually achievable through killing the
857 * filesystem daemon and all users of the filesystem. The exception
858 * is the combination of an asynchronous request and the tricky
859 * deadlock (see Documentation/filesystems/fuse.txt).
861 * During the aborting, progression of requests from the pending and
862 * processing lists onto the io list, and progression of new requests
863 * onto the pending list is prevented by req->connected being false.
865 * Progression of requests under I/O to the processing list is
866 * prevented by the req->interrupted flag being true for these
867 * requests. For this reason requests on the io list must be aborted
/* Abort the connection under fuse_lock: end pending and processing
   requests, and wake all readers blocked in request_wait().  The
   fc->connected clearing and end_io_requests() call are missing from this
   extraction-garbled fragment -- TODO confirm against upstream. */
870 void fuse_abort_conn(struct fuse_conn
*fc
)
872 spin_lock(&fuse_lock
);
876 end_requests(fc
, &fc
->pending
);
877 end_requests(fc
, &fc
->processing
);
878 wake_up_all(&fc
->waitq
);
880 spin_unlock(&fuse_lock
);
/* Device-file release: tear down the connection -- end all pending and
   processing requests and drop the connection's kobject reference.
   (Extraction-garbled fragment: the NULL-fc guard, fc->connected clearing
   and 'return 0' are missing; code kept verbatim.) */
883 static int fuse_dev_release(struct inode
*inode
, struct file
*file
)
885 struct fuse_conn
*fc
;
887 spin_lock(&fuse_lock
);
888 fc
= file
->private_data
;
891 end_requests(fc
, &fc
->pending
);
892 end_requests(fc
, &fc
->processing
);
894 spin_unlock(&fuse_lock
);
896 kobject_put(&fc
->kobj
);
/* File operations for /dev/fuse.  (Extraction-garbled fragment: the
   .open/.llseek entries and closing brace are missing; kept verbatim.) */
901 struct file_operations fuse_dev_operations
= {
902 .owner
= THIS_MODULE
,
904 .read
= fuse_dev_read
,
905 .readv
= fuse_dev_readv
,
906 .write
= fuse_dev_write
,
907 .writev
= fuse_dev_writev
,
908 .poll
= fuse_dev_poll
,
909 .release
= fuse_dev_release
,
/* Misc-device descriptor; the .minor and .name initialisers are missing
   from this fragment. */
912 static struct miscdevice fuse_miscdevice
= {
915 .fops
= &fuse_dev_operations
,
/* Module init: create the fuse_req slab cache, then register the misc
   device; on registration failure the cache is destroyed via the
   out_cache_clean path.  (Extraction-garbled fragment: the 'err'
   declaration, labels and returns are missing; code kept verbatim.) */
918 int __init
fuse_dev_init(void)
921 fuse_req_cachep
= kmem_cache_create("fuse_request",
922 sizeof(struct fuse_req
),
924 if (!fuse_req_cachep
)
927 err
= misc_register(&fuse_miscdevice
);
929 goto out_cache_clean
;
934 kmem_cache_destroy(fuse_req_cachep
);
/* Module cleanup: mirror of fuse_dev_init -- deregister the device, then
   destroy the slab cache. */
939 void fuse_dev_cleanup(void)
941 misc_deregister(&fuse_miscdevice
);
942 kmem_cache_destroy(fuse_req_cachep
);