/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
static struct kmem_cache *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}
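
/* Reset a request to a pristine state with a single reference */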
static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        INIT_LIST_HEAD(&req->intr_entry);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}
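
/* Allocate a request from the slab cache; may sleep (GFP_KERNEL) */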
struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);
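
/*
 * Same as fuse_request_alloc(), but with GFP_NOFS so the allocation
 * cannot recurse back into the filesystem during memory reclaim
 */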
struct fuse_req *fuse_request_alloc_nofs(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
        if (req)
                fuse_request_init(req);
        return req;
}
void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}
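
/* Block all signals except SIGKILL, saving the old mask in oldset */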
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}
static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}
static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}
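
/* Fill in the credentials and pid of the current task in the request header */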
static void fuse_req_init_context(struct fuse_req *req)
{
        req->in.h.uid = current_fsuid();
        req->in.h.gid = current_fsgid();
        req->in.h.pid = current->pid;
}
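
/*
 * Allocate a request for a normal operation.  Waits while the
 * connection is blocked; only a fatal signal may interrupt the wait.
 * Returns an ERR_PTR() on failure.
 */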
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
        struct fuse_req *req;
        sigset_t oldset;
        int intr;
        int err;

        atomic_inc(&fc->num_waiting);
        block_sigs(&oldset);
        intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
        restore_sigs(&oldset);
        err = -EINTR;
        if (intr)
                goto out;

        err = -ENOTCONN;
        if (!fc->connected)
                goto out;

        req = fuse_request_alloc();
        err = -ENOMEM;
        if (!req)
                goto out;

        fuse_req_init_context(req);
        req->waiting = 1;
        return req;

 out:
        atomic_dec(&fc->num_waiting);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                                         struct file *file)
{
        struct fuse_req *req = NULL;
        struct fuse_file *ff = file->private_data;

        do {
                wait_event(fc->reserved_req_waitq, ff->reserved_req);
                spin_lock(&fc->lock);
                if (ff->reserved_req) {
                        req = ff->reserved_req;
                        ff->reserved_req = NULL;
                        get_file(file);
                        req->stolen_file = file;
                }
                spin_unlock(&fc->lock);
        } while (!req);

        return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct file *file = req->stolen_file;
        struct fuse_file *ff = file->private_data;

        spin_lock(&fc->lock);
        fuse_request_init(req);
        BUG_ON(ff->reserved_req);
        ff->reserved_req = req;
        wake_up_all(&fc->reserved_req_waitq);
        spin_unlock(&fc->lock);
        fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
        struct fuse_req *req;

        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, !fc->blocked);
        req = fuse_request_alloc();
        if (!req)
                req = get_reserved_req(fc, file);

        fuse_req_init_context(req);
        req->waiting = 1;
        return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                if (req->waiting)
                        atomic_dec(&fc->num_waiting);

                if (req->stolen_file)
                        put_reserved_req(fc, req);
                else
                        fuse_request_free(req);
        }
}
EXPORT_SYMBOL_GPL(fuse_put_request);
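
/* Total length in bytes of an argument vector */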
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}
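
/* Allocate the next unique request ID; the value zero is never handed out */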
static u64 fuse_get_unique(struct fuse_conn *fc)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;

        return fc->reqctr;
}
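
/*
 * Add a request to the pending list and wake up any reader of the
 * device file (poll, read or SIGIO via fasync)
 */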
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->in.h.unique = fuse_get_unique(fc);
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        if (!req->waiting) {
                req->waiting = 1;
                atomic_inc(&fc->num_waiting);
        }
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
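
/*
 * Move queued background requests onto the pending list while fewer
 * than max_background of them are active
 */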
static void flush_bg_queue(struct fuse_conn *fc)
{
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;

                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                queue_request(fc, req);
        }
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del(&req->list);
        list_del(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
                if (fc->num_background == fc->max_background) {
                        fc->blocked = 0;
                        wake_up_all(&fc->blocked_waitq);
                }
                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
                        clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                        clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
                }
                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
        }
        spin_unlock(&fc->lock);
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
        fuse_put_request(fc, req);
}
static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        if (signal_pending(current))
                return;

        spin_unlock(&fc->lock);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
}
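
/* Queue an INTERRUPT for an already sent request and notify the reader */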
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
        list_add_tail(&req->intr_entry, &fc->interrupts);
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
                wait_answer_interruptible(fc, req);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                req->interrupted = 1;
                if (req->state == FUSE_REQ_SENT)
                        queue_interrupt(fc, req);
        }

        if (!req->force) {
                sigset_t oldset;

                /* Only fatal signals may interrupt this */
                block_sigs(&oldset);
                wait_answer_interruptible(fc, req);
                restore_sigs(&oldset);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                /* Request is not yet in userspace, bail out */
                if (req->state == FUSE_REQ_PENDING) {
                        list_del(&req->list);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
        }

        /*
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
        spin_unlock(&fc->lock);
        wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);

        if (!req->aborted)
                return;

 aborted:
        BUG_ON(req->state != FUSE_REQ_FINISHED);
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
}
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
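
/*
 * Queue a background request without waiting for the answer.
 * Called with fc->lock held.
 */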
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
                                            struct fuse_req *req)
{
        req->background = 1;
        fc->num_background++;
        if (fc->num_background == fc->max_background)
                fc->blocked = 1;
        if (fc->num_background == fc->congestion_threshold &&
            fc->bdi_initialized) {
                set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
        }
        list_add_tail(&req->list, &fc->bg_queue);
        flush_bg_queue(fc);
}
static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fuse_request_send_nowait_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}
void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        fuse_request_send_nowait(fc, req);
}
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
                                         struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait_locked(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
                if (req->aborted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fc->lock);
        }
        return err;
}
/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
                if (req->aborted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
}
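
/*
 * State of a copy operation between a request and the userspace
 * buffer described by an iovec, one mapped page at a time
 */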
struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write, struct fuse_req *req,
                           const struct iovec *iov, unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                if (!cs->len) {
                        int err = fuse_copy_fill(cs);
                        if (err)
                                return err;
                }
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                if (!cs->len) {
                        int err = fuse_copy_fill(cs);
                        if (err)
                                return err;
                }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}
static int request_pending(struct fuse_conn *fc)
{
        return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && !request_pending(fc)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fc->lock);
                schedule();
                spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
                               const struct iovec *iov, unsigned long nr_segs)
__releases(&fc->lock)
{
        struct fuse_copy_state cs;
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
        unsigned reqsize = sizeof(ih) + sizeof(arg);
        int err;

        list_del_init(&req->intr_entry);
        req->intr_unique = fuse_get_unique(fc);
        memset(&ih, 0, sizeof(ih));
        memset(&arg, 0, sizeof(arg));
        ih.len = reqsize;
        ih.opcode = FUSE_INTERRUPT;
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;

        spin_unlock(&fc->lock);
        if (iov_length(iov, nr_segs) < reqsize)
                return -EINVAL;

        fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
        err = fuse_copy_one(&cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(&cs, &arg, sizeof(arg));
        fuse_copy_finish(&cs);

        return err ? err : reqsize;
}
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;
        struct file *file = iocb->ki_filp;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

 restart:
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            !request_pending(fc))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (!request_pending(fc))
                goto err_unlock;

        if (!list_empty(&fc->interrupts)) {
                req = list_entry(fc->interrupts.next, struct fuse_req,
                                 intr_entry);
                return fuse_read_interrupt(fc, req, iov, nr_segs);
        }

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fc->lock);
        req->locked = 0;
        if (req->aborted) {
                request_end(fc, req);
                return -ENODEV;
        }
        if (err) {
                req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                if (req->interrupted)
                        queue_interrupt(fc, req);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fc->lock);
        return err;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
                            struct fuse_copy_state *cs)
{
        struct fuse_notify_poll_wakeup_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        fuse_copy_finish(cs);
        return fuse_notify_poll_wakeup(fc, &outarg);

err:
        fuse_copy_finish(cs);
        return err;
}
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_inode_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;
        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                               outarg.off, outarg.len);
        }
        up_read(&fc->killsb);
        return err;

err:
        fuse_copy_finish(cs);
        return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_entry_out outarg;
        int err = -EINVAL;
        char buf[FUSE_NAME_MAX+1];
        struct qstr name;

        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
        up_read(&fc->killsb);
        return err;

err:
        fuse_copy_finish(cs);
        return err;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
                       unsigned int size, struct fuse_copy_state *cs)
{
        switch (code) {
        case FUSE_NOTIFY_POLL:
                return fuse_notify_poll(fc, size, cs);

        case FUSE_NOTIFY_INVAL_INODE:
                return fuse_notify_inval_inode(fc, size, cs);

        case FUSE_NOTIFY_INVAL_ENTRY:
                return fuse_notify_inval_entry(fc, size, cs);

        default:
                fuse_copy_finish(cs);
                return -EINVAL;
        }
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique || req->intr_unique == unique)
                        return req;
        }
        return NULL;
}
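
/*
 * Copy the reply arguments from the userspace buffer into the
 * request, checking that the size in the header is consistent
 */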
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        int err;
        size_t nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;

        err = -EINVAL;
        if (oh.len != nbytes)
                goto err_finish;

        /*
         * Zero oh.unique indicates unsolicited notification message
         * and error contains notification code.
         */
        if (!oh.unique) {
                err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
                return err ? err : nbytes;
        }

        err = -EINVAL;
        if (oh.error <= -1000 || oh.error > 0)
                goto err_finish;

        spin_lock(&fc->lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        if (!req)
                goto err_unlock;

        if (req->aborted) {
                spin_unlock(&fc->lock);
                fuse_copy_finish(&cs);
                spin_lock(&fc->lock);
                request_end(fc, req);
                return -ENOENT;
        }
        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
                err = -EINVAL;
                if (nbytes != sizeof(struct fuse_out_header))
                        goto err_unlock;

                if (oh.error == -ENOSYS)
                        fc->no_interrupt = 1;
                else if (oh.error == -EAGAIN)
                        queue_interrupt(fc, req);

                spin_unlock(&fc->lock);
                fuse_copy_finish(&cs);
                return nbytes;
        }

        req->state = FUSE_REQ_WRITING;
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fc->lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err) {
                if (req->aborted)
                        err = -ENOENT;
        } else if (!req->aborted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fc->lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}
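
/*
 * The device is always writable; it is readable when a request is
 * pending or an interrupt is queued
 */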
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fc->lock);
        if (!fc->connected)
                mask = POLLERR;
        else if (request_pending(fc))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fc->lock);

        return mask;
}
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fc->lock);
        }
}
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->aborted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        fuse_put_request(fc, req);
                        spin_lock(&fc->lock);
                }
        }
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by req->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
        spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_dev_read,
        .write          = do_sync_write,
        .aio_write      = fuse_dev_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};
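
/* Create the request slab cache and register the misc device */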
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}
void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}