/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}
struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}
void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}
static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	fuse_request_send_nowait(fc, req);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++)  {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
__releases(&fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	size_t nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by req->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}