/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

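/*
 * Note on lifetime: every request starts with a reference count of
 * one (set up in fuse_request_init()) and is freed once the last
 * fuse_put_request() drops the count to zero.  A request "stolen"
 * from a fuse_file's reserved slot is handed back to that slot
 * instead of being freed, which is what lets fuse_get_req_nofail()
 * above always succeed.
 */
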
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}

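/*
 * Background requests are throttled: at most fc->max_background of
 * them are active (queued for userspace) at any one time, the rest
 * sit on fc->bg_queue.  request_end() below calls back into this
 * flush as slots free up.
 */
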
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

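/*
 * The wait above thus happens in up to three phases: interruptible
 * by any signal until an INTERRUPT is queued, interruptible only by
 * fatal signals (SIGKILL) until the request reaches userspace, and
 * fully uninterruptible once userspace has seen the request (or the
 * request was forced).
 */
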
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	fuse_request_send_nowait(fc, req);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

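/*
 * A fuse_copy_state walks either a user iovec (iov/seglen/addr) or
 * an array of pipe buffers (pipebufs), whichever the caller set up;
 * fuse_copy_fill() below advances it one page at a time.
 */
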
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		err = buf->ops->confirm(cs->pipe, buf);
		if (err)
			return err;

		BUG_ON(!cs->nr_segs);
		cs->currbuf = buf;
		cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
		cs->len = buf->len;
		cs->buf = cs->mapaddr + buf->offset;
		cs->pipebufs++;
		cs->nr_segs--;
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

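/*
 * A page can only be "stolen" into the page cache if nothing else
 * holds a reference to it and no unexpected page flags are set;
 * otherwise fuse_try_move_page() below must fall back to copying.
 */
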
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;
	struct address_space *mapping;
	pgoff_t index;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	mapping = oldpage->mapping;
	index = oldpage->index;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	remove_from_page_cache(oldpage);
	page_cache_release(oldpage);

	err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
	if (err) {
		printk(KERN_WARNING "fuse_try_move_page: failed to add page");
		goto out_fallback_unlock;
	}
	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

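/*
 * Return convention for fuse_try_move_page(): 0 means the page was
 * successfully moved, a negative value is an error, and 1 (the
 * fallback path) tells fuse_copy_page() to do an ordinary copy
 * instead.
 */
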
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
__releases(&fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}

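/*
 * The INTERRUPT request gets its own unique ID (req->intr_unique),
 * distinct from the ID of the request being interrupted; userspace
 * answers with a reply to that ID, or with -EAGAIN to have the
 * interrupt requeued (see fuse_dev_do_write() below).
 */
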
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

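/*
 * Note the 'restart' label above: a read buffer that is too small
 * for the request fails that request with -EIO (-E2BIG for SETXATTR)
 * and the read is retried with the next pending request, rather than
 * returning an error to the reading daemon.
 */
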
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

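/*
 * Notifications are unsolicited messages from the userspace
 * filesystem, written with a zero unique ID; the notification code
 * travels in the 'error' field of the out header (see
 * fuse_dev_do_write() below).
 */
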
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}

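/*
 * For reference, a minimal userspace reply to a request with ID
 * 'unique' and no payload could look like this (a sketch, assuming
 * 'fuse_fd' is the daemon's open /dev/fuse descriptor):
 *
 *	struct fuse_out_header out = {
 *		.len    = sizeof(out),
 *		.error  = 0,
 *		.unique = unique,
 *	};
 *	write(fuse_fd, &out, sizeof(out));
 *
 * fuse_dev_do_write() insists that oh.len matches the number of
 * bytes actually written.
 */
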
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);

	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	memset(&cs, 0, sizeof(struct fuse_copy_state));
	cs.fc = fc;
	cs.write = 0;
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}

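/*
 * The splice path above first detaches (or takes an extra reference
 * on) just enough pipe buffers to cover 'len' bytes into a private
 * 'bufs' array, so that the pipe lock can be dropped before the
 * potentially slow fuse_dev_do_write() runs.
 */
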
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

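/*
 * Aborting a connection is normally triggered through the 'abort'
 * file in the fusectl filesystem (see fs/fuse/control.c), which ends
 * up calling fuse_abort_conn().
 */
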
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}