/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);
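
/*
 * Editorial sketch of the reference-count lifecycle (not part of the
 * original source; follows from the functions above):
 *
 *	req = fuse_get_req(fc);		count == 1
 *	fuse_request_send(fc, req);	count == 2 while queued, since the
 *					sender takes an extra reference
 *	  ... request_end() drops one	count == 1
 *	fuse_put_request(fc, req);	count == 0: freed, or parked back
 *					in fuse_file->reserved_req
 */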

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->forget_list_tail->next = forget;
		fc->forget_list_tail = forget;
		wake_up(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fc->lock);
}
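
/*
 * Illustrative note (not from the original source): the forget list is
 * a singly linked list threaded through fuse_forget_link.next, with
 * fc->forget_list_head acting as a dummy anchor and
 * fc->forget_list_tail pointing at the last element (or back at the
 * anchor when the list is empty), so the enqueue above is O(1):
 *
 *	forget_list_head -> f1 -> f2 -> NULL
 *	                          ^-- forget_list_tail
 */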

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
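
/*
 * Usage sketch (editorial, not from this file; assumes a caller
 * elsewhere in fuse, details vary per operation):
 *
 *	struct fuse_req *req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->in.h.opcode = FUSE_GETATTR;
 *	req->in.h.nodeid = nodeid;
 *	... set up req->in.args[] and req->out.args[] ...
 *	fuse_request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */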

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;

	req->isreply = 0;
	req->in.h.unique = unique;
	spin_lock(&fc->lock);
	if (fc->connected) {
		queue_request(fc, req);
		err = 0;
	}
	spin_unlock(&fc->lock);

	return err;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}
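
/*
 * Illustrative note (not from the original source): the device read
 * paths initialize the copy state differently.  fuse_dev_read() below
 * uses an iovec, while fuse_dev_splice_read() leaves iov NULL and
 * points cs->pipebufs at a pipe_buffer array instead:
 *
 *	fuse_copy_init(&cs, fc, 1, iov, nr_segs);	read(2)/readv(2)
 *
 *	fuse_copy_init(&cs, fc, 1, NULL, 0);		splice(2)
 *	cs.pipebufs = bufs;
 *	cs.pipe = pipe;
 */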

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (!cs->write) {
			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		} else {
			kunmap(buf->page);
			buf->len = PAGE_SIZE - cs->len;
		}
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap(cs->pg);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
			cs->len = buf->len;
			cs->buf = cs->mapaddr + buf->offset;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			struct page *page;

			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->mapaddr = kmap(page);
			cs->buf = cs->mapaddr;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap(cs->pg);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;
	struct address_space *mapping;
	pgoff_t index;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	mapping = oldpage->mapping;
	index = oldpage->index;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_conn *fc)
{
	return fc->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
		forget_pending(fc);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
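
/*
 * Wire layout sketch (editorial, follows from the copies above): an
 * interrupt request read by the daemon is simply
 *
 *	struct fuse_in_header    ih;	ih.opcode == FUSE_INTERRUPT
 *	struct fuse_interrupt_in arg;	arg.unique == id of the request
 *					being interrupted
 *
 * ih.unique is a fresh id (req->intr_unique), distinct from the id of
 * the interrupted request carried in arg.unique.
 */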

static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fc->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fc->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fc->forget_list_head.next == NULL)
		fc->forget_list_tail = &fc->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fc->lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fc->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_conn *fc,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fc->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fc, max_forgets, &count);
	spin_unlock(&fc->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fc->lock)
{
	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fc, cs, nbytes);
	else
		return fuse_read_batch_forget(fc, cs, nbytes);
}
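
/*
 * Wire layout sketch (editorial, derived from the two functions
 * above): a batch forget, available from ABI minor 16, packs many
 * forgets into one message:
 *
 *	struct fuse_in_header       ih;		opcode FUSE_BATCH_FORGET
 *	struct fuse_batch_forget_in arg;	arg.count == N
 *	struct fuse_forget_one      one[N];	{nodeid, nlookup} each
 *
 * Older daemons get one FUSE_FORGET message per node instead.
 */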

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	if (forget_pending(fc)) {
		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
			return fuse_read_forget(fc, cs, nbytes);

		if (fc->forget_batch <= -8)
			fc->forget_batch = 16;
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
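
/*
 * Daemon-side sketch (editorial, not part of this file; assumes a
 * hypothetical fd from an open /dev/fuse): each read(2) returns
 * exactly one request, starting with struct fuse_in_header.  The
 * buffer must be large enough for the largest request (nbytes >=
 * in.h.len), otherwise the kernel errors the request and restarts
 * the read:
 *
 *	char buf[1 << 20];
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		struct fuse_in_header *in;
 *
 *		if (n < 0)
 *			break;
 *		in = (struct fuse_in_header *) buf;
 *		dispatch(in->opcode, in->unique, in + 1, n - sizeof(*in));
 *	}
 *
 * dispatch() is a placeholder for the daemon's request handler.
 */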

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 1, iov, nr_segs);

	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = fuse_dev_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL, 0);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		buf->ops = &fuse_dev_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->inode)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}
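
/*
 * Usage sketch (editorial assumption about the daemon side, not from
 * this file): a daemon can move a request into a pipe without copying:
 *
 *	ssize_t n = splice(fuse_fd, NULL, pipe_fds[1], NULL,
 *			   bufsize, SPLICE_F_MOVE);
 *
 * The pages end up referenced by pipe buffers whose ops forbid
 * stealing (fuse_dev_pipe_buf_steal() above returns nonzero),
 * presumably because the pages still belong to the request.
 */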

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}
*fc
, unsigned int size
,
1341 struct fuse_copy_state
*cs
)
1343 struct fuse_notify_inval_entry_out outarg
;
1348 buf
= kzalloc(FUSE_NAME_MAX
+ 1, GFP_KERNEL
);
1353 if (size
< sizeof(outarg
))
1356 err
= fuse_copy_one(cs
, &outarg
, sizeof(outarg
));
1360 err
= -ENAMETOOLONG
;
1361 if (outarg
.namelen
> FUSE_NAME_MAX
)
1365 if (size
!= sizeof(outarg
) + outarg
.namelen
+ 1)
1369 name
.len
= outarg
.namelen
;
1370 err
= fuse_copy_one(cs
, buf
, outarg
.namelen
+ 1);
1373 fuse_copy_finish(cs
);
1374 buf
[outarg
.namelen
] = 0;
1375 name
.hash
= full_name_hash(name
.name
, name
.len
);
1377 down_read(&fc
->killsb
);
1380 err
= fuse_reverse_inval_entry(fc
->sb
, outarg
.parent
, 0, &name
);
1381 up_read(&fc
->killsb
);
1387 fuse_copy_finish(cs
);

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_CACHE_SHIFT;
	offset = outarg.offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 && (num != 0 || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, 0);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	offset = outarg->offset & ~PAGE_CACHE_MASK;

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_CACHE_SHIFT;
	file_size = i_size_read(inode);
	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}
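
/*
 * Daemon-side sketch (editorial, not from this file; field values are
 * hypothetical): a notification is an unsolicited write with a zero
 * unique id and the notification code carried in the error field,
 * e.g. for an inode invalidation:
 *
 *	struct fuse_notify_inval_inode_out outarg = {
 *		.ino = nodeid, .off = 0, .len = -1,
 *	};
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh) + sizeof(outarg),
 *		.error  = FUSE_NOTIFY_INVAL_INODE,
 *		.unique = 0,
 *	};
 *	writev(fuse_fd, (struct iovec[]){{&oh, sizeof(oh)},
 *					 {&outarg, sizeof(outarg)}}, 2);
 */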

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}
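
/*
 * Daemon-side sketch (editorial, not from this file): a reply is a
 * single write(2)/writev(2) whose length must equal oh.len, echoing
 * the request's unique id:
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh) + payload_len,
 *		.error  = 0,			0 or -errno
 *		.unique = in->unique,
 *	};
 *	writev(fuse_fd, (struct iovec[]){{&oh, sizeof(oh)},
 *					 {payload, payload_len}}, 2);
 */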

static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, iov, nr_segs);

	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
	cs.pipebufs = bufs;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
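
/*
 * Usage sketch (editorial, not from this file): the poll support above
 * lets a daemon multiplex /dev/fuse with other work:
 *
 *	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		... handle one request with read(2) or splice(2) ...
 *
 * POLLERR is reported once the connection is no longer usable.
 */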

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

static void end_queued_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	fc->max_background = UINT_MAX;
	flush_bg_queue(fc);
	end_requests(fc, &fc->pending);
	end_requests(fc, &fc->processing);
	while (forget_pending(fc))
		kfree(dequeue_forget(fc, 1, NULL));
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		fc->blocked = 0;
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}