/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
static const struct file_operations fuse_direct_io_file_operations;
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}
void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}
struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	path_put(&req->misc.release.path);
}
static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		req->end = fuse_release_end;
		fuse_request_send_background(ff->fc, req);
		kfree(ff);
	}
}
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}
static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_sync(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}
void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 */
	fuse_file_put(ff);
}
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}
static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}
void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	fuse_file_free(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}
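/*
 * The waitqueue above is paired with the wake_up(&fi->page_waitq) in
 * fuse_writepage_finish(): every completed writepage request wakes the
 * waiters, and each waiter re-walks fi->writepages to check whether its
 * own page is still in flight.
 */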
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
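/*
 * A minimal sketch of the NOWRITE mechanism assumed here (the helpers
 * live outside this file): fuse_set_nowrite() parks fi->writectr at the
 * negative FUSE_NOWRITE sentinel and waits until all in-flight
 * writepages have completed, so fuse_flush_writepages() queues instead
 * of sending; fuse_release_nowrite() restores the counter and flushes
 * the queued requests.
 */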
int fuse_fsync_common(struct file *file, int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}
static int fuse_fsync(struct file *file, int datasync)
{
	return fuse_fsync_common(file, datasync, 0);
}
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count) {
			loff_t pos;

			pos = page_offset(req->pages[0]) + num_read;
			fuse_read_update_size(inode, pos,
					      req->misc.read.attr_ver);
		}

		fuse_invalidate_attr(inode); /* atime changed */
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}
struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}
void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}
static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}
static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}
*req
, const char __user
*buf
,
998 size_t *nbytesp
, int write
)
1000 size_t nbytes
= *nbytesp
;
1001 unsigned long user_addr
= (unsigned long) buf
;
1002 unsigned offset
= user_addr
& ~PAGE_MASK
;
1005 /* Special case for kernel I/O: can copy directly into the buffer */
1006 if (segment_eq(get_fs(), KERNEL_DS
)) {
1008 req
->in
.args
[1].value
= (void *) user_addr
;
1010 req
->out
.args
[0].value
= (void *) user_addr
;
1015 nbytes
= min_t(size_t, nbytes
, FUSE_MAX_PAGES_PER_REQ
<< PAGE_SHIFT
);
1016 npages
= (nbytes
+ offset
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
1017 npages
= clamp(npages
, 1, FUSE_MAX_PAGES_PER_REQ
);
1018 npages
= get_user_pages_fast(user_addr
, npages
, !write
, req
->pages
);
1022 req
->num_pages
= npages
;
1023 req
->page_offset
= offset
;
1026 req
->in
.argpages
= 1;
1028 req
->out
.argpages
= 1;
1030 nbytes
= (req
->num_pages
<< PAGE_SHIFT
) - req
->page_offset
;
1031 *nbytesp
= min(*nbytesp
, nbytes
);
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
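/*
 * Example of the chunking performed above: with fc->max_write set to
 * 128 KiB (131072 bytes), a 1 MiB direct write is issued as 8
 * consecutive FUSE_WRITE requests; a short reply from any of them
 * (nres != nbytes) terminates the loop early and returns the bytes
 * transferred so far.
 */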
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}
static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, buf, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}
	mutex_unlock(&inode->i_mutex);

	fuse_invalidate_attr(inode);

	return res;
}
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}
static int fuse_launder_page(struct page *page)
{
	int err = 0;

	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;

		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}
/*
 * Write back dirty pages now, because there may not be any suitable
 * open file later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}
static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}
*file
, int cmd
, struct file_lock
*fl
)
1507 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
1508 struct fuse_conn
*fc
= get_fuse_conn(inode
);
1512 err
= flock_lock_file_wait(file
, fl
);
1514 /* emulate flock with POSIX locks */
1515 fl
->fl_owner
= (fl_owner_t
) file
;
1516 err
= fuse_setlk(file
, fl
, 1);
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}
/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in and
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * };
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be NULL; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on this invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) },
 *   { .iov_base = a.buf,	.iov_len = a.buflen } }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct page *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

	/* assume all the iovs returned by client always fit in a page */
	BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
	iov_page = alloc_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = page_address(iov_page);

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		char *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		err = -EIO;
		if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
			goto out;

		/* okay, copy in iovs and retry */
		vaddr = kmap_atomic(pages[0], KM_USER0);
		memcpy(page_address(iov_page), vaddr, transferred);
		kunmap_atomic(vaddr, KM_USER0);

		in_iov = page_address(iov_page);
		out_iov = in_iov + in_iovs;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	if (iov_page)
		__free_page(iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
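/*
 * Server-side view of the retry protocol above, as a sketch (the reply
 * fields follow struct fuse_ioctl_out; the handler itself and "struct a"
 * are the hypothetical example from the comment before fuse_do_ioctl()):
 *
 *	out.flags   = FUSE_IOCTL_RETRY;
 *	out.in_iovs = 2;
 *	iov[0].iov_base = (void *)arg;	iov[0].iov_len = sizeof(struct a);
 *	iov[1].iov_base = a.buf;	iov[1].iov_len = a.buflen;
 *
 * The kernel then copies both areas in and reissues FUSE_IOCTL, which
 * corresponds to the second round described in that comment.
 */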
static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
				   unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
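/*
 * Two calling conventions, both visible below: insertion
 * (fuse_register_polled_file) passes &parent so the returned link can
 * be wired up with rb_link_node(); lookup (fuse_notify_poll_wakeup)
 * passes NULL and only tests *link.
 */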
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
};
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	/* no splice_read */
};
static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}