/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/freezer.h>
static const struct file_operations fuse_direct_io_file_operations;
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
			  struct fuse_open_out *outargp)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	FUSE_MIGHT_FREEZE(inode->i_sb, "fuse_send_open");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}
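/*
 * Note on fuse_send_open(): O_CREAT, O_EXCL and O_NOCTTY are masked out
 * because creation is resolved by the VFS before ->open() is called, so
 * the server only needs the flags relevant to opening an existing inode.
 * O_TRUNC is stripped as well unless the server negotiated atomic
 * open-plus-truncate (fc->atomic_o_trunc).
 */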
struct fuse_file *fuse_file_alloc(void)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			kfree(ff);
			ff = NULL;
		} else {
			INIT_LIST_HEAD(&ff->write_entry);
			atomic_set(&ff->count, 0);
		}
	}
	return ff;
}
void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}
static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	dput(req->misc.release.dentry);
	mntput(req->misc.release.vfsmount);
	fuse_put_request(fc, req);
}
static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;
		struct inode *inode = req->misc.release.dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		req->end = fuse_release_end;
		request_send_background(fc, req);
		kfree(ff);
	}
}
void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (outarg->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	ff->fh = outarg->fh;
	file->private_data = fuse_file_get(ff);
}
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	ff = fuse_file_alloc();
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(inode, file, isdir, &outarg);
	if (err)
		fuse_file_free(ff);
	else {
		if (isdir)
			outarg.open_flags &= ~FOPEN_DIRECT_IO;
		fuse_finish_open(inode, file, ff, &outarg);
	}

	return err;
}
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}
int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req = ff->reserved_req;

		fuse_release_fill(ff, get_node_id(inode), file->f_flags,
				  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->misc.release.vfsmount = mntget(file->f_path.mnt);
		req->misc.release.dentry = dget(file->f_path.dentry);

		spin_lock(&fc->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fc->lock);
		/*
		 * Normally this will send the RELEASE request,
		 * however if some asynchronous READ or WRITE requests
		 * are outstanding, the sending will be delayed
		 */
		fuse_file_put(ff);
	}

	/* Return value is ignored by VFS */
	return 0;
}
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}
static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
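/*
 * The loop above is a standard XTEA encipher: 32 iterations of two
 * Feistel half-rounds each, with 0x9E3779B9 (2^32 divided by the golden
 * ratio) as the key-schedule constant.  The point is only to keep the
 * kernel pointer value out of userspace, not cryptographic strength.
 */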
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
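/*
 * The set/release pair above acts as a write barrier: fuse_set_nowrite()
 * stops new writepage requests from being queued and waits for the ones
 * already sent, and fuse_release_nowrite() re-enables queuing and flushes
 * anything that piled up in the meantime.
 */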
int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}
static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}
void fuse_read_fill(struct fuse_req *req, struct file *file,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argpages = 1;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}
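/*
 * out.argpages routes the reply payload straight into req->pages[], and
 * out.argvar lets the server return fewer bytes than requested; the
 * actual reply length then ends up in req->out.args[0].size.
 */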
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count,
			     fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	request_send(fc, req);
	return req->out.args[0].size;
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct inode *inode = req->pages[0]->mapping->host;

	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (!req->out.h.error && num_read < count) {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
	}

	fuse_invalidate_attr(inode); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
	fuse_put_request(fc, req);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		struct fuse_file *ff = file->private_data;
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		request_send_background(fc, req);
	} else {
		request_send(fc, req);
		fuse_readpages_end(fc, req);
	}
}
struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}
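/*
 * A request is flushed early when it is already full
 * (FUSE_MAX_PAGES_PER_REQ), when one more page would exceed the
 * connection's max_read, or when the new page is not contiguous with
 * the previous one; otherwise pages keep accumulating into one READ.
 */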
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file, inode);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
static void fuse_write_fill(struct fuse_req *req, struct file *file,
			    struct fuse_file *ff, struct inode *inode,
			    loff_t pos, size_t count, int writepage)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	memset(inarg, 0, sizeof(struct fuse_write_in));
	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
	inarg->flags = file ? file->f_flags : 0;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.argpages = 1;
	req->in.numargs = 2;
	if (fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}
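/*
 * FUSE protocol 7.9 grew fuse_write_in by the lock_owner and flags
 * fields; servers that negotiated an older minor version still expect
 * the short layout, hence FUSE_COMPAT_WRITE_IN_SIZE when fc->minor < 9.
 */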
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
	if (owner != NULL) {
		struct fuse_write_in *inarg = &req->misc.write.in;
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	request_send(fc, req);
	return req->misc.write.out.size;
}
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = __grab_cache_page(mapping, index);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}
static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	FUSE_MIGHT_FREEZE(inode->i_sb, "fuse_commit_write");

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}
static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}
static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, inode, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = __grab_cache_page(mapping, index);
		if (!page)
			break;

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
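/*
 * A zero-length atomic copy above means the source was not resident
 * (the copy runs with pagefaults disabled): the retry path clamps the
 * chunk to a single iovec segment and loops back, letting
 * iov_iter_fault_in_readable() fault the data in before trying again.
 */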
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       unsigned nbytes, int write)
{
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* This doesn't work with nfsd */
	if (!current->mm)
		return -EPERM;

	nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;
	return 0;
}
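/*
 * Direct I/O pins the caller's buffer with get_user_pages() so the
 * request can address it without an intermediate copy;
 * fuse_release_user_pages() later drops the pins, dirtying the pages
 * when they were the target of a read.
 */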
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos, int write)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	FUSE_MIGHT_FREEZE(file->f_mapping->host->i_sb, "fuse_direct_io");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		size_t nbytes_limit = min(count, nmax);
		size_t nbytes;
		int err = fuse_get_user_pages(req, buf, nbytes_limit, !write);
		if (err) {
			res = err;
			break;
		}
		nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
		nbytes = min(nbytes_limit, nbytes);
		if (write)
			nres = fuse_send_write(req, file, inode, pos, nbytes,
					       current->files);
		else
			nres = fuse_send_read(req, file, inode, pos, nbytes,
					      current->files);
		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	fuse_put_request(fc, req);
	if (res > 0) {
		if (write)
			fuse_write_update_size(inode, pos);
		*ppos = pos;
	}
	fuse_invalidate_attr(inode);

	return res;
}
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}
static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
	fuse_put_request(fc, req);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	spin_lock(&fc->lock);
}
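/*
 * The size clamping above guards against a racing truncate: a page that
 * still straddles i_size is sent partially, and one that now lies
 * entirely beyond EOF is dropped without ever reaching the server.
 */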
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);

	copy_highpage(tmp_page, page);
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}
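/*
 * The data is copied to a temporary page and writeback on the original
 * page is ended immediately, so memory reclaim never has to wait on a
 * possibly unprivileged userspace filesystem; the in-flight copies are
 * accounted as NR_WRITEBACK_TEMP instead of ordinary writeback.
 */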
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}
static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}
/*
 * Write back dirty pages now, because there may not be any suitable
 * open file later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}
static struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	FUSE_MIGHT_FREEZE(file->f_mapping->host->i_sb, "fuse_getlk");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	FUSE_MIGHT_FREEZE(file->f_mapping->host->i_sb, "fuse_setlk");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	FUSE_MIGHT_FREEZE(inode->i_sb, "fuse_bmap");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
};
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	/* no mmap and splice_read */
};
static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};
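/*
 * __set_page_dirty_nobuffers is sufficient here: fuse pages never carry
 * buffer heads, so dirtying only needs the page-cache tag and dirty
 * accounting that the generic helper provides.
 */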
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}