[linux-2.6/zen-sources.git] / fs/fuse/file.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"
#include "fuse.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/freezer.h>

static const struct file_operations fuse_direct_io_file_operations;
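/*
 * Send an OPEN (or OPENDIR) request and wait for the reply.  O_CREAT,
 * O_EXCL and O_NOCTTY are handled by the VFS, so they are masked out
 * before the flags are passed to userspace.
 */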
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
			  struct fuse_open_out *outargp)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	FUSE_MIGHT_FREEZE(inode->i_sb, "fuse_send_open");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}
struct fuse_file *fuse_file_alloc(void)
{
	struct fuse_file *ff;
	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			kfree(ff);
			ff = NULL;
		} else {
			INIT_LIST_HEAD(&ff->write_entry);
			atomic_set(&ff->count, 0);
		}
	}
	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	dput(req->misc.release.dentry);
	mntput(req->misc.release.vfsmount);
	fuse_put_request(fc, req);
}
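/*
 * Drop a reference to the file.  When the last reference goes away,
 * the reserved request is used to send RELEASE in the background and
 * the fuse_file itself can be freed immediately.
 */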
static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;
		struct inode *inode = req->misc.release.dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		req->end = fuse_release_end;
		request_send_background(fc, req);
		kfree(ff);
	}
}
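/*
 * Apply the open flags returned by userspace: switch to direct-I/O
 * file operations, drop cached pages unless FOPEN_KEEP_CACHE is set,
 * and mark the file nonseekable if requested.
 */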
void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (outarg->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	ff->fh = outarg->fh;
	file->private_data = fuse_file_get(ff);
}
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	ff = fuse_file_alloc();
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(inode, file, isdir, &outarg);
	if (err)
		fuse_file_free(ff);
	else {
		if (isdir)
			outarg.open_flags &= ~FOPEN_DIRECT_IO;
		fuse_finish_open(inode, file, ff, &outarg);
	}

	return err;
}
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}
int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req = ff->reserved_req;

		fuse_release_fill(ff, get_node_id(inode), file->f_flags,
				  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->misc.release.vfsmount = mntget(file->f_path.mnt);
		req->misc.release.dentry = dget(file->f_path.dentry);

		spin_lock(&fc->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fc->lock);

		/*
		 * Normally this will send the RELEASE request, however if
		 * some asynchronous READ or WRITE requests are outstanding,
		 * the sending will be delayed.
		 */
		fuse_file_put(ff);
	}

	/* Return value is ignored by VFS */
	return 0;
}
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}
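/*
 * Handle ->flush(), which is sent on every close() of a file
 * descriptor.  A "nofail" request is used, since flushing must not be
 * skipped under memory pressure; -ENOSYS from userspace disables
 * further FLUSH requests on this connection.
 */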
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}
static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}
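/*
 * Fill in the common parts of a READ request.  The reply is variable
 * length (argvar) and is copied directly into the supplied pages
 * (argpages).
 */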
void fuse_read_fill(struct fuse_req *req, struct file *file,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argpages = 1;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count,
			     fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	request_send(fc, req);
	return req->out.args[0].size;
}
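/*
 * A short read at EOF reveals the real file size.  Shrink i_size only
 * if the attributes haven't changed since the read was issued (the
 * attr_version check), to avoid racing with other size updates.
 */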
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}
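/*
 * Completion for READ requests sent by ->readpages().  Updates i_size
 * on a short read, then marks each page up to date (or with an error)
 * and unlocks it.
 */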
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct inode *inode = req->pages[0]->mapping->host;

	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (!req->out.h.error && num_read < count) {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
	}

	fuse_invalidate_attr(inode); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
	fuse_put_request(fc, req);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		struct fuse_file *ff = file->private_data;
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		request_send_background(fc, req);
	} else {
		request_send(fc, req);
		fuse_readpages_end(fc, req);
	}
}
struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file, inode);
		else
			fuse_put_request(fc, data.req);
	}
 out:
	return err;
}
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
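/*
 * Fill in a WRITE request.  Writepage requests set FUSE_WRITE_CACHE
 * and have no file pointer.  Connections negotiated with protocol
 * minor < 9 get the shorter compat variant of fuse_write_in.
 */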
static void fuse_write_fill(struct fuse_req *req, struct file *file,
			    struct fuse_file *ff, struct inode *inode,
			    loff_t pos, size_t count, int writepage)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	memset(inarg, 0, sizeof(struct fuse_write_in));
	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
	inarg->flags = file ? file->f_flags : 0;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.argpages = 1;
	req->in.numargs = 2;
	if (fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
	if (owner != NULL) {
		struct fuse_write_in *inarg = &req->misc.write.in;
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	request_send(fc, req);
	return req->misc.write.out.size;
}
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = __grab_cache_page(mapping, index);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}
static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	FUSE_MIGHT_FREEZE(inode->i_sb, "fuse_buffered_write");

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}
static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}
static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, inode, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = __grab_cache_page(mapping, index);
		if (!page)
			break;

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
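/*
 * Copy data from the iovec into freshly grabbed page-cache pages and
 * send it in WRITE requests, one request per iteration.  A short
 * write from userspace ends the loop with -EIO.
 */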
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

 out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       unsigned nbytes, int write)
{
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* This doesn't work with nfsd */
	if (!current->mm)
		return -EPERM;

	nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;
	return 0;
}
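/*
 * Do a read or write directly between the user buffer and the
 * userspace filesystem, bypassing the page cache.  The transfer is
 * split into chunks of at most max_read/max_write bytes each.
 */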
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos, int write)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	FUSE_MIGHT_FREEZE(file->f_mapping->host->i_sb, "fuse_direct_io");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		size_t nbytes_limit = min(count, nmax);
		size_t nbytes;
		int err = fuse_get_user_pages(req, buf, nbytes_limit, !write);
		if (err) {
			res = err;
			break;
		}
		nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
		nbytes = min(nbytes_limit, nbytes);
		if (write)
			nres = fuse_send_write(req, file, inode, pos, nbytes,
					       current->files);
		else
			nres = fuse_send_read(req, file, inode, pos, nbytes,
					      current->files);
		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	/* req may be an ERR_PTR if the re-allocation above failed */
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0) {
		if (write)
			fuse_write_update_size(inode, pos);
		*ppos = pos;
	}
	fuse_invalidate_attr(inode);

	return res;
}
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}
static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
	fuse_put_request(fc, req);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	spin_lock(&fc->lock);
}
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock held
 */
void fuse_flush_writepages(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}
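/*
 * Write out a single dirty page.  The page's contents are copied to a
 * temporary page first, so writeback on the page-cache page can be
 * ended immediately; a slow or unresponsive userspace filesystem then
 * cannot stall page reclaim or sync on this page.  The temporary page
 * is accounted as NR_WRITEBACK_TEMP.
 */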
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);

	copy_highpage(tmp_page, page);
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

 err_free:
	fuse_request_free(req);
 err:
	end_page_writeback(page);
	return -ENOMEM;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}
static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}
/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}
static struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	FUSE_MIGHT_FREEZE(file->f_mapping->host->i_sb, "fuse_getlk");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	FUSE_MIGHT_FREEZE(file->f_mapping->host->i_sb, "fuse_setlk");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
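/*
 * Map a file block to a device block with FUSE_BMAP.  This only makes
 * sense for block-device-backed filesystems; 0 is returned on any
 * failure, which ->bmap() callers treat as "no mapping".
 */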
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	FUSE_MIGHT_FREEZE(inode->i_sb, "fuse_bmap");

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;	/* don't return with i_mutex held */
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
 exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
};
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	/* no mmap and splice_read */
};
static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}