/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

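/*
 * For context, a rough sketch of how the entry points in this file are
 * expected to be wired into the VM through the vnode's
 * address_space_operations (the actual table lives elsewhere in fs/afs;
 * entries shown here are illustrative, not exhaustive):
 *
 *	const struct address_space_operations afs_fs_aops = {
 *		...
 *		.set_page_dirty	= afs_set_page_dirty,
 *		.prepare_write	= afs_prepare_write,
 *		.commit_write	= afs_commit_write,
 *		.writepage	= afs_writepage,
 *		.writepages	= afs_writepages,
 *	};
 */
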
/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
	struct afs_writeback *front;
	struct afs_vnode *vnode = wb->vnode;

	list_del_init(&wb->link);
	if (!list_empty(&vnode->writebacks)) {
		/* if an fsync rises to the front of the queue then wake it
		 * up */
		front = list_entry(vnode->writebacks.next,
				   struct afs_writeback, link);
		if (front->state == AFS_WBACK_SYNCING) {
			_debug("wake up sync");
			front->state = AFS_WBACK_COMPLETE;
			wake_up(&front->waitq);
		}
	}
}

/*
 * free a writeback record
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
	_enter("");
	key_put(wb->key);
	kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 */
void afs_put_writeback(struct afs_writeback *wb)
{
	struct afs_vnode *vnode = wb->vnode;

	_enter("{%d}", wb->usage);

	spin_lock(&vnode->writeback_lock);
	if (--wb->usage == 0)
		afs_unlink_writeback(wb);
	else
		wb = NULL;
	spin_unlock(&vnode->writeback_lock);
	if (wb)
		afs_free_writeback(wb);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 unsigned start, unsigned len, struct page *page)
{
	int ret;

	_enter(",,%u,%u", start, len);

	ASSERTCMP(start + len, <=, PAGE_SIZE);

	ret = afs_vnode_fetch_data(vnode, key, start, len, page);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare a page for being written to
 */
static int afs_prepare_page(struct afs_vnode *vnode, struct page *page,
			    struct key *key, unsigned offset, unsigned to)
{
	unsigned eof, tail, start, stop, len;
	loff_t i_size, pos;
	void *p;
	int ret;

	_enter("");

	/* a full-page write needs no filling */
	if (offset == 0 && to == PAGE_SIZE)
		return 0;

	p = kmap_atomic(page, KM_USER0);

	i_size = i_size_read(&vnode->vfs_inode);
	pos = (loff_t) page->index << PAGE_SHIFT;
	if (pos >= i_size) {
		/* partial write, page beyond EOF */
		_debug("beyond");
		if (offset > 0)
			memset(p, 0, offset);
		if (to < PAGE_SIZE)
			memset(p + to, 0, PAGE_SIZE - to);
		kunmap_atomic(p, KM_USER0);
		return 0;
	}

	if (i_size - pos >= PAGE_SIZE) {
		/* partial write, page entirely before EOF */
		_debug("before");
		tail = eof = PAGE_SIZE;
	} else {
		/* partial write, page overlaps EOF */
		eof = i_size - pos;
		_debug("overlap %u", eof);
		tail = max(eof, to);
		if (tail < PAGE_SIZE)
			memset(p + tail, 0, PAGE_SIZE - tail);
		if (offset > eof)
			memset(p + eof, 0, PAGE_SIZE - eof);
	}

	kunmap_atomic(p, KM_USER0);

	ret = 0;
	if (offset > 0 || eof > to) {
		/* need to fill one or two bits that aren't going to be written
		 * (cover both fillers in one read if there are two) */
		start = (offset > 0) ? 0 : to;
		stop = (eof > to) ? eof : offset;
		len = stop - start;
		_debug("wr=%u-%u av=0-%u rd=%u@%u",
		       offset, to, eof, start, len);
		ret = afs_fill_page(vnode, key, start, len, page);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 * - the caller holds the page locked, preventing it from being written out or
 *   modified by anyone else
 */
int afs_prepare_write(struct file *file, struct page *page,
		      unsigned offset, unsigned to)
{
	struct afs_writeback *candidate, *wb;
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
	struct key *key = file->private_data;
	pgoff_t index;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);

	candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
	if (!candidate)
		return -ENOMEM;
	candidate->vnode = vnode;
	candidate->first = candidate->last = page->index;
	candidate->offset_first = offset;
	candidate->to_last = to;
	candidate->usage = 1;
	candidate->state = AFS_WBACK_PENDING;
	init_waitqueue_head(&candidate->waitq);

	if (!PageUptodate(page)) {
		_debug("not up to date");
		ret = afs_prepare_page(vnode, page, key, offset, to);
		if (ret < 0) {
			kfree(candidate);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	index = page->index;
	spin_lock(&vnode->writeback_lock);

	/* see if this page is already pending a writeback under a suitable key
	 * - if so we can just join onto that one */
	wb = (struct afs_writeback *) page_private(page);
	if (wb) {
		if (wb->key == key && wb->state == AFS_WBACK_PENDING)
			goto subsume_in_current_wb;
		goto flush_conflicting_wb;
	}

	if (index > 0) {
		/* see if we can find an already pending writeback that we can
		 * append this page to */
		list_for_each_entry(wb, &vnode->writebacks, link) {
			if (wb->last == index - 1 && wb->key == key &&
			    wb->state == AFS_WBACK_PENDING)
				goto append_to_previous_wb;
		}
	}

	list_add_tail(&candidate->link, &vnode->writebacks);
	candidate->key = key_get(key);
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) candidate);
	_leave(" = 0 [new]");
	return 0;

subsume_in_current_wb:
	_debug("subsume");
	ASSERTRANGE(wb->first, <=, index, <=, wb->last);
	if (index == wb->first && offset < wb->offset_first)
		wb->offset_first = offset;
	if (index == wb->last && to > wb->to_last)
		wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	kfree(candidate);
	_leave(" = 0 [sub]");
	return 0;

append_to_previous_wb:
	_debug("append into %lx-%lx", wb->first, wb->last);
	wb->usage++;
	wb->last++;
	wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) wb);
	kfree(candidate);
	_leave(" = 0 [app]");
	return 0;

	/* the page is currently bound to another context, so if it's dirty we
	 * need to flush it before we can use the new context */
flush_conflicting_wb:
	_debug("flush conflict");
	if (wb->state == AFS_WBACK_PENDING)
		wb->state = AFS_WBACK_CONFLICTING;
	spin_unlock(&vnode->writeback_lock);
	if (PageDirty(page)) {
		ret = afs_write_back_from_locked_page(wb, page);
		if (ret < 0) {
			afs_put_writeback(candidate);
			_leave(" = %d", ret);
			return ret;
		}
	}

	/* the page holds a ref on the writeback record */
	afs_put_writeback(wb);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_commit_write(struct file *file, struct page *page,
		     unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
	loff_t i_size, maybe_i_size;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);

	maybe_i_size = (loff_t) page->index << PAGE_SHIFT;
	maybe_i_size += to;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->writeback_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->writeback_lock);
	}

	set_page_dirty(page);
	return 0;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
			   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			ClearPageUptodate(pv.pages[loop]);
			if (error)
				SetPageError(pv.pages[loop]);
			end_page_writeback(pv.pages[loop]);
		}

		first += count;
		__pagevec_release(&pv);
	} while (first < last);

	_leave("");
}

/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *primary_page)
{
	struct page *pages[8], *page;
	unsigned long count;
	unsigned n, offset, to;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (!clear_page_dirty_for_io(primary_page))
		BUG();
	if (test_set_page_writeback(primary_page))
		BUG();

	/* find all consecutive lockable dirty pages, stopping when we find a
	 * page that is not immediately lockable, is not dirty or is missing,
	 * or we reach the end of the range */
	start = primary_page->index;
	if (start >= wb->last)
		goto no_more;
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = wb->last - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
					  start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (page->index > wb->last)
				break;
			if (TestSetPageLocked(page))
				break;
			if (!PageDirty(page) ||
			    page_private(page) != (unsigned long) wb) {
				unlock_page(page);
				break;
			}
			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}

		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= wb->last && count < 65536);

no_more:
	/* we now have a contiguous set of dirty pages, each with writeback set
	 * and the dirty mark cleared; the first page is locked and must remain
	 * so, all the rest are unlocked */
	first = primary_page->index;
	last = first + count - 1;

	offset = (first == wb->first) ? wb->offset_first : 0;
	to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_vnode_store_data(wb, first, last, offset, to);
	if (ret < 0) {
		switch (ret) {
		case -EDQUOT:
		case -ENOSPC:
			set_bit(AS_ENOSPC,
				&wb->vnode->vfs_inode.i_mapping->flags);
			break;
		case -EROFS:
		case -EIO:
		case -EREMOTEIO:
		case -EFBIG:
		case -ENOENT:
		case -ENOMEDIUM:
		case -ENXIO:
			afs_kill_pages(wb->vnode, true, first, last);
			set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
			break;
		case -EACCES:
		case -EPERM:
		case -ENOKEY:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EKEYREVOKED:
			afs_kill_pages(wb->vnode, false, first, last);
			break;
		default:
			break;
		}
	} else {
		ret = count;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = page->mapping->backing_dev_info;
	struct afs_writeback *wb;
	int ret;

	_enter("{%lx},", page->index);

	if (wbc->sync_mode != WB_SYNC_NONE)
		wait_on_page_writeback(page);

	if (PageWriteback(page) || !PageDirty(page)) {
		unlock_page(page);
		return 0;
	}

	wb = (struct afs_writeback *) page_private(page);
	ASSERT(wb != NULL);

	ret = afs_write_back_from_locked_page(wb, page);
	unlock_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;
	if (wbc->nonblocking && bdi_write_congested(bdi))
		wbc->encountered_congestion = 1;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct afs_writeback *wb;
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
				       1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		if (page->index > end) {
			*_next = index;
			page_cache_release(page);
			_leave(" = 0 [%lx]", *_next);
			return 0;
		}

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		lock_page(page);

		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) || !PageDirty(page)) {
			unlock_page(page);
			continue;
		}

		wb = (struct afs_writeback *) page_private(page);
		ASSERT(wb != NULL);

		spin_lock(&wb->vnode->writeback_lock);
		wb->state = AFS_WBACK_WRITING;
		spin_unlock(&wb->vnode->writeback_lock);

		ret = afs_write_back_from_locked_page(wb, page);
		unlock_page(page);
		page_cache_release(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			break;
		}

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		_leave(" = 0 [congest]");
		return 0;
	}

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
		    !(wbc->nonblocking && wbc->encountered_congestion))
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write an inode back
 */
int afs_write_inode(struct inode *inode, int sync)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	int ret;

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	ret = 0;
	if (sync) {
		ret = filemap_fdatawait(inode->i_mapping);
		if (ret < 0)
			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct afs_writeback *wb = call->wb;
	struct pagevec pv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;
	bool free_wb;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	ASSERT(wb != NULL);

	pagevec_init(&pv, 0);

	do {
		_debug("attach %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(call->mapping, first, count,
					      pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		spin_lock(&vnode->writeback_lock);
		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			end_page_writeback(page);
			if (page_private(page) == (unsigned long) wb) {
				set_page_private(page, 0);
				ClearPagePrivate(page);
				wb->usage--;
			}
		}
		free_wb = false;
		if (wb->usage == 0) {
			afs_unlink_writeback(wb);
			free_wb = true;
		}
		spin_unlock(&vnode->writeback_lock);
		first += count;
		if (free_wb) {
			afs_free_writeback(wb);
			wb = NULL;
		}

		__pagevec_release(&pv);
	} while (first < last);

	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
		       unsigned long nr_segs, loff_t pos)
{
	struct dentry *dentry = iocb->ki_filp->f_path.dentry;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);
	int ret;

	_enter("{%x.%u},{%zu},%lu,",
	       vnode->fid.vid, vnode->fid.vnode, count, nr_segs);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (IS_ERR_VALUE(result)) {
		_leave(" = %zd", result);
		return result;
	}

	/* return error values for O_SYNC and IS_SYNC() */
	if (IS_SYNC(&vnode->vfs_inode) || iocb->ki_filp->f_flags & O_SYNC) {
		ret = afs_fsync(iocb->ki_filp, dentry, 1);
		if (ret < 0)
			result = ret;
	}

	_leave(" = %zd", result);
	return result;
}

/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct writeback_control wbc = {
		.bdi		= mapping->backing_dev_info,
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.for_writepages	= 1,
		.range_cyclic	= 1,
	};
	int ret;

	_enter("");

	ret = mapping->a_ops->writepages(mapping, &wbc);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	_leave(" = %d", ret);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct afs_writeback *wb, *xwb;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	int ret;

	_enter("{%x:%u},{n=%s},%d",
	       vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
	       datasync);

	/* use a writeback record as a marker in the queue - when this reaches
	 * the front of the queue, all the outstanding writes are either
	 * completed or rejected */
	wb = kzalloc(sizeof(*wb), GFP_KERNEL);
	if (!wb)
		return -ENOMEM;
	wb->vnode = vnode;
	wb->first = 0;
	wb->last = -1;
	wb->offset_first = 0;
	wb->to_last = PAGE_SIZE;
	wb->usage = 1;
	wb->state = AFS_WBACK_SYNCING;
	init_waitqueue_head(&wb->waitq);

	spin_lock(&vnode->writeback_lock);
	list_for_each_entry(xwb, &vnode->writebacks, link) {
		if (xwb->state == AFS_WBACK_PENDING)
			xwb->state = AFS_WBACK_CONFLICTING;
	}
	list_add_tail(&wb->link, &vnode->writebacks);
	spin_unlock(&vnode->writeback_lock);

	/* push all the outstanding writebacks to the server */
	ret = afs_writeback_all(vnode);
	if (ret < 0) {
		afs_put_writeback(wb);
		_leave(" = %d [wb]", ret);
		return ret;
	}

	/* wait for the preceding writes to actually complete */
	ret = wait_event_interruptible(wb->waitq,
				       wb->state == AFS_WBACK_COMPLETE ||
				       vnode->writebacks.next == &wb->link);
	afs_put_writeback(wb);
	_leave(" = %d", ret);
	return ret;
}