/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}
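/*
 * gfs2_get_block_direct - get_block helper for the direct I/O path
 *
 * Like gfs2_get_block_noalloc() above, this only maps existing blocks:
 * the create flag is ignored and gfs2_block_map() is called with
 * create == 0. It is handed to __blockdev_direct_IO() in gfs2_direct_IO().
 */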
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}
/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}
/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}
/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;
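	/*
	 * The transaction covering the whole pagevec is opened before any
	 * page lock is taken (see the comment above gfs2_write_cache_jdata()
	 * for why the ordering matters); each page is then locked,
	 * re-checked and pushed out through __gfs2_jdata_writepage().
	 */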
	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;
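	/*
	 * Copy the inline ("stuffed") data that follows the on-disk dinode
	 * into the page and zero the remainder of the page.
	 */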
	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}
/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data actually copied or the error
 */
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	return size;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}
/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}
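	/*
	 * Work out how many journal blocks the transaction needs: the dinode
	 * itself, any indirect blocks, the data blocks when journaling data
	 * (jdata), statfs/quota changes when blocks are being allocated, and
	 * extra statfs blocks when the rindex itself is being written.
	 */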
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > gfs2_max_stuffed_size(ip)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	put_page(page);

	gfs2_trans_end(sdp);
	if (alloc_required) {
		gfs2_inplace_release(ip);
		if (pos + len > ip->i_inode.i_size)
			gfs2_trim_blocks(&ip->i_inode);
	}
	goto out_qunlock;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));

	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}
	return copied;
}
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We just put our locking around the VFS
 * provided functions.
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	int ret;
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret))
		goto out;
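	/*
	 * For stuffed files the data is copied straight back into the dinode
	 * block by gfs2_stuffed_write_end(); everything else goes through
	 * generic_write_end(), with the data buffers journaled (jdata) or
	 * the inode added to the ordered write list.
	 */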
	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_stuffed_write_end(inode, dibh, pos, copied, page);
		page = NULL;
		goto out2;
	}

	if (gfs2_is_jdata(ip))
		gfs2_page_add_databufs(ip, page, pos & ~PAGE_MASK, len);
	else
		gfs2_ordered_add_inode(ip);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	page = NULL;
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

out2:
	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
out:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}
/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
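/*
 * gfs2_discard - drop a buffer's journal state ahead of invalidation
 *
 * Called from gfs2_invalidatepage() below: under the log lock the buffer
 * is taken off its journal list (or removed from the journal as jdata)
 * and its mapped/req/new state bits are cleared.
 */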
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
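/*
 * gfs2_invalidatepage - invalidate part or all of a page
 *
 * Discards the journal state of every buffer that lies fully inside
 * [offset, offset + length); for a whole-page invalidation the Checked
 * flag is cleared and the page's buffers are released as well.
 */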
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}
/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}
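/*
 * gfs2_direct_IO - ->direct_IO() method for the GFS2 address spaces
 *
 * Takes a deferred glock for the duration of the transfer, falls back to
 * buffered I/O when gfs2_ok_for_dio() above says no (stuffed files, or an
 * offset at or beyond i_size), and hands the actual transfer to
 * __blockdev_direct_IO().
 */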
static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	ssize_t rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		goto out_uninit;
	rv = gfs2_ok_for_dio(ip, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nr_pages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_SIZE - 1);
		loff_t len = iov_iter_count(iter);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (iov_iter_rw(iter) == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				  gfs2_get_block_direct, NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return rv;
}
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
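	/*
	 * First pass, under the ail lock: give up if any buffer is still
	 * referenced, attached to a transaction, dirty or pinned. Only when
	 * none are is it safe to strip the gfs2_bufdata from the buffers in
	 * the second pass below.
	 */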
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}
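/*
 * One set of address space operations per data journaling mode; the
 * matching table is installed in the inode's mapping by gfs2_set_aops()
 * at the bottom of this file.
 */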
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = __set_page_dirty_buffers,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}