/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/backing-dev.h>

#include "ops_address.h"
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
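
/*
 * Note: gfs2_page_add_databufs() walks the buffers attached to a page and
 * adds every buffer overlapping the byte range [from, to) to the current
 * transaction via gfs2_trans_add_bh(). For jdata inodes the buffers are
 * also marked uptodate, since their contents will be journalled as-is.
 */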
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}
/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret = -EIO;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	ret = 0;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_writeback_writepage(struct page *page,
				    struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
	if (ret == -EAGAIN)
		ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	return ret;
}
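
/*
 * Note: for data=writeback mappings no buffers need to be journalled, so
 * mpage_writepage() can submit whole extents in large bios (see also
 * gfs2_writeback_writepages() below); block_write_full_page() serves as the
 * fallback path when mpage_writepage() cannot write the page itself.
 */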
/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_ordered_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;
	int done_trans = 0;

	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (ret)
			goto out_ignore;
		done_trans = 1;
	}
	ret = gfs2_writepage_common(page, wbc);
	if (ret > 0)
		ret = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
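
/*
 * Note: PageChecked is how GFS2 marks a dirty jdata page whose buffers still
 * have to be added to the journal (see gfs2_set_page_dirty() below). Such a
 * page needs a transaction, so it is only written for WB_SYNC_ALL writeback;
 * otherwise it is redirtied and left for a later pass or for writepages.
 */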
/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}
/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @writepage: The writepage function to call for each page
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			continue;
		}

		if (!wbc->range_cyclic && page->index > end) {
			ret = 1;
			unlock_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}

		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index ||
		    (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0);
			unlock_page(page);
			continue;
		}

		ret = __gfs2_jdata_writepage(page, wbc);

		if (ret || (--(wbc->nr_to_write) <= 0))
			ret = 1;
		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			ret = 1;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
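
/*
 * Note: the transaction opened above is sized at nrblocks, i.e. one block
 * reservation per block-sized buffer on every page in the pagevec, so that
 * __gfs2_jdata_writepage() can add all of those buffers to the journal. The
 * transaction is closed again once the whole pagevec has been processed.
 */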
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 * @writepage: The writepage function to call
 * @data: The data to pass to writepage
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		scanned = 1;
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;

		pagevec_release(&pvec);
		cond_resched();
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}
/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
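
/*
 * Note: jdata writepages goes through gfs2_write_cache_jdata(), a local
 * variant of write_cache_pages(), so the per-pagevec transaction can be
 * started before any page lock is taken (see the comment above
 * gfs2_write_cache_jdata()). For WB_SYNC_ALL the log is flushed and the
 * mapping is scanned a second time after the flush.
 */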
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_disksize);
	memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
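
/*
 * Note: a "stuffed" inode keeps its file data inline in the dinode block,
 * immediately after the struct gfs2_dinode header, so filling page 0 is just
 * a copy from the dinode buffer followed by zeroing the rest of the page.
 */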
/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 *
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}
/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data actually copied or the error
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);

	*pos += copied;
	return size;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}
/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_unlock;

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	page_cache_release(page);
	if (pos + len > ip->i_inode.i_size)
		vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_alloc_put(ip);
	}
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
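
/*
 * Note on the transaction reservation above (illustrative figures only):
 * rblocks always covers the dinode (RES_DINODE) plus any indirect blocks
 * reported by gfs2_write_calc_reserv(). A jdata write also reserves space
 * for the data blocks themselves, and any write that may allocate adds
 * RES_STATFS + RES_QUOTA. So an ordered-mode write needing an allocation
 * with ind_blocks = 2 reserves RES_DINODE + 2 + RES_STATFS + RES_QUOTA
 * journal blocks, while the same write in jdata mode also adds its
 * data_blocks on top.
 */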
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);

	spin_lock(&sdp->sd_statfs_spin);
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);
}
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (inode->i_size < to) {
		i_size_write(inode, to);
		ip->i_disksize = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_dinode *di;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (likely(ret >= 0) && (inode->i_size > ip->i_disksize)) {
		di = (struct gfs2_dinode *)dibh->b_data;
		ip->i_disksize = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (al) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}
/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
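
/*
 * Note: SetPageChecked() here is what later makes the PageChecked() tests in
 * gfs2_jdata_writepage()/__gfs2_jdata_writepage() start a transaction and
 * journal the page's buffers; gfs2_invalidatepage() clears the flag again
 * when the whole page is invalidated.
 */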
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_le.le_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}
/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}
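
/*
 * Note: returning 0 above simply declines direct I/O; gfs2_direct_IO() then
 * returns to the caller, which falls back to buffered I/O. Stuffed inodes
 * and requests starting at or beyond i_size are handled this way.
 */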
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	ssize_t rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while(bh != head);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writeback_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_ordered_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}