/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "ops_file.h"
#include "util.h"
#include "glops.h"

static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 0);
        }
}

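/*
 * Example of the loop above: with a 4096-byte page and 1024-byte blocks,
 * the circular b_this_page list yields the byte ranges [0,1024),
 * [1024,2048), [2048,3072) and [3072,4096). A call with from=512 and
 * to=1536 adds only the first two buffers to the transaction; the rest
 * are skipped by the "end <= from || start >= to" overlap test.
 */
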
/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

int gfs2_get_block(struct inode *inode, sector_t lblock,
                   struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, create, bh_result);
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, 0, bh_result);
        if (error)
                return error;
        if (bh_result->b_blocknr == 0)
                return -EIO;
        return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, 0, bh_result);
}

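/*
 * Note: all three wrappers above funnel into gfs2_block_map() and differ
 * only in whether allocation is permitted. gfs2_get_block() passes the
 * caller's create flag through; the noalloc and direct variants always
 * pass 0 and so can only map existing blocks. The noalloc variant also
 * treats an unmapped block as -EIO, since writeback should never find a
 * hole it was not allowed to fill.
 */
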
/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
        int error;
        int done_trans = 0;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
                unlock_page(page);
                return -EIO;
        }
        if (current->journal_info)
                goto out_ignore;

        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                unlock_page(page);
                return 0; /* don't care */
        }

        if ((sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) &&
            PageChecked(page)) {
                ClearPageChecked(page);
                error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (error)
                        goto out_ignore;
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
                done_trans = 1;
        }
        error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        gfs2_meta_cache_flush(ip);
        return error;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

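/*
 * Rough sketch of the writepage flow above (the page arrives locked and
 * must be unlocked on every path, which block_write_full_page() handles
 * on the common path):
 *
 *      locked page
 *        -> withdraw / journal-recursion checks
 *        -> page wholly beyond i_size? invalidate, unlock, done
 *        -> ordered or jdata and PageChecked? open a transaction and
 *           journal the data buffers first
 *        -> block_write_full_page() performs the I/O and unlocks
 */
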
/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For journaled files and/or ordered writes this just falls back to the
 * kernel's default writepages path for now. We will probably want to change
 * that eventually (i.e. when we look at allocate on flush).
 *
 * For the data=writeback case though we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the number
 * of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
                return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

        return generic_writepages(mapping, wbc);
}

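/*
 * Note: the data journaling mode tested above is chosen at mount time,
 * e.g. "mount -t gfs2 -o data=writeback <dev> <mnt>" (data=ordered is the
 * default). Only in writeback mode is it safe to let mpage_writepages()
 * build large multi-block bios without consulting per-buffer journaling
 * state.
 */
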
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->nopage(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}

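/*
 * Note: a "stuffed" inode keeps its data in the dinode block itself,
 * directly after the on-disk header, so a stuffed file can hold at most
 * (block size - sizeof(struct gfs2_dinode)) bytes. stuffed_readpage()
 * therefore only ever fills page index 0: it copies di_size bytes out of
 * the dinode buffer and zero-fills the rest of the page.
 */
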
/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *        reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct gfs2_file *gf = NULL;
        struct gfs2_holder gh;
        int error;
        int do_unlock = 0;

        if (likely(file != &gfs2_internal_file_sentinel)) {
                if (file) {
                        gf = file->private_data;
                        if (test_bit(GFF_EXLOCK, &gf->f_flags))
                                /* gfs2_sharewrite_fault has grabbed the ip->i_gl already */
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
                do_unlock = 1;
                error = gfs2_glock_nq_atime(&gh);
                if (unlikely(error))
                        goto out_unlock;
        }

skip_lock:
        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else
                error = mpage_readpage(page, gfs2_get_block);

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = -EIO;

        if (do_unlock) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        return error;
out_unlock:
        unlock_page(page);
        if (error == GLR_TRYFAILED) {
                error = AOP_TRUNCATED_PAGE;
                yield();
        }
        if (do_unlock)
                gfs2_holder_uninit(&gh);
        goto out;
}

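/*
 * Note on the trylock dance above: ->readpage() is entered with the page
 * locked, but GFS2's lock ordering takes the glock before the page lock.
 * Hence the LM_FLAG_TRY_1CB attempt; if it fails, the page is unlocked
 * and AOP_TRUNCATED_PAGE returned, which makes the VFS re-find the page
 * and retry, giving us a chance to take the locks in the right order.
 */
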
/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, so effectively we have lock-ahead as
 *    well as read-ahead.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret = 0;
        int do_unlock = 0;

        if (likely(file != &gfs2_internal_file_sentinel)) {
                if (file) {
                        struct gfs2_file *gf = file->private_data;
                        if (test_bit(GFF_EXLOCK, &gf->f_flags))
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
                                 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
                do_unlock = 1;
                ret = gfs2_glock_nq_atime(&gh);
                if (ret == GLR_TRYFAILED)
                        goto out_noerror;
                if (unlikely(ret))
                        goto out_unlock;
        }
skip_lock:
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);

        if (do_unlock) {
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
        }
out:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;

out_noerror:
        ret = 0;
out_unlock:
        if (do_unlock)
                gfs2_holder_uninit(&gh);
        goto out;
}

/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int data_blocks, ind_blocks, rblocks;
        int alloc_required;
        int error = 0;
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
        loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        struct gfs2_alloc *al;
        unsigned int write_len = to - from;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
        error = gfs2_glock_nq_atime(&ip->i_gh);
        if (unlikely(error)) {
                if (error == GLR_TRYFAILED) {
                        unlock_page(page);
                        error = AOP_TRUNCATED_PAGE;
                        yield();
                }
                goto out_uninit;
        }

        gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);

        error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required);
        if (error)
                goto out_unlock;

        ip->i_alloc.al_requested = 0;
        if (alloc_required) {
                al = gfs2_alloc_get(ip);

                error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
                if (error)
                        goto out_alloc_put;

                error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
                if (error)
                        goto out_qunlock;

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_qunlock;
        }

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;

        error = gfs2_trans_begin(sdp, rblocks, 0);
        if (error)
                goto out_trans_fail;

        if (gfs2_is_stuffed(ip)) {
                if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page))
                        error = stuffed_readpage(ip, page);
                goto out;
        }

prepare_write:
        error = block_prepare_write(page, from, to, gfs2_get_block);

out:
        if (error) {
                gfs2_trans_end(sdp);
out_trans_fail:
                if (alloc_required) {
                        gfs2_inplace_release(ip);
out_qunlock:
                        gfs2_quota_unlock(ip);
out_alloc_put:
                        gfs2_alloc_put(ip);
                }
out_unlock:
                gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
                gfs2_holder_uninit(&ip->i_gh);
        }

        return error;
}

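/*
 * Worked example of the rblocks reservation above (symbolic): a write to
 * a jdata file that adds one indirect block and touches two data blocks
 * reserves RES_DINODE + 1 (indirect) + 2 (journaled data) + RES_STATFS +
 * RES_QUOTA journal blocks. The statfs and quota terms appear only when
 * blocks are actually being added, since only then do those records
 * change.
 */
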
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);

        spin_lock(&sdp->sd_statfs_spin);
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error = -EOPNOTSUPP;
        struct buffer_head *dibh;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_dinode *di;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
                goto fail_nounlock;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_endtrans;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        di = (struct gfs2_dinode *)dibh->b_data;

        if (gfs2_is_stuffed(ip)) {
                u64 file_size;
                void *kaddr;

                file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;

                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
                       kaddr + from, to - from);
                kunmap_atomic(kaddr, KM_USER0);

                SetPageUptodate(page);

                if (inode->i_size < file_size) {
                        i_size_write(inode, file_size);
                        mark_inode_dirty(inode);
                }
        } else {
                if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
                    gfs2_is_jdata(ip))
                        gfs2_page_add_databufs(ip, page, from, to);
                error = generic_commit_write(file, page, from, to);
                if (error)
                        goto fail;
        }

        if (ip->i_di.di_size < inode->i_size) {
                ip->i_di.di_size = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
        }

        if (inode == sdp->sd_rindex)
                adjust_fs_space(inode);

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        unlock_page(page);
        gfs2_glock_dq_m(1, &ip->i_gh);
        lock_page(page);
        gfs2_holder_uninit(&ip->i_gh);
        return 0;

fail:
        brelse(dibh);
fail_endtrans:
        gfs2_trans_end(sdp);
        if (al->al_requested) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        unlock_page(page);
        gfs2_glock_dq_m(1, &ip->i_gh);
        lock_page(page);
        gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
        ClearPageUptodate(page);
        return error;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);

        if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
                SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

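/*
 * Note: PageChecked is reused here as a marker meaning "dirtied via
 * ->set_page_dirty under ordered or jdata rules". gfs2_writepage() tests
 * and clears it to decide whether a transaction is needed, and
 * gfs2_invalidatepage() clears it when the page is discarded.
 */
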
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        gfs2_log_lock(sdp);
        bd = bh->b_private;
        if (bd) {
                bd->bd_bh = NULL;
                bh->b_private = NULL;
                if (!bd->bd_ail && list_empty(&bd->bd_le.le_list))
                        kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
        gfs2_log_unlock(sdp);

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        unlock_buffer(bh);
}

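/*
 * Note: the gfs2_bufdata is detached under the log lock so that the log
 * code cannot race with the teardown; it is freed immediately only when
 * it sits on no AIL or log-element list, otherwise the log machinery
 * remains responsible for releasing it.
 */
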
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;

        BUG_ON(!PageLocked(page));
        ClearPageChecked(page);
        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off)
                        discard_buffer(sdp, bh);

                curr_off = next_off;
                bh = next;
        } while (bh != head);

        if (!offset)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a journaled file makes any sense. For now we'll silently fall
         * back to buffered I/O, likewise we do the same for stuffed
         * files since they are (a) small and (b) unaligned.
         */
        if (gfs2_is_jdata(ip))
                return 0;
        if (gfs2_is_stuffed(ip))
                return 0;
        if (offset > i_size_read(&ip->i_inode))
                return 0;
        return 1;
}

static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        ssize_t rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need change is atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
        rv = gfs2_glock_nq_atime(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
                                           iov, offset, nr_segs,
                                           gfs2_get_block_direct, NULL);
out:
        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);
        return rv;
}

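/*
 * Note: LM_ST_DEFERRED is compatible with itself on other nodes but not
 * with the cached SHARED/EXCLUSIVE states, so holding it forces every
 * other node to flush and drop its page cache for this inode. That is
 * the guarantee direct I/O needs: on-disk data is current, and no other
 * node is writing through its cache while we bypass ours.
 */
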
/**
 * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
 * @bh: the buffer we're stuck on
 *
 */

static void stuck_releasepage(struct buffer_head *bh)
{
        struct inode *inode = bh->b_page->mapping->host;
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_bufdata *bd = bh->b_private;
        struct gfs2_glock *gl;
        static unsigned limit = 0;

        if (limit > 3)
                return;
        limit++;

        fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
        fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
                (unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
        fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
        fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

        if (!bd)
                return;

        gl = bd->bd_gl;

        fs_warn(sdp, "gl = (%u, %llu)\n",
                gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);

        fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
                (list_empty(&bd->bd_list_tr)) ? "no" : "yes",
                (list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

        if (gl->gl_ops == &gfs2_inode_glops) {
                struct gfs2_inode *ip = gl->gl_object;
                unsigned int x;

                if (!ip)
                        return;

                fs_warn(sdp, "ip = %llu %llu\n",
                        (unsigned long long)ip->i_no_formal_ino,
                        (unsigned long long)ip->i_no_addr);

                for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
                        fs_warn(sdp, "ip->i_cache[%u] = %s\n",
                                x, (ip->i_cache[x]) ? "!NULL" : "NULL");
        }
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct inode *aspace = page->mapping->host;
        struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;
        unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;

        if (!page_has_buffers(page))
                goto out;

        head = bh = page_buffers(page);
        do {
                while (atomic_read(&bh->b_count)) {
                        if (!atomic_read(&aspace->i_writecount))
                                return 0;

                        if (!(gfp_mask & __GFP_WAIT))
                                return 0;

                        if (time_after_eq(jiffies, t)) {
                                stuck_releasepage(bh);
                                /* should we withdraw here? */
                                return 0;
                        }

                        yield();
                }

                gfs2_assert_warn(sdp, !buffer_pinned(bh));
                gfs2_assert_warn(sdp, !buffer_dirty(bh));

                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
                        gfs2_assert_warn(sdp, !bd->bd_ail);
                        bd->bd_bh = NULL;
                        if (!list_empty(&bd->bd_le.le_list))
                                bd = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

out:
        return try_to_free_buffers(page);
}

const struct address_space_operations gfs2_file_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .prepare_write = gfs2_prepare_write,
        .commit_write = gfs2_commit_write,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
};