fs/gfs2/ops_address.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "ops_file.h"
#include "util.h"
#include "glops.h"
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

int gfs2_get_block(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, create, bh_result);
}
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, 0, bh_result);
	if (error)
		return error;
	if (bh_result->b_blocknr == 0)
		return -EIO;
	return 0;
}
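
/*
 * gfs2_get_block_direct - get_block callback used on the O_DIRECT path.
 * Never allocates (create is ignored) and, unlike gfs2_get_block_noalloc(),
 * does not treat an unmapped block as an error.
 */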
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, 0, bh_result);
}
/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For journaled files and/or ordered writes this just falls back to the
 * kernel's default writepages path for now. We will probably want to change
 * that eventually (i.e. when we look at allocate on flush).
 *
 * For the data=writeback case though we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
		return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	return generic_writepages(mapping, wbc);
}
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->nopage(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr, 0, PAGE_CACHE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *        reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct gfs2_file *gf = NULL;
	struct gfs2_holder gh;
	int error;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				/* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
		do_unlock = 1;
		error = gfs2_glock_nq_atime(&gh);
		if (unlikely(error))
			goto out_unlock;
	}

skip_lock:
	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else
		error = mpage_readpage(page, gfs2_get_block);

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		error = -EIO;

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	return error;
out_unlock:
	unlock_page(page);
	if (error == GLR_TRYFAILED) {
		error = AOP_TRUNCATED_PAGE;
		yield();
	}
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, so effectively we have lock-ahead as
 *    well as read-ahead.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret = 0;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			struct gfs2_file *gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
				 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
		do_unlock = 1;
		ret = gfs2_glock_nq_atime(&gh);
		if (ret == GLR_TRYFAILED)
			goto out_noerror;
		if (unlikely(ret))
			goto out_unlock;
	}
skip_lock:
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
out_noerror:
	ret = 0;
out_unlock:
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
	loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	struct gfs2_alloc *al;
	unsigned int write_len = to - from;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	if (unlikely(error)) {
		if (error == GLR_TRYFAILED) {
			unlock_page(page);
			error = AOP_TRUNCATED_PAGE;
			yield();
		}
		goto out_uninit;
	}

	gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);

	error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required);
	if (error)
		goto out_unlock;

	ip->i_alloc.al_requested = 0;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto out_alloc_put;

		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		if (error)
			goto out_qunlock;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks, 0);
	if (error)
		goto out;

	if (gfs2_is_stuffed(ip)) {
		if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page))
			error = stuffed_readpage(ip, page);
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_get_block);

out:
	if (error) {
		gfs2_trans_end(sdp);
		if (alloc_required) {
			gfs2_inplace_release(ip);
out_qunlock:
			gfs2_quota_unlock(ip);
out_alloc_put:
			gfs2_alloc_put(ip);
		}
out_unlock:
		gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
		gfs2_holder_uninit(&ip->i_gh);
	}

	return error;
}
/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_commit_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error = -EOPNOTSUPP;
	struct buffer_head *dibh;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_dinode *di;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
		goto fail_nounlock;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_endtrans;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;

	if (gfs2_is_stuffed(ip)) {
		u64 file_size;
		void *kaddr;

		file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;

		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
		       kaddr + from, to - from);
		kunmap_atomic(kaddr, KM_USER0);

		SetPageUptodate(page);

		if (inode->i_size < file_size) {
			i_size_write(inode, file_size);
			mark_inode_dirty(inode);
		}
	} else {
		if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
		    gfs2_is_jdata(ip))
			gfs2_page_add_databufs(ip, page, from, to);
		error = generic_commit_write(file, page, from, to);
		if (error)
			goto fail;
	}

	if (ip->i_di.di_size < inode->i_size) {
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	unlock_page(page);
	gfs2_glock_dq_m(1, &ip->i_gh);
	lock_page(page);
	gfs2_holder_uninit(&ip->i_gh);
	return 0;

fail:
	brelse(dibh);
fail_endtrans:
	gfs2_trans_end(sdp);
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	unlock_page(page);
	gfs2_glock_dq_m(1, &ip->i_gh);
	lock_page(page);
	gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
	ClearPageUptodate(page);
	return error;
}
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
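
/*
 * discard_buffer - Detach a buffer from GFS2 bookkeeping before it is
 * invalidated: under the log lock, break the link to its gfs2_bufdata,
 * then clear the dirty/mapped/req/new/delay state bits.
 */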
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd) {
		bd->bd_bh = NULL;
		bh->b_private = NULL;
	}
	gfs2_log_unlock(sdp);

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	unlock_buffer(bh);
}
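
/**
 * gfs2_invalidatepage - Invalidate (part of) a page
 * @page: The page to invalidate
 * @offset: Byte offset within the page from which to invalidate
 *
 * Buffers starting at or after @offset are discarded; when the whole
 * page is invalidated (@offset == 0) we also try to release its buffers.
 */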
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (offset <= curr_off)
			discard_buffer(sdp, bh);

		curr_off = next_off;
		bh = next;
	} while (bh != head);

	if (!offset)
		try_to_release_page(page, 0);

	return;
}
/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a journaled file makes any sense. For now we'll silently fall
	 * back to buffered I/O, likewise we do the same for stuffed
	 * files since they are (a) small and (b) unaligned.
	 */
	if (gfs2_is_jdata(ip))
		return 0;

	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset > i_size_read(&ip->i_inode))
		return 0;
	return 1;
}
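
/**
 * gfs2_direct_IO - Perform direct I/O on a file
 * @rw: READ or WRITE
 * @iocb: The I/O control block
 * @iov: The I/O vector(s)
 * @offset: The file offset
 * @nr_segs: The number of segments in @iov
 *
 * Takes the inode glock in deferred mode (see the comment in the body) and
 * falls back to buffered I/O whenever gfs2_ok_for_dio() says no.
 *
 * Returns: The number of bytes transferred or an errno
 */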
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_atime(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}
/**
 * stuck_releasepage - We're stuck in gfs2_releasepage().  Print stuff out.
 * @bh: the buffer we're stuck on
 *
 */

static void stuck_releasepage(struct buffer_head *bh)
{
	struct inode *inode = bh->b_page->mapping->host;
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_glock *gl;
	static unsigned limit = 0;

	if (limit > 3)
		return;
	limit++;

	fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
	fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
		(unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
	fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
	fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

	if (!bd)
		return;

	gl = bd->bd_gl;

	fs_warn(sdp, "gl = (%u, %llu)\n",
		gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);

	fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
		(list_empty(&bd->bd_list_tr)) ? "no" : "yes",
		(list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

	if (gl->gl_ops == &gfs2_inode_glops) {
		struct gfs2_inode *ip = gl->gl_object;
		unsigned int x;

		if (!ip)
			return;

		fs_warn(sdp, "ip = %llu %llu\n",
			(unsigned long long)ip->i_num.no_formal_ino,
			(unsigned long long)ip->i_num.no_addr);

		for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
			fs_warn(sdp, "ip->i_cache[%u] = %s\n",
				x, (ip->i_cache[x]) ? "!NULL" : "NULL");
	}
}
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;
	unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;

	if (!page_has_buffers(page))
		goto out;

	head = bh = page_buffers(page);
	do {
		while (atomic_read(&bh->b_count)) {
			if (!atomic_read(&aspace->i_writecount))
				return 0;

			if (!(gfp_mask & __GFP_WAIT))
				return 0;

			if (time_after_eq(jiffies, t)) {
				stuck_releasepage(bh);
				/* should we withdraw here? */
				return 0;
			}

			yield();
		}

		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));

		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			gfs2_assert_warn(sdp, !bd->bd_ail);
			bd->bd_bh = NULL;
			if (!list_empty(&bd->bd_le.le_list))
				bd = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

out:
	return try_to_free_buffers(page);
}
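
/* Address space operations for regular GFS2 files */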
const struct address_space_operations gfs2_file_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.prepare_write = gfs2_prepare_write,
	.commit_write = gfs2_commit_write,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
};