/* nao-ulib.git: kernel/2.6.29.6-aldebaran-rt/fs/gfs2/ops_address.c */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
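
/*
 * Note: gfs2_page_add_databufs() below registers every buffer_head that
 * overlaps the byte range [from, to) with the current transaction via
 * gfs2_trans_add_bh().  The buffer_heads of a page form a circular list
 * linked through b_this_page, so the loop condition
 * "bh != head || !start" runs the body for the head buffer first and
 * terminates once the walk wraps back around to it.
 */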
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
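
/*
 * Note: gfs2_get_block_noalloc() takes @create only to satisfy the
 * get_block_t signature; it always calls gfs2_block_map() with create
 * set to 0.  By the time writeback invokes it, the block should already
 * have been allocated (at write_begin time), so an unmapped result is
 * treated as an error.
 */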
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}
/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
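
/*
 * Note: in the writeback path below, mpage_writepage() returns -EAGAIN
 * when it cannot write the page itself (its "confused" case, e.g. when
 * the page carries buffer state it will not handle), so we fall back to
 * the buffer_head based block_write_full_page().
 */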
/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writeback_writepage(struct page *page,
				    struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
	if (ret == -EAGAIN)
		ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	return ret;
}
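
/*
 * Note: for "ordered" mappings the data buffers are added to the
 * transaction (gfs2_page_add_databufs() with the jdata flag clear),
 * which places them on the ordered-write list so that the log flush
 * writes the data out before the journal commits the metadata that
 * refers to it.
 */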
/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_ordered_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;
	int done_trans = 0;

	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (ret)
			goto out_ignore;
		done_trans = 1;
	}
	ret = gfs2_writepage_common(page, wbc);
	if (ret > 0)
		ret = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}
/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider (when not range_cyclic)
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			continue;
		}

		if (!wbc->range_cyclic && page->index > end) {
			ret = 1;
			unlock_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}

		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index || (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0);
			unlock_page(page);
			continue;
		}

		ret = __gfs2_jdata_writepage(page, wbc);

		if (ret || (--(wbc->nr_to_write) <= 0))
			ret = 1;
		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			ret = 1;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		scanned = 1;
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;

		pagevec_release(&pvec);
		cond_resched();
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}
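
/*
 * Note: in the WB_SYNC_ALL case below the log is flushed and the
 * write-out repeated.  Journaled data goes through the log first, so
 * pages can remain pinned there until a log flush; the second pass
 * picks up anything that only became writable after that flush.
 */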
/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
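
/*
 * Note: a "stuffed" file is one small enough for its data to live
 * inside the on-disk inode (dinode) block itself, directly after the
 * struct gfs2_dinode header, rather than in separate data blocks.
 */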
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_disksize);
	memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}
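
/*
 * Note: gfs2_readpage() below enters with the page locked, but the lock
 * ordering is glock first, then page lock.  It therefore drops the page
 * lock, acquires the glock, and relocks the page; if the page changed
 * mapping or became uptodate in the meantime, AOP_TRUNCATED_PAGE tells
 * the VFS to retry with a fresh page lookup.
 */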
/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data actually copied or the error
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);

	(*pos) += size;
	return size;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file being read
 * @mapping: The address space
 * @pages: The list of pages to read
 * @nr_pages: The number of pages
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}
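
/*
 * Note: the journal reservation in gfs2_write_begin() below is sized as
 * follows: RES_DINODE for the inode block, ind_blocks for any new
 * indirect blocks, the data blocks themselves only when the inode
 * journals its data, and RES_STATFS + RES_QUOTA whenever an allocation
 * changes the block counts.
 */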
/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_unlock;

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	page_cache_release(page);
	if (pos + len > ip->i_inode.i_size)
		vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_alloc_put(ip);
	}
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
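
/*
 * Note: adjust_fs_space() runs from the write_end paths when the inode
 * just written is the resource index (rindex), which is how gfs2_grow
 * publishes new resource groups; the statfs figures are recomputed from
 * the new rindex so the extra space becomes visible.
 */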
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);

	spin_lock(&sdp->sd_statfs_spin);
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);
}
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (inode->i_size < to) {
		i_size_write(inode, to);
		ip->i_disksize = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_dinode *di;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (likely(ret >= 0) && (inode->i_size > ip->i_disksize)) {
		di = (struct gfs2_dinode *)dibh->b_data;
		ip->i_disksize = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (al) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}
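
/*
 * Note: SetPageChecked() in gfs2_set_page_dirty() below tags the page so
 * the jdata writepage path knows it must (re)add the page's buffers to a
 * transaction before writing; see the PageChecked() tests in
 * gfs2_jdata_writepage() and __gfs2_jdata_writepage() above.
 */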
/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_le.le_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}
/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O.
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}
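
/*
 * Note: gfs2_releasepage() below makes two passes over the page's
 * buffers.  The first pass, under the log lock, checks that every
 * buffer is releasable (no elevated b_count, not in the AIL, not pinned
 * or dirty); only then does the second pass detach the gfs2_bufdata
 * from each buffer and free it.
 */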
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0 if the buffers cannot be released, otherwise the
 *          result of try_to_free_buffers()
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writeback_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_ordered_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
};
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}
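
/*
 * Reader's note (not part of the original file): the three
 * address_space_operations tables above correspond to GFS2's data
 * journaling modes.  As far as we can tell, the "data=writeback" mount
 * option selects gfs2_writeback_aops, the usual default "data=ordered"
 * selects gfs2_ordered_aops, and inodes carrying the jdata attribute
 * (chattr +j) get gfs2_jdata_aops.
 */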