fs/btrfs/inode.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "ref-cache.h"
#include "compression.h"
#include "locking.h"

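/*
 * arguments used when an inode is looked up in the inode cache: the
 * inode number plus the root it lives in identify the inode
 */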
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;

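/*
 * map the S_IFMT mode bits to the matching BTRFS_FT_* directory entry
 * type, indexed by the mode shifted down by S_SHIFT
 */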
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

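/*
 * initialize the security xattrs and ACLs of a newly created inode,
 * inheriting from the parent directory
 */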
static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
{
	int err;

	err = btrfs_init_acl(inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(inode, dir);
	return err;
}

/*
 * a very lame attempt at stopping writes when the FS is 85% full.  There
 * are countless ways this is incorrect, but it is better than nothing.
 */
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
			   int for_del)
{
	u64 total;
	u64 used;
	u64 thresh;
	int ret = 0;

	spin_lock(&root->fs_info->delalloc_lock);
	total = btrfs_super_total_bytes(&root->fs_info->super_copy);
	used = btrfs_super_bytes_used(&root->fs_info->super_copy);
	if (for_del)
		thresh = total * 90;
	else
		thresh = total * 85;

	do_div(thresh, 100);

	if (used + root->fs_info->delalloc_bytes + num_required > thresh)
		ret = -ENOSPC;
	spin_unlock(&root->fs_info->delalloc_lock);
	return ret;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	BUG_ON(ret);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (use_compress) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap(cpage);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);
	return 0;
fail:
	btrfs_free_path(path);
	return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, root, inode, start,
				 aligned_end, start, &hint_byte);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
	return 0;
}

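/*
 * one contiguous range produced by the compression phase, queued on the
 * async_cow list until the ordered submission phase writes it out
 */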
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	struct list_head list;
};

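/*
 * a unit of work handed to the delalloc worker threads: the range of
 * the file being compressed plus the list of async_extents produced
 */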
struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

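/*
 * record one compressed (or uncompressed fallback) extent on the
 * async_cow work item so the submission phase can write it out later
 */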
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 orig_start;
	u64 disk_num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;

	orig_start = start;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in,
						&total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(!trans);
		btrfs_set_trans_block_group(trans, inode);

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed, pages);
		}
		btrfs_end_transaction(trans, root);
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL, 1, 0,
						     0, 1, 1, 1);
			ret = 0;
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			disk_num_bytes = total_compressed;
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		btrfs_set_flag(inode, NOCOMPRESS);
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret);

		if (start + num_bytes < end && start + num_bytes < actual_end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret;

	if (list_empty(&async_cow->extents))
		return 0;

	trans = btrfs_join_transaction(root, 1);

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			cow_file_range(inode, async_cow->locked_page,
				       async_extent->start,
				       async_extent->start +
				       async_extent->ram_size - 1,
				       &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);
		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent(inode, async_extent->start,
					       ins.objectid,
					       async_extent->ram_size,
					       ins.offset,
					       BTRFS_ORDERED_COMPRESSED);
		BUG_ON(ret);

		btrfs_end_transaction(trans, root);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     NULL, 1, 1, 0, 1, 1, 0);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret);
		trans = btrfs_join_transaction(root, 1);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	btrfs_end_transaction(trans, root);
	return 0;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	actual_end = min_t(u64, isize, end + 1);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL, 1, 1,
						     1, 1, 1, 1);
			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		em->start = start;
		em->orig_start = em->start;

		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 */
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, unlock, 1,
					     1, 0, 0, 0);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue call back to start compression on a file and its pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

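/*
 * split a delalloc range into 512k chunks, queue each chunk on the
 * delalloc worker threads, and throttle the caller once too many
 * async pages are in flight
 */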
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	if (!btrfs_test_opt(root, COMPRESS)) {
		return cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (btrfs_test_flag(inode, NOCOMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

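/*
 * return 1 if any checksums already exist for the given byte range on
 * disk, 0 if none do; used below to force COW when overwriting would
 * leave an extent with only some of its checksums present
 */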
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * called back for nocow writeback.  This checks for snapshots or COW
 * copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  disk_bytenr))
				goto out_check;
			disk_bytenr += btrfs_file_extent_offset(leaf, fi);
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1, page_started,
					     nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				spin_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				spin_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					cur_offset, cur_offset + num_bytes - 1,
					locked_page, 1, 1, 1, 0, 0, 0);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;

	if (btrfs_test_flag(inode, NODATACOW))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (btrfs_test_flag(inode, PREALLOC))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);

	return ret;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
			      unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
				unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock(&root->fs_info->delalloc_lock);
		if (end - start + 1 > root->fs_info->delalloc_bytes) {
			printk(KERN_INFO "btrfs warning: delalloc account "
			       "%llu %llu\n",
			       (unsigned long long)end - start + 1,
			       (unsigned long long)
			       root->fs_info->delalloc_bytes);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			root->fs_info->delalloc_bytes -= end - start + 1;
			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = btrfs_test_flag(inode, NODATASUM);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & (1 << BIO_RW))) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio, NULL);
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 file_offset,
				      struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

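/*
 * mark a range as delalloc in the io_tree; warns when the end offset
 * is page aligned, since callers are expected to pass the inclusive
 * last byte of the range
 */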
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

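/*
 * worker side of the fixup: wait for any running ordered extent that
 * covers the page, then flip the range back to delalloc so writepage
 * can redo it properly
 */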
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			   EXTENT_ORDERED, 0)) {
		goto out;
	}

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			     EXTENT_ORDERED, 0);
	if (ret)
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}

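/*
 * insert the file extent item for a finished ordered write and
 * reference the reserved disk extent from the extent tree
 */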
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, file_pos, &hint);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);
	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
					  root->root_key.objectid,
					  trans->transid, inode->i_ino, &ins);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	trans = btrfs_join_transaction(root, 1);

	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
		goto nocow;

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compressed);
		ret = btrfs_mark_extent_written(trans, root, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		BUG_ON(ret);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
nocow:
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	mutex_lock(&BTRFS_I(inode)->extent_mutex);
	btrfs_ordered_update_i_size(inode, ordered_extent);
	btrfs_update_inode(trans, root, inode);
	btrfs_remove_ordered_extent(inode, ordered_extent);
	mutex_unlock(&BTRFS_I(inode)->extent_mutex);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	btrfs_end_transaction(trans, root);
	return 0;
}

static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em && (em->start > start || em->start + em->len < start)) {
			free_extent_map(em);
			em = NULL;
		}
		spin_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & (1 << BIO_RW))
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						     failrec->last_mirror,
						     failrec->bio_flags);
	return 0;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}
	if (btrfs_test_flag(inode, NODATASUM))
		return 0;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
	       "private %llu\n", page->mapping->host->i_ino,
	       (unsigned long long)start, csum,
	       (unsigned long long)private);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	if (private == 0)
		return 0;
	return -EIO;
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	/* already on the orphan list, we're good */
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);

	spin_unlock(&root->list_lock);

	/*
	 * insert an orphan item to track this unlinked/truncated file
	 */
	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);

	return ret;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_del_init(&BTRFS_I(inode)->i_orphan);
	if (!trans) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	spin_unlock(&root->list_lock);

	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);

	return ret;
}

1863 * this cleans up any orphans that may be left on the list from the last use
1864 * of this root.
1866 void btrfs_orphan_cleanup(struct btrfs_root *root)
1868 struct btrfs_path *path;
1869 struct extent_buffer *leaf;
1870 struct btrfs_item *item;
1871 struct btrfs_key key, found_key;
1872 struct btrfs_trans_handle *trans;
1873 struct inode *inode;
1874 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1876 path = btrfs_alloc_path();
1877 if (!path)
1878 return;
1879 path->reada = -1;
1881 key.objectid = BTRFS_ORPHAN_OBJECTID;
1882 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1883 key.offset = (u64)-1;
1886 while (1) {
1887 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1888 if (ret < 0) {
1889 printk(KERN_ERR "Error searching slot for orphan: %d"
1890 "\n", ret);
1891 break;
1895 * if ret == 0 means we found what we were searching for, which
1896 * is weird, but possible, so only screw with path if we didnt
1897 * find the key and see if we have stuff that matches
1899 if (ret > 0) {
1900 if (path->slots[0] == 0)
1901 break;
1902 path->slots[0]--;
1905 /* pull out the item */
1906 leaf = path->nodes[0];
1907 item = btrfs_item_nr(leaf, path->slots[0]);
1908 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1910 /* make sure the item matches what we want */
1911 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1912 break;
1913 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1914 break;
1916 /* release the path since we're done with it */
1917 btrfs_release_path(root, path);
1920 * this is where we are basically btrfs_lookup, without the
1921 * crossing root thing. we store the inode number in the
1922 * offset of the orphan item.
1924 inode = btrfs_iget_locked(root->fs_info->sb,
1925 found_key.offset, root);
1926 if (!inode)
1927 break;
1929 if (inode->i_state & I_NEW) {
1930 BTRFS_I(inode)->root = root;
1932 /* have to set the location manually */
1933 BTRFS_I(inode)->location.objectid = inode->i_ino;
1934 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
1935 BTRFS_I(inode)->location.offset = 0;
1937 btrfs_read_locked_inode(inode);
1938 unlock_new_inode(inode);
1942 * add this inode to the orphan list so btrfs_orphan_del does
1943 * the proper thing when we hit it
1945 spin_lock(&root->list_lock);
1946 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1947 spin_unlock(&root->list_lock);
1950 * if this is a bad inode, it means we actually succeeded in
1951 * removing the inode, but not the orphan record, which means
1952 * we need to delete the orphan manually, since iput will just
1953 * do a destroy_inode
1955 if (is_bad_inode(inode)) {
1956 trans = btrfs_start_transaction(root, 1);
1957 btrfs_orphan_del(trans, inode);
1958 btrfs_end_transaction(trans, root);
1959 iput(inode);
1960 continue;
1963 /* if we still have links, this was a truncate, so let's do that */
1964 if (inode->i_nlink) {
1965 nr_truncate++;
1966 btrfs_truncate(inode);
1967 } else {
1968 nr_unlink++;
1971 /* this will do delete_inode and everything for us */
1972 iput(inode);
1975 if (nr_unlink)
1976 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
1977 if (nr_truncate)
1978 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
1980 btrfs_free_path(path);
1984 * read an inode from the btree into the in-memory inode
1986 void btrfs_read_locked_inode(struct inode *inode)
1988 struct btrfs_path *path;
1989 struct extent_buffer *leaf;
1990 struct btrfs_inode_item *inode_item;
1991 struct btrfs_timespec *tspec;
1992 struct btrfs_root *root = BTRFS_I(inode)->root;
1993 struct btrfs_key location;
1994 u64 alloc_group_block;
1995 u32 rdev;
1996 int ret;
1998 path = btrfs_alloc_path();
1999 BUG_ON(!path);
2000 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2002 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2003 if (ret)
2004 goto make_bad;
2006 leaf = path->nodes[0];
2007 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2008 struct btrfs_inode_item);
2010 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2011 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2012 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2013 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2014 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2016 tspec = btrfs_inode_atime(inode_item);
2017 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2018 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2020 tspec = btrfs_inode_mtime(inode_item);
2021 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2022 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2024 tspec = btrfs_inode_ctime(inode_item);
2025 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2026 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2028 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2029 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2030 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2031 inode->i_generation = BTRFS_I(inode)->generation;
2032 inode->i_rdev = 0;
2033 rdev = btrfs_inode_rdev(leaf, inode_item);
2035 BTRFS_I(inode)->index_cnt = (u64)-1;
2036 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2038 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2040 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2041 alloc_group_block, 0);
2042 btrfs_free_path(path);
2043 inode_item = NULL;
2045 switch (inode->i_mode & S_IFMT) {
2046 case S_IFREG:
2047 inode->i_mapping->a_ops = &btrfs_aops;
2048 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2049 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2050 inode->i_fop = &btrfs_file_operations;
2051 inode->i_op = &btrfs_file_inode_operations;
2052 break;
2053 case S_IFDIR:
2054 inode->i_fop = &btrfs_dir_file_operations;
2055 if (root == root->fs_info->tree_root)
2056 inode->i_op = &btrfs_dir_ro_inode_operations;
2057 else
2058 inode->i_op = &btrfs_dir_inode_operations;
2059 break;
2060 case S_IFLNK:
2061 inode->i_op = &btrfs_symlink_inode_operations;
2062 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2063 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2064 break;
2065 default:
2066 inode->i_op = &btrfs_special_inode_operations;
2067 init_special_inode(inode, inode->i_mode, rdev);
2068 break;
2070 return;
2072 make_bad:
2073 btrfs_free_path(path);
2074 make_bad_inode(inode);
2078 * given a leaf and an inode, copy the inode fields into the leaf
2080 static void fill_inode_item(struct btrfs_trans_handle *trans,
2081 struct extent_buffer *leaf,
2082 struct btrfs_inode_item *item,
2083 struct inode *inode)
2085 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2086 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2087 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2088 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2089 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2091 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2092 inode->i_atime.tv_sec);
2093 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2094 inode->i_atime.tv_nsec);
2096 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2097 inode->i_mtime.tv_sec);
2098 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2099 inode->i_mtime.tv_nsec);
2101 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2102 inode->i_ctime.tv_sec);
2103 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2104 inode->i_ctime.tv_nsec);
2106 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2107 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2108 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2109 btrfs_set_inode_transid(leaf, item, trans->transid);
2110 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2111 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2112 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2116 * copy everything in the in-memory inode into the btree.
2118 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2119 struct btrfs_root *root, struct inode *inode)
2121 struct btrfs_inode_item *inode_item;
2122 struct btrfs_path *path;
2123 struct extent_buffer *leaf;
2124 int ret;
2126 path = btrfs_alloc_path();
2127 BUG_ON(!path);
2128 ret = btrfs_lookup_inode(trans, root, path,
2129 &BTRFS_I(inode)->location, 1);
2130 if (ret) {
2131 if (ret > 0)
2132 ret = -ENOENT;
2133 goto failed;
2136 btrfs_unlock_up_safe(path, 1);
2137 leaf = path->nodes[0];
2138 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2139 struct btrfs_inode_item);
2141 fill_inode_item(trans, leaf, inode_item, inode);
2142 btrfs_mark_buffer_dirty(leaf);
2143 btrfs_set_inode_last_trans(trans, inode);
2144 ret = 0;
2145 failed:
2146 btrfs_free_path(path);
2147 return ret;
2152 * unlink helper that gets used here in inode.c and in the tree logging
2153 * recovery code.  It removes a link in a directory with a given name, and
2154 * also drops the back refs from the inode to the directory
2156 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2157 struct btrfs_root *root,
2158 struct inode *dir, struct inode *inode,
2159 const char *name, int name_len)
2161 struct btrfs_path *path;
2162 int ret = 0;
2163 struct extent_buffer *leaf;
2164 struct btrfs_dir_item *di;
2165 struct btrfs_key key;
2166 u64 index;
2168 path = btrfs_alloc_path();
2169 if (!path) {
2170 ret = -ENOMEM;
2171 goto err;
2174 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2175 name, name_len, -1);
2176 if (IS_ERR(di)) {
2177 ret = PTR_ERR(di);
2178 goto err;
2180 if (!di) {
2181 ret = -ENOENT;
2182 goto err;
2184 leaf = path->nodes[0];
2185 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2186 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2187 if (ret)
2188 goto err;
2189 btrfs_release_path(root, path);
2191 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2192 inode->i_ino,
2193 dir->i_ino, &index);
2194 if (ret) {
2195 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2196 "inode %lu parent %lu\n", name_len, name,
2197 inode->i_ino, dir->i_ino);
2198 goto err;
2201 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2202 index, name, name_len, -1);
2203 if (IS_ERR(di)) {
2204 ret = PTR_ERR(di);
2205 goto err;
2207 if (!di) {
2208 ret = -ENOENT;
2209 goto err;
2211 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2212 btrfs_release_path(root, path);
2214 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2215 inode, dir->i_ino);
2216 BUG_ON(ret != 0 && ret != -ENOENT);
2217 if (ret != -ENOENT)
2218 BTRFS_I(dir)->log_dirty_trans = trans->transid;
2220 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2221 dir, index);
2222 BUG_ON(ret);
2223 err:
2224 btrfs_free_path(path);
2225 if (ret)
2226 goto out;
2228 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2229 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2230 btrfs_update_inode(trans, root, dir);
2231 btrfs_drop_nlink(inode);
2232 ret = btrfs_update_inode(trans, root, inode);
2233 dir->i_sb->s_dirt = 1;
2234 out:
2235 return ret;
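/*
 * ->unlink handler.  removes the directory entry and the inode's back ref
 * via btrfs_unlink_inode, and if that was the last link, puts the inode
 * on the orphan list so a crash can't leak its space.
 */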
2238 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2240 struct btrfs_root *root;
2241 struct btrfs_trans_handle *trans;
2242 struct inode *inode = dentry->d_inode;
2243 int ret;
2244 unsigned long nr = 0;
2246 root = BTRFS_I(dir)->root;
2248 ret = btrfs_check_free_space(root, 1, 1);
2249 if (ret)
2250 goto fail;
2252 trans = btrfs_start_transaction(root, 1);
2254 btrfs_set_trans_block_group(trans, dir);
2255 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2256 dentry->d_name.name, dentry->d_name.len);
2258 if (inode->i_nlink == 0)
2259 ret = btrfs_orphan_add(trans, inode);
2261 nr = trans->blocks_used;
2263 btrfs_end_transaction_throttle(trans, root);
2264 fail:
2265 btrfs_btree_balance_dirty(root, nr);
2266 return ret;
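/*
 * ->rmdir handler.  the directory inode is put on the orphan list before
 * its entry is removed from the parent, so a crash between the two steps
 * can't leave an unreachable directory behind on disk.
 */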
2269 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2271 struct inode *inode = dentry->d_inode;
2272 int err = 0;
2273 int ret;
2274 struct btrfs_root *root = BTRFS_I(dir)->root;
2275 struct btrfs_trans_handle *trans;
2276 unsigned long nr = 0;
2279 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2280 * the root of a subvolume or snapshot
2282 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2283 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2284 return -ENOTEMPTY;
2287 ret = btrfs_check_free_space(root, 1, 1);
2288 if (ret)
2289 goto fail;
2291 trans = btrfs_start_transaction(root, 1);
2292 btrfs_set_trans_block_group(trans, dir);
2294 err = btrfs_orphan_add(trans, inode);
2295 if (err)
2296 goto fail_trans;
2298 /* now the directory is empty */
2299 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2300 dentry->d_name.name, dentry->d_name.len);
2301 if (!err)
2302 btrfs_i_size_write(inode, 0);
2304 fail_trans:
2305 nr = trans->blocks_used;
2306 ret = btrfs_end_transaction_throttle(trans, root);
2307 fail:
2308 btrfs_btree_balance_dirty(root, nr);
2310 if (ret && !err)
2311 err = ret;
2312 return err;
2315 #if 0
2317 * when truncating bytes in a file, it is possible to avoid reading
2318 * the leaves that contain only checksum items. This can be the
2319 * majority of the IO required to delete a large file, but it must
2320 * be done carefully.
2322 * The keys in the level just above the leaves are checked to make sure
2323 * the lowest key in a given leaf is a csum key, and starts at an offset
2324 * after the new size.
2326 * Then the key for the next leaf is checked to make sure it also has
2327 * a checksum item for the same file. If it does, we know our target leaf
2328 * contains only checksum items, and it can be safely freed without reading
2329 * it.
2331 * This is just an optimization targeted at large files. It may do
2332 * nothing. It will return 0 unless things went badly.
2334 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2335 struct btrfs_root *root,
2336 struct btrfs_path *path,
2337 struct inode *inode, u64 new_size)
2339 struct btrfs_key key;
2340 int ret;
2341 int nritems;
2342 struct btrfs_key found_key;
2343 struct btrfs_key other_key;
2344 struct btrfs_leaf_ref *ref;
2345 u64 leaf_gen;
2346 u64 leaf_start;
2348 path->lowest_level = 1;
2349 key.objectid = inode->i_ino;
2350 key.type = BTRFS_CSUM_ITEM_KEY;
2351 key.offset = new_size;
2352 again:
2353 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2354 if (ret < 0)
2355 goto out;
2357 if (path->nodes[1] == NULL) {
2358 ret = 0;
2359 goto out;
2361 ret = 0;
2362 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2363 nritems = btrfs_header_nritems(path->nodes[1]);
2365 if (!nritems)
2366 goto out;
2368 if (path->slots[1] >= nritems)
2369 goto next_node;
2371 /* did we find a key greater than anything we want to delete? */
2372 if (found_key.objectid > inode->i_ino ||
2373 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2374 goto out;
2376 /* we check the next key in the node to make sure the leaf contains
2377 * only checksum items.  This comparison doesn't work if our
2378 * leaf is the last one in the node
2380 if (path->slots[1] + 1 >= nritems) {
2381 next_node:
2382 /* search forward from the last key in the node, this
2383 * will bring us into the next node in the tree
2385 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2387 /* unlikely, but we inc below, so check to be safe */
2388 if (found_key.offset == (u64)-1)
2389 goto out;
2391 /* search_forward needs a path with locks held, do the
2392 * search again for the original key. It is possible
2393 * this will race with a balance and return a path that
2394 * we could modify, but this drop is just an optimization
2395 * and is allowed to miss some leaves.
2397 btrfs_release_path(root, path);
2398 found_key.offset++;
2400 /* setup a max key for search_forward */
2401 other_key.offset = (u64)-1;
2402 other_key.type = key.type;
2403 other_key.objectid = key.objectid;
2405 path->keep_locks = 1;
2406 ret = btrfs_search_forward(root, &found_key, &other_key,
2407 path, 0, 0);
2408 path->keep_locks = 0;
2409 if (ret || found_key.objectid != key.objectid ||
2410 found_key.type != key.type) {
2411 ret = 0;
2412 goto out;
2415 key.offset = found_key.offset;
2416 btrfs_release_path(root, path);
2417 cond_resched();
2418 goto again;
2421 /* we know there's one more slot after us in the tree,
2422 * read that key so we can verify it is also a checksum item
2424 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2426 if (found_key.objectid < inode->i_ino)
2427 goto next_key;
2429 if (found_key.type != key.type || found_key.offset < new_size)
2430 goto next_key;
2433 * if the key for the next leaf isn't a csum key from this objectid,
2434 * we can't be sure there aren't good items inside this leaf.
2435 * Bail out
2437 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2438 goto out;
2440 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2441 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2443 * it is safe to delete this leaf, it contains only
2444 * csum items from this inode at an offset >= new_size
2446 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2447 BUG_ON(ret);
2449 if (root->ref_cows && leaf_gen < trans->transid) {
2450 ref = btrfs_alloc_leaf_ref(root, 0);
2451 if (ref) {
2452 ref->root_gen = root->root_key.offset;
2453 ref->bytenr = leaf_start;
2454 ref->owner = 0;
2455 ref->generation = leaf_gen;
2456 ref->nritems = 0;
2458 btrfs_sort_leaf_ref(ref);
2460 ret = btrfs_add_leaf_ref(root, ref, 0);
2461 WARN_ON(ret);
2462 btrfs_free_leaf_ref(root, ref);
2463 } else {
2464 WARN_ON(1);
2467 next_key:
2468 btrfs_release_path(root, path);
2470 if (other_key.objectid == inode->i_ino &&
2471 other_key.type == key.type && other_key.offset > key.offset) {
2472 key.offset = other_key.offset;
2473 cond_resched();
2474 goto again;
2476 ret = 0;
2477 out:
2478 /* fixup any changes we've made to the path */
2479 path->lowest_level = 0;
2480 path->keep_locks = 0;
2481 btrfs_release_path(root, path);
2482 return ret;
2485 #endif
2488 * this can truncate away extent items, csum items and directory items.
2489 * It starts at a high offset and removes keys until it can't find
2490 * any higher than new_size
2492 * csum items that cross the new i_size are truncated to the new size
2493 * as well.
2495 * min_type is the minimum key type to truncate down to. If set to 0, this
2496 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2498 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2499 struct btrfs_root *root,
2500 struct inode *inode,
2501 u64 new_size, u32 min_type)
2503 int ret;
2504 struct btrfs_path *path;
2505 struct btrfs_key key;
2506 struct btrfs_key found_key;
2507 u32 found_type = (u8)-1;
2508 struct extent_buffer *leaf;
2509 struct btrfs_file_extent_item *fi;
2510 u64 extent_start = 0;
2511 u64 extent_num_bytes = 0;
2512 u64 item_end = 0;
2513 u64 root_gen = 0;
2514 u64 root_owner = 0;
2515 int found_extent;
2516 int del_item;
2517 int pending_del_nr = 0;
2518 int pending_del_slot = 0;
2519 int extent_type = -1;
2520 int encoding;
2521 u64 mask = root->sectorsize - 1;
2523 if (root->ref_cows)
2524 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2525 path = btrfs_alloc_path();
2526 BUG_ON(!path);
2527 path->reada = -1;
2529 /* FIXME, add redo link to tree so we don't leak on crash */
2530 key.objectid = inode->i_ino;
2531 key.offset = (u64)-1;
2532 key.type = (u8)-1;
2534 btrfs_init_path(path);
2536 search_again:
2537 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2538 if (ret < 0)
2539 goto error;
2541 if (ret > 0) {
2542 /* there are no items in the tree for us to truncate, we're
2543 * done
2545 if (path->slots[0] == 0) {
2546 ret = 0;
2547 goto error;
2549 path->slots[0]--;
2552 while (1) {
2553 fi = NULL;
2554 leaf = path->nodes[0];
2555 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2556 found_type = btrfs_key_type(&found_key);
2557 encoding = 0;
2559 if (found_key.objectid != inode->i_ino)
2560 break;
2562 if (found_type < min_type)
2563 break;
2565 item_end = found_key.offset;
2566 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2567 fi = btrfs_item_ptr(leaf, path->slots[0],
2568 struct btrfs_file_extent_item);
2569 extent_type = btrfs_file_extent_type(leaf, fi);
2570 encoding = btrfs_file_extent_compression(leaf, fi);
2571 encoding |= btrfs_file_extent_encryption(leaf, fi);
2572 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2574 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2575 item_end +=
2576 btrfs_file_extent_num_bytes(leaf, fi);
2577 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2578 item_end += btrfs_file_extent_inline_len(leaf,
2579 fi);
2581 item_end--;
2583 if (item_end < new_size) {
2584 if (found_type == BTRFS_DIR_ITEM_KEY)
2585 found_type = BTRFS_INODE_ITEM_KEY;
2586 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2587 found_type = BTRFS_EXTENT_DATA_KEY;
2588 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2589 found_type = BTRFS_XATTR_ITEM_KEY;
2590 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2591 found_type = BTRFS_INODE_REF_KEY;
2592 else if (found_type)
2593 found_type--;
2594 else
2595 break;
2596 btrfs_set_key_type(&key, found_type);
2597 goto next;
2599 if (found_key.offset >= new_size)
2600 del_item = 1;
2601 else
2602 del_item = 0;
2603 found_extent = 0;
2605 /* FIXME, shrink the extent if the ref count is only 1 */
2606 if (found_type != BTRFS_EXTENT_DATA_KEY)
2607 goto delete;
2609 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2610 u64 num_dec;
2611 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2612 if (!del_item && !encoding) {
2613 u64 orig_num_bytes =
2614 btrfs_file_extent_num_bytes(leaf, fi);
2615 extent_num_bytes = new_size -
2616 found_key.offset + root->sectorsize - 1;
2617 extent_num_bytes = extent_num_bytes &
2618 ~((u64)root->sectorsize - 1);
2619 btrfs_set_file_extent_num_bytes(leaf, fi,
2620 extent_num_bytes);
2621 num_dec = (orig_num_bytes -
2622 extent_num_bytes);
2623 if (root->ref_cows && extent_start != 0)
2624 inode_sub_bytes(inode, num_dec);
2625 btrfs_mark_buffer_dirty(leaf);
2626 } else {
2627 extent_num_bytes =
2628 btrfs_file_extent_disk_num_bytes(leaf,
2629 fi);
2630 /* FIXME blocksize != 4096 */
2631 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2632 if (extent_start != 0) {
2633 found_extent = 1;
2634 if (root->ref_cows)
2635 inode_sub_bytes(inode, num_dec);
2637 root_gen = btrfs_header_generation(leaf);
2638 root_owner = btrfs_header_owner(leaf);
2640 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2642 * we can't truncate inline items that have had
2643 * special encodings
2645 if (!del_item &&
2646 btrfs_file_extent_compression(leaf, fi) == 0 &&
2647 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2648 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2649 u32 size = new_size - found_key.offset;
2651 if (root->ref_cows) {
2652 inode_sub_bytes(inode, item_end + 1 -
2653 new_size);
2655 size =
2656 btrfs_file_extent_calc_inline_size(size);
2657 ret = btrfs_truncate_item(trans, root, path,
2658 size, 1);
2659 BUG_ON(ret);
2660 } else if (root->ref_cows) {
2661 inode_sub_bytes(inode, item_end + 1 -
2662 found_key.offset);
2665 delete:
2666 if (del_item) {
2667 if (!pending_del_nr) {
2668 /* no pending yet, add ourselves */
2669 pending_del_slot = path->slots[0];
2670 pending_del_nr = 1;
2671 } else if (pending_del_nr &&
2672 path->slots[0] + 1 == pending_del_slot) {
2673 /* hop on the pending chunk */
2674 pending_del_nr++;
2675 pending_del_slot = path->slots[0];
2676 } else {
2677 BUG();
2679 } else {
2680 break;
2682 if (found_extent) {
2683 ret = btrfs_free_extent(trans, root, extent_start,
2684 extent_num_bytes,
2685 leaf->start, root_owner,
2686 root_gen, inode->i_ino, 0);
2687 BUG_ON(ret);
2689 next:
2690 if (path->slots[0] == 0) {
2691 if (pending_del_nr)
2692 goto del_pending;
2693 btrfs_release_path(root, path);
2694 if (found_type == BTRFS_INODE_ITEM_KEY)
2695 break;
2696 goto search_again;
2699 path->slots[0]--;
2700 if (pending_del_nr &&
2701 path->slots[0] + 1 != pending_del_slot) {
2702 struct btrfs_key debug;
2703 del_pending:
2704 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2705 pending_del_slot);
2706 ret = btrfs_del_items(trans, root, path,
2707 pending_del_slot,
2708 pending_del_nr);
2709 BUG_ON(ret);
2710 pending_del_nr = 0;
2711 btrfs_release_path(root, path);
2712 if (found_type == BTRFS_INODE_ITEM_KEY)
2713 break;
2714 goto search_again;
2717 ret = 0;
2718 error:
2719 if (pending_del_nr) {
2720 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2721 pending_del_nr);
2723 btrfs_free_path(path);
2724 inode->i_sb->s_dirt = 1;
2725 return ret;
2729 * taken from block_truncate_page, but does COW as it zeroes out
2730 * any bytes left in the last page in the file.
2732 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2734 struct inode *inode = mapping->host;
2735 struct btrfs_root *root = BTRFS_I(inode)->root;
2736 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2737 struct btrfs_ordered_extent *ordered;
2738 char *kaddr;
2739 u32 blocksize = root->sectorsize;
2740 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2741 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2742 struct page *page;
2743 int ret = 0;
2744 u64 page_start;
2745 u64 page_end;
2747 if ((offset & (blocksize - 1)) == 0)
2748 goto out;
2750 ret = -ENOMEM;
2751 again:
2752 page = grab_cache_page(mapping, index);
2753 if (!page)
2754 goto out;
2756 page_start = page_offset(page);
2757 page_end = page_start + PAGE_CACHE_SIZE - 1;
2759 if (!PageUptodate(page)) {
2760 ret = btrfs_readpage(NULL, page);
2761 lock_page(page);
2762 if (page->mapping != mapping) {
2763 unlock_page(page);
2764 page_cache_release(page);
2765 goto again;
2767 if (!PageUptodate(page)) {
2768 ret = -EIO;
2769 goto out_unlock;
2772 wait_on_page_writeback(page);
2774 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2775 set_page_extent_mapped(page);
2777 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2778 if (ordered) {
2779 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2780 unlock_page(page);
2781 page_cache_release(page);
2782 btrfs_start_ordered_extent(inode, ordered, 1);
2783 btrfs_put_ordered_extent(ordered);
2784 goto again;
2787 btrfs_set_extent_delalloc(inode, page_start, page_end);
2788 ret = 0;
2789 if (offset != PAGE_CACHE_SIZE) {
2790 kaddr = kmap(page);
2791 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2792 flush_dcache_page(page);
2793 kunmap(page);
2795 ClearPageChecked(page);
2796 set_page_dirty(page);
2797 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2799 out_unlock:
2800 unlock_page(page);
2801 page_cache_release(page);
2802 out:
2803 return ret;
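/*
 * called (from btrfs_setattr) before i_size is increased.  zeroes the
 * partial tail page and then inserts a file extent with a disk_bytenr of
 * zero (a hole) for every vacant range between the old i_size and the
 * new size, so later lookups see a real hole.
 */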
2806 int btrfs_cont_expand(struct inode *inode, loff_t size)
2808 struct btrfs_trans_handle *trans;
2809 struct btrfs_root *root = BTRFS_I(inode)->root;
2810 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2811 struct extent_map *em;
2812 u64 mask = root->sectorsize - 1;
2813 u64 hole_start = (inode->i_size + mask) & ~mask;
2814 u64 block_end = (size + mask) & ~mask;
2815 u64 last_byte;
2816 u64 cur_offset;
2817 u64 hole_size;
2818 int err;
2820 if (size <= hole_start)
2821 return 0;
2823 err = btrfs_check_free_space(root, 1, 0);
2824 if (err)
2825 return err;
2827 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2829 while (1) {
2830 struct btrfs_ordered_extent *ordered;
2831 btrfs_wait_ordered_range(inode, hole_start,
2832 block_end - hole_start);
2833 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2834 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2835 if (!ordered)
2836 break;
2837 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2838 btrfs_put_ordered_extent(ordered);
2841 trans = btrfs_start_transaction(root, 1);
2842 btrfs_set_trans_block_group(trans, inode);
2844 cur_offset = hole_start;
2845 while (1) {
2846 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2847 block_end - cur_offset, 0);
2848 BUG_ON(IS_ERR(em) || !em);
2849 last_byte = min(extent_map_end(em), block_end);
2850 last_byte = (last_byte + mask) & ~mask;
2851 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2852 u64 hint_byte = 0;
2853 hole_size = last_byte - cur_offset;
2854 err = btrfs_drop_extents(trans, root, inode,
2855 cur_offset,
2856 cur_offset + hole_size,
2857 cur_offset, &hint_byte);
2858 if (err)
2859 break;
2860 err = btrfs_insert_file_extent(trans, root,
2861 inode->i_ino, cur_offset, 0,
2862 0, hole_size, 0, hole_size,
2863 0, 0, 0);
2864 btrfs_drop_extent_cache(inode, hole_start,
2865 last_byte - 1, 0);
2867 free_extent_map(em);
2868 cur_offset = last_byte;
2869 if (err || cur_offset >= block_end)
2870 break;
2873 btrfs_end_transaction(trans, root);
2874 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2875 return err;
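/*
 * ->setattr handler.  growing a regular file requires filling the gap
 * between the old and new size first (btrfs_cont_expand); everything else
 * is handled by inode_setattr, plus an ACL update when the mode changes.
 */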
2878 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2880 struct inode *inode = dentry->d_inode;
2881 int err;
2883 err = inode_change_ok(inode, attr);
2884 if (err)
2885 return err;
2887 if (S_ISREG(inode->i_mode) &&
2888 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
2889 err = btrfs_cont_expand(inode, attr->ia_size);
2890 if (err)
2891 return err;
2894 err = inode_setattr(inode, attr);
2896 if (!err && ((attr->ia_valid & ATTR_MODE)))
2897 err = btrfs_acl_chmod(inode);
2898 return err;
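/*
 * final unlink of an inode: truncate away every item it owns, then drop
 * the orphan item.  if the truncate fails, the on-disk orphan item is
 * left in place so orphan cleanup can finish the job on the next mount.
 */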
2901 void btrfs_delete_inode(struct inode *inode)
2903 struct btrfs_trans_handle *trans;
2904 struct btrfs_root *root = BTRFS_I(inode)->root;
2905 unsigned long nr;
2906 int ret;
2908 truncate_inode_pages(&inode->i_data, 0);
2909 if (is_bad_inode(inode)) {
2910 btrfs_orphan_del(NULL, inode);
2911 goto no_delete;
2913 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2915 btrfs_i_size_write(inode, 0);
2916 trans = btrfs_join_transaction(root, 1);
2918 btrfs_set_trans_block_group(trans, inode);
2919 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2920 if (ret) {
2921 btrfs_orphan_del(NULL, inode);
2922 goto no_delete_lock;
2925 btrfs_orphan_del(trans, inode);
2927 nr = trans->blocks_used;
2928 clear_inode(inode);
2930 btrfs_end_transaction(trans, root);
2931 btrfs_btree_balance_dirty(root, nr);
2932 return;
2934 no_delete_lock:
2935 nr = trans->blocks_used;
2936 btrfs_end_transaction(trans, root);
2937 btrfs_btree_balance_dirty(root, nr);
2938 no_delete:
2939 clear_inode(inode);
2943 * this returns the key found in the dir entry in the location pointer.
2944 * If no dir entries were found, location->objectid is 0.
2946 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2947 struct btrfs_key *location)
2949 const char *name = dentry->d_name.name;
2950 int namelen = dentry->d_name.len;
2951 struct btrfs_dir_item *di;
2952 struct btrfs_path *path;
2953 struct btrfs_root *root = BTRFS_I(dir)->root;
2954 int ret = 0;
2956 path = btrfs_alloc_path();
2957 BUG_ON(!path);
2959 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2960 namelen, 0);
2961 if (IS_ERR(di))
2962 ret = PTR_ERR(di);
2964 if (!di || IS_ERR(di))
2965 goto out_err;
2967 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2968 out:
2969 btrfs_free_path(path);
2970 return ret;
2971 out_err:
2972 location->objectid = 0;
2973 goto out;
2977 * when we hit a tree root in a directory, the btrfs part of the inode
2978 * needs to be changed to reflect the root directory of the tree root. This
2979 * is kind of like crossing a mount point.
2981 static int fixup_tree_root_location(struct btrfs_root *root,
2982 struct btrfs_key *location,
2983 struct btrfs_root **sub_root,
2984 struct dentry *dentry)
2986 struct btrfs_root_item *ri;
2988 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2989 return 0;
2990 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2991 return 0;
2993 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2994 dentry->d_name.name,
2995 dentry->d_name.len);
2996 if (IS_ERR(*sub_root))
2997 return PTR_ERR(*sub_root);
2999 ri = &(*sub_root)->root_item;
3000 location->objectid = btrfs_root_dirid(ri);
3001 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3002 location->offset = 0;
3004 return 0;
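/*
 * set up the btrfs-private portion of a freshly allocated in-core inode:
 * counters, the extent map and io trees, and the ordered extent and
 * delalloc bookkeeping.
 */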
3007 static noinline void init_btrfs_i(struct inode *inode)
3009 struct btrfs_inode *bi = BTRFS_I(inode);
3011 bi->i_acl = NULL;
3012 bi->i_default_acl = NULL;
3014 bi->generation = 0;
3015 bi->sequence = 0;
3016 bi->last_trans = 0;
3017 bi->logged_trans = 0;
3018 bi->delalloc_bytes = 0;
3019 bi->disk_i_size = 0;
3020 bi->flags = 0;
3021 bi->index_cnt = (u64)-1;
3022 bi->log_dirty_trans = 0;
3023 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3024 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3025 inode->i_mapping, GFP_NOFS);
3026 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3027 inode->i_mapping, GFP_NOFS);
3028 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3029 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3030 mutex_init(&BTRFS_I(inode)->extent_mutex);
3031 mutex_init(&BTRFS_I(inode)->log_mutex);
3034 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3036 struct btrfs_iget_args *args = p;
3037 inode->i_ino = args->ino;
3038 init_btrfs_i(inode);
3039 BTRFS_I(inode)->root = args->root;
3040 return 0;
3043 static int btrfs_find_actor(struct inode *inode, void *opaque)
3045 struct btrfs_iget_args *args = opaque;
3046 return args->ino == inode->i_ino &&
3047 args->root == BTRFS_I(inode)->root;
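/*
 * look an inode up in the inode cache without reading it from disk.
 * the root is part of the match because objectids are only unique
 * within a root.
 */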
3050 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3051 struct btrfs_root *root, int wait)
3053 struct inode *inode;
3054 struct btrfs_iget_args args;
3055 args.ino = objectid;
3056 args.root = root;
3058 if (wait) {
3059 inode = ilookup5(s, objectid, btrfs_find_actor,
3060 (void *)&args);
3061 } else {
3062 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3063 (void *)&args);
3065 return inode;
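/*
 * find or allocate the in-core inode for an objectid.  a freshly
 * allocated inode comes back with I_NEW set, and the caller is expected
 * to fill it in and call unlock_new_inode, as btrfs_iget below does.
 */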
3068 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3069 struct btrfs_root *root)
3071 struct inode *inode;
3072 struct btrfs_iget_args args;
3073 args.ino = objectid;
3074 args.root = root;
3076 inode = iget5_locked(s, objectid, btrfs_find_actor,
3077 btrfs_init_locked_inode,
3078 (void *)&args);
3079 return inode;
3082 /* Get an inode object given its location and corresponding root.
3083 * Sets *is_new to tell whether the inode had to be read from disk
3085 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3086 struct btrfs_root *root, int *is_new)
3088 struct inode *inode;
3090 inode = btrfs_iget_locked(s, location->objectid, root);
3091 if (!inode)
3092 return ERR_PTR(-EACCES);
3094 if (inode->i_state & I_NEW) {
3095 BTRFS_I(inode)->root = root;
3096 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3097 btrfs_read_locked_inode(inode);
3098 unlock_new_inode(inode);
3099 if (is_new)
3100 *is_new = 1;
3101 } else {
3102 if (is_new)
3103 *is_new = 0;
3106 return inode;
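/*
 * name -> inode translation used by lookup.  resolves the dir entry to a
 * key, switches roots when the entry points into a subvolume, and then
 * reads the inode itself.
 */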
3109 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3111 struct inode *inode;
3112 struct btrfs_inode *bi = BTRFS_I(dir);
3113 struct btrfs_root *root = bi->root;
3114 struct btrfs_root *sub_root = root;
3115 struct btrfs_key location;
3116 int ret, new;
3118 if (dentry->d_name.len > BTRFS_NAME_LEN)
3119 return ERR_PTR(-ENAMETOOLONG);
3121 ret = btrfs_inode_by_name(dir, dentry, &location);
3123 if (ret < 0)
3124 return ERR_PTR(ret);
3126 inode = NULL;
3127 if (location.objectid) {
3128 ret = fixup_tree_root_location(root, &location, &sub_root,
3129 dentry);
3130 if (ret < 0)
3131 return ERR_PTR(ret);
3132 if (ret > 0)
3133 return ERR_PTR(-ENOENT);
3134 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3135 if (IS_ERR(inode))
3136 return ERR_CAST(inode);
3138 return inode;
3141 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3142 struct nameidata *nd)
3144 struct inode *inode;
3146 if (dentry->d_name.len > BTRFS_NAME_LEN)
3147 return ERR_PTR(-ENAMETOOLONG);
3149 inode = btrfs_lookup_dentry(dir, dentry);
3150 if (IS_ERR(inode))
3151 return ERR_CAST(inode);
3153 return d_splice_alias(inode, dentry);
3156 static unsigned char btrfs_filetype_table[] = {
3157 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
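/*
 * ->readdir.  '.' and '..' are synthesized; everything else comes from
 * walking the directory index items, with f_pos tracking the index key
 * offset.
 */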
3160 static int btrfs_real_readdir(struct file *filp, void *dirent,
3161 filldir_t filldir)
3163 struct inode *inode = filp->f_dentry->d_inode;
3164 struct btrfs_root *root = BTRFS_I(inode)->root;
3165 struct btrfs_item *item;
3166 struct btrfs_dir_item *di;
3167 struct btrfs_key key;
3168 struct btrfs_key found_key;
3169 struct btrfs_path *path;
3170 int ret;
3171 u32 nritems;
3172 struct extent_buffer *leaf;
3173 int slot;
3174 int advance;
3175 unsigned char d_type;
3176 int over = 0;
3177 u32 di_cur;
3178 u32 di_total;
3179 u32 di_len;
3180 int key_type = BTRFS_DIR_INDEX_KEY;
3181 char tmp_name[32];
3182 char *name_ptr;
3183 int name_len;
3185 /* FIXME, use a real flag for deciding about the key type */
3186 if (root->fs_info->tree_root == root)
3187 key_type = BTRFS_DIR_ITEM_KEY;
3189 /* special case for "." */
3190 if (filp->f_pos == 0) {
3191 over = filldir(dirent, ".", 1,
3192 1, inode->i_ino,
3193 DT_DIR);
3194 if (over)
3195 return 0;
3196 filp->f_pos = 1;
3198 /* special case for .., just use the back ref */
3199 if (filp->f_pos == 1) {
3200 u64 pino = parent_ino(filp->f_path.dentry);
3201 over = filldir(dirent, "..", 2,
3202 2, pino, DT_DIR);
3203 if (over)
3204 return 0;
3205 filp->f_pos = 2;
3207 path = btrfs_alloc_path();
3208 path->reada = 2;
3210 btrfs_set_key_type(&key, key_type);
3211 key.offset = filp->f_pos;
3212 key.objectid = inode->i_ino;
3214 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3215 if (ret < 0)
3216 goto err;
3217 advance = 0;
3219 while (1) {
3220 leaf = path->nodes[0];
3221 nritems = btrfs_header_nritems(leaf);
3222 slot = path->slots[0];
3223 if (advance || slot >= nritems) {
3224 if (slot >= nritems - 1) {
3225 ret = btrfs_next_leaf(root, path);
3226 if (ret)
3227 break;
3228 leaf = path->nodes[0];
3229 nritems = btrfs_header_nritems(leaf);
3230 slot = path->slots[0];
3231 } else {
3232 slot++;
3233 path->slots[0]++;
3237 advance = 1;
3238 item = btrfs_item_nr(leaf, slot);
3239 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3241 if (found_key.objectid != key.objectid)
3242 break;
3243 if (btrfs_key_type(&found_key) != key_type)
3244 break;
3245 if (found_key.offset < filp->f_pos)
3246 continue;
3248 filp->f_pos = found_key.offset;
3250 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3251 di_cur = 0;
3252 di_total = btrfs_item_size(leaf, item);
3254 while (di_cur < di_total) {
3255 struct btrfs_key location;
3257 name_len = btrfs_dir_name_len(leaf, di);
3258 if (name_len <= sizeof(tmp_name)) {
3259 name_ptr = tmp_name;
3260 } else {
3261 name_ptr = kmalloc(name_len, GFP_NOFS);
3262 if (!name_ptr) {
3263 ret = -ENOMEM;
3264 goto err;
3267 read_extent_buffer(leaf, name_ptr,
3268 (unsigned long)(di + 1), name_len);
3270 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3271 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3273 /* is this a reference to our own snapshot? If so
3274 * skip it
3276 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3277 location.objectid == root->root_key.objectid) {
3278 over = 0;
3279 goto skip;
3281 over = filldir(dirent, name_ptr, name_len,
3282 found_key.offset, location.objectid,
3283 d_type);
3285 skip:
3286 if (name_ptr != tmp_name)
3287 kfree(name_ptr);
3289 if (over)
3290 goto nopos;
3291 di_len = btrfs_dir_name_len(leaf, di) +
3292 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3293 di_cur += di_len;
3294 di = (struct btrfs_dir_item *)((char *)di + di_len);
3298 /* Reached end of directory/root. Bump pos past the last item. */
3299 if (key_type == BTRFS_DIR_INDEX_KEY)
3300 filp->f_pos = INT_LIMIT(off_t);
3301 else
3302 filp->f_pos++;
3303 nopos:
3304 ret = 0;
3305 err:
3306 btrfs_free_path(path);
3307 return ret;
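/*
 * ->write_inode.  inode items only hit disk as part of a transaction
 * commit, so the synchronous case simply joins and commits the running
 * transaction.
 */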
3310 int btrfs_write_inode(struct inode *inode, int wait)
3312 struct btrfs_root *root = BTRFS_I(inode)->root;
3313 struct btrfs_trans_handle *trans;
3314 int ret = 0;
3316 if (root->fs_info->btree_inode == inode)
3317 return 0;
3319 if (wait) {
3320 trans = btrfs_join_transaction(root, 1);
3321 btrfs_set_trans_block_group(trans, inode);
3322 ret = btrfs_commit_transaction(trans, root);
3324 return ret;
3328 * This is somewhat expensive, updating the tree every time the
3329 * inode changes.  But, it is most likely to find the inode in cache.
3330 * FIXME, needs more benchmarking...whether to keep or drop this code
3331 * is purely a performance question.
3333 void btrfs_dirty_inode(struct inode *inode)
3335 struct btrfs_root *root = BTRFS_I(inode)->root;
3336 struct btrfs_trans_handle *trans;
3338 trans = btrfs_join_transaction(root, 1);
3339 btrfs_set_trans_block_group(trans, inode);
3340 btrfs_update_inode(trans, root, inode);
3341 btrfs_end_transaction(trans, root);
3345 * find the highest existing sequence number in a directory
3346 * and then set the in-memory index_cnt variable to the first
3347 * free sequence number
3349 static int btrfs_set_inode_index_count(struct inode *inode)
3351 struct btrfs_root *root = BTRFS_I(inode)->root;
3352 struct btrfs_key key, found_key;
3353 struct btrfs_path *path;
3354 struct extent_buffer *leaf;
3355 int ret;
3357 key.objectid = inode->i_ino;
3358 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3359 key.offset = (u64)-1;
3361 path = btrfs_alloc_path();
3362 if (!path)
3363 return -ENOMEM;
3365 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3366 if (ret < 0)
3367 goto out;
3368 /* FIXME: we should be able to handle this */
3369 if (ret == 0)
3370 goto out;
3371 ret = 0;
3374 * MAGIC NUMBER EXPLANATION:
3375 * we search a directory based on f_pos, and '.' and '..' have
3376 * f_pos of 0 and 1 respectively, so everybody else has to start
3377 * at 2
3379 if (path->slots[0] == 0) {
3380 BTRFS_I(inode)->index_cnt = 2;
3381 goto out;
3384 path->slots[0]--;
3386 leaf = path->nodes[0];
3387 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3389 if (found_key.objectid != inode->i_ino ||
3390 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3391 BTRFS_I(inode)->index_cnt = 2;
3392 goto out;
3395 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3396 out:
3397 btrfs_free_path(path);
3398 return ret;
3402 * helper to find a free sequence number in a given directory.  The current
3403 * code is very simple, later versions will do smarter things in the btree
3405 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3407 int ret = 0;
3409 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3410 ret = btrfs_set_inode_index_count(dir);
3411 if (ret)
3412 return ret;
3415 *index = BTRFS_I(dir)->index_cnt;
3416 BTRFS_I(dir)->index_cnt++;
3418 return ret;
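/*
 * allocate a new in-core inode and create its inode item and first inode
 * ref (the back ref to the parent directory) with a single btree
 * insertion.
 */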
3421 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3422 struct btrfs_root *root,
3423 struct inode *dir,
3424 const char *name, int name_len,
3425 u64 ref_objectid, u64 objectid,
3426 u64 alloc_hint, int mode, u64 *index)
3428 struct inode *inode;
3429 struct btrfs_inode_item *inode_item;
3430 struct btrfs_key *location;
3431 struct btrfs_path *path;
3432 struct btrfs_inode_ref *ref;
3433 struct btrfs_key key[2];
3434 u32 sizes[2];
3435 unsigned long ptr;
3436 int ret;
3437 int owner;
3439 path = btrfs_alloc_path();
3440 BUG_ON(!path);
3442 inode = new_inode(root->fs_info->sb);
3443 if (!inode) {
btrfs_free_path(path);
3444 return ERR_PTR(-ENOMEM);
}
3446 if (dir) {
3447 ret = btrfs_set_inode_index(dir, index);
3448 if (ret) {
btrfs_free_path(path);
3449 return ERR_PTR(ret);
}
3452 * index_cnt is ignored for everything but a dir,
3453 * btrfs_set_inode_index_count has an explanation for the magic
3454 * number
3456 init_btrfs_i(inode);
3457 BTRFS_I(inode)->index_cnt = 2;
3458 BTRFS_I(inode)->root = root;
3459 BTRFS_I(inode)->generation = trans->transid;
3461 if (mode & S_IFDIR)
3462 owner = 0;
3463 else
3464 owner = 1;
3465 BTRFS_I(inode)->block_group =
3466 btrfs_find_block_group(root, 0, alloc_hint, owner);
3467 if ((mode & S_IFREG)) {
3468 if (btrfs_test_opt(root, NODATASUM))
3469 btrfs_set_flag(inode, NODATASUM);
3470 if (btrfs_test_opt(root, NODATACOW))
3471 btrfs_set_flag(inode, NODATACOW);
3474 key[0].objectid = objectid;
3475 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3476 key[0].offset = 0;
3478 key[1].objectid = objectid;
3479 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3480 key[1].offset = ref_objectid;
3482 sizes[0] = sizeof(struct btrfs_inode_item);
3483 sizes[1] = name_len + sizeof(*ref);
3485 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3486 if (ret != 0)
3487 goto fail;
3489 if (objectid > root->highest_inode)
3490 root->highest_inode = objectid;
3492 inode->i_uid = current_fsuid();
3494 if (dir && (dir->i_mode & S_ISGID)) {
3495 inode->i_gid = dir->i_gid;
3496 if (S_ISDIR(mode))
3497 mode |= S_ISGID;
3498 } else
3499 inode->i_gid = current_fsgid();
3501 inode->i_mode = mode;
3502 inode->i_ino = objectid;
3503 inode_set_bytes(inode, 0);
3504 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3505 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3506 struct btrfs_inode_item);
3507 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3509 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3510 struct btrfs_inode_ref);
3511 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3512 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3513 ptr = (unsigned long)(ref + 1);
3514 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3516 btrfs_mark_buffer_dirty(path->nodes[0]);
3517 btrfs_free_path(path);
3519 location = &BTRFS_I(inode)->location;
3520 location->objectid = objectid;
3521 location->offset = 0;
3522 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3524 insert_inode_hash(inode);
3525 return inode;
3526 fail:
3527 if (dir)
3528 BTRFS_I(dir)->index_cnt--;
3529 btrfs_free_path(path);
3530 return ERR_PTR(ret);
3533 static inline u8 btrfs_inode_type(struct inode *inode)
3535 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3539 * utility function to add 'inode' into 'parent_inode' with
3540 * a given name and a given sequence number.
3541 * if 'add_backref' is true, also insert a backref from the
3542 * inode to the parent directory.
3544 int btrfs_add_link(struct btrfs_trans_handle *trans,
3545 struct inode *parent_inode, struct inode *inode,
3546 const char *name, int name_len, int add_backref, u64 index)
3548 int ret;
3549 struct btrfs_key key;
3550 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3552 key.objectid = inode->i_ino;
3553 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3554 key.offset = 0;
3556 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3557 parent_inode->i_ino,
3558 &key, btrfs_inode_type(inode),
3559 index);
3560 if (ret == 0) {
3561 if (add_backref) {
3562 ret = btrfs_insert_inode_ref(trans, root,
3563 name, name_len,
3564 inode->i_ino,
3565 parent_inode->i_ino,
3566 index);
3568 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3569 name_len * 2);
3570 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3571 ret = btrfs_update_inode(trans, root, parent_inode);
3573 return ret;
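/*
 * link a freshly created non-directory inode into the dcache.  a
 * positive return from btrfs_add_link means the name already existed,
 * so it is mapped to -EEXIST.
 */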
3576 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3577 struct dentry *dentry, struct inode *inode,
3578 int backref, u64 index)
3580 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3581 inode, dentry->d_name.name,
3582 dentry->d_name.len, backref, index);
3583 if (!err) {
3584 d_instantiate(dentry, inode);
3585 return 0;
3587 if (err > 0)
3588 err = -EEXIST;
3589 return err;
3592 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3593 int mode, dev_t rdev)
3595 struct btrfs_trans_handle *trans;
3596 struct btrfs_root *root = BTRFS_I(dir)->root;
3597 struct inode *inode = NULL;
3598 int err;
3599 int drop_inode = 0;
3600 u64 objectid;
3601 unsigned long nr = 0;
3602 u64 index = 0;
3604 if (!new_valid_dev(rdev))
3605 return -EINVAL;
3607 err = btrfs_check_free_space(root, 1, 0);
3608 if (err)
3609 goto fail;
3611 trans = btrfs_start_transaction(root, 1);
3612 btrfs_set_trans_block_group(trans, dir);
3614 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3615 if (err) {
3616 err = -ENOSPC;
3617 goto out_unlock;
3620 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3621 dentry->d_name.len,
3622 dentry->d_parent->d_inode->i_ino, objectid,
3623 BTRFS_I(dir)->block_group, mode, &index);
3624 err = PTR_ERR(inode);
3625 if (IS_ERR(inode))
3626 goto out_unlock;
3628 err = btrfs_init_inode_security(inode, dir);
3629 if (err) {
3630 drop_inode = 1;
3631 goto out_unlock;
3634 btrfs_set_trans_block_group(trans, inode);
3635 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3636 if (err)
3637 drop_inode = 1;
3638 else {
3639 inode->i_op = &btrfs_special_inode_operations;
3640 init_special_inode(inode, inode->i_mode, rdev);
3641 btrfs_update_inode(trans, root, inode);
3643 dir->i_sb->s_dirt = 1;
3644 btrfs_update_inode_block_group(trans, inode);
3645 btrfs_update_inode_block_group(trans, dir);
3646 out_unlock:
3647 nr = trans->blocks_used;
3648 btrfs_end_transaction_throttle(trans, root);
3649 fail:
3650 if (drop_inode) {
3651 inode_dec_link_count(inode);
3652 iput(inode);
3654 btrfs_btree_balance_dirty(root, nr);
3655 return err;
3658 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3659 int mode, struct nameidata *nd)
3661 struct btrfs_trans_handle *trans;
3662 struct btrfs_root *root = BTRFS_I(dir)->root;
3663 struct inode *inode = NULL;
3664 int err;
3665 int drop_inode = 0;
3666 unsigned long nr = 0;
3667 u64 objectid;
3668 u64 index = 0;
3670 err = btrfs_check_free_space(root, 1, 0);
3671 if (err)
3672 goto fail;
3673 trans = btrfs_start_transaction(root, 1);
3674 btrfs_set_trans_block_group(trans, dir);
3676 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3677 if (err) {
3678 err = -ENOSPC;
3679 goto out_unlock;
3682 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3683 dentry->d_name.len,
3684 dentry->d_parent->d_inode->i_ino,
3685 objectid, BTRFS_I(dir)->block_group, mode,
3686 &index);
3687 err = PTR_ERR(inode);
3688 if (IS_ERR(inode))
3689 goto out_unlock;
3691 err = btrfs_init_inode_security(inode, dir);
3692 if (err) {
3693 drop_inode = 1;
3694 goto out_unlock;
3697 btrfs_set_trans_block_group(trans, inode);
3698 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3699 if (err)
3700 drop_inode = 1;
3701 else {
3702 inode->i_mapping->a_ops = &btrfs_aops;
3703 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3704 inode->i_fop = &btrfs_file_operations;
3705 inode->i_op = &btrfs_file_inode_operations;
3706 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3708 dir->i_sb->s_dirt = 1;
3709 btrfs_update_inode_block_group(trans, inode);
3710 btrfs_update_inode_block_group(trans, dir);
3711 out_unlock:
3712 nr = trans->blocks_used;
3713 btrfs_end_transaction_throttle(trans, root);
3714 fail:
3715 if (drop_inode) {
3716 inode_dec_link_count(inode);
3717 iput(inode);
3719 btrfs_btree_balance_dirty(root, nr);
3720 return err;
3723 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3724 struct dentry *dentry)
3726 struct btrfs_trans_handle *trans;
3727 struct btrfs_root *root = BTRFS_I(dir)->root;
3728 struct inode *inode = old_dentry->d_inode;
3729 u64 index;
3730 unsigned long nr = 0;
3731 int err;
3732 int drop_inode = 0;
3734 if (inode->i_nlink == 0)
3735 return -ENOENT;
3737 btrfs_inc_nlink(inode);
3738 err = btrfs_check_free_space(root, 1, 0);
3739 if (err)
3740 goto fail;
3741 err = btrfs_set_inode_index(dir, &index);
3742 if (err)
3743 goto fail;
3745 trans = btrfs_start_transaction(root, 1);
3747 btrfs_set_trans_block_group(trans, dir);
3748 atomic_inc(&inode->i_count);
3750 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3752 if (err)
3753 drop_inode = 1;
3755 dir->i_sb->s_dirt = 1;
3756 btrfs_update_inode_block_group(trans, dir);
3757 err = btrfs_update_inode(trans, root, inode);
3759 if (err)
3760 drop_inode = 1;
3762 nr = trans->blocks_used;
3763 btrfs_end_transaction_throttle(trans, root);
3764 fail:
3765 if (drop_inode) {
3766 inode_dec_link_count(inode);
3767 iput(inode);
3769 btrfs_btree_balance_dirty(root, nr);
3770 return err;
3773 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3775 struct inode *inode = NULL;
3776 struct btrfs_trans_handle *trans;
3777 struct btrfs_root *root = BTRFS_I(dir)->root;
3778 int err = 0;
3779 int drop_on_err = 0;
3780 u64 objectid = 0;
3781 u64 index = 0;
3782 unsigned long nr = 1;
3784 err = btrfs_check_free_space(root, 1, 0);
3785 if (err)
3786 goto out_unlock;
3788 trans = btrfs_start_transaction(root, 1);
3789 btrfs_set_trans_block_group(trans, dir);
3791 if (IS_ERR(trans)) {
3792 err = PTR_ERR(trans);
3793 goto out_unlock;
3796 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3797 if (err) {
3798 err = -ENOSPC;
3799 goto out_unlock;
3802 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3803 dentry->d_name.len,
3804 dentry->d_parent->d_inode->i_ino, objectid,
3805 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3806 &index);
3807 if (IS_ERR(inode)) {
3808 err = PTR_ERR(inode);
3809 goto out_fail;
3812 drop_on_err = 1;
3814 err = btrfs_init_inode_security(inode, dir);
3815 if (err)
3816 goto out_fail;
3818 inode->i_op = &btrfs_dir_inode_operations;
3819 inode->i_fop = &btrfs_dir_file_operations;
3820 btrfs_set_trans_block_group(trans, inode);
3822 btrfs_i_size_write(inode, 0);
3823 err = btrfs_update_inode(trans, root, inode);
3824 if (err)
3825 goto out_fail;
3827 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3828 inode, dentry->d_name.name,
3829 dentry->d_name.len, 0, index);
3830 if (err)
3831 goto out_fail;
3833 d_instantiate(dentry, inode);
3834 drop_on_err = 0;
3835 dir->i_sb->s_dirt = 1;
3836 btrfs_update_inode_block_group(trans, inode);
3837 btrfs_update_inode_block_group(trans, dir);
3839 out_fail:
3840 nr = trans->blocks_used;
3841 btrfs_end_transaction_throttle(trans, root);
3843 out_unlock:
3844 if (drop_on_err)
3845 iput(inode);
3846 btrfs_btree_balance_dirty(root, nr);
3847 return err;
3850 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
3851 * and an extent that you want to insert, deal with overlap and insert
3852 * the new extent into the tree.
3854 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3855 struct extent_map *existing,
3856 struct extent_map *em,
3857 u64 map_start, u64 map_len)
3859 u64 start_diff;
3861 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3862 start_diff = map_start - em->start;
3863 em->start = map_start;
3864 em->len = map_len;
3865 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3866 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3867 em->block_start += start_diff;
3868 em->block_len -= start_diff;
3870 return add_extent_mapping(em_tree, em);
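/*
 * copy a zlib compressed inline extent into a page: read the raw inline
 * bytes out of the leaf and decompress them.  if decompression fails,
 * the affected part of the page is zeroed so no stale data leaks through.
 */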
3873 static noinline int uncompress_inline(struct btrfs_path *path,
3874 struct inode *inode, struct page *page,
3875 size_t pg_offset, u64 extent_offset,
3876 struct btrfs_file_extent_item *item)
3878 int ret;
3879 struct extent_buffer *leaf = path->nodes[0];
3880 char *tmp;
3881 size_t max_size;
3882 unsigned long inline_size;
3883 unsigned long ptr;
3885 WARN_ON(pg_offset != 0);
3886 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3887 inline_size = btrfs_file_extent_inline_item_len(leaf,
3888 btrfs_item_nr(leaf, path->slots[0]));
3889 tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
3890 ptr = btrfs_file_extent_inline_start(item);
3892 read_extent_buffer(leaf, tmp, ptr, inline_size);
3894 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
3895 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3896 inline_size, max_size);
3897 if (ret) {
3898 char *kaddr = kmap_atomic(page, KM_USER0);
3899 unsigned long copy_size = min_t(u64,
3900 PAGE_CACHE_SIZE - pg_offset,
3901 max_size - extent_offset);
3902 memset(kaddr + pg_offset, 0, copy_size);
3903 kunmap_atomic(kaddr, KM_USER0);
3905 kfree(tmp);
3906 return 0;
3910 * a bit scary, this does extent mapping from logical file offset to the disk.
3911 * the ugly parts come from merging extents from the disk with the in-ram
3912 * representation. This gets more complex because of the data=ordered code,
3913 * where the in-ram extents might be locked pending data=ordered completion.
3915 * This also copies inline extents directly into the page.
3918 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3919 size_t pg_offset, u64 start, u64 len,
3920 int create)
3922 int ret;
3923 int err = 0;
3924 u64 bytenr;
3925 u64 extent_start = 0;
3926 u64 extent_end = 0;
3927 u64 objectid = inode->i_ino;
3928 u32 found_type;
3929 struct btrfs_path *path = NULL;
3930 struct btrfs_root *root = BTRFS_I(inode)->root;
3931 struct btrfs_file_extent_item *item;
3932 struct extent_buffer *leaf;
3933 struct btrfs_key found_key;
3934 struct extent_map *em = NULL;
3935 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3936 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3937 struct btrfs_trans_handle *trans = NULL;
3938 int compressed;
3940 again:
3941 spin_lock(&em_tree->lock);
3942 em = lookup_extent_mapping(em_tree, start, len);
3943 if (em)
3944 em->bdev = root->fs_info->fs_devices->latest_bdev;
3945 spin_unlock(&em_tree->lock);
3947 if (em) {
3948 if (em->start > start || em->start + em->len <= start)
3949 free_extent_map(em);
3950 else if (em->block_start == EXTENT_MAP_INLINE && page)
3951 free_extent_map(em);
3952 else
3953 goto out;
3955 em = alloc_extent_map(GFP_NOFS);
3956 if (!em) {
3957 err = -ENOMEM;
3958 goto out;
3960 em->bdev = root->fs_info->fs_devices->latest_bdev;
3961 em->start = EXTENT_MAP_HOLE;
3962 em->orig_start = EXTENT_MAP_HOLE;
3963 em->len = (u64)-1;
3964 em->block_len = (u64)-1;
3966 if (!path) {
3967 path = btrfs_alloc_path();
3968 BUG_ON(!path);
3971 ret = btrfs_lookup_file_extent(trans, root, path,
3972 objectid, start, trans != NULL);
3973 if (ret < 0) {
3974 err = ret;
3975 goto out;
3978 if (ret != 0) {
3979 if (path->slots[0] == 0)
3980 goto not_found;
3981 path->slots[0]--;
3984 leaf = path->nodes[0];
3985 item = btrfs_item_ptr(leaf, path->slots[0],
3986 struct btrfs_file_extent_item);
3987 /* are we inside the extent that was found? */
3988 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3989 found_type = btrfs_key_type(&found_key);
3990 if (found_key.objectid != objectid ||
3991 found_type != BTRFS_EXTENT_DATA_KEY) {
3992 goto not_found;
3995 found_type = btrfs_file_extent_type(leaf, item);
3996 extent_start = found_key.offset;
3997 compressed = btrfs_file_extent_compression(leaf, item);
3998 if (found_type == BTRFS_FILE_EXTENT_REG ||
3999 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4000 extent_end = extent_start +
4001 btrfs_file_extent_num_bytes(leaf, item);
4002 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4003 size_t size;
4004 size = btrfs_file_extent_inline_len(leaf, item);
4005 extent_end = (extent_start + size + root->sectorsize - 1) &
4006 ~((u64)root->sectorsize - 1);
4009 if (start >= extent_end) {
4010 path->slots[0]++;
4011 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4012 ret = btrfs_next_leaf(root, path);
4013 if (ret < 0) {
4014 err = ret;
4015 goto out;
4017 if (ret > 0)
4018 goto not_found;
4019 leaf = path->nodes[0];
4021 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4022 if (found_key.objectid != objectid ||
4023 found_key.type != BTRFS_EXTENT_DATA_KEY)
4024 goto not_found;
4025 if (start + len <= found_key.offset)
4026 goto not_found;
4027 em->start = start;
4028 em->len = found_key.offset - start;
4029 goto not_found_em;
4032 if (found_type == BTRFS_FILE_EXTENT_REG ||
4033 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4034 em->start = extent_start;
4035 em->len = extent_end - extent_start;
4036 em->orig_start = extent_start -
4037 btrfs_file_extent_offset(leaf, item);
4038 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4039 if (bytenr == 0) {
4040 em->block_start = EXTENT_MAP_HOLE;
4041 goto insert;
4043 if (compressed) {
4044 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4045 em->block_start = bytenr;
4046 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4047 item);
4048 } else {
4049 bytenr += btrfs_file_extent_offset(leaf, item);
4050 em->block_start = bytenr;
4051 em->block_len = em->len;
4052 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4053 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4055 goto insert;
4056 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4057 unsigned long ptr;
4058 char *map;
4059 size_t size;
4060 size_t extent_offset;
4061 size_t copy_size;
4063 em->block_start = EXTENT_MAP_INLINE;
4064 if (!page || create) {
4065 em->start = extent_start;
4066 em->len = extent_end - extent_start;
4067 goto out;
4070 size = btrfs_file_extent_inline_len(leaf, item);
4071 extent_offset = page_offset(page) + pg_offset - extent_start;
4072 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4073 size - extent_offset);
4074 em->start = extent_start + extent_offset;
4075 em->len = (copy_size + root->sectorsize - 1) &
4076 ~((u64)root->sectorsize - 1);
4077 em->orig_start = EXTENT_MAP_INLINE;
4078 if (compressed)
4079 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4080 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4081 if (create == 0 && !PageUptodate(page)) {
4082 if (btrfs_file_extent_compression(leaf, item) ==
4083 BTRFS_COMPRESS_ZLIB) {
4084 ret = uncompress_inline(path, inode, page,
4085 pg_offset,
4086 extent_offset, item);
4087 BUG_ON(ret);
4088 } else {
4089 map = kmap(page);
4090 read_extent_buffer(leaf, map + pg_offset, ptr,
4091 copy_size);
4092 kunmap(page);
4094 flush_dcache_page(page);
4095 } else if (create && PageUptodate(page)) {
4096 if (!trans) {
4097 /* no kmap() is outstanding at this point, so there is nothing to kunmap */
4098 free_extent_map(em);
4099 em = NULL;
4100 btrfs_release_path(root, path);
4101 trans = btrfs_join_transaction(root, 1);
4102 goto again;
4104 map = kmap(page);
4105 write_extent_buffer(leaf, map + pg_offset, ptr,
4106 copy_size);
4107 kunmap(page);
4108 btrfs_mark_buffer_dirty(leaf);
4110 set_extent_uptodate(io_tree, em->start,
4111 extent_map_end(em) - 1, GFP_NOFS);
4112 goto insert;
4113 } else {
4114 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4115 WARN_ON(1);
4117 not_found:
4118 em->start = start;
4119 em->len = len;
4120 not_found_em:
4121 em->block_start = EXTENT_MAP_HOLE;
4122 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4123 insert:
4124 btrfs_release_path(root, path);
4125 if (em->start > start || extent_map_end(em) <= start) {
4126 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4127 "[%llu %llu]\n", (unsigned long long)em->start,
4128 (unsigned long long)em->len,
4129 (unsigned long long)start,
4130 (unsigned long long)len);
4131 err = -EIO;
4132 goto out;
4135 err = 0;
4136 spin_lock(&em_tree->lock);
4137 ret = add_extent_mapping(em_tree, em);
4138 /* it is possible that someone inserted the extent into the tree
4139 * while we had the lock dropped. It is also possible that
4140 * an overlapping map exists in the tree
4141 */
4142 if (ret == -EEXIST) {
4143 struct extent_map *existing;
4145 ret = 0;
4147 existing = lookup_extent_mapping(em_tree, start, len);
4148 if (existing && (existing->start > start ||
4149 existing->start + existing->len <= start)) {
4150 free_extent_map(existing);
4151 existing = NULL;
4153 if (!existing) {
4154 existing = lookup_extent_mapping(em_tree, em->start,
4155 em->len);
4156 if (existing) {
4157 err = merge_extent_mapping(em_tree, existing,
4158 em, start,
4159 root->sectorsize);
4160 free_extent_map(existing);
4161 if (err) {
4162 free_extent_map(em);
4163 em = NULL;
4165 } else {
4166 err = -EIO;
4167 free_extent_map(em);
4168 em = NULL;
4170 } else {
4171 free_extent_map(em);
4172 em = existing;
4173 err = 0;
4176 spin_unlock(&em_tree->lock);
4177 out:
4178 if (path)
4179 btrfs_free_path(path);
4180 if (trans) {
4181 ret = btrfs_end_transaction(trans, root);
4182 if (!err)
4183 err = ret;
4185 if (err) {
4186 free_extent_map(em);
4187 WARN_ON(1);
4188 return ERR_PTR(err);
4190 return em;
4191 }
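/*
 * Illustrative caller sketch (hypothetical helper, not part of this
 * file). btrfs_get_extent() returns an extent_map that holds a
 * reference, so a typical read-side lookup checks for holes and then
 * drops that reference:
 */
static int sketch_lookup(struct inode *inode, u64 start, u64 len)
{
	struct extent_map *em;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);
	if (em->block_start == EXTENT_MAP_HOLE) {
		/* the range is a hole: no disk blocks are allocated */
	}
	free_extent_map(em);	/* drop the lookup reference */
	return 0;
}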
4193 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4194 const struct iovec *iov, loff_t offset,
4195 unsigned long nr_segs)
4197 return -EINVAL;
4200 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4201 __u64 start, __u64 len)
4203 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4206 int btrfs_readpage(struct file *file, struct page *page)
4208 struct extent_io_tree *tree;
4209 tree = &BTRFS_I(page->mapping->host)->io_tree;
4210 return extent_read_full_page(tree, page, btrfs_get_extent);
4213 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4215 struct extent_io_tree *tree;
4218 if (current->flags & PF_MEMALLOC) {
4219 redirty_page_for_writepage(wbc, page);
4220 unlock_page(page);
4221 return 0;
4223 tree = &BTRFS_I(page->mapping->host)->io_tree;
4224 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4227 int btrfs_writepages(struct address_space *mapping,
4228 struct writeback_control *wbc)
4230 struct extent_io_tree *tree;
4232 tree = &BTRFS_I(mapping->host)->io_tree;
4233 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4236 static int
4237 btrfs_readpages(struct file *file, struct address_space *mapping,
4238 struct list_head *pages, unsigned nr_pages)
4240 struct extent_io_tree *tree;
4241 tree = &BTRFS_I(mapping->host)->io_tree;
4242 return extent_readpages(tree, mapping, pages, nr_pages,
4243 btrfs_get_extent);
4245 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4247 struct extent_io_tree *tree;
4248 struct extent_map_tree *map;
4249 int ret;
4251 tree = &BTRFS_I(page->mapping->host)->io_tree;
4252 map = &BTRFS_I(page->mapping->host)->extent_tree;
4253 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4254 if (ret == 1) {
4255 ClearPagePrivate(page);
4256 set_page_private(page, 0);
4257 page_cache_release(page);
4259 return ret;
4262 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4264 if (PageWriteback(page) || PageDirty(page))
4265 return 0;
4266 return __btrfs_releasepage(page, gfp_flags);
4269 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4271 struct extent_io_tree *tree;
4272 struct btrfs_ordered_extent *ordered;
4273 u64 page_start = page_offset(page);
4274 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4276 wait_on_page_writeback(page);
4277 tree = &BTRFS_I(page->mapping->host)->io_tree;
4278 if (offset) {
4279 btrfs_releasepage(page, GFP_NOFS);
4280 return;
4283 lock_extent(tree, page_start, page_end, GFP_NOFS);
4284 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4285 page_offset(page));
4286 if (ordered) {
4287 /*
4288 * IO on this page will never be started, so we need
4289 * to account for any ordered extents now
4290 */
4291 clear_extent_bit(tree, page_start, page_end,
4292 EXTENT_DIRTY | EXTENT_DELALLOC |
4293 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4294 btrfs_finish_ordered_io(page->mapping->host,
4295 page_start, page_end);
4296 btrfs_put_ordered_extent(ordered);
4297 lock_extent(tree, page_start, page_end, GFP_NOFS);
4299 clear_extent_bit(tree, page_start, page_end,
4300 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4301 EXTENT_ORDERED,
4302 1, 1, GFP_NOFS);
4303 __btrfs_releasepage(page, GFP_NOFS);
4305 ClearPageChecked(page);
4306 if (PagePrivate(page)) {
4307 ClearPagePrivate(page);
4308 set_page_private(page, 0);
4309 page_cache_release(page);
4313 /*
4314 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4315 * called from a page fault handler when a page is first dirtied. Hence we must
4316 * be careful to check for EOF conditions here. We set the page up correctly
4317 * for a written page which means we get ENOSPC checking when writing into
4318 * holes and correct delalloc and unwritten extent mapping on filesystems that
4319 * support these features.
4321 * We are not allowed to take the i_mutex here so we have to play games to
4322 * protect against truncate races as the page could now be beyond EOF. Because
4323 * vmtruncate() writes the inode size before removing pages, once we have the
4324 * page lock we can determine safely if the page is beyond EOF. If it is not
4325 * beyond EOF, then the page is guaranteed safe against truncation until we
4326 * unlock the page.
4327 */
4328 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
4330 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4331 struct btrfs_root *root = BTRFS_I(inode)->root;
4332 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4333 struct btrfs_ordered_extent *ordered;
4334 char *kaddr;
4335 unsigned long zero_start;
4336 loff_t size;
4337 int ret;
4338 u64 page_start;
4339 u64 page_end;
4341 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
4342 if (ret)
4343 goto out;
4345 ret = -EINVAL;
4346 again:
4347 lock_page(page);
4348 size = i_size_read(inode);
4349 page_start = page_offset(page);
4350 page_end = page_start + PAGE_CACHE_SIZE - 1;
4352 if ((page->mapping != inode->i_mapping) ||
4353 (page_start >= size)) {
4354 /* page got truncated out from underneath us */
4355 goto out_unlock;
4357 wait_on_page_writeback(page);
4359 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4360 set_page_extent_mapped(page);
4362 /*
4363 * we can't set the delalloc bits if there are pending ordered
4364 * extents. Drop our locks and wait for them to finish
4365 */
4366 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4367 if (ordered) {
4368 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4369 unlock_page(page);
4370 btrfs_start_ordered_extent(inode, ordered, 1);
4371 btrfs_put_ordered_extent(ordered);
4372 goto again;
4375 btrfs_set_extent_delalloc(inode, page_start, page_end);
4376 ret = 0;
4378 /* page is wholly or partially inside EOF */
4379 if (page_start + PAGE_CACHE_SIZE > size)
4380 zero_start = size & ~PAGE_CACHE_MASK;
4381 else
4382 zero_start = PAGE_CACHE_SIZE;
4384 if (zero_start != PAGE_CACHE_SIZE) {
4385 kaddr = kmap(page);
4386 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4387 flush_dcache_page(page);
4388 kunmap(page);
4390 ClearPageChecked(page);
4391 set_page_dirty(page);
4392 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4394 out_unlock:
4395 unlock_page(page);
4396 out:
4397 return ret;
4398 }
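/*
 * Illustrative wiring sketch (an assumption: in btrfs the real hookup
 * lives in file.c, not in this file). A ->page_mkwrite handler only
 * takes effect once ->mmap installs it in the vma's operations:
 */
static struct vm_operations_struct sketch_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int sketch_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &sketch_file_vm_ops;
	return 0;
}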
4400 static void btrfs_truncate(struct inode *inode)
4402 struct btrfs_root *root = BTRFS_I(inode)->root;
4403 int ret;
4404 struct btrfs_trans_handle *trans;
4405 unsigned long nr;
4406 u64 mask = root->sectorsize - 1;
4408 if (!S_ISREG(inode->i_mode))
4409 return;
4410 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4411 return;
4413 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4414 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4416 trans = btrfs_start_transaction(root, 1);
4417 btrfs_set_trans_block_group(trans, inode);
4418 btrfs_i_size_write(inode, inode->i_size);
4420 ret = btrfs_orphan_add(trans, inode);
4421 if (ret)
4422 goto out;
4423 /* FIXME, add redo link to tree so we don't leak on crash */
4424 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4425 BTRFS_EXTENT_DATA_KEY);
4426 btrfs_update_inode(trans, root, inode);
4428 ret = btrfs_orphan_del(trans, inode);
4429 BUG_ON(ret);
4431 out:
4432 nr = trans->blocks_used;
4433 ret = btrfs_end_transaction_throttle(trans, root);
4434 BUG_ON(ret);
4435 btrfs_btree_balance_dirty(root, nr);
4436 }
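/*
 * Worked example (illustrative): mask = sectorsize - 1, so with 4096
 * byte sectors mask is 0xfff and, for an i_size of 10000, the wait
 * above starts at
 *
 *   10000 & ~0xfff = 8192
 *
 * i.e. the first byte of the sector that contains the new EOF; all
 * ordered IO from there on must finish before the truncate proceeds.
 */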
4438 /*
4439 * create a new subvolume directory/inode (helper for the ioctl).
4440 */
4441 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4442 struct btrfs_root *new_root, struct dentry *dentry,
4443 u64 new_dirid, u64 alloc_hint)
4445 struct inode *inode;
4446 int error;
4447 u64 index = 0;
4449 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4450 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4451 if (IS_ERR(inode))
4452 return PTR_ERR(inode);
4453 inode->i_op = &btrfs_dir_inode_operations;
4454 inode->i_fop = &btrfs_dir_file_operations;
4456 inode->i_nlink = 1;
4457 btrfs_i_size_write(inode, 0);
4459 error = btrfs_update_inode(trans, new_root, inode);
4460 if (error)
4461 return error;
4463 d_instantiate(dentry, inode);
4464 return 0;
4467 /* helper function for file defrag and space balancing. This
4468 * forces readahead on a given range of bytes in an inode
4469 */
4470 unsigned long btrfs_force_ra(struct address_space *mapping,
4471 struct file_ra_state *ra, struct file *file,
4472 pgoff_t offset, pgoff_t last_index)
4474 pgoff_t req_size = last_index - offset + 1;
4476 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4477 return offset + req_size;
4478 }
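/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): to force readahead over a byte range, convert the range to
 * page indexes first:
 */
static void sketch_ra_byte_range(struct file *file, u64 start, u64 len)
{
	pgoff_t first = start >> PAGE_CACHE_SHIFT;
	pgoff_t last = (start + len - 1) >> PAGE_CACHE_SHIFT;

	btrfs_force_ra(file->f_mapping, &file->f_ra, file, first, last);
}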
4480 struct inode *btrfs_alloc_inode(struct super_block *sb)
4482 struct btrfs_inode *ei;
4484 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4485 if (!ei)
4486 return NULL;
4487 ei->last_trans = 0;
4488 ei->logged_trans = 0;
4489 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4490 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4491 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4492 INIT_LIST_HEAD(&ei->i_orphan);
4493 return &ei->vfs_inode;
4496 void btrfs_destroy_inode(struct inode *inode)
4498 struct btrfs_ordered_extent *ordered;
4499 WARN_ON(!list_empty(&inode->i_dentry));
4500 WARN_ON(inode->i_data.nrpages);
4502 if (BTRFS_I(inode)->i_acl &&
4503 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4504 posix_acl_release(BTRFS_I(inode)->i_acl);
4505 if (BTRFS_I(inode)->i_default_acl &&
4506 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4507 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4509 spin_lock(&BTRFS_I(inode)->root->list_lock);
4510 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4511 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4512 " list\n", inode->i_ino);
4513 dump_stack();
4515 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4517 while (1) {
4518 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4519 if (!ordered)
4520 break;
4521 else {
4522 printk(KERN_ERR "btrfs found ordered "
4523 "extent %llu %llu on inode cleanup\n",
4524 (unsigned long long)ordered->file_offset,
4525 (unsigned long long)ordered->len);
4526 btrfs_remove_ordered_extent(inode, ordered);
4527 btrfs_put_ordered_extent(ordered);
4528 btrfs_put_ordered_extent(ordered);
4531 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4532 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4535 static void init_once(void *foo)
4537 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4539 inode_init_once(&ei->vfs_inode);
4542 void btrfs_destroy_cachep(void)
4544 if (btrfs_inode_cachep)
4545 kmem_cache_destroy(btrfs_inode_cachep);
4546 if (btrfs_trans_handle_cachep)
4547 kmem_cache_destroy(btrfs_trans_handle_cachep);
4548 if (btrfs_transaction_cachep)
4549 kmem_cache_destroy(btrfs_transaction_cachep);
4550 if (btrfs_bit_radix_cachep)
4551 kmem_cache_destroy(btrfs_bit_radix_cachep);
4552 if (btrfs_path_cachep)
4553 kmem_cache_destroy(btrfs_path_cachep);
4556 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4557 unsigned long extra_flags,
4558 void (*ctor)(void *))
4560 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4561 SLAB_MEM_SPREAD | extra_flags), ctor);
4564 int btrfs_init_cachep(void)
4566 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4567 sizeof(struct btrfs_inode),
4568 0, init_once);
4569 if (!btrfs_inode_cachep)
4570 goto fail;
4571 btrfs_trans_handle_cachep =
4572 btrfs_cache_create("btrfs_trans_handle_cache",
4573 sizeof(struct btrfs_trans_handle),
4574 0, NULL);
4575 if (!btrfs_trans_handle_cachep)
4576 goto fail;
4577 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4578 sizeof(struct btrfs_transaction),
4579 0, NULL);
4580 if (!btrfs_transaction_cachep)
4581 goto fail;
4582 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
4583 sizeof(struct btrfs_path),
4584 0, NULL);
4585 if (!btrfs_path_cachep)
4586 goto fail;
4587 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
4588 SLAB_DESTROY_BY_RCU, NULL);
4589 if (!btrfs_bit_radix_cachep)
4590 goto fail;
4591 return 0;
4592 fail:
4593 btrfs_destroy_cachep();
4594 return -ENOMEM;
4595 }
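/*
 * Illustrative pairing sketch (an assumption: btrfs drives this from
 * its module init/exit paths in super.c). btrfs_init_cachep() already
 * tears the caches down on failure, so a caller only handles the
 * success/error split:
 */
static int __init sketch_module_init(void)
{
	int err = btrfs_init_cachep();

	if (err)
		return err;	/* caches were already destroyed */
	/* ... register the filesystem, start workers, etc ... */
	return 0;
}

static void __exit sketch_module_exit(void)
{
	/* ... unregister the filesystem first ... */
	btrfs_destroy_cachep();
}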
4597 static int btrfs_getattr(struct vfsmount *mnt,
4598 struct dentry *dentry, struct kstat *stat)
4600 struct inode *inode = dentry->d_inode;
4601 generic_fillattr(inode, stat);
4602 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
4603 stat->blksize = PAGE_CACHE_SIZE;
4604 stat->blocks = (inode_get_bytes(inode) +
4605 BTRFS_I(inode)->delalloc_bytes) >> 9;
4606 return 0;
4607 }
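/*
 * Worked example (illustrative): st_blocks is reported in 512 byte
 * units, which is what the >> 9 above converts to. With 8192 bytes
 * already on disk and 4096 bytes of pending delalloc:
 *
 *   (8192 + 4096) >> 9 = 24 blocks
 *
 * so stat() also accounts for dirty data that has no allocation yet.
 */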
4609 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4610 struct inode *new_dir, struct dentry *new_dentry)
4612 struct btrfs_trans_handle *trans;
4613 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4614 struct inode *new_inode = new_dentry->d_inode;
4615 struct inode *old_inode = old_dentry->d_inode;
4616 struct timespec ctime = CURRENT_TIME;
4617 u64 index = 0;
4618 int ret;
4620 /* we're not allowed to rename between subvolumes */
4621 if (BTRFS_I(old_inode)->root->root_key.objectid !=
4622 BTRFS_I(new_dir)->root->root_key.objectid)
4623 return -EXDEV;
4625 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4626 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4627 return -ENOTEMPTY;
4630 /* to rename a snapshot or subvolume, we need to juggle the
4631 * backrefs. This isn't coded yet
4632 */
4633 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4634 return -EXDEV;
4636 ret = btrfs_check_free_space(root, 1, 0);
4637 if (ret)
4638 goto out_unlock;
4640 trans = btrfs_start_transaction(root, 1);
4642 btrfs_set_trans_block_group(trans, new_dir);
4644 btrfs_inc_nlink(old_dentry->d_inode);
4645 old_dir->i_ctime = old_dir->i_mtime = ctime;
4646 new_dir->i_ctime = new_dir->i_mtime = ctime;
4647 old_inode->i_ctime = ctime;
4649 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4650 old_dentry->d_name.name,
4651 old_dentry->d_name.len);
4652 if (ret)
4653 goto out_fail;
4655 if (new_inode) {
4656 new_inode->i_ctime = CURRENT_TIME;
4657 ret = btrfs_unlink_inode(trans, root, new_dir,
4658 new_dentry->d_inode,
4659 new_dentry->d_name.name,
4660 new_dentry->d_name.len);
4661 if (ret)
4662 goto out_fail;
4663 if (new_inode->i_nlink == 0) {
4664 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4665 if (ret)
4666 goto out_fail;
4670 ret = btrfs_set_inode_index(new_dir, &index);
4671 if (ret)
4672 goto out_fail;
4674 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4675 old_inode, new_dentry->d_name.name,
4676 new_dentry->d_name.len, 1, index);
4677 if (ret)
4678 goto out_fail;
4680 out_fail:
4681 btrfs_end_transaction_throttle(trans, root);
4682 out_unlock:
4683 return ret;
4684 }
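/*
 * Illustrative userspace view (hypothetical helper, not btrfs code):
 * a rename across subvolumes fails with EXDEV exactly like a rename
 * across mount points, and callers fall back to copy + unlink:
 */
#include <stdio.h>
#include <errno.h>

static int sketch_move(const char *from, const char *to)
{
	if (rename(from, to) == 0)
		return 0;
	if (errno == EXDEV) {
		/*
		 * from and to live in different subvolumes (or different
		 * filesystems): fall back to copy + unlink
		 */
	}
	return -1;
}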
4686 /*
4687 * some fairly slow code that needs optimization. This walks the list
4688 * of all the inodes with pending delalloc and forces them to disk.
4689 */
4690 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4692 struct list_head *head = &root->fs_info->delalloc_inodes;
4693 struct btrfs_inode *binode;
4694 struct inode *inode;
4696 if (root->fs_info->sb->s_flags & MS_RDONLY)
4697 return -EROFS;
4699 spin_lock(&root->fs_info->delalloc_lock);
4700 while (!list_empty(head)) {
4701 binode = list_entry(head->next, struct btrfs_inode,
4702 delalloc_inodes);
4703 inode = igrab(&binode->vfs_inode);
4704 if (!inode)
4705 list_del_init(&binode->delalloc_inodes);
4706 spin_unlock(&root->fs_info->delalloc_lock);
4707 if (inode) {
4708 filemap_flush(inode->i_mapping);
4709 iput(inode);
4711 cond_resched();
4712 spin_lock(&root->fs_info->delalloc_lock);
4714 spin_unlock(&root->fs_info->delalloc_lock);
4716 /* the filemap_flush will queue IO into the worker threads, but
4717 * we have to make sure the IO is actually started and that
4718 * ordered extents get created before we return
4719 */
4720 atomic_inc(&root->fs_info->async_submit_draining);
4721 while (atomic_read(&root->fs_info->nr_async_submits) ||
4722 atomic_read(&root->fs_info->async_delalloc_pages)) {
4723 wait_event(root->fs_info->async_submit_wait,
4724 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
4725 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
4727 atomic_dec(&root->fs_info->async_submit_draining);
4728 return 0;
4731 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4732 const char *symname)
4734 struct btrfs_trans_handle *trans;
4735 struct btrfs_root *root = BTRFS_I(dir)->root;
4736 struct btrfs_path *path;
4737 struct btrfs_key key;
4738 struct inode *inode = NULL;
4739 int err;
4740 int drop_inode = 0;
4741 u64 objectid;
4742 u64 index = 0;
4743 int name_len;
4744 int datasize;
4745 unsigned long ptr;
4746 struct btrfs_file_extent_item *ei;
4747 struct extent_buffer *leaf;
4748 unsigned long nr = 0;
4750 name_len = strlen(symname) + 1;
4751 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4752 return -ENAMETOOLONG;
4754 err = btrfs_check_free_space(root, 1, 0);
4755 if (err)
4756 goto out_fail;
4758 trans = btrfs_start_transaction(root, 1);
4759 btrfs_set_trans_block_group(trans, dir);
4761 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4762 if (err) {
4763 err = -ENOSPC;
4764 goto out_unlock;
4767 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4768 dentry->d_name.len,
4769 dentry->d_parent->d_inode->i_ino, objectid,
4770 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4771 &index);
4772 err = PTR_ERR(inode);
4773 if (IS_ERR(inode))
4774 goto out_unlock;
4776 err = btrfs_init_inode_security(inode, dir);
4777 if (err) {
4778 drop_inode = 1;
4779 goto out_unlock;
4782 btrfs_set_trans_block_group(trans, inode);
4783 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4784 if (err)
4785 drop_inode = 1;
4786 else {
4787 inode->i_mapping->a_ops = &btrfs_aops;
4788 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4789 inode->i_fop = &btrfs_file_operations;
4790 inode->i_op = &btrfs_file_inode_operations;
4791 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4793 dir->i_sb->s_dirt = 1;
4794 btrfs_update_inode_block_group(trans, inode);
4795 btrfs_update_inode_block_group(trans, dir);
4796 if (drop_inode)
4797 goto out_unlock;
4799 path = btrfs_alloc_path();
4800 BUG_ON(!path);
4801 key.objectid = inode->i_ino;
4802 key.offset = 0;
4803 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
4804 datasize = btrfs_file_extent_calc_inline_size(name_len);
4805 err = btrfs_insert_empty_item(trans, root, path, &key,
4806 datasize);
4807 if (err) {
4808 drop_inode = 1;
4809 goto out_unlock;
4811 leaf = path->nodes[0];
4812 ei = btrfs_item_ptr(leaf, path->slots[0],
4813 struct btrfs_file_extent_item);
4814 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
4815 btrfs_set_file_extent_type(leaf, ei,
4816 BTRFS_FILE_EXTENT_INLINE);
4817 btrfs_set_file_extent_encryption(leaf, ei, 0);
4818 btrfs_set_file_extent_compression(leaf, ei, 0);
4819 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
4820 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
4822 ptr = btrfs_file_extent_inline_start(ei);
4823 write_extent_buffer(leaf, symname, ptr, name_len);
4824 btrfs_mark_buffer_dirty(leaf);
4825 btrfs_free_path(path);
4827 inode->i_op = &btrfs_symlink_inode_operations;
4828 inode->i_mapping->a_ops = &btrfs_symlink_aops;
4829 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4830 inode_set_bytes(inode, name_len);
4831 btrfs_i_size_write(inode, name_len - 1);
4832 err = btrfs_update_inode(trans, root, inode);
4833 if (err)
4834 drop_inode = 1;
4836 out_unlock:
4837 nr = trans->blocks_used;
4838 btrfs_end_transaction_throttle(trans, root);
4839 out_fail:
4840 if (drop_inode) {
4841 inode_dec_link_count(inode);
4842 iput(inode);
4844 btrfs_btree_balance_dirty(root, nr);
4845 return err;
4846 }
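/*
 * Worked example (illustrative): for symlink("target", ...) the code
 * above stores name_len = strlen("target") + 1 = 7 bytes inline, NUL
 * included, but sets i_size to name_len - 1 = 6, which is the length
 * readlink(2) reports. The ENAMETOOLONG check bounds name_len by the
 * inline data room of a single leaf item.
 */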
4848 static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
4849 u64 alloc_hint, int mode)
4851 struct btrfs_trans_handle *trans;
4852 struct btrfs_root *root = BTRFS_I(inode)->root;
4853 struct btrfs_key ins;
4854 u64 alloc_size;
4855 u64 cur_offset = start;
4856 u64 num_bytes = end - start;
4857 int ret = 0;
4859 trans = btrfs_join_transaction(root, 1);
4860 BUG_ON(!trans);
4861 btrfs_set_trans_block_group(trans, inode);
4863 while (num_bytes > 0) {
4864 alloc_size = min(num_bytes, root->fs_info->max_extent);
4865 ret = btrfs_reserve_extent(trans, root, alloc_size,
4866 root->sectorsize, 0, alloc_hint,
4867 (u64)-1, &ins, 1);
4868 if (ret) {
4869 WARN_ON(1);
4870 goto out;
4872 ret = insert_reserved_file_extent(trans, inode,
4873 cur_offset, ins.objectid,
4874 ins.offset, ins.offset,
4875 ins.offset, 0, 0, 0,
4876 BTRFS_FILE_EXTENT_PREALLOC);
4877 BUG_ON(ret);
4878 num_bytes -= ins.offset;
4879 cur_offset += ins.offset;
4880 alloc_hint = ins.objectid + ins.offset;
4882 out:
4883 if (cur_offset > start) {
4884 inode->i_ctime = CURRENT_TIME;
4885 btrfs_set_flag(inode, PREALLOC);
4886 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4887 cur_offset > i_size_read(inode))
4888 btrfs_i_size_write(inode, cur_offset);
4889 ret = btrfs_update_inode(trans, root, inode);
4890 BUG_ON(ret);
4893 btrfs_end_transaction(trans, root);
4894 return ret;
4897 static long btrfs_fallocate(struct inode *inode, int mode,
4898 loff_t offset, loff_t len)
4900 u64 cur_offset;
4901 u64 last_byte;
4902 u64 alloc_start;
4903 u64 alloc_end;
4904 u64 alloc_hint = 0;
4905 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
4906 struct extent_map *em;
4907 int ret;
4909 alloc_start = offset & ~mask;
4910 alloc_end = (offset + len + mask) & ~mask;
4912 mutex_lock(&inode->i_mutex);
4913 if (alloc_start > inode->i_size) {
4914 ret = btrfs_cont_expand(inode, alloc_start);
4915 if (ret)
4916 goto out;
4919 while (1) {
4920 struct btrfs_ordered_extent *ordered;
4921 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
4922 alloc_end - 1, GFP_NOFS);
4923 ordered = btrfs_lookup_first_ordered_extent(inode,
4924 alloc_end - 1);
4925 if (ordered &&
4926 ordered->file_offset + ordered->len > alloc_start &&
4927 ordered->file_offset < alloc_end) {
4928 btrfs_put_ordered_extent(ordered);
4929 unlock_extent(&BTRFS_I(inode)->io_tree,
4930 alloc_start, alloc_end - 1, GFP_NOFS);
4931 btrfs_wait_ordered_range(inode, alloc_start,
4932 alloc_end - alloc_start);
4933 } else {
4934 if (ordered)
4935 btrfs_put_ordered_extent(ordered);
4936 break;
4940 cur_offset = alloc_start;
4941 while (1) {
4942 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4943 alloc_end - cur_offset, 0);
4944 BUG_ON(IS_ERR(em) || !em);
4945 last_byte = min(extent_map_end(em), alloc_end);
4946 last_byte = (last_byte + mask) & ~mask;
4947 if (em->block_start == EXTENT_MAP_HOLE) {
4948 ret = prealloc_file_range(inode, cur_offset,
4949 last_byte, alloc_hint, mode);
4950 if (ret < 0) {
4951 free_extent_map(em);
4952 break;
4955 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
4956 alloc_hint = em->block_start;
4957 free_extent_map(em);
4959 cur_offset = last_byte;
4960 if (cur_offset >= alloc_end) {
4961 ret = 0;
4962 break;
4965 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
4966 GFP_NOFS);
4967 out:
4968 mutex_unlock(&inode->i_mutex);
4969 return ret;
4970 }
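/*
 * Worked example (illustrative): with 4096 byte sectors, mask is 4095
 * and fallocate(fd, 0, 5000, 3000) works on the aligned range
 *
 *   alloc_start = 5000 & ~4095                 = 4096
 *   alloc_end   = (5000 + 3000 + 4095) & ~4095 = 8192
 *
 * i.e. whole sectors covering bytes [4096, 8192).
 */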
4972 static int btrfs_set_page_dirty(struct page *page)
4974 return __set_page_dirty_nobuffers(page);
4977 static int btrfs_permission(struct inode *inode, int mask)
4979 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
4980 return -EACCES;
4981 return generic_permission(inode, mask, btrfs_check_acl);
4984 static struct inode_operations btrfs_dir_inode_operations = {
4985 .getattr = btrfs_getattr,
4986 .lookup = btrfs_lookup,
4987 .create = btrfs_create,
4988 .unlink = btrfs_unlink,
4989 .link = btrfs_link,
4990 .mkdir = btrfs_mkdir,
4991 .rmdir = btrfs_rmdir,
4992 .rename = btrfs_rename,
4993 .symlink = btrfs_symlink,
4994 .setattr = btrfs_setattr,
4995 .mknod = btrfs_mknod,
4996 .setxattr = btrfs_setxattr,
4997 .getxattr = btrfs_getxattr,
4998 .listxattr = btrfs_listxattr,
4999 .removexattr = btrfs_removexattr,
5000 .permission = btrfs_permission,
5001 };
5002 static struct inode_operations btrfs_dir_ro_inode_operations = {
5003 .lookup = btrfs_lookup,
5004 .permission = btrfs_permission,
5005 };
5006 static struct file_operations btrfs_dir_file_operations = {
5007 .llseek = generic_file_llseek,
5008 .read = generic_read_dir,
5009 .readdir = btrfs_real_readdir,
5010 .unlocked_ioctl = btrfs_ioctl,
5011 #ifdef CONFIG_COMPAT
5012 .compat_ioctl = btrfs_ioctl,
5013 #endif
5014 .release = btrfs_release_file,
5015 .fsync = btrfs_sync_file,
5016 };
5018 static struct extent_io_ops btrfs_extent_io_ops = {
5019 .fill_delalloc = run_delalloc_range,
5020 .submit_bio_hook = btrfs_submit_bio_hook,
5021 .merge_bio_hook = btrfs_merge_bio_hook,
5022 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
5023 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
5024 .writepage_start_hook = btrfs_writepage_start_hook,
5025 .readpage_io_failed_hook = btrfs_io_failed_hook,
5026 .set_bit_hook = btrfs_set_bit_hook,
5027 .clear_bit_hook = btrfs_clear_bit_hook,
5028 };
5030 /*
5031 * btrfs doesn't support the bmap operation because swapfiles
5032 * use bmap to make a mapping of extents in the file. They assume
5033 * these extents won't change over the life of the file and they
5034 * use the bmap result to do IO directly to the drive.
5036 * the btrfs bmap call would return logical addresses that aren't
5037 * suitable for IO and they also will change frequently as COW
5038 * operations happen. So, swapfile + btrfs == corruption.
5040 * For now we're avoiding this by dropping bmap.
5041 */
5042 static struct address_space_operations btrfs_aops = {
5043 .readpage = btrfs_readpage,
5044 .writepage = btrfs_writepage,
5045 .writepages = btrfs_writepages,
5046 .readpages = btrfs_readpages,
5047 .sync_page = block_sync_page,
5048 .direct_IO = btrfs_direct_IO,
5049 .invalidatepage = btrfs_invalidatepage,
5050 .releasepage = btrfs_releasepage,
5051 .set_page_dirty = btrfs_set_page_dirty,
5052 };
5054 static struct address_space_operations btrfs_symlink_aops = {
5055 .readpage = btrfs_readpage,
5056 .writepage = btrfs_writepage,
5057 .invalidatepage = btrfs_invalidatepage,
5058 .releasepage = btrfs_releasepage,
5059 };
5061 static struct inode_operations btrfs_file_inode_operations = {
5062 .truncate = btrfs_truncate,
5063 .getattr = btrfs_getattr,
5064 .setattr = btrfs_setattr,
5065 .setxattr = btrfs_setxattr,
5066 .getxattr = btrfs_getxattr,
5067 .listxattr = btrfs_listxattr,
5068 .removexattr = btrfs_removexattr,
5069 .permission = btrfs_permission,
5070 .fallocate = btrfs_fallocate,
5071 .fiemap = btrfs_fiemap,
5072 };
5073 static struct inode_operations btrfs_special_inode_operations = {
5074 .getattr = btrfs_getattr,
5075 .setattr = btrfs_setattr,
5076 .permission = btrfs_permission,
5077 .setxattr = btrfs_setxattr,
5078 .getxattr = btrfs_getxattr,
5079 .listxattr = btrfs_listxattr,
5080 .removexattr = btrfs_removexattr,
5081 };
5082 static struct inode_operations btrfs_symlink_inode_operations = {
5083 .readlink = generic_readlink,
5084 .follow_link = page_follow_link_light,
5085 .put_link = page_put_link,
5086 .permission = btrfs_permission,
5087 .setxattr = btrfs_setxattr,
5088 .getxattr = btrfs_getxattr,
5089 .listxattr = btrfs_listxattr,
5090 .removexattr = btrfs_removexattr,
5091 };