fs/btrfs/inode.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
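/* map the S_IFMT bits of an inode mode to the file type stored in dir items */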
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
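/*
 * set up security attributes for a freshly created inode: inherit any
 * ACLs from the parent directory and initialize the security xattr.
 */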
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir);
	return err;
}
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	BUG_ON(ret);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (use_compress) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);

	return 0;
fail:
	btrfs_free_path(path);
	return err;
}
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}
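/*
 * one unit of work queued up by compress_file_range: the byte range it
 * covers and, when compression succeeded, the compressed pages waiting
 * to be written.  A NULL pages array means the range goes down
 * uncompressed.
 */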
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	struct list_head list;
};
struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};
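/*
 * add one async_extent to the list hanging off this async_cow.  A
 * compressed_size of zero queues the range for uncompressed writeback.
 */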
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 orig_start;
	u64 disk_num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;

	orig_start = start;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in,
						&total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(!trans);
		btrfs_set_trans_block_group(trans, inode);

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed, pages);
		}
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_ACCOUNTING |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			disk_num_bytes = total_compressed;
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret);

		if (start + num_bytes < end && start + num_bytes < actual_end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);

		trans = btrfs_join_transaction(root, 1);
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		btrfs_end_transaction(trans, root);

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent(inode, async_extent->start,
					       ins.objectid,
					       async_extent->ram_size,
					       ins.offset,
					       BTRFS_ORDERED_COMPRESSED);
		BUG_ON(ret);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	return 0;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	actual_end = min_t(u64, isize, end + 1);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_ACCOUNTING |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	read_lock(&BTRFS_I(inode)->extent_tree.lock);
	em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
				   start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&BTRFS_I(inode)->extent_tree.lock);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}
/*
 * work queue callback to start compression on a file and its pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}
/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}
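/* ordered_free callback: all work for this range is done, free the async_cow */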
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}
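/*
 * split the delalloc range into 512k chunks, queue each chunk to the
 * delalloc worker threads and throttle the submitter once too many
 * async pages are in flight.
 */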
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
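/*
 * return 1 if checksums are recorded anywhere in the given byte range,
 * 0 if none exist.
 */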
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}
/*
 * run when the nocow writeback path is used.  This checks for snapshots
 * or COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either all valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}
/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS))
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}
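/*
 * extent_io.c split_extent_hook: when a large delalloc extent is split,
 * make sure the number of outstanding extents we have reserved metadata
 * for still matches reality.
 */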
static int btrfs_split_extent_hook(struct inode *inode,
				   struct extent_state *orig, u64 split)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 size;

	if (!(orig->state & EXTENT_DELALLOC))
		return 0;

	size = orig->end - orig->start + 1;
	if (size > root->fs_info->max_extent) {
		u64 num_extents;
		u64 new_size;

		new_size = orig->end - split + 1;
		num_extents = div64_u64(size + root->fs_info->max_extent - 1,
					root->fs_info->max_extent);

		/*
		 * if we break a large extent up then leave outstanding_extents
		 * be, since we've already accounted for the large extent.
		 */
		if (div64_u64(new_size + root->fs_info->max_extent - 1,
			      root->fs_info->max_extent) < num_extents)
			return 0;
	}

	spin_lock(&BTRFS_I(inode)->accounting_lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	return 0;
}
/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static int btrfs_merge_extent_hook(struct inode *inode,
				   struct extent_state *new,
				   struct extent_state *other)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 new_size, old_size;
	u64 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return 0;

	old_size = other->end - other->start + 1;
	if (new->start < other->start)
		new_size = other->end - new->start + 1;
	else
		new_size = new->end - other->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= root->fs_info->max_extent) {
		spin_lock(&BTRFS_I(inode)->accounting_lock);
		BTRFS_I(inode)->outstanding_extents--;
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		return 0;
	}

	/*
	 * If we grew by another max_extent, just return, we want to keep that
	 * reserved amount.
	 */
	num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
				root->fs_info->max_extent);
	if (div64_u64(new_size + root->fs_info->max_extent - 1,
		      root->fs_info->max_extent) > num_extents)
		return 0;

	spin_lock(&BTRFS_I(inode)->accounting_lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	return 0;
}
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
			      unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock(&BTRFS_I(inode)->accounting_lock);
		BTRFS_I(inode)->outstanding_extents++;
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		btrfs_delalloc_reserve_space(root, inode, end - start + 1);
		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode,
				struct extent_state *state, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		if (bits & EXTENT_DO_ACCOUNTING) {
			spin_lock(&BTRFS_I(inode)->accounting_lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->accounting_lock);
			btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		if (state->end - state->start + 1 >
		    root->fs_info->delalloc_bytes) {
			printk(KERN_INFO "btrfs warning: delalloc account "
			       "%llu %llu\n",
			       (unsigned long long)
			       state->end - state->start + 1,
			       (unsigned long long)
			       root->fs_info->delalloc_bytes);
			btrfs_delalloc_free_space(root, inode, (u64)-1);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			btrfs_delalloc_free_space(root, inode,
						  state->end -
						  state->start + 1);
			root->fs_info->delalloc_bytes -= state->end -
				state->start + 1;
			BTRFS_I(inode)->delalloc_bytes -= state->end -
				state->start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}
/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & (1 << BIO_RW))) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio, NULL);
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}
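/*
 * mark a range of the file delalloc in the io_tree.  The end offset is
 * expected to be the last byte of a page, so a page-aligned end triggers
 * the WARN_ON.
 */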
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};
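/*
 * worker side of the writepage fixup: wait out any ordered extent that
 * still covers the page, then flag the range delalloc again so a later
 * writepage does the full COW and ordered setup.
 */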
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}
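/*
 * insert the on-disk file extent item for an extent whose IO has
 * finished: drop anything already in the range, record the new item
 * and add the extent tree backref for it.
 */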
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					inode->i_ino, file_pos, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);

	return 0;
}
/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
static noinline void reada_csum(struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_ordered_sum *sum;
	u64 bytenr;

	sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
			 list);
	bytenr = sum->sums[0].bytenr;

	/*
	 * we don't care about the results, the point of this search is
	 * just to get the btree leaves into ram
	 */
	btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
}
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	/*
	 * before we join the transaction, try to do some of our IO.
	 * This will limit the amount of IO that we have to do with
	 * the transaction running.  We're unlikely to need to do any
	 * IO if the file extents are new, the disk_i_size checks
	 * covers the most common case.
	 */
	if (start < BTRFS_I(inode)->disk_i_size) {
		path = btrfs_alloc_path();
		if (path) {
			ret = btrfs_lookup_file_extent(NULL, root, path,
						       inode->i_ino,
						       start, 0);
			ordered_extent = btrfs_lookup_ordered_extent(inode,
								     start);
			if (!list_empty(&ordered_extent->list)) {
				btrfs_release_path(root, path);
				reada_csum(root, path, ordered_extent);
			}
			btrfs_free_path(path);
		}
	}

	if (!ordered_extent)
		ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list));
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			trans = btrfs_join_transaction(root, 1);
			ret = btrfs_update_inode(trans, root, inode);
			BUG_ON(ret);
			btrfs_end_transaction(trans, root);
		}
		goto out;
	}

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	trans = btrfs_join_transaction(root, 1);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compressed);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
		BUG_ON(ret);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	/* this also removes the ordered extent from the tree */
	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);
	btrfs_end_transaction(trans, root);
out:
	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return 0;
}
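/*
 * extent_io.c hook that runs as writeback completes for a page: finish
 * the ordered extent covering this range.
 */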
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};
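/*
 * called when a read bio fails: record (or look up) the failure, pick
 * the next mirror to try and resubmit the bio against it, giving up
 * with -EIO once every copy has been tried.
 */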
1830 static int btrfs_io_failed_hook(struct bio *failed_bio,
1831 struct page *page, u64 start, u64 end,
1832 struct extent_state *state)
1834 struct io_failure_record *failrec = NULL;
1835 u64 private;
1836 struct extent_map *em;
1837 struct inode *inode = page->mapping->host;
1838 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1839 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1840 struct bio *bio;
1841 int num_copies;
1842 int ret;
1843 int rw;
1844 u64 logical;
1846 ret = get_state_private(failure_tree, start, &private);
1847 if (ret) {
1848 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1849 if (!failrec)
1850 return -ENOMEM;
1851 failrec->start = start;
1852 failrec->len = end - start + 1;
1853 failrec->last_mirror = 0;
1854 failrec->bio_flags = 0;
1856 read_lock(&em_tree->lock);
1857 em = lookup_extent_mapping(em_tree, start, failrec->len);
1858 if (em->start > start || em->start + em->len < start) {
1859 free_extent_map(em);
1860 em = NULL;
1862 read_unlock(&em_tree->lock);
1864 if (!em || IS_ERR(em)) {
1865 kfree(failrec);
1866 return -EIO;
1868 logical = start - em->start;
1869 logical = em->block_start + logical;
1870 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1871 logical = em->block_start;
1872 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1874 failrec->logical = logical;
1875 free_extent_map(em);
1876 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1877 EXTENT_DIRTY, GFP_NOFS);
1878 set_state_private(failure_tree, start,
1879 (u64)(unsigned long)failrec);
1880 } else {
1881 failrec = (struct io_failure_record *)(unsigned long)private;
1883 num_copies = btrfs_num_copies(
1884 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1885 failrec->logical, failrec->len);
1886 failrec->last_mirror++;
1887 if (!state) {
1888 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1889 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1890 failrec->start,
1891 EXTENT_LOCKED);
1892 if (state && state->start != failrec->start)
1893 state = NULL;
1894 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1896 if (!state || failrec->last_mirror > num_copies) {
1897 set_state_private(failure_tree, failrec->start, 0);
1898 clear_extent_bits(failure_tree, failrec->start,
1899 failrec->start + failrec->len - 1,
1900 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1901 kfree(failrec);
1902 return -EIO;
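/*
 * Build a fresh one-page bio aimed at the next mirror.  Note that
 * bi_sector is in 512-byte units, hence the >> 9 applied to the
 * byte-granular logical offset below.
 */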
1904 bio = bio_alloc(GFP_NOFS, 1);
1905 bio->bi_private = state;
1906 bio->bi_end_io = failed_bio->bi_end_io;
1907 bio->bi_sector = failrec->logical >> 9;
1908 bio->bi_bdev = failed_bio->bi_bdev;
1909 bio->bi_size = 0;
1911 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1912 if (failed_bio->bi_rw & (1 << BIO_RW))
1913 rw = WRITE;
1914 else
1915 rw = READ;
1917 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1918 failrec->last_mirror,
1919 failrec->bio_flags);
1920 return 0;
1924 * each time an IO finishes, we do a fast check in the IO failure tree
1925 * to see if we need to process or clean up an io_failure_record
1927 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1929 u64 private;
1930 u64 private_failure;
1931 struct io_failure_record *failure;
1932 int ret;
1934 private = 0;
1935 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1936 (u64)-1, 1, EXTENT_DIRTY)) {
1937 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1938 start, &private_failure);
1939 if (ret == 0) {
1940 failure = (struct io_failure_record *)(unsigned long)
1941 private_failure;
1942 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1943 failure->start, 0);
1944 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1945 failure->start,
1946 failure->start + failure->len - 1,
1947 EXTENT_DIRTY | EXTENT_LOCKED,
1948 GFP_NOFS);
1949 kfree(failure);
1952 return 0;
1956 * when reads are done, we need to check csums to verify the data is correct.
1957 * If there's a match, we allow the bio to finish. If not, we go through
1958 * the io_failure_record routines to find good copies
1960 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1961 struct extent_state *state)
1963 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1964 struct inode *inode = page->mapping->host;
1965 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1966 char *kaddr;
1967 u64 private = ~(u32)0;
1968 int ret;
1969 struct btrfs_root *root = BTRFS_I(inode)->root;
1970 u32 csum = ~(u32)0;
1972 if (PageChecked(page)) {
1973 ClearPageChecked(page);
1974 goto good;
1977 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1978 return 0;
1980 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1981 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1982 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1983 GFP_NOFS);
1984 return 0;
1987 if (state && state->start == start) {
1988 private = state->private;
1989 ret = 0;
1990 } else {
1991 ret = get_state_private(io_tree, start, &private);
1993 kaddr = kmap_atomic(page, KM_USER0);
1994 if (ret)
1995 goto zeroit;
1997 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1998 btrfs_csum_final(csum, (char *)&csum);
1999 if (csum != private)
2000 goto zeroit;
2002 kunmap_atomic(kaddr, KM_USER0);
2003 good:
2004 /* if the io failure tree for this inode is non-empty,
2005 * check to see if we've recovered from a failed IO
2007 btrfs_clean_io_failures(inode, start);
2008 return 0;
2010 zeroit:
2011 if (printk_ratelimit()) {
2012 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
2013 "private %llu\n", page->mapping->host->i_ino,
2014 (unsigned long long)start, csum,
2015 (unsigned long long)private);
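/*
 * Poison the bad range so stale disk contents are never exposed to
 * userspace.  The choice of the byte value 1 looks arbitrary; any
 * constant fill would serve the same purpose here.
 */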
2017 memset(kaddr + offset, 1, end - start + 1);
2018 flush_dcache_page(page);
2019 kunmap_atomic(kaddr, KM_USER0);
2020 if (private == 0)
2021 return 0;
2022 return -EIO;
2025 struct delayed_iput {
2026 struct list_head list;
2027 struct inode *inode;
2030 void btrfs_add_delayed_iput(struct inode *inode)
2032 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2033 struct delayed_iput *delayed;
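/*
 * atomic_add_unless() drops our reference only when it is not the
 * last one.  If it is the last reference, the final iput could
 * trigger inode eviction (and thus transaction work) in a context
 * where that is unsafe, so the iput is queued and run later from
 * btrfs_run_delayed_iputs() instead.
 */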
2035 if (atomic_add_unless(&inode->i_count, -1, 1))
2036 return;
2038 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2039 delayed->inode = inode;
2041 spin_lock(&fs_info->delayed_iput_lock);
2042 list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2043 spin_unlock(&fs_info->delayed_iput_lock);
2046 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2048 LIST_HEAD(list);
2049 struct btrfs_fs_info *fs_info = root->fs_info;
2050 struct delayed_iput *delayed;
2051 int empty;
2053 spin_lock(&fs_info->delayed_iput_lock);
2054 empty = list_empty(&fs_info->delayed_iputs);
2055 spin_unlock(&fs_info->delayed_iput_lock);
2056 if (empty)
2057 return;
2059 down_read(&root->fs_info->cleanup_work_sem);
2060 spin_lock(&fs_info->delayed_iput_lock);
2061 list_splice_init(&fs_info->delayed_iputs, &list);
2062 spin_unlock(&fs_info->delayed_iput_lock);
2064 while (!list_empty(&list)) {
2065 delayed = list_entry(list.next, struct delayed_iput, list);
2066 list_del(&delayed->list);
2067 iput(delayed->inode);
2068 kfree(delayed);
2070 up_read(&root->fs_info->cleanup_work_sem);
2074 * This creates an orphan entry for the given inode in case something goes
2075 * wrong in the middle of an unlink/truncate.
2077 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2079 struct btrfs_root *root = BTRFS_I(inode)->root;
2080 int ret = 0;
2082 spin_lock(&root->list_lock);
2084 /* already on the orphan list, we're good */
2085 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2086 spin_unlock(&root->list_lock);
2087 return 0;
2090 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2092 spin_unlock(&root->list_lock);
2095 * insert an orphan item to track this unlinked/truncated file
2097 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2099 return ret;
2103 * We have done the truncate/delete so we can go ahead and remove the orphan
2104 * item for this particular inode.
2106 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2108 struct btrfs_root *root = BTRFS_I(inode)->root;
2109 int ret = 0;
2111 spin_lock(&root->list_lock);
2113 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2114 spin_unlock(&root->list_lock);
2115 return 0;
2118 list_del_init(&BTRFS_I(inode)->i_orphan);
2119 if (!trans) {
2120 spin_unlock(&root->list_lock);
2121 return 0;
2124 spin_unlock(&root->list_lock);
2126 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2128 return ret;
2132 * this cleans up any orphans that may be left on the list from the last use
2133 * of this root.
2135 void btrfs_orphan_cleanup(struct btrfs_root *root)
2137 struct btrfs_path *path;
2138 struct extent_buffer *leaf;
2139 struct btrfs_item *item;
2140 struct btrfs_key key, found_key;
2141 struct btrfs_trans_handle *trans;
2142 struct inode *inode;
2143 int ret = 0, nr_unlink = 0, nr_truncate = 0;
2145 if (!xchg(&root->clean_orphans, 0))
2146 return;
2148 path = btrfs_alloc_path();
2149 BUG_ON(!path);
2150 path->reada = -1;
2152 key.objectid = BTRFS_ORPHAN_OBJECTID;
2153 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2154 key.offset = (u64)-1;
2156 while (1) {
2157 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2158 if (ret < 0) {
2159 printk(KERN_ERR "Error searching slot for orphan: %d"
2160 "\n", ret);
2161 break;
2165 * ret == 0 means we found what we were searching for, which is
2166 * weird, but possible, so only adjust the path if we didn't
2167 * find the key, and see if we have entries that match
2169 if (ret > 0) {
2170 if (path->slots[0] == 0)
2171 break;
2172 path->slots[0]--;
2175 /* pull out the item */
2176 leaf = path->nodes[0];
2177 item = btrfs_item_nr(leaf, path->slots[0]);
2178 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2180 /* make sure the item matches what we want */
2181 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2182 break;
2183 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2184 break;
2186 /* release the path since we're done with it */
2187 btrfs_release_path(root, path);
2190 * this is basically btrfs_lookup, without the root-crossing
2191 * part. we store the inode number in the offset of the
2192 * orphan item.
2194 found_key.objectid = found_key.offset;
2195 found_key.type = BTRFS_INODE_ITEM_KEY;
2196 found_key.offset = 0;
2197 inode = btrfs_iget(root->fs_info->sb, &found_key, root);
2198 if (IS_ERR(inode))
2199 break;
2202 * add this inode to the orphan list so btrfs_orphan_del does
2203 * the proper thing when we hit it
2205 spin_lock(&root->list_lock);
2206 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2207 spin_unlock(&root->list_lock);
2210 * if this is a bad inode, it means we actually succeeded in
2211 * removing the inode but not the orphan record, so we need
2212 * to manually delete the orphan item since iput will just
2213 * do a destroy_inode
2215 if (is_bad_inode(inode)) {
2216 trans = btrfs_start_transaction(root, 1);
2217 btrfs_orphan_del(trans, inode);
2218 btrfs_end_transaction(trans, root);
2219 iput(inode);
2220 continue;
2223 /* if we have links, this was a truncate, let's do that */
2224 if (inode->i_nlink) {
2225 nr_truncate++;
2226 btrfs_truncate(inode);
2227 } else {
2228 nr_unlink++;
2231 /* this will do delete_inode and everything for us */
2232 iput(inode);
2235 if (nr_unlink)
2236 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2237 if (nr_truncate)
2238 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2240 btrfs_free_path(path);
2244 * very simple check to peek ahead in the leaf looking for xattrs. If we
2245 * don't find any xattrs, we know there can't be any acls.
2247 * slot is the slot the inode is in, objectid is the objectid of the inode
2249 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2250 int slot, u64 objectid)
2252 u32 nritems = btrfs_header_nritems(leaf);
2253 struct btrfs_key found_key;
2254 int scanned = 0;
2256 slot++;
2257 while (slot < nritems) {
2258 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2260 /* we found a different objectid, there must not be acls */
2261 if (found_key.objectid != objectid)
2262 return 0;
2264 /* we found an xattr, assume we've got an acl */
2265 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2266 return 1;
2269 * we found a key greater than an xattr key, there can't
2270 * be any acls later on
2272 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2273 return 0;
2275 slot++;
2276 scanned++;
2279 * it goes inode, inode backrefs, xattrs, extents,
2280 * so if there are a ton of hard links to an inode there can
2281 * be a lot of backrefs. Don't waste time searching too hard,
2282 * this is just an optimization
2284 if (scanned >= 8)
2285 break;
2287 /* we hit the end of the leaf before we found an xattr or
2288 * something larger than an xattr. We have to assume the inode
2289 * has acls
2291 return 1;
2295 * read an inode from the btree into the in-memory inode
2297 static void btrfs_read_locked_inode(struct inode *inode)
2299 struct btrfs_path *path;
2300 struct extent_buffer *leaf;
2301 struct btrfs_inode_item *inode_item;
2302 struct btrfs_timespec *tspec;
2303 struct btrfs_root *root = BTRFS_I(inode)->root;
2304 struct btrfs_key location;
2305 int maybe_acls;
2306 u64 alloc_group_block;
2307 u32 rdev;
2308 int ret;
2310 path = btrfs_alloc_path();
2311 BUG_ON(!path);
2312 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2314 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2315 if (ret)
2316 goto make_bad;
2318 leaf = path->nodes[0];
2319 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2320 struct btrfs_inode_item);
2322 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2323 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2324 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2325 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2326 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2328 tspec = btrfs_inode_atime(inode_item);
2329 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2330 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2332 tspec = btrfs_inode_mtime(inode_item);
2333 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2334 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2336 tspec = btrfs_inode_ctime(inode_item);
2337 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2338 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2340 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2341 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2342 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2343 inode->i_generation = BTRFS_I(inode)->generation;
2344 inode->i_rdev = 0;
2345 rdev = btrfs_inode_rdev(leaf, inode_item);
2347 BTRFS_I(inode)->index_cnt = (u64)-1;
2348 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2350 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2353 * try to precache a NULL acl entry for files that don't have
2354 * any xattrs or acls
2356 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2357 if (!maybe_acls)
2358 cache_no_acl(inode);
2360 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2361 alloc_group_block, 0);
2362 btrfs_free_path(path);
2363 inode_item = NULL;
2365 switch (inode->i_mode & S_IFMT) {
2366 case S_IFREG:
2367 inode->i_mapping->a_ops = &btrfs_aops;
2368 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2369 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2370 inode->i_fop = &btrfs_file_operations;
2371 inode->i_op = &btrfs_file_inode_operations;
2372 break;
2373 case S_IFDIR:
2374 inode->i_fop = &btrfs_dir_file_operations;
2375 if (root == root->fs_info->tree_root)
2376 inode->i_op = &btrfs_dir_ro_inode_operations;
2377 else
2378 inode->i_op = &btrfs_dir_inode_operations;
2379 break;
2380 case S_IFLNK:
2381 inode->i_op = &btrfs_symlink_inode_operations;
2382 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2383 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2384 break;
2385 default:
2386 inode->i_op = &btrfs_special_inode_operations;
2387 init_special_inode(inode, inode->i_mode, rdev);
2388 break;
2391 btrfs_update_iflags(inode);
2392 return;
2394 make_bad:
2395 btrfs_free_path(path);
2396 make_bad_inode(inode);
2400 * given a leaf and an inode, copy the inode fields into the leaf
2402 static void fill_inode_item(struct btrfs_trans_handle *trans,
2403 struct extent_buffer *leaf,
2404 struct btrfs_inode_item *item,
2405 struct inode *inode)
2407 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2408 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2409 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2410 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2411 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2413 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2414 inode->i_atime.tv_sec);
2415 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2416 inode->i_atime.tv_nsec);
2418 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2419 inode->i_mtime.tv_sec);
2420 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2421 inode->i_mtime.tv_nsec);
2423 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2424 inode->i_ctime.tv_sec);
2425 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2426 inode->i_ctime.tv_nsec);
2428 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2429 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2430 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2431 btrfs_set_inode_transid(leaf, item, trans->transid);
2432 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2433 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2434 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2438 * copy everything in the in-memory inode into the btree.
2440 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2441 struct btrfs_root *root, struct inode *inode)
2443 struct btrfs_inode_item *inode_item;
2444 struct btrfs_path *path;
2445 struct extent_buffer *leaf;
2446 int ret;
2448 path = btrfs_alloc_path();
2449 BUG_ON(!path);
2450 path->leave_spinning = 1;
2451 ret = btrfs_lookup_inode(trans, root, path,
2452 &BTRFS_I(inode)->location, 1);
2453 if (ret) {
2454 if (ret > 0)
2455 ret = -ENOENT;
2456 goto failed;
2459 btrfs_unlock_up_safe(path, 1);
2460 leaf = path->nodes[0];
2461 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2462 struct btrfs_inode_item);
2464 fill_inode_item(trans, leaf, inode_item, inode);
2465 btrfs_mark_buffer_dirty(leaf);
2466 btrfs_set_inode_last_trans(trans, inode);
2467 ret = 0;
2468 failed:
2469 btrfs_free_path(path);
2470 return ret;
2475 * unlink helper that gets used here in inode.c and in the tree logging
2476 * recovery code. It removes a link in a directory with a given name, and
2477 * also drops the back refs in the inode to the directory
2479 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2480 struct btrfs_root *root,
2481 struct inode *dir, struct inode *inode,
2482 const char *name, int name_len)
2484 struct btrfs_path *path;
2485 int ret = 0;
2486 struct extent_buffer *leaf;
2487 struct btrfs_dir_item *di;
2488 struct btrfs_key key;
2489 u64 index;
2491 path = btrfs_alloc_path();
2492 if (!path) {
2493 ret = -ENOMEM;
2494 goto err;
2497 path->leave_spinning = 1;
2498 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2499 name, name_len, -1);
2500 if (IS_ERR(di)) {
2501 ret = PTR_ERR(di);
2502 goto err;
2504 if (!di) {
2505 ret = -ENOENT;
2506 goto err;
2508 leaf = path->nodes[0];
2509 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2510 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2511 if (ret)
2512 goto err;
2513 btrfs_release_path(root, path);
2515 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2516 inode->i_ino,
2517 dir->i_ino, &index);
2518 if (ret) {
2519 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2520 "inode %lu parent %lu\n", name_len, name,
2521 inode->i_ino, dir->i_ino);
2522 goto err;
2525 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2526 index, name, name_len, -1);
2527 if (IS_ERR(di)) {
2528 ret = PTR_ERR(di);
2529 goto err;
2531 if (!di) {
2532 ret = -ENOENT;
2533 goto err;
2535 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2536 btrfs_release_path(root, path);
2538 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2539 inode, dir->i_ino);
2540 BUG_ON(ret != 0 && ret != -ENOENT);
2542 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2543 dir, index);
2544 BUG_ON(ret);
2545 err:
2546 btrfs_free_path(path);
2547 if (ret)
2548 goto out;
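/*
 * directory entries appear to be accounted twice in the dir's i_size,
 * once for the DIR_ITEM key and once for the DIR_INDEX key, hence the
 * name_len * 2 below.
 */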
2550 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2551 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2552 btrfs_update_inode(trans, root, dir);
2553 btrfs_drop_nlink(inode);
2554 ret = btrfs_update_inode(trans, root, inode);
2555 out:
2556 return ret;
2559 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2561 struct btrfs_root *root;
2562 struct btrfs_trans_handle *trans;
2563 struct inode *inode = dentry->d_inode;
2564 int ret;
2565 unsigned long nr = 0;
2567 root = BTRFS_I(dir)->root;
2570 * 5 items for unlink inode
2571 * 1 for orphan
2573 ret = btrfs_reserve_metadata_space(root, 6);
2574 if (ret)
2575 return ret;
2577 trans = btrfs_start_transaction(root, 1);
2578 if (IS_ERR(trans)) {
2579 btrfs_unreserve_metadata_space(root, 6);
2580 return PTR_ERR(trans);
2583 btrfs_set_trans_block_group(trans, dir);
2585 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2587 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2588 dentry->d_name.name, dentry->d_name.len);
2590 if (inode->i_nlink == 0)
2591 ret = btrfs_orphan_add(trans, inode);
2593 nr = trans->blocks_used;
2595 btrfs_end_transaction_throttle(trans, root);
2596 btrfs_unreserve_metadata_space(root, 6);
2597 btrfs_btree_balance_dirty(root, nr);
2598 return ret;
2601 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2602 struct btrfs_root *root,
2603 struct inode *dir, u64 objectid,
2604 const char *name, int name_len)
2606 struct btrfs_path *path;
2607 struct extent_buffer *leaf;
2608 struct btrfs_dir_item *di;
2609 struct btrfs_key key;
2610 u64 index;
2611 int ret;
2613 path = btrfs_alloc_path();
2614 if (!path)
2615 return -ENOMEM;
2617 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2618 name, name_len, -1);
2619 BUG_ON(!di || IS_ERR(di));
2621 leaf = path->nodes[0];
2622 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2623 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2624 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2625 BUG_ON(ret);
2626 btrfs_release_path(root, path);
2628 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2629 objectid, root->root_key.objectid,
2630 dir->i_ino, &index, name, name_len);
2631 if (ret < 0) {
2632 BUG_ON(ret != -ENOENT);
2633 di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2634 name, name_len);
2635 BUG_ON(!di || IS_ERR(di));
2637 leaf = path->nodes[0];
2638 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2639 btrfs_release_path(root, path);
2640 index = key.offset;
2643 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2644 index, name, name_len, -1);
2645 BUG_ON(!di || IS_ERR(di));
2647 leaf = path->nodes[0];
2648 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2649 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2650 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2651 BUG_ON(ret);
2652 btrfs_release_path(root, path);
2654 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2655 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2656 ret = btrfs_update_inode(trans, root, dir);
2657 BUG_ON(ret);
2658 dir->i_sb->s_dirt = 1;
2660 btrfs_free_path(path);
2661 return 0;
2664 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2666 struct inode *inode = dentry->d_inode;
2667 int err = 0;
2668 int ret;
2669 struct btrfs_root *root = BTRFS_I(dir)->root;
2670 struct btrfs_trans_handle *trans;
2671 unsigned long nr = 0;
2673 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2674 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2675 return -ENOTEMPTY;
2677 ret = btrfs_reserve_metadata_space(root, 5);
2678 if (ret)
2679 return ret;
2681 trans = btrfs_start_transaction(root, 1);
2682 if (IS_ERR(trans)) {
2683 btrfs_unreserve_metadata_space(root, 5);
2684 return PTR_ERR(trans);
2687 btrfs_set_trans_block_group(trans, dir);
2689 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2690 err = btrfs_unlink_subvol(trans, root, dir,
2691 BTRFS_I(inode)->location.objectid,
2692 dentry->d_name.name,
2693 dentry->d_name.len);
2694 goto out;
2697 err = btrfs_orphan_add(trans, inode);
2698 if (err)
2699 goto out;
2701 /* now the directory is empty */
2702 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2703 dentry->d_name.name, dentry->d_name.len);
2704 if (!err)
2705 btrfs_i_size_write(inode, 0);
2706 out:
2707 nr = trans->blocks_used;
2708 ret = btrfs_end_transaction_throttle(trans, root);
2709 btrfs_unreserve_metadata_space(root, 5);
2710 btrfs_btree_balance_dirty(root, nr);
2712 if (ret && !err)
2713 err = ret;
2714 return err;
2717 #if 0
2719 * when truncating bytes in a file, it is possible to avoid reading
2720 * the leaves that contain only checksum items. This can be the
2721 * majority of the IO required to delete a large file, but it must
2722 * be done carefully.
2724 * The keys in the level just above the leaves are checked to make sure
2725 * the lowest key in a given leaf is a csum key, and that it starts at an
2726 * after the new size.
2728 * Then the key for the next leaf is checked to make sure it also has
2729 * a checksum item for the same file. If it does, we know our target leaf
2730 * contains only checksum items, and it can be safely freed without reading
2731 * it.
2733 * This is just an optimization targeted at large files. It may do
2734 * nothing. It will return 0 unless things went badly.
2736 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2737 struct btrfs_root *root,
2738 struct btrfs_path *path,
2739 struct inode *inode, u64 new_size)
2741 struct btrfs_key key;
2742 int ret;
2743 int nritems;
2744 struct btrfs_key found_key;
2745 struct btrfs_key other_key;
2746 struct btrfs_leaf_ref *ref;
2747 u64 leaf_gen;
2748 u64 leaf_start;
2750 path->lowest_level = 1;
2751 key.objectid = inode->i_ino;
2752 key.type = BTRFS_CSUM_ITEM_KEY;
2753 key.offset = new_size;
2754 again:
2755 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2756 if (ret < 0)
2757 goto out;
2759 if (path->nodes[1] == NULL) {
2760 ret = 0;
2761 goto out;
2763 ret = 0;
2764 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2765 nritems = btrfs_header_nritems(path->nodes[1]);
2767 if (!nritems)
2768 goto out;
2770 if (path->slots[1] >= nritems)
2771 goto next_node;
2773 /* did we find a key greater than anything we want to delete? */
2774 if (found_key.objectid > inode->i_ino ||
2775 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2776 goto out;
2778 /* we check the next key in the node to make sure the leaf contains
2779 * only checksum items. This comparison doesn't work if our
2780 * leaf is the last one in the node
2782 if (path->slots[1] + 1 >= nritems) {
2783 next_node:
2784 /* search forward from the last key in the node, this
2785 * will bring us into the next node in the tree
2787 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2789 /* unlikely, but we inc below, so check to be safe */
2790 if (found_key.offset == (u64)-1)
2791 goto out;
2793 /* search_forward needs a path with locks held, do the
2794 * search again for the original key. It is possible
2795 * this will race with a balance and return a path that
2796 * we could modify, but this drop is just an optimization
2797 * and is allowed to miss some leaves.
2799 btrfs_release_path(root, path);
2800 found_key.offset++;
2802 /* setup a max key for search_forward */
2803 other_key.offset = (u64)-1;
2804 other_key.type = key.type;
2805 other_key.objectid = key.objectid;
2807 path->keep_locks = 1;
2808 ret = btrfs_search_forward(root, &found_key, &other_key,
2809 path, 0, 0);
2810 path->keep_locks = 0;
2811 if (ret || found_key.objectid != key.objectid ||
2812 found_key.type != key.type) {
2813 ret = 0;
2814 goto out;
2817 key.offset = found_key.offset;
2818 btrfs_release_path(root, path);
2819 cond_resched();
2820 goto again;
2823 /* we know there's one more slot after us in the tree,
2824 * read that key so we can verify it is also a checksum item
2826 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2828 if (found_key.objectid < inode->i_ino)
2829 goto next_key;
2831 if (found_key.type != key.type || found_key.offset < new_size)
2832 goto next_key;
2835 * if the key for the next leaf isn't a csum key from this objectid,
2836 * we can't be sure there aren't good items inside this leaf.
2837 * Bail out
2839 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2840 goto out;
2842 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2843 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2845 * it is safe to delete this leaf, it contains only
2846 * csum items from this inode at an offset >= new_size
2848 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2849 BUG_ON(ret);
2851 if (root->ref_cows && leaf_gen < trans->transid) {
2852 ref = btrfs_alloc_leaf_ref(root, 0);
2853 if (ref) {
2854 ref->root_gen = root->root_key.offset;
2855 ref->bytenr = leaf_start;
2856 ref->owner = 0;
2857 ref->generation = leaf_gen;
2858 ref->nritems = 0;
2860 btrfs_sort_leaf_ref(ref);
2862 ret = btrfs_add_leaf_ref(root, ref, 0);
2863 WARN_ON(ret);
2864 btrfs_free_leaf_ref(root, ref);
2865 } else {
2866 WARN_ON(1);
2869 next_key:
2870 btrfs_release_path(root, path);
2872 if (other_key.objectid == inode->i_ino &&
2873 other_key.type == key.type && other_key.offset > key.offset) {
2874 key.offset = other_key.offset;
2875 cond_resched();
2876 goto again;
2878 ret = 0;
2879 out:
2880 /* fixup any changes we've made to the path */
2881 path->lowest_level = 0;
2882 path->keep_locks = 0;
2883 btrfs_release_path(root, path);
2884 return ret;
2887 #endif
2890 * this can truncate away extent items, csum items and directory items.
2891 * It starts at a high offset and removes keys until it can't find
2892 * any higher than new_size
2894 * csum items that cross the new i_size are truncated to the new size
2895 * as well.
2897 * min_type is the minimum key type to truncate down to. If set to 0, this
2898 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2900 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2901 struct btrfs_root *root,
2902 struct inode *inode,
2903 u64 new_size, u32 min_type)
2905 struct btrfs_path *path;
2906 struct extent_buffer *leaf;
2907 struct btrfs_file_extent_item *fi;
2908 struct btrfs_key key;
2909 struct btrfs_key found_key;
2910 u64 extent_start = 0;
2911 u64 extent_num_bytes = 0;
2912 u64 extent_offset = 0;
2913 u64 item_end = 0;
2914 u64 mask = root->sectorsize - 1;
2915 u32 found_type = (u8)-1;
2916 int found_extent;
2917 int del_item;
2918 int pending_del_nr = 0;
2919 int pending_del_slot = 0;
2920 int extent_type = -1;
2921 int encoding;
2922 int ret;
2923 int err = 0;
2925 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
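/*
 * Worked example of the sector mask (illustrative numbers): with a 4K
 * sectorsize, mask = 0xfff, so new_size & ~mask rounds new_size down
 * to a sector boundary, e.g. 5000 -> 4096.  The cached extent mappings
 * below are dropped from that boundary to the end of the file.
 */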
2927 if (root->ref_cows)
2928 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2930 path = btrfs_alloc_path();
2931 BUG_ON(!path);
2932 path->reada = -1;
2934 key.objectid = inode->i_ino;
2935 key.offset = (u64)-1;
2936 key.type = (u8)-1;
2938 search_again:
2939 path->leave_spinning = 1;
2940 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2941 if (ret < 0) {
2942 err = ret;
2943 goto out;
2946 if (ret > 0) {
2947 /* there are no items in the tree for us to truncate, we're
2948 * done
2950 if (path->slots[0] == 0)
2951 goto out;
2952 path->slots[0]--;
2955 while (1) {
2956 fi = NULL;
2957 leaf = path->nodes[0];
2958 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2959 found_type = btrfs_key_type(&found_key);
2960 encoding = 0;
2962 if (found_key.objectid != inode->i_ino)
2963 break;
2965 if (found_type < min_type)
2966 break;
2968 item_end = found_key.offset;
2969 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2970 fi = btrfs_item_ptr(leaf, path->slots[0],
2971 struct btrfs_file_extent_item);
2972 extent_type = btrfs_file_extent_type(leaf, fi);
2973 encoding = btrfs_file_extent_compression(leaf, fi);
2974 encoding |= btrfs_file_extent_encryption(leaf, fi);
2975 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2977 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2978 item_end +=
2979 btrfs_file_extent_num_bytes(leaf, fi);
2980 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2981 item_end += btrfs_file_extent_inline_len(leaf,
2982 fi);
2984 item_end--;
2986 if (found_type > min_type) {
2987 del_item = 1;
2988 } else {
2989 if (item_end < new_size)
2990 break;
2991 if (found_key.offset >= new_size)
2992 del_item = 1;
2993 else
2994 del_item = 0;
2996 found_extent = 0;
2997 /* FIXME, shrink the extent if the ref count is only 1 */
2998 if (found_type != BTRFS_EXTENT_DATA_KEY)
2999 goto delete;
3001 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3002 u64 num_dec;
3003 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3004 if (!del_item && !encoding) {
3005 u64 orig_num_bytes =
3006 btrfs_file_extent_num_bytes(leaf, fi);
3007 extent_num_bytes = new_size -
3008 found_key.offset + root->sectorsize - 1;
3009 extent_num_bytes = extent_num_bytes &
3010 ~((u64)root->sectorsize - 1);
3011 btrfs_set_file_extent_num_bytes(leaf, fi,
3012 extent_num_bytes);
3013 num_dec = (orig_num_bytes -
3014 extent_num_bytes);
3015 if (root->ref_cows && extent_start != 0)
3016 inode_sub_bytes(inode, num_dec);
3017 btrfs_mark_buffer_dirty(leaf);
3018 } else {
3019 extent_num_bytes =
3020 btrfs_file_extent_disk_num_bytes(leaf,
3021 fi);
3022 extent_offset = found_key.offset -
3023 btrfs_file_extent_offset(leaf, fi);
3025 /* FIXME blocksize != 4096 */
3026 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
3027 if (extent_start != 0) {
3028 found_extent = 1;
3029 if (root->ref_cows)
3030 inode_sub_bytes(inode, num_dec);
3033 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3035 * we can't truncate inline items that have had
3036 * special encodings
3038 if (!del_item &&
3039 btrfs_file_extent_compression(leaf, fi) == 0 &&
3040 btrfs_file_extent_encryption(leaf, fi) == 0 &&
3041 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3042 u32 size = new_size - found_key.offset;
3044 if (root->ref_cows) {
3045 inode_sub_bytes(inode, item_end + 1 -
3046 new_size);
3048 size =
3049 btrfs_file_extent_calc_inline_size(size);
3050 ret = btrfs_truncate_item(trans, root, path,
3051 size, 1);
3052 BUG_ON(ret);
3053 } else if (root->ref_cows) {
3054 inode_sub_bytes(inode, item_end + 1 -
3055 found_key.offset);
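/*
 * Deletions are batched: because the loop walks slots downward,
 * consecutive doomed items occupy adjacent slots and can be removed
 * with a single btrfs_del_items() call once the run is broken or the
 * search restarts.
 */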
3058 delete:
3059 if (del_item) {
3060 if (!pending_del_nr) {
3061 /* no pending yet, add ourselves */
3062 pending_del_slot = path->slots[0];
3063 pending_del_nr = 1;
3064 } else if (pending_del_nr &&
3065 path->slots[0] + 1 == pending_del_slot) {
3066 /* hop on the pending chunk */
3067 pending_del_nr++;
3068 pending_del_slot = path->slots[0];
3069 } else {
3070 BUG();
3072 } else {
3073 break;
3075 if (found_extent && root->ref_cows) {
3076 btrfs_set_path_blocking(path);
3077 ret = btrfs_free_extent(trans, root, extent_start,
3078 extent_num_bytes, 0,
3079 btrfs_header_owner(leaf),
3080 inode->i_ino, extent_offset);
3081 BUG_ON(ret);
3084 if (found_type == BTRFS_INODE_ITEM_KEY)
3085 break;
3087 if (path->slots[0] == 0 ||
3088 path->slots[0] != pending_del_slot) {
3089 if (root->ref_cows) {
3090 err = -EAGAIN;
3091 goto out;
3093 if (pending_del_nr) {
3094 ret = btrfs_del_items(trans, root, path,
3095 pending_del_slot,
3096 pending_del_nr);
3097 BUG_ON(ret);
3098 pending_del_nr = 0;
3100 btrfs_release_path(root, path);
3101 goto search_again;
3102 } else {
3103 path->slots[0]--;
3106 out:
3107 if (pending_del_nr) {
3108 ret = btrfs_del_items(trans, root, path, pending_del_slot,
3109 pending_del_nr);
3111 btrfs_free_path(path);
3112 return err;
3116 * taken from block_truncate_page, but does COW as it zeros out
3117 * any bytes left in the last page in the file.
3119 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3121 struct inode *inode = mapping->host;
3122 struct btrfs_root *root = BTRFS_I(inode)->root;
3123 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3124 struct btrfs_ordered_extent *ordered;
3125 char *kaddr;
3126 u32 blocksize = root->sectorsize;
3127 pgoff_t index = from >> PAGE_CACHE_SHIFT;
3128 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3129 struct page *page;
3130 int ret = 0;
3131 u64 page_start;
3132 u64 page_end;
3134 if ((offset & (blocksize - 1)) == 0)
3135 goto out;
3136 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3137 if (ret)
3138 goto out;
3140 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3141 if (ret)
3142 goto out;
3144 ret = -ENOMEM;
3145 again:
3146 page = grab_cache_page(mapping, index);
3147 if (!page) {
3148 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3149 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3150 goto out;
3153 page_start = page_offset(page);
3154 page_end = page_start + PAGE_CACHE_SIZE - 1;
3156 if (!PageUptodate(page)) {
3157 ret = btrfs_readpage(NULL, page);
3158 lock_page(page);
3159 if (page->mapping != mapping) {
3160 unlock_page(page);
3161 page_cache_release(page);
3162 goto again;
3164 if (!PageUptodate(page)) {
3165 ret = -EIO;
3166 goto out_unlock;
3169 wait_on_page_writeback(page);
3171 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3172 set_page_extent_mapped(page);
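/*
 * If an ordered extent still covers this page, its data may be in
 * flight; wait for it to complete and retry, since zeroing now could
 * race with the in-progress ordered IO.
 */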
3174 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3175 if (ordered) {
3176 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3177 unlock_page(page);
3178 page_cache_release(page);
3179 btrfs_start_ordered_extent(inode, ordered, 1);
3180 btrfs_put_ordered_extent(ordered);
3181 goto again;
3184 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
3185 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3186 GFP_NOFS);
3188 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3189 if (ret) {
3190 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3191 goto out_unlock;
3194 ret = 0;
3195 if (offset != PAGE_CACHE_SIZE) {
3196 kaddr = kmap(page);
3197 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3198 flush_dcache_page(page);
3199 kunmap(page);
3201 ClearPageChecked(page);
3202 set_page_dirty(page);
3203 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3205 out_unlock:
3206 if (ret)
3207 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3208 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3209 unlock_page(page);
3210 page_cache_release(page);
3211 out:
3212 return ret;
3215 int btrfs_cont_expand(struct inode *inode, loff_t size)
3217 struct btrfs_trans_handle *trans;
3218 struct btrfs_root *root = BTRFS_I(inode)->root;
3219 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3220 struct extent_map *em;
3221 u64 mask = root->sectorsize - 1;
3222 u64 hole_start = (inode->i_size + mask) & ~mask;
3223 u64 block_end = (size + mask) & ~mask;
3224 u64 last_byte;
3225 u64 cur_offset;
3226 u64 hole_size;
3227 int err = 0;
3229 if (size <= hole_start)
3230 return 0;
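/*
 * Alignment sketch (illustrative numbers): with a 4K sectorsize and
 * i_size = 5000, hole_start rounds up to 8192; expanding to
 * size = 20000 gives block_end = 20480, so hole extents are inserted
 * in whole sectors between the two.
 */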
3232 while (1) {
3233 struct btrfs_ordered_extent *ordered;
3234 btrfs_wait_ordered_range(inode, hole_start,
3235 block_end - hole_start);
3236 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3237 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3238 if (!ordered)
3239 break;
3240 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3241 btrfs_put_ordered_extent(ordered);
3244 cur_offset = hole_start;
3245 while (1) {
3246 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3247 block_end - cur_offset, 0);
3248 BUG_ON(IS_ERR(em) || !em);
3249 last_byte = min(extent_map_end(em), block_end);
3250 last_byte = (last_byte + mask) & ~mask;
3251 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3252 u64 hint_byte = 0;
3253 hole_size = last_byte - cur_offset;
3255 err = btrfs_reserve_metadata_space(root, 2);
3256 if (err)
3257 break;
3259 trans = btrfs_start_transaction(root, 1);
3260 btrfs_set_trans_block_group(trans, inode);
3262 err = btrfs_drop_extents(trans, inode, cur_offset,
3263 cur_offset + hole_size,
3264 &hint_byte, 1);
3265 BUG_ON(err);
3267 err = btrfs_insert_file_extent(trans, root,
3268 inode->i_ino, cur_offset, 0,
3269 0, hole_size, 0, hole_size,
3270 0, 0, 0);
3271 BUG_ON(err);
3273 btrfs_drop_extent_cache(inode, hole_start,
3274 last_byte - 1, 0);
3276 btrfs_end_transaction(trans, root);
3277 btrfs_unreserve_metadata_space(root, 2);
3279 free_extent_map(em);
3280 cur_offset = last_byte;
3281 if (cur_offset >= block_end)
3282 break;
3285 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3286 return err;
3289 static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3291 struct btrfs_root *root = BTRFS_I(inode)->root;
3292 struct btrfs_trans_handle *trans;
3293 unsigned long nr;
3294 int ret;
3296 if (attr->ia_size == inode->i_size)
3297 return 0;
3299 if (attr->ia_size > inode->i_size) {
3300 unsigned long limit;
3301 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
3302 if (attr->ia_size > inode->i_sb->s_maxbytes)
3303 return -EFBIG;
3304 if (limit != RLIM_INFINITY && attr->ia_size > limit) {
3305 send_sig(SIGXFSZ, current, 0);
3306 return -EFBIG;
3310 ret = btrfs_reserve_metadata_space(root, 1);
3311 if (ret)
3312 return ret;
3314 trans = btrfs_start_transaction(root, 1);
3315 btrfs_set_trans_block_group(trans, inode);
3317 ret = btrfs_orphan_add(trans, inode);
3318 BUG_ON(ret);
3320 nr = trans->blocks_used;
3321 btrfs_end_transaction(trans, root);
3322 btrfs_unreserve_metadata_space(root, 1);
3323 btrfs_btree_balance_dirty(root, nr);
3325 if (attr->ia_size > inode->i_size) {
3326 ret = btrfs_cont_expand(inode, attr->ia_size);
3327 if (ret) {
3328 btrfs_truncate(inode);
3329 return ret;
3332 i_size_write(inode, attr->ia_size);
3333 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
3335 trans = btrfs_start_transaction(root, 1);
3336 btrfs_set_trans_block_group(trans, inode);
3338 ret = btrfs_update_inode(trans, root, inode);
3339 BUG_ON(ret);
3340 if (inode->i_nlink > 0) {
3341 ret = btrfs_orphan_del(trans, inode);
3342 BUG_ON(ret);
3344 nr = trans->blocks_used;
3345 btrfs_end_transaction(trans, root);
3346 btrfs_btree_balance_dirty(root, nr);
3347 return 0;
3351 * We're truncating a file that used to have good data down to
3352 * zero. Make sure it gets into the ordered flush list so that
3353 * any new writes get down to disk quickly.
3355 if (attr->ia_size == 0)
3356 BTRFS_I(inode)->ordered_data_close = 1;
3358 /* we don't support swapfiles, so vmtruncate shouldn't fail */
3359 ret = vmtruncate(inode, attr->ia_size);
3360 BUG_ON(ret);
3362 return 0;
3365 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3367 struct inode *inode = dentry->d_inode;
3368 int err;
3370 err = inode_change_ok(inode, attr);
3371 if (err)
3372 return err;
3374 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3375 err = btrfs_setattr_size(inode, attr);
3376 if (err)
3377 return err;
3379 attr->ia_valid &= ~ATTR_SIZE;
3381 if (attr->ia_valid)
3382 err = inode_setattr(inode, attr);
3384 if (!err && ((attr->ia_valid & ATTR_MODE)))
3385 err = btrfs_acl_chmod(inode);
3386 return err;
3389 void btrfs_delete_inode(struct inode *inode)
3391 struct btrfs_trans_handle *trans;
3392 struct btrfs_root *root = BTRFS_I(inode)->root;
3393 unsigned long nr;
3394 int ret;
3396 truncate_inode_pages(&inode->i_data, 0);
3397 if (is_bad_inode(inode)) {
3398 btrfs_orphan_del(NULL, inode);
3399 goto no_delete;
3401 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3403 if (root->fs_info->log_root_recovering) {
3404 BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3405 goto no_delete;
3408 if (inode->i_nlink > 0) {
3409 BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3410 goto no_delete;
3413 btrfs_i_size_write(inode, 0);
3415 while (1) {
3416 trans = btrfs_start_transaction(root, 1);
3417 btrfs_set_trans_block_group(trans, inode);
3418 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3420 if (ret != -EAGAIN)
3421 break;
3423 nr = trans->blocks_used;
3424 btrfs_end_transaction(trans, root);
3425 trans = NULL;
3426 btrfs_btree_balance_dirty(root, nr);
3429 if (ret == 0) {
3430 ret = btrfs_orphan_del(trans, inode);
3431 BUG_ON(ret);
3434 nr = trans->blocks_used;
3435 btrfs_end_transaction(trans, root);
3436 btrfs_btree_balance_dirty(root, nr);
3437 no_delete:
3438 clear_inode(inode);
3439 return;
3443 * this returns the key found in the dir entry in the location pointer.
3444 * If no dir entries were found, location->objectid is 0.
3446 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3447 struct btrfs_key *location)
3449 const char *name = dentry->d_name.name;
3450 int namelen = dentry->d_name.len;
3451 struct btrfs_dir_item *di;
3452 struct btrfs_path *path;
3453 struct btrfs_root *root = BTRFS_I(dir)->root;
3454 int ret = 0;
3456 path = btrfs_alloc_path();
3457 BUG_ON(!path);
3459 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3460 namelen, 0);
3461 if (IS_ERR(di))
3462 ret = PTR_ERR(di);
3464 if (!di || IS_ERR(di))
3465 goto out_err;
3467 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3468 out:
3469 btrfs_free_path(path);
3470 return ret;
3471 out_err:
3472 location->objectid = 0;
3473 goto out;
3477 * when we hit a tree root in a directory, the btrfs part of the inode
3478 * needs to be changed to reflect the root directory of the tree root. This
3479 * is kind of like crossing a mount point.
3481 static int fixup_tree_root_location(struct btrfs_root *root,
3482 struct inode *dir,
3483 struct dentry *dentry,
3484 struct btrfs_key *location,
3485 struct btrfs_root **sub_root)
3487 struct btrfs_path *path;
3488 struct btrfs_root *new_root;
3489 struct btrfs_root_ref *ref;
3490 struct extent_buffer *leaf;
3491 int ret;
3492 int err = 0;
3494 path = btrfs_alloc_path();
3495 if (!path) {
3496 err = -ENOMEM;
3497 goto out;
3500 err = -ENOENT;
3501 ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3502 BTRFS_I(dir)->root->root_key.objectid,
3503 location->objectid);
3504 if (ret) {
3505 if (ret < 0)
3506 err = ret;
3507 goto out;
3510 leaf = path->nodes[0];
3511 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3512 if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3513 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3514 goto out;
3516 ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3517 (unsigned long)(ref + 1),
3518 dentry->d_name.len);
3519 if (ret)
3520 goto out;
3522 btrfs_release_path(root->fs_info->tree_root, path);
3524 new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3525 if (IS_ERR(new_root)) {
3526 err = PTR_ERR(new_root);
3527 goto out;
3530 if (btrfs_root_refs(&new_root->root_item) == 0) {
3531 err = -ENOENT;
3532 goto out;
3535 *sub_root = new_root;
3536 location->objectid = btrfs_root_dirid(&new_root->root_item);
3537 location->type = BTRFS_INODE_ITEM_KEY;
3538 location->offset = 0;
3539 err = 0;
3540 out:
3541 btrfs_free_path(path);
3542 return err;
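/*
 * Each root keeps its in-memory inodes in an rbtree ordered by inode
 * number; this is what lets btrfs_invalidate_inodes() below walk and
 * drop every cached inode when a subvolume goes away.
 */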
3545 static void inode_tree_add(struct inode *inode)
3547 struct btrfs_root *root = BTRFS_I(inode)->root;
3548 struct btrfs_inode *entry;
3549 struct rb_node **p;
3550 struct rb_node *parent;
3551 again:
3552 p = &root->inode_tree.rb_node;
3553 parent = NULL;
3555 if (hlist_unhashed(&inode->i_hash))
3556 return;
3558 spin_lock(&root->inode_lock);
3559 while (*p) {
3560 parent = *p;
3561 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3563 if (inode->i_ino < entry->vfs_inode.i_ino)
3564 p = &parent->rb_left;
3565 else if (inode->i_ino > entry->vfs_inode.i_ino)
3566 p = &parent->rb_right;
3567 else {
3568 WARN_ON(!(entry->vfs_inode.i_state &
3569 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3570 rb_erase(parent, &root->inode_tree);
3571 RB_CLEAR_NODE(parent);
3572 spin_unlock(&root->inode_lock);
3573 goto again;
3576 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3577 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3578 spin_unlock(&root->inode_lock);
3581 static void inode_tree_del(struct inode *inode)
3583 struct btrfs_root *root = BTRFS_I(inode)->root;
3584 int empty = 0;
3586 spin_lock(&root->inode_lock);
3587 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3588 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3589 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3590 empty = RB_EMPTY_ROOT(&root->inode_tree);
3592 spin_unlock(&root->inode_lock);
3594 if (empty && btrfs_root_refs(&root->root_item) == 0) {
3595 synchronize_srcu(&root->fs_info->subvol_srcu);
3596 spin_lock(&root->inode_lock);
3597 empty = RB_EMPTY_ROOT(&root->inode_tree);
3598 spin_unlock(&root->inode_lock);
3599 if (empty)
3600 btrfs_add_dead_root(root);
3604 int btrfs_invalidate_inodes(struct btrfs_root *root)
3606 struct rb_node *node;
3607 struct rb_node *prev;
3608 struct btrfs_inode *entry;
3609 struct inode *inode;
3610 u64 objectid = 0;
3612 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3614 spin_lock(&root->inode_lock);
3615 again:
3616 node = root->inode_tree.rb_node;
3617 prev = NULL;
3618 while (node) {
3619 prev = node;
3620 entry = rb_entry(node, struct btrfs_inode, rb_node);
3622 if (objectid < entry->vfs_inode.i_ino)
3623 node = node->rb_left;
3624 else if (objectid > entry->vfs_inode.i_ino)
3625 node = node->rb_right;
3626 else
3627 break;
3629 if (!node) {
3630 while (prev) {
3631 entry = rb_entry(prev, struct btrfs_inode, rb_node);
3632 if (objectid <= entry->vfs_inode.i_ino) {
3633 node = prev;
3634 break;
3636 prev = rb_next(prev);
3639 while (node) {
3640 entry = rb_entry(node, struct btrfs_inode, rb_node);
3641 objectid = entry->vfs_inode.i_ino + 1;
3642 inode = igrab(&entry->vfs_inode);
3643 if (inode) {
3644 spin_unlock(&root->inode_lock);
3645 if (atomic_read(&inode->i_count) > 1)
3646 d_prune_aliases(inode);
3648 * btrfs_drop_inode will remove it from
3649 * the inode cache when its usage count
3650 * hits zero.
3652 iput(inode);
3653 cond_resched();
3654 spin_lock(&root->inode_lock);
3655 goto again;
3658 if (cond_resched_lock(&root->inode_lock))
3659 goto again;
3661 node = rb_next(node);
3663 spin_unlock(&root->inode_lock);
3664 return 0;
3667 static noinline void init_btrfs_i(struct inode *inode)
3669 struct btrfs_inode *bi = BTRFS_I(inode);
3671 bi->generation = 0;
3672 bi->sequence = 0;
3673 bi->last_trans = 0;
3674 bi->last_sub_trans = 0;
3675 bi->logged_trans = 0;
3676 bi->delalloc_bytes = 0;
3677 bi->reserved_bytes = 0;
3678 bi->disk_i_size = 0;
3679 bi->flags = 0;
3680 bi->index_cnt = (u64)-1;
3681 bi->last_unlink_trans = 0;
3682 bi->ordered_data_close = 0;
3683 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3684 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3685 inode->i_mapping, GFP_NOFS);
3686 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3687 inode->i_mapping, GFP_NOFS);
3688 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3689 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3690 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3691 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3692 mutex_init(&BTRFS_I(inode)->log_mutex);
3695 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3697 struct btrfs_iget_args *args = p;
3698 inode->i_ino = args->ino;
3699 init_btrfs_i(inode);
3700 BTRFS_I(inode)->root = args->root;
3701 btrfs_set_inode_space_info(args->root, inode);
3702 return 0;
3705 static int btrfs_find_actor(struct inode *inode, void *opaque)
3707 struct btrfs_iget_args *args = opaque;
3708 return args->ino == inode->i_ino &&
3709 args->root == BTRFS_I(inode)->root;
3712 static struct inode *btrfs_iget_locked(struct super_block *s,
3713 u64 objectid,
3714 struct btrfs_root *root)
3716 struct inode *inode;
3717 struct btrfs_iget_args args;
3718 args.ino = objectid;
3719 args.root = root;
3721 inode = iget5_locked(s, objectid, btrfs_find_actor,
3722 btrfs_init_locked_inode,
3723 (void *)&args);
3724 return inode;
3727 /* Get an inode object given its location and corresponding root.
3728 * The inode is read from disk if it is not already cached.
3730 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3731 struct btrfs_root *root)
3733 struct inode *inode;
3735 inode = btrfs_iget_locked(s, location->objectid, root);
3736 if (!inode)
3737 return ERR_PTR(-ENOMEM);
3739 if (inode->i_state & I_NEW) {
3740 BTRFS_I(inode)->root = root;
3741 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3742 btrfs_read_locked_inode(inode);
3744 inode_tree_add(inode);
3745 unlock_new_inode(inode);
3748 return inode;
3751 static struct inode *new_simple_dir(struct super_block *s,
3752 struct btrfs_key *key,
3753 struct btrfs_root *root)
3755 struct inode *inode = new_inode(s);
3757 if (!inode)
3758 return ERR_PTR(-ENOMEM);
3760 init_btrfs_i(inode);
3762 BTRFS_I(inode)->root = root;
3763 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3764 BTRFS_I(inode)->dummy_inode = 1;
3766 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3767 inode->i_op = &simple_dir_inode_operations;
3768 inode->i_fop = &simple_dir_operations;
3769 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3770 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3772 return inode;
3775 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3777 struct inode *inode;
3778 struct btrfs_root *root = BTRFS_I(dir)->root;
3779 struct btrfs_root *sub_root = root;
3780 struct btrfs_key location;
3781 int index;
3782 int ret;
3784 dentry->d_op = &btrfs_dentry_operations;
3786 if (dentry->d_name.len > BTRFS_NAME_LEN)
3787 return ERR_PTR(-ENAMETOOLONG);
3789 ret = btrfs_inode_by_name(dir, dentry, &location);
3791 if (ret < 0)
3792 return ERR_PTR(ret);
3794 if (location.objectid == 0)
3795 return NULL;
3797 if (location.type == BTRFS_INODE_ITEM_KEY) {
3798 inode = btrfs_iget(dir->i_sb, &location, root);
3799 return inode;
3802 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3804 index = srcu_read_lock(&root->fs_info->subvol_srcu);
3805 ret = fixup_tree_root_location(root, dir, dentry,
3806 &location, &sub_root);
3807 if (ret < 0) {
3808 if (ret != -ENOENT)
3809 inode = ERR_PTR(ret);
3810 else
3811 inode = new_simple_dir(dir->i_sb, &location, sub_root);
3812 } else {
3813 inode = btrfs_iget(dir->i_sb, &location, sub_root);
3815 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3817 if (root != sub_root) {
3818 down_read(&root->fs_info->cleanup_work_sem);
3819 if (!(inode->i_sb->s_flags & MS_RDONLY))
3820 btrfs_orphan_cleanup(sub_root);
3821 up_read(&root->fs_info->cleanup_work_sem);
3824 return inode;
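/*
 * d_delete callback: returning 1 tells the VFS not to keep the dentry
 * cached.  Dentries pointing into a subvolume whose root has no
 * references left (a deleted snapshot) must not linger in the dcache.
 */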
3827 static int btrfs_dentry_delete(struct dentry *dentry)
3829 struct btrfs_root *root;
3831 if (!dentry->d_inode && !IS_ROOT(dentry))
3832 dentry = dentry->d_parent;
3834 if (dentry->d_inode) {
3835 root = BTRFS_I(dentry->d_inode)->root;
3836 if (btrfs_root_refs(&root->root_item) == 0)
3837 return 1;
3839 return 0;
3842 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3843 struct nameidata *nd)
3845 struct inode *inode;
3847 inode = btrfs_lookup_dentry(dir, dentry);
3848 if (IS_ERR(inode))
3849 return ERR_CAST(inode);
3851 return d_splice_alias(inode, dentry);
3854 static unsigned char btrfs_filetype_table[] = {
3855 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
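/*
 * The index order appears to follow the BTRFS_FT_* values, e.g.
 * BTRFS_FT_REG_FILE (1) -> DT_REG and BTRFS_FT_SYMLINK (7) -> DT_LNK.
 */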
3858 static int btrfs_real_readdir(struct file *filp, void *dirent,
3859 filldir_t filldir)
3861 struct inode *inode = filp->f_dentry->d_inode;
3862 struct btrfs_root *root = BTRFS_I(inode)->root;
3863 struct btrfs_item *item;
3864 struct btrfs_dir_item *di;
3865 struct btrfs_key key;
3866 struct btrfs_key found_key;
3867 struct btrfs_path *path;
3868 int ret;
3869 u32 nritems;
3870 struct extent_buffer *leaf;
3871 int slot;
3872 int advance;
3873 unsigned char d_type;
3874 int over = 0;
3875 u32 di_cur;
3876 u32 di_total;
3877 u32 di_len;
3878 int key_type = BTRFS_DIR_INDEX_KEY;
3879 char tmp_name[32];
3880 char *name_ptr;
3881 int name_len;
3883 /* FIXME, use a real flag for deciding about the key type */
3884 if (root->fs_info->tree_root == root)
3885 key_type = BTRFS_DIR_ITEM_KEY;
3887 /* special case for "." */
3888 if (filp->f_pos == 0) {
3889 over = filldir(dirent, ".", 1,
3890 1, inode->i_ino,
3891 DT_DIR);
3892 if (over)
3893 return 0;
3894 filp->f_pos = 1;
3896 /* special case for .., just use the back ref */
3897 if (filp->f_pos == 1) {
3898 u64 pino = parent_ino(filp->f_path.dentry);
3899 over = filldir(dirent, "..", 2,
3900 2, pino, DT_DIR);
3901 if (over)
3902 return 0;
3903 filp->f_pos = 2;
3905 path = btrfs_alloc_path();
3906 path->reada = 2;
3908 btrfs_set_key_type(&key, key_type);
3909 key.offset = filp->f_pos;
3910 key.objectid = inode->i_ino;
3912 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3913 if (ret < 0)
3914 goto err;
3915 advance = 0;
3917 while (1) {
3918 leaf = path->nodes[0];
3919 nritems = btrfs_header_nritems(leaf);
3920 slot = path->slots[0];
3921 if (advance || slot >= nritems) {
3922 if (slot >= nritems - 1) {
3923 ret = btrfs_next_leaf(root, path);
3924 if (ret)
3925 break;
3926 leaf = path->nodes[0];
3927 nritems = btrfs_header_nritems(leaf);
3928 slot = path->slots[0];
3929 } else {
3930 slot++;
3931 path->slots[0]++;
3935 advance = 1;
3936 item = btrfs_item_nr(leaf, slot);
3937 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3939 if (found_key.objectid != key.objectid)
3940 break;
3941 if (btrfs_key_type(&found_key) != key_type)
3942 break;
3943 if (found_key.offset < filp->f_pos)
3944 continue;
3946 filp->f_pos = found_key.offset;
3948 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3949 di_cur = 0;
3950 di_total = btrfs_item_size(leaf, item);
3952 while (di_cur < di_total) {
3953 struct btrfs_key location;
3955 name_len = btrfs_dir_name_len(leaf, di);
3956 if (name_len <= sizeof(tmp_name)) {
3957 name_ptr = tmp_name;
3958 } else {
3959 name_ptr = kmalloc(name_len, GFP_NOFS);
3960 if (!name_ptr) {
3961 ret = -ENOMEM;
3962 goto err;
3965 read_extent_buffer(leaf, name_ptr,
3966 (unsigned long)(di + 1), name_len);
3968 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3969 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3971 /* is this a reference to our own snapshot? If so
3972 * skip it
3974 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3975 location.objectid == root->root_key.objectid) {
3976 over = 0;
3977 goto skip;
3979 over = filldir(dirent, name_ptr, name_len,
3980 found_key.offset, location.objectid,
3981 d_type);
3983 skip:
3984 if (name_ptr != tmp_name)
3985 kfree(name_ptr);
3987 if (over)
3988 goto nopos;
3989 di_len = btrfs_dir_name_len(leaf, di) +
3990 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3991 di_cur += di_len;
3992 di = (struct btrfs_dir_item *)((char *)di + di_len);
3996 /* Reached end of directory/root. Bump pos past the last item. */
3997 if (key_type == BTRFS_DIR_INDEX_KEY)
3998 filp->f_pos = INT_LIMIT(off_t);
3999 else
4000 filp->f_pos++;
4001 nopos:
4002 ret = 0;
4003 err:
4004 btrfs_free_path(path);
4005 return ret;
4008 int btrfs_write_inode(struct inode *inode, int wait)
4010 struct btrfs_root *root = BTRFS_I(inode)->root;
4011 struct btrfs_trans_handle *trans;
4012 int ret = 0;
4014 if (root->fs_info->btree_inode == inode)
4015 return 0;
4017 if (wait) {
4018 trans = btrfs_join_transaction(root, 1);
4019 btrfs_set_trans_block_group(trans, inode);
4020 ret = btrfs_commit_transaction(trans, root);
4022 return ret;
4026 * This is somewhat expensive, updating the tree every time the
4027 * inode changes. But, it is most likely to find the inode in cache.
4028 * FIXME: needs more benchmarking; performance is the only reason
4029 * to keep or drop this code.
4031 void btrfs_dirty_inode(struct inode *inode)
4033 struct btrfs_root *root = BTRFS_I(inode)->root;
4034 struct btrfs_trans_handle *trans;
4036 trans = btrfs_join_transaction(root, 1);
4037 btrfs_set_trans_block_group(trans, inode);
4038 btrfs_update_inode(trans, root, inode);
4039 btrfs_end_transaction(trans, root);
4043 * find the highest existing sequence number in a directory
4044 * and then set the in-memory index_cnt variable to the first
4045 * free sequence number
4047 static int btrfs_set_inode_index_count(struct inode *inode)
4049 struct btrfs_root *root = BTRFS_I(inode)->root;
4050 struct btrfs_key key, found_key;
4051 struct btrfs_path *path;
4052 struct extent_buffer *leaf;
4053 int ret;
4055 key.objectid = inode->i_ino;
4056 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4057 key.offset = (u64)-1;
4059 path = btrfs_alloc_path();
4060 if (!path)
4061 return -ENOMEM;
4063 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4064 if (ret < 0)
4065 goto out;
4066 /* FIXME: we should be able to handle this */
4067 if (ret == 0)
4068 goto out;
4069 ret = 0;
4072 * MAGIC NUMBER EXPLANATION:
4073 * since we search a directory based on f_pos we have to start at 2
4074 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
4075 * else has to start at 2
4077 if (path->slots[0] == 0) {
4078 BTRFS_I(inode)->index_cnt = 2;
4079 goto out;
4082 path->slots[0]--;
4084 leaf = path->nodes[0];
4085 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4087 if (found_key.objectid != inode->i_ino ||
4088 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4089 BTRFS_I(inode)->index_cnt = 2;
4090 goto out;
4093 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4094 out:
4095 btrfs_free_path(path);
4096 return ret;
4100 * helper to find a free sequence number in a given directory. The current
4101 * code is very simple; later versions will do smarter things in the btree
4103 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4105 int ret = 0;
4107 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4108 ret = btrfs_set_inode_index_count(dir);
4109 if (ret)
4110 return ret;
4113 *index = BTRFS_I(dir)->index_cnt;
4114 BTRFS_I(dir)->index_cnt++;
4116 return ret;
4119 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4120 struct btrfs_root *root,
4121 struct inode *dir,
4122 const char *name, int name_len,
4123 u64 ref_objectid, u64 objectid,
4124 u64 alloc_hint, int mode, u64 *index)
4126 struct inode *inode;
4127 struct btrfs_inode_item *inode_item;
4128 struct btrfs_key *location;
4129 struct btrfs_path *path;
4130 struct btrfs_inode_ref *ref;
4131 struct btrfs_key key[2];
4132 u32 sizes[2];
4133 unsigned long ptr;
4134 int ret;
4135 int owner;
4137 path = btrfs_alloc_path();
4138 BUG_ON(!path);
4140 inode = new_inode(root->fs_info->sb);
4141 if (!inode)
4142 return ERR_PTR(-ENOMEM);
4144 if (dir) {
4145 ret = btrfs_set_inode_index(dir, index);
4146 if (ret) {
4147 iput(inode);
4148 return ERR_PTR(ret);
4152 * index_cnt is ignored for everything but a dir;
4153 * btrfs_set_inode_index_count has an explanation for the magic
4154 * number
4156 init_btrfs_i(inode);
4157 BTRFS_I(inode)->index_cnt = 2;
4158 BTRFS_I(inode)->root = root;
4159 BTRFS_I(inode)->generation = trans->transid;
4160 btrfs_set_inode_space_info(root, inode);
4162 if (mode & S_IFDIR)
4163 owner = 0;
4164 else
4165 owner = 1;
4166 BTRFS_I(inode)->block_group =
4167 btrfs_find_block_group(root, 0, alloc_hint, owner);
4169 key[0].objectid = objectid;
4170 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4171 key[0].offset = 0;
4173 key[1].objectid = objectid;
4174 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4175 key[1].offset = ref_objectid;
4177 sizes[0] = sizeof(struct btrfs_inode_item);
4178 sizes[1] = name_len + sizeof(*ref);
4180 path->leave_spinning = 1;
4181 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4182 if (ret != 0)
4183 goto fail;
4185 inode->i_uid = current_fsuid();
4187 if (dir && (dir->i_mode & S_ISGID)) {
4188 inode->i_gid = dir->i_gid;
4189 if (S_ISDIR(mode))
4190 mode |= S_ISGID;
4191 } else
4192 inode->i_gid = current_fsgid();
4194 inode->i_mode = mode;
4195 inode->i_ino = objectid;
4196 inode_set_bytes(inode, 0);
4197 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4198 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4199 struct btrfs_inode_item);
4200 fill_inode_item(trans, path->nodes[0], inode_item, inode);
4202 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4203 struct btrfs_inode_ref);
4204 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4205 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4206 ptr = (unsigned long)(ref + 1);
4207 write_extent_buffer(path->nodes[0], name, ptr, name_len);
4209 btrfs_mark_buffer_dirty(path->nodes[0]);
4210 btrfs_free_path(path);
4212 location = &BTRFS_I(inode)->location;
4213 location->objectid = objectid;
4214 location->offset = 0;
4215 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4217 btrfs_inherit_iflags(inode, dir);
4219 if ((mode & S_IFREG)) {
4220 if (btrfs_test_opt(root, NODATASUM))
4221 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4222 if (btrfs_test_opt(root, NODATACOW))
4223 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4226 insert_inode_hash(inode);
4227 inode_tree_add(inode);
4228 return inode;
4229 fail:
4230 if (dir)
4231 BTRFS_I(dir)->index_cnt--;
4232 btrfs_free_path(path);
4233 iput(inode);
4234 return ERR_PTR(ret);
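/*
 * Leaf layout sketch for the two items btrfs_new_inode() inserts above,
 * using made-up objectids (file 257 named "foo" created in dir 256):
 *
 *	(257 INODE_ITEM 0)   -> struct btrfs_inode_item
 *	(257 INODE_REF 256)  -> struct btrfs_inode_ref followed by "foo"
 *
 * Both land side by side in one leaf via btrfs_insert_empty_items(),
 * which is why the ref is read back at path->slots[0] + 1.
 */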
4237 static inline u8 btrfs_inode_type(struct inode *inode)
4239 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4243 * utility function to add 'inode' into 'parent_inode' with
4244 * a given name and a given sequence number.
4245 * if 'add_backref' is true, also insert a backref from the
4246 * inode to the parent directory.
4248 int btrfs_add_link(struct btrfs_trans_handle *trans,
4249 struct inode *parent_inode, struct inode *inode,
4250 const char *name, int name_len, int add_backref, u64 index)
4252 int ret = 0;
4253 struct btrfs_key key;
4254 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4256 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4257 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4258 } else {
4259 key.objectid = inode->i_ino;
4260 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4261 key.offset = 0;
4264 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4265 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4266 key.objectid, root->root_key.objectid,
4267 parent_inode->i_ino,
4268 index, name, name_len);
4269 } else if (add_backref) {
4270 ret = btrfs_insert_inode_ref(trans, root,
4271 name, name_len, inode->i_ino,
4272 parent_inode->i_ino, index);
4275 if (ret == 0) {
4276 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4277 parent_inode->i_ino, &key,
4278 btrfs_inode_type(inode), index);
4279 BUG_ON(ret);
4281 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4282 name_len * 2);
4283 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4284 ret = btrfs_update_inode(trans, root, parent_inode);
4286 return ret;
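/*
 * Note on the i_size bump in btrfs_add_link() above: every name is
 * stored twice in a directory, once as a DIR_ITEM keyed by name hash
 * and once as a DIR_INDEX keyed by sequence number, hence the
 * name_len * 2.  Example: linking "foo" (3 bytes) into an empty
 * directory leaves the directory with i_size == 6.
 */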
4289 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4290 struct dentry *dentry, struct inode *inode,
4291 int backref, u64 index)
4293 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4294 inode, dentry->d_name.name,
4295 dentry->d_name.len, backref, index);
4296 if (!err) {
4297 d_instantiate(dentry, inode);
4298 return 0;
4300 if (err > 0)
4301 err = -EEXIST;
4302 return err;
4305 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4306 int mode, dev_t rdev)
4308 struct btrfs_trans_handle *trans;
4309 struct btrfs_root *root = BTRFS_I(dir)->root;
4310 struct inode *inode = NULL;
4311 int err;
4312 int drop_inode = 0;
4313 u64 objectid;
4314 unsigned long nr = 0;
4315 u64 index = 0;
4317 if (!new_valid_dev(rdev))
4318 return -EINVAL;
4321 * 2 for inode item and ref
4322 * 2 for dir items
4323 * 1 for xattr if selinux is on
4325 err = btrfs_reserve_metadata_space(root, 5);
4326 if (err)
4327 return err;
4329 trans = btrfs_start_transaction(root, 1);
4330 if (!trans)
4331 goto fail;
4332 btrfs_set_trans_block_group(trans, dir);
4334 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4335 if (err) {
4336 err = -ENOSPC;
4337 goto out_unlock;
4340 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4341 dentry->d_name.len,
4342 dentry->d_parent->d_inode->i_ino, objectid,
4343 BTRFS_I(dir)->block_group, mode, &index);
4344 err = PTR_ERR(inode);
4345 if (IS_ERR(inode))
4346 goto out_unlock;
4348 err = btrfs_init_inode_security(trans, inode, dir);
4349 if (err) {
4350 drop_inode = 1;
4351 goto out_unlock;
4354 btrfs_set_trans_block_group(trans, inode);
4355 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4356 if (err)
4357 drop_inode = 1;
4358 else {
4359 inode->i_op = &btrfs_special_inode_operations;
4360 init_special_inode(inode, inode->i_mode, rdev);
4361 btrfs_update_inode(trans, root, inode);
4363 btrfs_update_inode_block_group(trans, inode);
4364 btrfs_update_inode_block_group(trans, dir);
4365 out_unlock:
4366 nr = trans->blocks_used;
4367 btrfs_end_transaction_throttle(trans, root);
4368 fail:
4369 btrfs_unreserve_metadata_space(root, 5);
4370 if (drop_inode) {
4371 inode_dec_link_count(inode);
4372 iput(inode);
4374 btrfs_btree_balance_dirty(root, nr);
4375 return err;
4378 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4379 int mode, struct nameidata *nd)
4381 struct btrfs_trans_handle *trans;
4382 struct btrfs_root *root = BTRFS_I(dir)->root;
4383 struct inode *inode = NULL;
4384 int err;
4385 int drop_inode = 0;
4386 unsigned long nr = 0;
4387 u64 objectid;
4388 u64 index = 0;
4391 * 2 for inode item and ref
4392 * 2 for dir items
4393 * 1 for xattr if selinux is on
4395 err = btrfs_reserve_metadata_space(root, 5);
4396 if (err)
4397 return err;
4399 trans = btrfs_start_transaction(root, 1);
4400 if (!trans)
4401 goto fail;
4402 btrfs_set_trans_block_group(trans, dir);
4404 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4405 if (err) {
4406 err = -ENOSPC;
4407 goto out_unlock;
4410 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4411 dentry->d_name.len,
4412 dentry->d_parent->d_inode->i_ino,
4413 objectid, BTRFS_I(dir)->block_group, mode,
4414 &index);
4415 err = PTR_ERR(inode);
4416 if (IS_ERR(inode))
4417 goto out_unlock;
4419 err = btrfs_init_inode_security(trans, inode, dir);
4420 if (err) {
4421 drop_inode = 1;
4422 goto out_unlock;
4425 btrfs_set_trans_block_group(trans, inode);
4426 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4427 if (err)
4428 drop_inode = 1;
4429 else {
4430 inode->i_mapping->a_ops = &btrfs_aops;
4431 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4432 inode->i_fop = &btrfs_file_operations;
4433 inode->i_op = &btrfs_file_inode_operations;
4434 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4436 btrfs_update_inode_block_group(trans, inode);
4437 btrfs_update_inode_block_group(trans, dir);
4438 out_unlock:
4439 nr = trans->blocks_used;
4440 btrfs_end_transaction_throttle(trans, root);
4441 fail:
4442 btrfs_unreserve_metadata_space(root, 5);
4443 if (drop_inode) {
4444 inode_dec_link_count(inode);
4445 iput(inode);
4447 btrfs_btree_balance_dirty(root, nr);
4448 return err;
4451 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4452 struct dentry *dentry)
4454 struct btrfs_trans_handle *trans;
4455 struct btrfs_root *root = BTRFS_I(dir)->root;
4456 struct inode *inode = old_dentry->d_inode;
4457 u64 index;
4458 unsigned long nr = 0;
4459 int err;
4460 int drop_inode = 0;
4462 if (inode->i_nlink == 0)
4463 return -ENOENT;
4465 /* do not allow sys_link's with other subvols of the same device */
4466 if (root->objectid != BTRFS_I(inode)->root->objectid)
4467 return -EPERM;
4470 * 1 item for inode ref
4471 * 2 items for dir items
4473 err = btrfs_reserve_metadata_space(root, 3);
4474 if (err)
4475 return err;
4477 btrfs_inc_nlink(inode);
4479 err = btrfs_set_inode_index(dir, &index);
4480 if (err)
4481 goto fail;
4483 trans = btrfs_start_transaction(root, 1);
4485 btrfs_set_trans_block_group(trans, dir);
4486 atomic_inc(&inode->i_count);
4488 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4490 if (err) {
4491 drop_inode = 1;
4492 } else {
4493 btrfs_update_inode_block_group(trans, dir);
4494 err = btrfs_update_inode(trans, root, inode);
4495 BUG_ON(err);
4496 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4499 nr = trans->blocks_used;
4500 btrfs_end_transaction_throttle(trans, root);
4501 fail:
4502 btrfs_unreserve_metadata_space(root, 3);
4503 if (drop_inode) {
4504 inode_dec_link_count(inode);
4505 iput(inode);
4507 btrfs_btree_balance_dirty(root, nr);
4508 return err;
4511 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4513 struct inode *inode = NULL;
4514 struct btrfs_trans_handle *trans;
4515 struct btrfs_root *root = BTRFS_I(dir)->root;
4516 int err = 0;
4517 int drop_on_err = 0;
4518 u64 objectid = 0;
4519 u64 index = 0;
4520 unsigned long nr = 1;
4523 * 2 items for inode and ref
4524 * 2 items for dir items
4525 * 1 for xattr if selinux is on
4527 err = btrfs_reserve_metadata_space(root, 5);
4528 if (err)
4529 return err;
4531 trans = btrfs_start_transaction(root, 1);
4532 if (!trans) {
4533 err = -ENOMEM;
4534 goto out_unlock;
4536 btrfs_set_trans_block_group(trans, dir);
4538 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4539 if (err) {
4540 err = -ENOSPC;
4541 goto out_unlock;
4544 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4545 dentry->d_name.len,
4546 dentry->d_parent->d_inode->i_ino, objectid,
4547 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4548 &index);
4549 if (IS_ERR(inode)) {
4550 err = PTR_ERR(inode);
4551 goto out_fail;
4554 drop_on_err = 1;
4556 err = btrfs_init_inode_security(trans, inode, dir);
4557 if (err)
4558 goto out_fail;
4560 inode->i_op = &btrfs_dir_inode_operations;
4561 inode->i_fop = &btrfs_dir_file_operations;
4562 btrfs_set_trans_block_group(trans, inode);
4564 btrfs_i_size_write(inode, 0);
4565 err = btrfs_update_inode(trans, root, inode);
4566 if (err)
4567 goto out_fail;
4569 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4570 inode, dentry->d_name.name,
4571 dentry->d_name.len, 0, index);
4572 if (err)
4573 goto out_fail;
4575 d_instantiate(dentry, inode);
4576 drop_on_err = 0;
4577 btrfs_update_inode_block_group(trans, inode);
4578 btrfs_update_inode_block_group(trans, dir);
4580 out_fail:
4581 nr = trans->blocks_used;
4582 btrfs_end_transaction_throttle(trans, root);
4584 out_unlock:
4585 btrfs_unreserve_metadata_space(root, 5);
4586 if (drop_on_err)
4587 iput(inode);
4588 btrfs_btree_balance_dirty(root, nr);
4589 return err;
4592 /* helper for btrfs_get_extent. Given an existing extent in the tree,
4593 * and an extent that you want to insert, deal with overlap and insert
4594 * the new extent into the tree.
4596 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4597 struct extent_map *existing,
4598 struct extent_map *em,
4599 u64 map_start, u64 map_len)
4601 u64 start_diff;
4603 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4604 start_diff = map_start - em->start;
4605 em->start = map_start;
4606 em->len = map_len;
4607 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4608 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4609 em->block_start += start_diff;
4610 em->block_len -= start_diff;
4612 return add_extent_mapping(em_tree, em);
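/*
 * Worked example for merge_extent_mapping(), with made-up numbers:
 * em covers [0k, 16k) at disk block_start 100k, but [0k, 4k) already
 * exists in the tree.  Called with map_start = 4k and map_len = 4k,
 * the code above trims em to [4k, 8k) and advances block_start by
 * start_diff = 4k to 104k before re-inserting it.
 */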
4615 static noinline int uncompress_inline(struct btrfs_path *path,
4616 struct inode *inode, struct page *page,
4617 size_t pg_offset, u64 extent_offset,
4618 struct btrfs_file_extent_item *item)
4620 int ret;
4621 struct extent_buffer *leaf = path->nodes[0];
4622 char *tmp;
4623 size_t max_size;
4624 unsigned long inline_size;
4625 unsigned long ptr;
4627 WARN_ON(pg_offset != 0);
4628 max_size = btrfs_file_extent_ram_bytes(leaf, item);
4629 inline_size = btrfs_file_extent_inline_item_len(leaf,
4630 btrfs_item_nr(leaf, path->slots[0]));
4631 tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
4632 ptr = btrfs_file_extent_inline_start(item);
4634 read_extent_buffer(leaf, tmp, ptr, inline_size);
4636 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4637 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4638 inline_size, max_size);
4639 if (ret) {
4640 char *kaddr = kmap_atomic(page, KM_USER0);
4641 unsigned long copy_size = min_t(u64,
4642 PAGE_CACHE_SIZE - pg_offset,
4643 max_size - extent_offset);
4644 memset(kaddr + pg_offset, 0, copy_size);
4645 kunmap_atomic(kaddr, KM_USER0);
4647 kfree(tmp);
4648 return 0;
4652 * a bit scary, this does extent mapping from logical file offset to the disk.
4653 * the ugly parts come from merging extents from the disk with the in-ram
4654 * representation. This gets more complex because of the data=ordered code,
4655 * where the in-ram extents might be locked pending data=ordered completion.
4657 * This also copies inline extents directly into the page.
4660 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4661 size_t pg_offset, u64 start, u64 len,
4662 int create)
4664 int ret;
4665 int err = 0;
4666 u64 bytenr;
4667 u64 extent_start = 0;
4668 u64 extent_end = 0;
4669 u64 objectid = inode->i_ino;
4670 u32 found_type;
4671 struct btrfs_path *path = NULL;
4672 struct btrfs_root *root = BTRFS_I(inode)->root;
4673 struct btrfs_file_extent_item *item;
4674 struct extent_buffer *leaf;
4675 struct btrfs_key found_key;
4676 struct extent_map *em = NULL;
4677 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4678 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4679 struct btrfs_trans_handle *trans = NULL;
4680 int compressed;
4682 again:
4683 read_lock(&em_tree->lock);
4684 em = lookup_extent_mapping(em_tree, start, len);
4685 if (em)
4686 em->bdev = root->fs_info->fs_devices->latest_bdev;
4687 read_unlock(&em_tree->lock);
4689 if (em) {
4690 if (em->start > start || em->start + em->len <= start)
4691 free_extent_map(em);
4692 else if (em->block_start == EXTENT_MAP_INLINE && page)
4693 free_extent_map(em);
4694 else
4695 goto out;
4697 em = alloc_extent_map(GFP_NOFS);
4698 if (!em) {
4699 err = -ENOMEM;
4700 goto out;
4702 em->bdev = root->fs_info->fs_devices->latest_bdev;
4703 em->start = EXTENT_MAP_HOLE;
4704 em->orig_start = EXTENT_MAP_HOLE;
4705 em->len = (u64)-1;
4706 em->block_len = (u64)-1;
4708 if (!path) {
4709 path = btrfs_alloc_path();
4710 BUG_ON(!path);
4713 ret = btrfs_lookup_file_extent(trans, root, path,
4714 objectid, start, trans != NULL);
4715 if (ret < 0) {
4716 err = ret;
4717 goto out;
4720 if (ret != 0) {
4721 if (path->slots[0] == 0)
4722 goto not_found;
4723 path->slots[0]--;
4726 leaf = path->nodes[0];
4727 item = btrfs_item_ptr(leaf, path->slots[0],
4728 struct btrfs_file_extent_item);
4729 /* are we inside the extent that was found? */
4730 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4731 found_type = btrfs_key_type(&found_key);
4732 if (found_key.objectid != objectid ||
4733 found_type != BTRFS_EXTENT_DATA_KEY) {
4734 goto not_found;
4737 found_type = btrfs_file_extent_type(leaf, item);
4738 extent_start = found_key.offset;
4739 compressed = btrfs_file_extent_compression(leaf, item);
4740 if (found_type == BTRFS_FILE_EXTENT_REG ||
4741 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4742 extent_end = extent_start +
4743 btrfs_file_extent_num_bytes(leaf, item);
4744 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4745 size_t size;
4746 size = btrfs_file_extent_inline_len(leaf, item);
4747 extent_end = (extent_start + size + root->sectorsize - 1) &
4748 ~((u64)root->sectorsize - 1);
4751 if (start >= extent_end) {
4752 path->slots[0]++;
4753 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4754 ret = btrfs_next_leaf(root, path);
4755 if (ret < 0) {
4756 err = ret;
4757 goto out;
4759 if (ret > 0)
4760 goto not_found;
4761 leaf = path->nodes[0];
4763 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4764 if (found_key.objectid != objectid ||
4765 found_key.type != BTRFS_EXTENT_DATA_KEY)
4766 goto not_found;
4767 if (start + len <= found_key.offset)
4768 goto not_found;
4769 em->start = start;
4770 em->len = found_key.offset - start;
4771 goto not_found_em;
4774 if (found_type == BTRFS_FILE_EXTENT_REG ||
4775 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4776 em->start = extent_start;
4777 em->len = extent_end - extent_start;
4778 em->orig_start = extent_start -
4779 btrfs_file_extent_offset(leaf, item);
4780 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4781 if (bytenr == 0) {
4782 em->block_start = EXTENT_MAP_HOLE;
4783 goto insert;
4785 if (compressed) {
4786 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4787 em->block_start = bytenr;
4788 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4789 item);
4790 } else {
4791 bytenr += btrfs_file_extent_offset(leaf, item);
4792 em->block_start = bytenr;
4793 em->block_len = em->len;
4794 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4795 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4797 goto insert;
4798 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4799 unsigned long ptr;
4800 char *map;
4801 size_t size;
4802 size_t extent_offset;
4803 size_t copy_size;
4805 em->block_start = EXTENT_MAP_INLINE;
4806 if (!page || create) {
4807 em->start = extent_start;
4808 em->len = extent_end - extent_start;
4809 goto out;
4812 size = btrfs_file_extent_inline_len(leaf, item);
4813 extent_offset = page_offset(page) + pg_offset - extent_start;
4814 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4815 size - extent_offset);
4816 em->start = extent_start + extent_offset;
4817 em->len = (copy_size + root->sectorsize - 1) &
4818 ~((u64)root->sectorsize - 1);
4819 em->orig_start = EXTENT_MAP_INLINE;
4820 if (compressed)
4821 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4822 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4823 if (create == 0 && !PageUptodate(page)) {
4824 if (btrfs_file_extent_compression(leaf, item) ==
4825 BTRFS_COMPRESS_ZLIB) {
4826 ret = uncompress_inline(path, inode, page,
4827 pg_offset,
4828 extent_offset, item);
4829 BUG_ON(ret);
4830 } else {
4831 map = kmap(page);
4832 read_extent_buffer(leaf, map + pg_offset, ptr,
4833 copy_size);
4834 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4835 memset(map + pg_offset + copy_size, 0,
4836 PAGE_CACHE_SIZE - pg_offset -
4837 copy_size);
4839 kunmap(page);
4841 flush_dcache_page(page);
4842 } else if (create && PageUptodate(page)) {
4843 if (!trans) {
4844 kunmap(page);
4845 free_extent_map(em);
4846 em = NULL;
4847 btrfs_release_path(root, path);
4848 trans = btrfs_join_transaction(root, 1);
4849 goto again;
4851 map = kmap(page);
4852 write_extent_buffer(leaf, map + pg_offset, ptr,
4853 copy_size);
4854 kunmap(page);
4855 btrfs_mark_buffer_dirty(leaf);
4857 set_extent_uptodate(io_tree, em->start,
4858 extent_map_end(em) - 1, GFP_NOFS);
4859 goto insert;
4860 } else {
4861 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4862 WARN_ON(1);
4864 not_found:
4865 em->start = start;
4866 em->len = len;
4867 not_found_em:
4868 em->block_start = EXTENT_MAP_HOLE;
4869 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4870 insert:
4871 btrfs_release_path(root, path);
4872 if (em->start > start || extent_map_end(em) <= start) {
4873 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4874 "[%llu %llu]\n", (unsigned long long)em->start,
4875 (unsigned long long)em->len,
4876 (unsigned long long)start,
4877 (unsigned long long)len);
4878 err = -EIO;
4879 goto out;
4882 err = 0;
4883 write_lock(&em_tree->lock);
4884 ret = add_extent_mapping(em_tree, em);
4885 /* it is possible that someone inserted the extent into the tree
4886 * while we had the lock dropped. It is also possible that
4887 * an overlapping map exists in the tree
4889 if (ret == -EEXIST) {
4890 struct extent_map *existing;
4892 ret = 0;
4894 existing = lookup_extent_mapping(em_tree, start, len);
4895 if (existing && (existing->start > start ||
4896 existing->start + existing->len <= start)) {
4897 free_extent_map(existing);
4898 existing = NULL;
4900 if (!existing) {
4901 existing = lookup_extent_mapping(em_tree, em->start,
4902 em->len);
4903 if (existing) {
4904 err = merge_extent_mapping(em_tree, existing,
4905 em, start,
4906 root->sectorsize);
4907 free_extent_map(existing);
4908 if (err) {
4909 free_extent_map(em);
4910 em = NULL;
4912 } else {
4913 err = -EIO;
4914 free_extent_map(em);
4915 em = NULL;
4917 } else {
4918 free_extent_map(em);
4919 em = existing;
4920 err = 0;
4923 write_unlock(&em_tree->lock);
4924 out:
4925 if (path)
4926 btrfs_free_path(path);
4927 if (trans) {
4928 ret = btrfs_end_transaction(trans, root);
4929 if (!err)
4930 err = ret;
4932 if (err) {
4933 free_extent_map(em);
4934 return ERR_PTR(err);
4936 return em;
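/*
 * Typical use of btrfs_get_extent() (sketch): the address_space ops
 * below pass it to the generic extent_io code as the get_extent
 * callback, e.g.:
 *
 *	extent_read_full_page(tree, page, btrfs_get_extent);
 *
 * A hit in the extent_map tree returns straight from the cache; a miss
 * falls through to the btree search above and the result is cached for
 * the next reader.
 */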
4939 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4940 const struct iovec *iov, loff_t offset,
4941 unsigned long nr_segs)
4943 return -EINVAL;
4946 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4947 __u64 start, __u64 len)
4949 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4952 int btrfs_readpage(struct file *file, struct page *page)
4954 struct extent_io_tree *tree;
4955 tree = &BTRFS_I(page->mapping->host)->io_tree;
4956 return extent_read_full_page(tree, page, btrfs_get_extent);
4959 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4961 struct extent_io_tree *tree;
4964 if (current->flags & PF_MEMALLOC) {
4965 redirty_page_for_writepage(wbc, page);
4966 unlock_page(page);
4967 return 0;
4969 tree = &BTRFS_I(page->mapping->host)->io_tree;
4970 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4973 int btrfs_writepages(struct address_space *mapping,
4974 struct writeback_control *wbc)
4976 struct extent_io_tree *tree;
4978 tree = &BTRFS_I(mapping->host)->io_tree;
4979 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4982 static int
4983 btrfs_readpages(struct file *file, struct address_space *mapping,
4984 struct list_head *pages, unsigned nr_pages)
4986 struct extent_io_tree *tree;
4987 tree = &BTRFS_I(mapping->host)->io_tree;
4988 return extent_readpages(tree, mapping, pages, nr_pages,
4989 btrfs_get_extent);
4991 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4993 struct extent_io_tree *tree;
4994 struct extent_map_tree *map;
4995 int ret;
4997 tree = &BTRFS_I(page->mapping->host)->io_tree;
4998 map = &BTRFS_I(page->mapping->host)->extent_tree;
4999 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
5000 if (ret == 1) {
5001 ClearPagePrivate(page);
5002 set_page_private(page, 0);
5003 page_cache_release(page);
5005 return ret;
5008 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
5010 if (PageWriteback(page) || PageDirty(page))
5011 return 0;
5012 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
5015 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
5017 struct extent_io_tree *tree;
5018 struct btrfs_ordered_extent *ordered;
5019 u64 page_start = page_offset(page);
5020 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
5024 * we have the page locked, so new writeback can't start,
5025 * and the dirty bit won't be cleared while we are here.
5027 * Wait for IO on this page so that we can safely clear
5028 * the PagePrivate2 bit and do ordered accounting
5030 wait_on_page_writeback(page);
5032 tree = &BTRFS_I(page->mapping->host)->io_tree;
5033 if (offset) {
5034 btrfs_releasepage(page, GFP_NOFS);
5035 return;
5037 lock_extent(tree, page_start, page_end, GFP_NOFS);
5038 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
5039 page_offset(page));
5040 if (ordered) {
5042 * IO on this page will never be started, so we need
5043 * to account for any ordered extents now
5045 clear_extent_bit(tree, page_start, page_end,
5046 EXTENT_DIRTY | EXTENT_DELALLOC |
5047 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
5048 NULL, GFP_NOFS);
5050 * whoever cleared the private bit is responsible
5051 * for the finish_ordered_io
5053 if (TestClearPagePrivate2(page)) {
5054 btrfs_finish_ordered_io(page->mapping->host,
5055 page_start, page_end);
5057 btrfs_put_ordered_extent(ordered);
5058 lock_extent(tree, page_start, page_end, GFP_NOFS);
5060 clear_extent_bit(tree, page_start, page_end,
5061 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
5062 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
5063 __btrfs_releasepage(page, GFP_NOFS);
5065 ClearPageChecked(page);
5066 if (PagePrivate(page)) {
5067 ClearPagePrivate(page);
5068 set_page_private(page, 0);
5069 page_cache_release(page);
5074 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
5075 * called from a page fault handler when a page is first dirtied. Hence we must
5076 * be careful to check for EOF conditions here. We set the page up correctly
5077 * for a written page which means we get ENOSPC checking when writing into
5078 * holes and correct delalloc and unwritten extent mapping on filesystems that
5079 * support these features.
5081 * We are not allowed to take the i_mutex here so we have to play games to
5082 * protect against truncate races as the page could now be beyond EOF. Because
5083 * vmtruncate() writes the inode size before removing pages, once we have the
5084 * page lock we can determine safely if the page is beyond EOF. If it is not
5085 * beyond EOF, then the page is guaranteed safe against truncation until we
5086 * unlock the page.
5088 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5090 struct page *page = vmf->page;
5091 struct inode *inode = fdentry(vma->vm_file)->d_inode;
5092 struct btrfs_root *root = BTRFS_I(inode)->root;
5093 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5094 struct btrfs_ordered_extent *ordered;
5095 char *kaddr;
5096 unsigned long zero_start;
5097 loff_t size;
5098 int ret;
5099 u64 page_start;
5100 u64 page_end;
5102 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
5103 if (ret) {
5104 if (ret == -ENOMEM)
5105 ret = VM_FAULT_OOM;
5106 else /* -ENOSPC, -EIO, etc */
5107 ret = VM_FAULT_SIGBUS;
5108 goto out;
5111 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
5112 if (ret) {
5113 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5114 ret = VM_FAULT_SIGBUS;
5115 goto out;
5118 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
5119 again:
5120 lock_page(page);
5121 size = i_size_read(inode);
5122 page_start = page_offset(page);
5123 page_end = page_start + PAGE_CACHE_SIZE - 1;
5125 if ((page->mapping != inode->i_mapping) ||
5126 (page_start >= size)) {
5127 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5128 /* page got truncated out from underneath us */
5129 goto out_unlock;
5131 wait_on_page_writeback(page);
5133 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5134 set_page_extent_mapped(page);
5137 * we can't set the delalloc bits if there are pending ordered
5138 * extents. Drop our locks and wait for them to finish
5140 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5141 if (ordered) {
5142 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5143 unlock_page(page);
5144 btrfs_start_ordered_extent(inode, ordered, 1);
5145 btrfs_put_ordered_extent(ordered);
5146 goto again;
5150 * XXX - page_mkwrite gets called every time the page is dirtied, even
5151 * if it was already dirty, so for space accounting reasons we need to
5152 * clear any delalloc bits for the range we are about to save. There
5153 * is probably a better way to do this, but for now keep things
5154 * consistent with prepare_pages in the normal write path.
5156 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
5157 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5158 GFP_NOFS);
5160 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
5161 if (ret) {
5162 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5163 ret = VM_FAULT_SIGBUS;
5164 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5165 goto out_unlock;
5167 ret = 0;
5169 /* page is wholly or partially inside EOF */
5170 if (page_start + PAGE_CACHE_SIZE > size)
5171 zero_start = size & ~PAGE_CACHE_MASK;
5172 else
5173 zero_start = PAGE_CACHE_SIZE;
5175 if (zero_start != PAGE_CACHE_SIZE) {
5176 kaddr = kmap(page);
5177 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
5178 flush_dcache_page(page);
5179 kunmap(page);
5181 ClearPageChecked(page);
5182 set_page_dirty(page);
5183 SetPageUptodate(page);
5185 BTRFS_I(inode)->last_trans = root->fs_info->generation;
5186 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5188 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5190 out_unlock:
5191 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
5192 if (!ret)
5193 return VM_FAULT_LOCKED;
5194 unlock_page(page);
5195 out:
5196 return ret;
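/*
 * Worked example for the zeroing in btrfs_page_mkwrite() above,
 * assuming 4k pages: with i_size == 5000 and page_start == 4096 the
 * page straddles EOF, so zero_start = 5000 & ~PAGE_CACHE_MASK = 904
 * and bytes [904, 4096) are cleared before the page is dirtied.
 */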
5199 static void btrfs_truncate(struct inode *inode)
5201 struct btrfs_root *root = BTRFS_I(inode)->root;
5202 int ret;
5203 struct btrfs_trans_handle *trans;
5204 unsigned long nr;
5205 u64 mask = root->sectorsize - 1;
5207 if (!S_ISREG(inode->i_mode)) {
5208 WARN_ON(1);
5209 return;
5212 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
5213 if (ret)
5214 return;
5216 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
5217 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
5219 trans = btrfs_start_transaction(root, 1);
5220 btrfs_set_trans_block_group(trans, inode);
5223 * setattr is responsible for setting the ordered_data_close flag,
5224 * but that is only tested during the last file release. That
5225 * could happen well after the next commit, leaving a great big
5226 * window where new writes may get lost if someone chooses to write
5227 * to this file after truncating to zero
5229 * The inode doesn't have any dirty data here, and so if we commit
5230 * this is a noop. If someone immediately starts writing to the inode
5231 * it is very likely we'll catch some of their writes in this
5232 * transaction, and the commit will find this file on the ordered
5233 * data list with good things to send down.
5235 * This is a best effort solution, there is still a window where
5236 * using truncate to replace the contents of the file will
5237 * end up with a zero length file after a crash.
5239 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
5240 btrfs_add_ordered_operation(trans, root, inode);
5242 while (1) {
5243 ret = btrfs_truncate_inode_items(trans, root, inode,
5244 inode->i_size,
5245 BTRFS_EXTENT_DATA_KEY);
5246 if (ret != -EAGAIN)
5247 break;
5249 ret = btrfs_update_inode(trans, root, inode);
5250 BUG_ON(ret);
5252 nr = trans->blocks_used;
5253 btrfs_end_transaction(trans, root);
5254 btrfs_btree_balance_dirty(root, nr);
5256 trans = btrfs_start_transaction(root, 1);
5257 btrfs_set_trans_block_group(trans, inode);
5260 if (ret == 0 && inode->i_nlink > 0) {
5261 ret = btrfs_orphan_del(trans, inode);
5262 BUG_ON(ret);
5265 ret = btrfs_update_inode(trans, root, inode);
5266 BUG_ON(ret);
5268 nr = trans->blocks_used;
5269 ret = btrfs_end_transaction_throttle(trans, root);
5270 BUG_ON(ret);
5271 btrfs_btree_balance_dirty(root, nr);
5275 * create a new subvolume directory/inode (helper for the ioctl).
5277 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
5278 struct btrfs_root *new_root,
5279 u64 new_dirid, u64 alloc_hint)
5281 struct inode *inode;
5282 int err;
5283 u64 index = 0;
5285 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
5286 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
5287 if (IS_ERR(inode))
5288 return PTR_ERR(inode);
5289 inode->i_op = &btrfs_dir_inode_operations;
5290 inode->i_fop = &btrfs_dir_file_operations;
5292 inode->i_nlink = 1;
5293 btrfs_i_size_write(inode, 0);
5295 err = btrfs_update_inode(trans, new_root, inode);
5296 BUG_ON(err);
5298 iput(inode);
5299 return 0;
5302 /* helper function for file defrag and space balancing. This
5303 * forces readahead on a given range of bytes in an inode
5305 unsigned long btrfs_force_ra(struct address_space *mapping,
5306 struct file_ra_state *ra, struct file *file,
5307 pgoff_t offset, pgoff_t last_index)
5309 pgoff_t req_size = last_index - offset + 1;
5311 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
5312 return offset + req_size;
5315 struct inode *btrfs_alloc_inode(struct super_block *sb)
5317 struct btrfs_inode *ei;
5319 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5320 if (!ei)
5321 return NULL;
5322 ei->last_trans = 0;
5323 ei->last_sub_trans = 0;
5324 ei->logged_trans = 0;
5325 ei->outstanding_extents = 0;
5326 ei->reserved_extents = 0;
5327 ei->root = NULL;
5328 spin_lock_init(&ei->accounting_lock);
5329 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5330 INIT_LIST_HEAD(&ei->i_orphan);
5331 INIT_LIST_HEAD(&ei->ordered_operations);
5332 return &ei->vfs_inode;
5335 void btrfs_destroy_inode(struct inode *inode)
5337 struct btrfs_ordered_extent *ordered;
5338 struct btrfs_root *root = BTRFS_I(inode)->root;
5340 WARN_ON(!list_empty(&inode->i_dentry));
5341 WARN_ON(inode->i_data.nrpages);
5344 * This can happen when we create an inode, but somebody else also
5345 * created the same inode and we need to destroy the one we already
5346 * created.
5348 if (!root)
5349 goto free;
5352 * Make sure we're properly removed from the ordered operation
5353 * lists.
5355 smp_mb();
5356 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
5357 spin_lock(&root->fs_info->ordered_extent_lock);
5358 list_del_init(&BTRFS_I(inode)->ordered_operations);
5359 spin_unlock(&root->fs_info->ordered_extent_lock);
5362 spin_lock(&root->list_lock);
5363 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5364 printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
5365 inode->i_ino);
5366 list_del_init(&BTRFS_I(inode)->i_orphan);
5368 spin_unlock(&root->list_lock);
5370 while (1) {
5371 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
5372 if (!ordered)
5373 break;
5374 else {
5375 printk(KERN_ERR "btrfs found ordered "
5376 "extent %llu %llu on inode cleanup\n",
5377 (unsigned long long)ordered->file_offset,
5378 (unsigned long long)ordered->len);
5379 btrfs_remove_ordered_extent(inode, ordered);
5380 btrfs_put_ordered_extent(ordered);
5381 btrfs_put_ordered_extent(ordered);
5384 inode_tree_del(inode);
5385 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5386 free:
5387 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5390 void btrfs_drop_inode(struct inode *inode)
5392 struct btrfs_root *root = BTRFS_I(inode)->root;
5394 if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
5395 generic_delete_inode(inode);
5396 else
5397 generic_drop_inode(inode);
5400 static void init_once(void *foo)
5402 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
5404 inode_init_once(&ei->vfs_inode);
5407 void btrfs_destroy_cachep(void)
5409 if (btrfs_inode_cachep)
5410 kmem_cache_destroy(btrfs_inode_cachep);
5411 if (btrfs_trans_handle_cachep)
5412 kmem_cache_destroy(btrfs_trans_handle_cachep);
5413 if (btrfs_transaction_cachep)
5414 kmem_cache_destroy(btrfs_transaction_cachep);
5415 if (btrfs_path_cachep)
5416 kmem_cache_destroy(btrfs_path_cachep);
5419 int btrfs_init_cachep(void)
5421 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
5422 sizeof(struct btrfs_inode), 0,
5423 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
5424 if (!btrfs_inode_cachep)
5425 goto fail;
5427 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
5428 sizeof(struct btrfs_trans_handle), 0,
5429 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5430 if (!btrfs_trans_handle_cachep)
5431 goto fail;
5433 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
5434 sizeof(struct btrfs_transaction), 0,
5435 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5436 if (!btrfs_transaction_cachep)
5437 goto fail;
5439 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
5440 sizeof(struct btrfs_path), 0,
5441 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5442 if (!btrfs_path_cachep)
5443 goto fail;
5445 return 0;
5446 fail:
5447 btrfs_destroy_cachep();
5448 return -ENOMEM;
5451 static int btrfs_getattr(struct vfsmount *mnt,
5452 struct dentry *dentry, struct kstat *stat)
5454 struct inode *inode = dentry->d_inode;
5455 generic_fillattr(inode, stat);
5456 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
5457 stat->blksize = PAGE_CACHE_SIZE;
5458 stat->blocks = (inode_get_bytes(inode) +
5459 BTRFS_I(inode)->delalloc_bytes) >> 9;
5460 return 0;
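/*
 * Example of the blocks math above: an inode with 1 MiB on disk plus
 * 128 KiB of pending delalloc reports (1048576 + 131072) >> 9 == 2304
 * 512-byte blocks, so stat() reflects writes that haven't hit disk yet.
 */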
5463 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5464 struct inode *new_dir, struct dentry *new_dentry)
5466 struct btrfs_trans_handle *trans;
5467 struct btrfs_root *root = BTRFS_I(old_dir)->root;
5468 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
5469 struct inode *new_inode = new_dentry->d_inode;
5470 struct inode *old_inode = old_dentry->d_inode;
5471 struct timespec ctime = CURRENT_TIME;
5472 u64 index = 0;
5473 u64 root_objectid;
5474 int ret;
5476 if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5477 return -EPERM;
5479 /* we only allow rename subvolume link between subvolumes */
5480 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
5481 return -EXDEV;
5483 if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
5484 (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
5485 return -ENOTEMPTY;
5487 if (S_ISDIR(old_inode->i_mode) && new_inode &&
5488 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5489 return -ENOTEMPTY;
5492 * We want to reserve the absolute worst case amount of items. So if
5493 * both inodes are subvols and we need to unlink them then that would
5494 * require 4 item modifications, but if they are both normal inodes it
5495 * would require 5 item modifications, so we'll assume they're normal
5496 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
5497 * should cover the worst case number of items we'll modify.
5499 ret = btrfs_reserve_metadata_space(root, 11);
5500 if (ret)
5501 return ret;
5504 * we're using rename to replace one file with another, and
5505 * the replacement file is large. Start IO on it now so
5506 * we don't add too much work to the end of the transaction
5508 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5509 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
5510 filemap_flush(old_inode->i_mapping);
5512 /* close the racy window with snapshot create/destroy ioctl */
5513 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5514 down_read(&root->fs_info->subvol_sem);
5516 trans = btrfs_start_transaction(root, 1);
5517 btrfs_set_trans_block_group(trans, new_dir);
5519 if (dest != root)
5520 btrfs_record_root_in_trans(trans, dest);
5522 ret = btrfs_set_inode_index(new_dir, &index);
5523 if (ret)
5524 goto out_fail;
5526 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5527 /* force full log commit if subvolume involved. */
5528 root->fs_info->last_trans_log_full_commit = trans->transid;
5529 } else {
5530 ret = btrfs_insert_inode_ref(trans, dest,
5531 new_dentry->d_name.name,
5532 new_dentry->d_name.len,
5533 old_inode->i_ino,
5534 new_dir->i_ino, index);
5535 if (ret)
5536 goto out_fail;
5538 * this is an ugly little race, but the rename is required
5539 * to make sure that if we crash, the inode is either at the
5540 * old name or the new one. pinning the log transaction lets
5541 * us make sure we don't allow a log commit to come in after
5542 * we unlink the name but before we add the new name back in.
5544 btrfs_pin_log_trans(root);
5547 * make sure the inode gets flushed if it is replacing
5548 * something.
5550 if (new_inode && new_inode->i_size &&
5551 old_inode && S_ISREG(old_inode->i_mode)) {
5552 btrfs_add_ordered_operation(trans, root, old_inode);
5555 old_dir->i_ctime = old_dir->i_mtime = ctime;
5556 new_dir->i_ctime = new_dir->i_mtime = ctime;
5557 old_inode->i_ctime = ctime;
5559 if (old_dentry->d_parent != new_dentry->d_parent)
5560 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
5562 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5563 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
5564 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
5565 old_dentry->d_name.name,
5566 old_dentry->d_name.len);
5567 } else {
5568 btrfs_inc_nlink(old_dentry->d_inode);
5569 ret = btrfs_unlink_inode(trans, root, old_dir,
5570 old_dentry->d_inode,
5571 old_dentry->d_name.name,
5572 old_dentry->d_name.len);
5574 BUG_ON(ret);
5576 if (new_inode) {
5577 new_inode->i_ctime = CURRENT_TIME;
5578 if (unlikely(new_inode->i_ino ==
5579 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
5580 root_objectid = BTRFS_I(new_inode)->location.objectid;
5581 ret = btrfs_unlink_subvol(trans, dest, new_dir,
5582 root_objectid,
5583 new_dentry->d_name.name,
5584 new_dentry->d_name.len);
5585 BUG_ON(new_inode->i_nlink == 0);
5586 } else {
5587 ret = btrfs_unlink_inode(trans, dest, new_dir,
5588 new_dentry->d_inode,
5589 new_dentry->d_name.name,
5590 new_dentry->d_name.len);
5592 BUG_ON(ret);
5593 if (new_inode->i_nlink == 0) {
5594 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
5595 BUG_ON(ret);
5599 ret = btrfs_add_link(trans, new_dir, old_inode,
5600 new_dentry->d_name.name,
5601 new_dentry->d_name.len, 0, index);
5602 BUG_ON(ret);
5604 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
5605 btrfs_log_new_name(trans, old_inode, old_dir,
5606 new_dentry->d_parent);
5607 btrfs_end_log_trans(root);
5609 out_fail:
5610 btrfs_end_transaction_throttle(trans, root);
5612 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5613 up_read(&root->fs_info->subvol_sem);
5615 btrfs_unreserve_metadata_space(root, 11);
5616 return ret;
5620 * some fairly slow code that needs optimization. This walks the list
5621 * of all the inodes with pending delalloc and forces them to disk.
5623 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
5625 struct list_head *head = &root->fs_info->delalloc_inodes;
5626 struct btrfs_inode *binode;
5627 struct inode *inode;
5629 if (root->fs_info->sb->s_flags & MS_RDONLY)
5630 return -EROFS;
5632 spin_lock(&root->fs_info->delalloc_lock);
5633 while (!list_empty(head)) {
5634 binode = list_entry(head->next, struct btrfs_inode,
5635 delalloc_inodes);
5636 inode = igrab(&binode->vfs_inode);
5637 if (!inode)
5638 list_del_init(&binode->delalloc_inodes);
5639 spin_unlock(&root->fs_info->delalloc_lock);
5640 if (inode) {
5641 filemap_flush(inode->i_mapping);
5642 if (delay_iput)
5643 btrfs_add_delayed_iput(inode);
5644 else
5645 iput(inode);
5647 cond_resched();
5648 spin_lock(&root->fs_info->delalloc_lock);
5650 spin_unlock(&root->fs_info->delalloc_lock);
5652 /* the filemap_flush will queue IO into the worker threads, but
5653 * we have to make sure the IO is actually started and that
5654 * ordered extents get created before we return
5656 atomic_inc(&root->fs_info->async_submit_draining);
5657 while (atomic_read(&root->fs_info->nr_async_submits) ||
5658 atomic_read(&root->fs_info->async_delalloc_pages)) {
5659 wait_event(root->fs_info->async_submit_wait,
5660 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
5661 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
5663 atomic_dec(&root->fs_info->async_submit_draining);
5664 return 0;
5667 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5668 const char *symname)
5670 struct btrfs_trans_handle *trans;
5671 struct btrfs_root *root = BTRFS_I(dir)->root;
5672 struct btrfs_path *path;
5673 struct btrfs_key key;
5674 struct inode *inode = NULL;
5675 int err;
5676 int drop_inode = 0;
5677 u64 objectid;
5678 u64 index = 0;
5679 int name_len;
5680 int datasize;
5681 unsigned long ptr;
5682 struct btrfs_file_extent_item *ei;
5683 struct extent_buffer *leaf;
5684 unsigned long nr = 0;
5686 name_len = strlen(symname) + 1;
5687 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5688 return -ENAMETOOLONG;
5691 * 2 items for inode item and ref
5692 * 2 items for dir items
5693 * 1 item for xattr if selinux is on
5695 err = btrfs_reserve_metadata_space(root, 5);
5696 if (err)
5697 return err;
5699 trans = btrfs_start_transaction(root, 1);
5700 if (!trans)
5701 goto out_fail;
5702 btrfs_set_trans_block_group(trans, dir);
5704 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
5705 if (err) {
5706 err = -ENOSPC;
5707 goto out_unlock;
5710 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5711 dentry->d_name.len,
5712 dentry->d_parent->d_inode->i_ino, objectid,
5713 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
5714 &index);
5715 err = PTR_ERR(inode);
5716 if (IS_ERR(inode))
5717 goto out_unlock;
5719 err = btrfs_init_inode_security(trans, inode, dir);
5720 if (err) {
5721 drop_inode = 1;
5722 goto out_unlock;
5725 btrfs_set_trans_block_group(trans, inode);
5726 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
5727 if (err)
5728 drop_inode = 1;
5729 else {
5730 inode->i_mapping->a_ops = &btrfs_aops;
5731 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5732 inode->i_fop = &btrfs_file_operations;
5733 inode->i_op = &btrfs_file_inode_operations;
5734 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5736 btrfs_update_inode_block_group(trans, inode);
5737 btrfs_update_inode_block_group(trans, dir);
5738 if (drop_inode)
5739 goto out_unlock;
5741 path = btrfs_alloc_path();
5742 BUG_ON(!path);
5743 key.objectid = inode->i_ino;
5744 key.offset = 0;
5745 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5746 datasize = btrfs_file_extent_calc_inline_size(name_len);
5747 err = btrfs_insert_empty_item(trans, root, path, &key,
5748 datasize);
5749 if (err) {
5750 drop_inode = 1;
5751 goto out_unlock;
5753 leaf = path->nodes[0];
5754 ei = btrfs_item_ptr(leaf, path->slots[0],
5755 struct btrfs_file_extent_item);
5756 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5757 btrfs_set_file_extent_type(leaf, ei,
5758 BTRFS_FILE_EXTENT_INLINE);
5759 btrfs_set_file_extent_encryption(leaf, ei, 0);
5760 btrfs_set_file_extent_compression(leaf, ei, 0);
5761 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5762 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5764 ptr = btrfs_file_extent_inline_start(ei);
5765 write_extent_buffer(leaf, symname, ptr, name_len);
5766 btrfs_mark_buffer_dirty(leaf);
5767 btrfs_free_path(path);
5769 inode->i_op = &btrfs_symlink_inode_operations;
5770 inode->i_mapping->a_ops = &btrfs_symlink_aops;
5771 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5772 inode_set_bytes(inode, name_len);
5773 btrfs_i_size_write(inode, name_len - 1);
5774 err = btrfs_update_inode(trans, root, inode);
5775 if (err)
5776 drop_inode = 1;
5778 out_unlock:
5779 nr = trans->blocks_used;
5780 btrfs_end_transaction_throttle(trans, root);
5781 out_fail:
5782 btrfs_unreserve_metadata_space(root, 5);
5783 if (drop_inode) {
5784 inode_dec_link_count(inode);
5785 iput(inode);
5787 btrfs_btree_balance_dirty(root, nr);
5788 return err;
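/*
 * Size bookkeeping sketch for btrfs_symlink() above: for
 * symname "target", name_len = strlen() + 1 = 7, so the inline extent
 * stores the trailing NUL, while i_size is set to name_len - 1 = 6,
 * the length readlink(2) reports.
 */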
5791 static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
5792 u64 alloc_hint, int mode)
5794 struct btrfs_trans_handle *trans;
5795 struct btrfs_root *root = BTRFS_I(inode)->root;
5796 struct btrfs_key ins;
5797 u64 alloc_size;
5798 u64 cur_offset = start;
5799 u64 num_bytes = end - start;
5800 int ret = 0;
5802 while (num_bytes > 0) {
5803 alloc_size = min(num_bytes, root->fs_info->max_extent);
5805 trans = btrfs_start_transaction(root, 1);
5807 ret = btrfs_reserve_extent(trans, root, alloc_size,
5808 root->sectorsize, 0, alloc_hint,
5809 (u64)-1, &ins, 1);
5810 if (ret) {
5811 WARN_ON(1);
5812 goto stop_trans;
5815 ret = btrfs_reserve_metadata_space(root, 3);
5816 if (ret) {
5817 btrfs_free_reserved_extent(root, ins.objectid,
5818 ins.offset);
5819 goto stop_trans;
5822 ret = insert_reserved_file_extent(trans, inode,
5823 cur_offset, ins.objectid,
5824 ins.offset, ins.offset,
5825 ins.offset, 0, 0, 0,
5826 BTRFS_FILE_EXTENT_PREALLOC);
5827 BUG_ON(ret);
5828 btrfs_drop_extent_cache(inode, cur_offset,
5829 cur_offset + ins.offset - 1, 0);
5831 num_bytes -= ins.offset;
5832 cur_offset += ins.offset;
5833 alloc_hint = ins.objectid + ins.offset;
5835 inode->i_ctime = CURRENT_TIME;
5836 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5837 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5838 cur_offset > inode->i_size) {
5839 i_size_write(inode, cur_offset);
5840 btrfs_ordered_update_i_size(inode, cur_offset, NULL);
5843 ret = btrfs_update_inode(trans, root, inode);
5844 BUG_ON(ret);
5846 btrfs_end_transaction(trans, root);
5847 btrfs_unreserve_metadata_space(root, 3);
5849 return ret;
5851 stop_trans:
5852 btrfs_end_transaction(trans, root);
5853 return ret;
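/*
 * Allocation-loop sketch for prealloc_file_range() above, with made-up
 * numbers: preallocating 384k with fs_info->max_extent at 128k takes
 * three passes (assuming each reservation comes back full-sized), each
 * inserting a PREALLOC file extent at cur_offset and advancing
 * alloc_hint to the end of the fresh extent.
 */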
5857 static long btrfs_fallocate(struct inode *inode, int mode,
5858 loff_t offset, loff_t len)
5860 u64 cur_offset;
5861 u64 last_byte;
5862 u64 alloc_start;
5863 u64 alloc_end;
5864 u64 alloc_hint = 0;
5865 u64 locked_end;
5866 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5867 struct extent_map *em;
5868 int ret;
5870 alloc_start = offset & ~mask;
5871 alloc_end = (offset + len + mask) & ~mask;
5874 * wait for ordered IO before we have any locks. We'll loop again
5875 * below with the locks held.
5877 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5879 mutex_lock(&inode->i_mutex);
5880 if (alloc_start > inode->i_size) {
5881 ret = btrfs_cont_expand(inode, alloc_start);
5882 if (ret)
5883 goto out;
5886 ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
5887 alloc_end - alloc_start);
5888 if (ret)
5889 goto out;
5891 locked_end = alloc_end - 1;
5892 while (1) {
5893 struct btrfs_ordered_extent *ordered;
5895 /* the extent lock is ordered inside the running
5896 * transaction
5898 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5899 GFP_NOFS);
5900 ordered = btrfs_lookup_first_ordered_extent(inode,
5901 alloc_end - 1);
5902 if (ordered &&
5903 ordered->file_offset + ordered->len > alloc_start &&
5904 ordered->file_offset < alloc_end) {
5905 btrfs_put_ordered_extent(ordered);
5906 unlock_extent(&BTRFS_I(inode)->io_tree,
5907 alloc_start, locked_end, GFP_NOFS);
5909 * we can't wait on the range with the transaction
5910 * running or with the extent lock held
5912 btrfs_wait_ordered_range(inode, alloc_start,
5913 alloc_end - alloc_start);
5914 } else {
5915 if (ordered)
5916 btrfs_put_ordered_extent(ordered);
5917 break;

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = prealloc_file_range(inode,
						  cur_offset, last_byte,
						  alloc_hint, mode);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
			alloc_hint = em->block_start;
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
		      GFP_NOFS);

	btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
				       alloc_end - alloc_start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
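
/*
 * Illustrative userspace trigger for the path above (example only; any
 * fallocate(2)-aware program will do):
 *
 *	int fd = open("/mnt/btrfs/file", O_RDWR | O_CREAT, 0644);
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 * FALLOC_FL_KEEP_SIZE reserves the first 1MB of the file without
 * changing i_size, which is exactly the (mode & FALLOC_FL_KEEP_SIZE)
 * case prealloc_file_range() handles.
 */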

static int btrfs_set_page_dirty(struct page *page)
{
	/* btrfs keeps no buffer heads on its data pages */
	return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask)
{
	/*
	 * Inodes flagged BTRFS_INODE_READONLY refuse MAY_WRITE outright;
	 * everything else goes through the generic mode/ACL checks.
	 */
	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, btrfs_check_acl);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr = btrfs_getattr,
	.lookup = btrfs_lookup,
	.create = btrfs_create,
	.unlink = btrfs_unlink,
	.link = btrfs_link,
	.mkdir = btrfs_mkdir,
	.rmdir = btrfs_rmdir,
	.rename = btrfs_rename,
	.symlink = btrfs_symlink,
	.setattr = btrfs_setattr,
	.mknod = btrfs_mknod,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.permission = btrfs_permission,
};

static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup = btrfs_lookup,
	.permission = btrfs_permission,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.readdir = btrfs_real_readdir,
	.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = btrfs_ioctl,
#endif
	.release = btrfs_release_file,
	.fsync = btrfs_sync_file,
};
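
/*
 * .compat_ioctl points at the native handler: the btrfs ioctl argument
 * structures are laid out identically for 32- and 64-bit userspace, so
 * no translation step is needed for compat callers.
 */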

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and will also change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage = btrfs_readpage,
	.writepage = btrfs_writepage,
	.writepages = btrfs_writepages,
	.readpages = btrfs_readpages,
	.sync_page = block_sync_page,
	.direct_IO = btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage = btrfs_releasepage,
	.set_page_dirty = btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage = btrfs_readpage,
	.writepage = btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage = btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.truncate = btrfs_truncate,
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.permission = btrfs_permission,
	.fallocate = btrfs_fallocate,
	.fiemap = btrfs_fiemap,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink = generic_readlink,
	.follow_link = page_follow_link_light,
	.put_link = page_put_link,
	.permission = btrfs_permission,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete = btrfs_dentry_delete,
};