btrfs: fix btrfs_mkdir goto for no free objectids
fs/btrfs/inode.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;

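/*
 * used to map inode mode bits to the matching BTRFS_FT_* directory
 * entry type stored on disk
 */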
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

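/*
 * set up the security context for a newly created inode: initialize
 * the ACLs and the security xattrs, both inherited from the parent dir
 */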
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	BUG_ON(ret);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (use_compress) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);

	return 0;
fail:
	btrfs_free_path(path);
	return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

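/*
 * queue one compressed range (or an uncompressed fallback range when
 * pages == NULL) on the async_cow work item for phase two to write out
 */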
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 orig_start;
	u64 disk_num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;

	orig_start = start;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in,
						&total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(!trans);
		btrfs_set_trans_block_group(trans, inode);

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed, pages);
		}
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_ACCOUNTING |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			disk_num_bytes = total_compressed;
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret);

		if (start + num_bytes < end && start + num_bytes < actual_end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);

		trans = btrfs_join_transaction(root, 1);
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		btrfs_end_transaction(trans, root);

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1, GFP_NOFS);
			goto retry;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent(inode, async_extent->start,
					       ins.objectid,
					       async_extent->ram_size,
					       ins.offset,
					       BTRFS_ORDERED_COMPRESSED);
		BUG_ON(ret);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	return 0;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	actual_end = min_t(u64, isize, end + 1);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_ACCOUNTING |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	read_lock(&BTRFS_I(inode)->extent_tree.lock);
	em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
				   start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&BTRFS_I(inode)->extent_tree.lock);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

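/*
 * split a delalloc range into 512k chunks, hand each chunk to the
 * delalloc worker threads and throttle the caller once too many async
 * pages are in flight
 */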
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

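/*
 * return 1 if any csum items exist for the given range of disk bytes,
 * 0 if none do.  Used to decide when a nocow write must fall back to cow
 */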
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * the nocow writeback call back.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS) &&
		 !(BTRFS_I(inode)->force_compress))
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}

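/*
 * extent_io.c split_extent_hook.  If splitting a delalloc extent that
 * is larger than max_extent increases the number of max_extent sized
 * pieces, bump outstanding_extents so the metadata reservation stays
 * correct
 */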
static int btrfs_split_extent_hook(struct inode *inode,
				   struct extent_state *orig, u64 split)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 size;

	if (!(orig->state & EXTENT_DELALLOC))
		return 0;

	size = orig->end - orig->start + 1;
	if (size > root->fs_info->max_extent) {
		u64 num_extents;
		u64 new_size;

		new_size = orig->end - split + 1;
		num_extents = div64_u64(size + root->fs_info->max_extent - 1,
					root->fs_info->max_extent);

		/*
		 * if we break a large extent up then leave outstanding_extents
		 * be, since we've already accounted for the large extent.
		 */
		if (div64_u64(new_size + root->fs_info->max_extent - 1,
			      root->fs_info->max_extent) < num_extents)
			return 0;
	}

	spin_lock(&BTRFS_I(inode)->accounting_lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	return 0;
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static int btrfs_merge_extent_hook(struct inode *inode,
				   struct extent_state *new,
				   struct extent_state *other)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 new_size, old_size;
	u64 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return 0;

	old_size = other->end - other->start + 1;
	if (new->start < other->start)
		new_size = other->end - new->start + 1;
	else
		new_size = new->end - other->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= root->fs_info->max_extent) {
		spin_lock(&BTRFS_I(inode)->accounting_lock);
		BTRFS_I(inode)->outstanding_extents--;
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		return 0;
	}

	/*
	 * If we grew by another max_extent, just return, we want to keep that
	 * reserved amount.
	 */
	num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
				root->fs_info->max_extent);
	if (div64_u64(new_size + root->fs_info->max_extent - 1,
		      root->fs_info->max_extent) > num_extents)
		return 0;

	spin_lock(&BTRFS_I(inode)->accounting_lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	return 0;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
			      unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock(&BTRFS_I(inode)->accounting_lock);
		BTRFS_I(inode)->outstanding_extents++;
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		btrfs_delalloc_reserve_space(root, inode, end - start + 1);
		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode,
				struct extent_state *state, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		if (bits & EXTENT_DO_ACCOUNTING) {
			spin_lock(&BTRFS_I(inode)->accounting_lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->accounting_lock);
			btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		if (state->end - state->start + 1 >
		    root->fs_info->delalloc_bytes) {
			printk(KERN_INFO "btrfs warning: delalloc account "
			       "%llu %llu\n",
			       (unsigned long long)
			       state->end - state->start + 1,
			       (unsigned long long)
			       root->fs_info->delalloc_bytes);
			btrfs_delalloc_free_space(root, inode, (u64)-1);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			btrfs_delalloc_free_space(root, inode,
						  state->end -
						  state->start + 1);
			root->fs_info->delalloc_bytes -= state->end -
				state->start + 1;
			BTRFS_I(inode)->delalloc_bytes -= state->end -
				state->start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & (1 << BIO_RW))) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio, NULL);
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

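/*
 * mark a range in the inode's io_tree as delalloc so the writeback
 * paths know space still has to be allocated for it
 */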
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

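/*
 * worker for the fixup queue: lock the page, wait out any ordered
 * extent that covers it and put the range back into the delalloc
 * state so writepage can handle it properly
 */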
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}

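/*
 * insert the file extent item for a reserved extent into the btree and
 * take a reference on the extent for this file.  Called once the data
 * covered by an ordered extent is safely on disk
 */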
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					inode->i_ino, file_pos, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);

	return 0;
}

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list));
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			trans = btrfs_join_transaction(root, 1);
			ret = btrfs_update_inode(trans, root, inode);
			BUG_ON(ret);
			btrfs_end_transaction(trans, root);
		}
		goto out;
	}

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	trans = btrfs_join_transaction(root, 1);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compressed);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
		BUG_ON(ret);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	/* this also removes the ordered extent from the tree */
	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);
	btrfs_end_transaction(trans, root);
out:
	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return 0;
}

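/*
 * extent_io.c writepage end_io hook, the ordered machinery finishes
 * the extent once the page IO completes
 */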
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		read_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & (1 << BIO_RW))
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						     failrec->last_mirror,
						     failrec->bio_flags);
	return 0;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}

1919 * when reads are done, we need to check csums to verify the data is correct
1920 * if there's a match, we allow the bio to finish. If not, we go through
1921 * the io_failure_record routines to find good copies
1923 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1924 struct extent_state *state)
1926 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1927 struct inode *inode = page->mapping->host;
1928 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1929 char *kaddr;
1930 u64 private = ~(u32)0;
1931 int ret;
1932 struct btrfs_root *root = BTRFS_I(inode)->root;
1933 u32 csum = ~(u32)0;
1935 if (PageChecked(page)) {
1936 ClearPageChecked(page);
1937 goto good;
1940 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1941 return 0;
1943 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1944 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1945 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1946 GFP_NOFS);
1947 return 0;
1950 if (state && state->start == start) {
1951 private = state->private;
1952 ret = 0;
1953 } else {
1954 ret = get_state_private(io_tree, start, &private);
1956 kaddr = kmap_atomic(page, KM_USER0);
1957 if (ret)
1958 goto zeroit;
1960 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1961 btrfs_csum_final(csum, (char *)&csum);
1962 if (csum != private)
1963 goto zeroit;
1965 kunmap_atomic(kaddr, KM_USER0);
1966 good:
1967 /* if the io failure tree for this inode is non-empty,
1968 * check to see if we've recovered from a failed IO
1970 btrfs_clean_io_failures(inode, start);
1971 return 0;
1973 zeroit:
1974 if (printk_ratelimit()) {
1975 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1976 "private %llu\n", page->mapping->host->i_ino,
1977 (unsigned long long)start, csum,
1978 (unsigned long long)private);
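/*
 * fill the bad range with a nonzero pattern so stale page contents
 * are never handed to readers; the -EIO below is what lets the
 * failed-IO hook above go try the remaining mirrors.
 */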
1980 memset(kaddr + offset, 1, end - start + 1);
1981 flush_dcache_page(page);
1982 kunmap_atomic(kaddr, KM_USER0);
1983 if (private == 0)
1984 return 0;
1985 return -EIO;
1988 struct delayed_iput {
1989 struct list_head list;
1990 struct inode *inode;
1993 void btrfs_add_delayed_iput(struct inode *inode)
1995 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1996 struct delayed_iput *delayed;
1998 if (atomic_add_unless(&inode->i_count, -1, 1))
1999 return;
2001 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2002 delayed->inode = inode;
2004 spin_lock(&fs_info->delayed_iput_lock);
2005 list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2006 spin_unlock(&fs_info->delayed_iput_lock);
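/*
 * rationale: a final iput() can drop into btrfs_delete_inode() and
 * start a transaction, which is unsafe from some of the contexts
 * that release inode references. atomic_add_unless(&i_count, -1, 1)
 * above drops the count only when ours is *not* the last reference;
 * otherwise the inode is parked on fs_info->delayed_iputs and the
 * real iput() happens later in btrfs_run_delayed_iputs().
 */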
2009 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2011 LIST_HEAD(list);
2012 struct btrfs_fs_info *fs_info = root->fs_info;
2013 struct delayed_iput *delayed;
2014 int empty;
2016 spin_lock(&fs_info->delayed_iput_lock);
2017 empty = list_empty(&fs_info->delayed_iputs);
2018 spin_unlock(&fs_info->delayed_iput_lock);
2019 if (empty)
2020 return;
2022 down_read(&root->fs_info->cleanup_work_sem);
2023 spin_lock(&fs_info->delayed_iput_lock);
2024 list_splice_init(&fs_info->delayed_iputs, &list);
2025 spin_unlock(&fs_info->delayed_iput_lock);
2027 while (!list_empty(&list)) {
2028 delayed = list_entry(list.next, struct delayed_iput, list);
2029 list_del(&delayed->list);
2030 iput(delayed->inode);
2031 kfree(delayed);
2033 up_read(&root->fs_info->cleanup_work_sem);
2037 * This creates an orphan entry for the given inode in case something goes
2038 * wrong in the middle of an unlink/truncate.
2040 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2042 struct btrfs_root *root = BTRFS_I(inode)->root;
2043 int ret = 0;
2045 spin_lock(&root->list_lock);
2047 /* already on the orphan list, we're good */
2048 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2049 spin_unlock(&root->list_lock);
2050 return 0;
2053 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2055 spin_unlock(&root->list_lock);
2058 * insert an orphan item to track this unlinked/truncated file
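* (the item lives in this root at key (BTRFS_ORPHAN_OBJECTID,
* BTRFS_ORPHAN_ITEM_KEY, inode->i_ino), which is exactly what
* btrfs_orphan_cleanup() below scans for)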
2060 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2062 return ret;
2066 * We have done the truncate/delete so we can go ahead and remove the orphan
2067 * item for this particular inode.
2069 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2071 struct btrfs_root *root = BTRFS_I(inode)->root;
2072 int ret = 0;
2074 spin_lock(&root->list_lock);
2076 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2077 spin_unlock(&root->list_lock);
2078 return 0;
2081 list_del_init(&BTRFS_I(inode)->i_orphan);
2082 if (!trans) {
2083 spin_unlock(&root->list_lock);
2084 return 0;
2087 spin_unlock(&root->list_lock);
2089 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2091 return ret;
2095 * this cleans up any orphans that may be left on the list from the last use
2096 * of this root.
2098 void btrfs_orphan_cleanup(struct btrfs_root *root)
2100 struct btrfs_path *path;
2101 struct extent_buffer *leaf;
2102 struct btrfs_item *item;
2103 struct btrfs_key key, found_key;
2104 struct btrfs_trans_handle *trans;
2105 struct inode *inode;
2106 int ret = 0, nr_unlink = 0, nr_truncate = 0;
2108 if (!xchg(&root->clean_orphans, 0))
2109 return;
2111 path = btrfs_alloc_path();
2112 BUG_ON(!path);
2113 path->reada = -1;
2115 key.objectid = BTRFS_ORPHAN_OBJECTID;
2116 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2117 key.offset = (u64)-1;
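/*
 * search for the largest possible orphan key; btrfs_search_slot()
 * then returns the slot just past the last real item, and the
 * slots[0]-- below steps back onto the highest existing orphan.
 */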
2119 while (1) {
2120 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2121 if (ret < 0) {
2122 printk(KERN_ERR "Error searching slot for orphan: %d"
2123 "\n", ret);
2124 break;
2128 * ret == 0 means we found what we were searching for, which is
2129 * weird, but possible, so only screw with the path if we didn't
2130 * find the key and see if we have stuff that matches
2132 if (ret > 0) {
2133 if (path->slots[0] == 0)
2134 break;
2135 path->slots[0]--;
2138 /* pull out the item */
2139 leaf = path->nodes[0];
2140 item = btrfs_item_nr(leaf, path->slots[0]);
2141 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2143 /* make sure the item matches what we want */
2144 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2145 break;
2146 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2147 break;
2149 /* release the path since we're done with it */
2150 btrfs_release_path(root, path);
2153 * this is where we do basically what btrfs_lookup does, without the
2154 * root-crossing part. We store the inode number in the
2155 * offset of the orphan item.
2157 found_key.objectid = found_key.offset;
2158 found_key.type = BTRFS_INODE_ITEM_KEY;
2159 found_key.offset = 0;
2160 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2161 if (IS_ERR(inode))
2162 break;
2165 * add this inode to the orphan list so btrfs_orphan_del does
2166 * the proper thing when we hit it
2168 spin_lock(&root->list_lock);
2169 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2170 spin_unlock(&root->list_lock);
2173 * if this is a bad inode, it means we actually succeeded in
2174 * removing the inode, but not the orphan record, which means
2175 * we need to manually delete the orphan since iput will just
2176 * do a destroy_inode
2178 if (is_bad_inode(inode)) {
2179 trans = btrfs_start_transaction(root, 1);
2180 btrfs_orphan_del(trans, inode);
2181 btrfs_end_transaction(trans, root);
2182 iput(inode);
2183 continue;
2186 /* if we have links, this was a truncate, let's do that */
2187 if (inode->i_nlink) {
2188 nr_truncate++;
2189 btrfs_truncate(inode);
2190 } else {
2191 nr_unlink++;
2194 /* this will do delete_inode and everything for us */
2195 iput(inode);
2198 if (nr_unlink)
2199 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2200 if (nr_truncate)
2201 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2203 btrfs_free_path(path);
2207 * very simple check to peek ahead in the leaf looking for xattrs. If we
2208 * don't find any xattrs, we know there can't be any acls.
2210 * slot is the slot the inode is in, objectid is the objectid of the inode
2212 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2213 int slot, u64 objectid)
2215 u32 nritems = btrfs_header_nritems(leaf);
2216 struct btrfs_key found_key;
2217 int scanned = 0;
2219 slot++;
2220 while (slot < nritems) {
2221 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2223 /* we found a different objectid, there must not be acls */
2224 if (found_key.objectid != objectid)
2225 return 0;
2227 /* we found an xattr, assume we've got an acl */
2228 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2229 return 1;
2232 * we found a key greater than an xattr key, there can't
2233 * be any acls later on
2235 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2236 return 0;
2238 slot++;
2239 scanned++;
2242 * it goes inode, inode backrefs, xattrs, extents,
2243 * so if there are a ton of hard links to an inode there can
2244 * be a lot of backrefs. Don't waste time searching too hard,
2245 * this is just an optimization
2247 if (scanned >= 8)
2248 break;
2250 /* we hit the end of the leaf before we found an xattr or
2251 * something larger than an xattr. We have to assume the inode
2252 * has acls
2254 return 1;
2258 * read an inode from the btree into the in-memory inode
2260 static void btrfs_read_locked_inode(struct inode *inode)
2262 struct btrfs_path *path;
2263 struct extent_buffer *leaf;
2264 struct btrfs_inode_item *inode_item;
2265 struct btrfs_timespec *tspec;
2266 struct btrfs_root *root = BTRFS_I(inode)->root;
2267 struct btrfs_key location;
2268 int maybe_acls;
2269 u64 alloc_group_block;
2270 u32 rdev;
2271 int ret;
2273 path = btrfs_alloc_path();
2274 BUG_ON(!path);
2275 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2277 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2278 if (ret)
2279 goto make_bad;
2281 leaf = path->nodes[0];
2282 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2283 struct btrfs_inode_item);
2285 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2286 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2287 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2288 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2289 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2291 tspec = btrfs_inode_atime(inode_item);
2292 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2293 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2295 tspec = btrfs_inode_mtime(inode_item);
2296 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2297 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2299 tspec = btrfs_inode_ctime(inode_item);
2300 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2301 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2303 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2304 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2305 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2306 inode->i_generation = BTRFS_I(inode)->generation;
2307 inode->i_rdev = 0;
2308 rdev = btrfs_inode_rdev(leaf, inode_item);
2310 BTRFS_I(inode)->index_cnt = (u64)-1;
2311 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2313 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2316 * try to precache a NULL acl entry for files that don't have
2317 * any xattrs or acls
2319 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2320 if (!maybe_acls)
2321 cache_no_acl(inode);
2323 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2324 alloc_group_block, 0);
2325 btrfs_free_path(path);
2326 inode_item = NULL;
2328 switch (inode->i_mode & S_IFMT) {
2329 case S_IFREG:
2330 inode->i_mapping->a_ops = &btrfs_aops;
2331 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2332 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2333 inode->i_fop = &btrfs_file_operations;
2334 inode->i_op = &btrfs_file_inode_operations;
2335 break;
2336 case S_IFDIR:
2337 inode->i_fop = &btrfs_dir_file_operations;
2338 if (root == root->fs_info->tree_root)
2339 inode->i_op = &btrfs_dir_ro_inode_operations;
2340 else
2341 inode->i_op = &btrfs_dir_inode_operations;
2342 break;
2343 case S_IFLNK:
2344 inode->i_op = &btrfs_symlink_inode_operations;
2345 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2346 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2347 break;
2348 default:
2349 inode->i_op = &btrfs_special_inode_operations;
2350 init_special_inode(inode, inode->i_mode, rdev);
2351 break;
2354 btrfs_update_iflags(inode);
2355 return;
2357 make_bad:
2358 btrfs_free_path(path);
2359 make_bad_inode(inode);
2363 * given a leaf and an inode, copy the inode fields into the leaf
2365 static void fill_inode_item(struct btrfs_trans_handle *trans,
2366 struct extent_buffer *leaf,
2367 struct btrfs_inode_item *item,
2368 struct inode *inode)
2370 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2371 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2372 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2373 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2374 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2376 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2377 inode->i_atime.tv_sec);
2378 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2379 inode->i_atime.tv_nsec);
2381 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2382 inode->i_mtime.tv_sec);
2383 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2384 inode->i_mtime.tv_nsec);
2386 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2387 inode->i_ctime.tv_sec);
2388 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2389 inode->i_ctime.tv_nsec);
2391 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2392 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2393 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2394 btrfs_set_inode_transid(leaf, item, trans->transid);
2395 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2396 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2397 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2401 * copy everything in the in-memory inode into the btree.
2403 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2404 struct btrfs_root *root, struct inode *inode)
2406 struct btrfs_inode_item *inode_item;
2407 struct btrfs_path *path;
2408 struct extent_buffer *leaf;
2409 int ret;
2411 path = btrfs_alloc_path();
2412 BUG_ON(!path);
2413 path->leave_spinning = 1;
2414 ret = btrfs_lookup_inode(trans, root, path,
2415 &BTRFS_I(inode)->location, 1);
2416 if (ret) {
2417 if (ret > 0)
2418 ret = -ENOENT;
2419 goto failed;
2422 btrfs_unlock_up_safe(path, 1);
2423 leaf = path->nodes[0];
2424 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2425 struct btrfs_inode_item);
2427 fill_inode_item(trans, leaf, inode_item, inode);
2428 btrfs_mark_buffer_dirty(leaf);
2429 btrfs_set_inode_last_trans(trans, inode);
2430 ret = 0;
2431 failed:
2432 btrfs_free_path(path);
2433 return ret;
2438 * unlink helper that gets used here in inode.c and in the tree logging
2439 * recovery code. It removes a link in a directory with a given name, and
2440 * also drops the back refs in the inode to the directory
2442 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2443 struct btrfs_root *root,
2444 struct inode *dir, struct inode *inode,
2445 const char *name, int name_len)
2447 struct btrfs_path *path;
2448 int ret = 0;
2449 struct extent_buffer *leaf;
2450 struct btrfs_dir_item *di;
2451 struct btrfs_key key;
2452 u64 index;
2454 path = btrfs_alloc_path();
2455 if (!path) {
2456 ret = -ENOMEM;
2457 goto err;
2460 path->leave_spinning = 1;
2461 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2462 name, name_len, -1);
2463 if (IS_ERR(di)) {
2464 ret = PTR_ERR(di);
2465 goto err;
2467 if (!di) {
2468 ret = -ENOENT;
2469 goto err;
2471 leaf = path->nodes[0];
2472 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2473 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2474 if (ret)
2475 goto err;
2476 btrfs_release_path(root, path);
2478 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2479 inode->i_ino,
2480 dir->i_ino, &index);
2481 if (ret) {
2482 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2483 "inode %lu parent %lu\n", name_len, name,
2484 inode->i_ino, dir->i_ino);
2485 goto err;
2488 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2489 index, name, name_len, -1);
2490 if (IS_ERR(di)) {
2491 ret = PTR_ERR(di);
2492 goto err;
2494 if (!di) {
2495 ret = -ENOENT;
2496 goto err;
2498 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2499 btrfs_release_path(root, path);
2501 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2502 inode, dir->i_ino);
2503 BUG_ON(ret != 0 && ret != -ENOENT);
2505 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2506 dir, index);
2507 BUG_ON(ret);
2508 err:
2509 btrfs_free_path(path);
2510 if (ret)
2511 goto out;
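/*
 * directory i_size in btrfs is the sum of the name lengths of the
 * DIR_ITEM and DIR_INDEX entries it holds, so dropping one link
 * shrinks it by name_len * 2.
 */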
2513 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2514 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2515 btrfs_update_inode(trans, root, dir);
2516 btrfs_drop_nlink(inode);
2517 ret = btrfs_update_inode(trans, root, inode);
2518 out:
2519 return ret;
2522 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2524 struct btrfs_root *root;
2525 struct btrfs_trans_handle *trans;
2526 struct inode *inode = dentry->d_inode;
2527 int ret;
2528 unsigned long nr = 0;
2530 root = BTRFS_I(dir)->root;
2533 * 5 items for unlink inode
2534 * 1 for orphan
2536 ret = btrfs_reserve_metadata_space(root, 6);
2537 if (ret)
2538 return ret;
2540 trans = btrfs_start_transaction(root, 1);
2541 if (IS_ERR(trans)) {
2542 btrfs_unreserve_metadata_space(root, 6);
2543 return PTR_ERR(trans);
2546 btrfs_set_trans_block_group(trans, dir);
2548 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2550 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2551 dentry->d_name.name, dentry->d_name.len);
2553 if (inode->i_nlink == 0)
2554 ret = btrfs_orphan_add(trans, inode);
2556 nr = trans->blocks_used;
2558 btrfs_end_transaction_throttle(trans, root);
2559 btrfs_unreserve_metadata_space(root, 6);
2560 btrfs_btree_balance_dirty(root, nr);
2561 return ret;
2564 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2565 struct btrfs_root *root,
2566 struct inode *dir, u64 objectid,
2567 const char *name, int name_len)
2569 struct btrfs_path *path;
2570 struct extent_buffer *leaf;
2571 struct btrfs_dir_item *di;
2572 struct btrfs_key key;
2573 u64 index;
2574 int ret;
2576 path = btrfs_alloc_path();
2577 if (!path)
2578 return -ENOMEM;
2580 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2581 name, name_len, -1);
2582 BUG_ON(!di || IS_ERR(di));
2584 leaf = path->nodes[0];
2585 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2586 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2587 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2588 BUG_ON(ret);
2589 btrfs_release_path(root, path);
2591 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2592 objectid, root->root_key.objectid,
2593 dir->i_ino, &index, name, name_len);
2594 if (ret < 0) {
2595 BUG_ON(ret != -ENOENT);
2596 di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2597 name, name_len);
2598 BUG_ON(!di || IS_ERR(di));
2600 leaf = path->nodes[0];
2601 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2602 btrfs_release_path(root, path);
2603 index = key.offset;
2606 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2607 index, name, name_len, -1);
2608 BUG_ON(!di || IS_ERR(di));
2610 leaf = path->nodes[0];
2611 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2612 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2613 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2614 BUG_ON(ret);
2615 btrfs_release_path(root, path);
2617 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2618 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2619 ret = btrfs_update_inode(trans, root, dir);
2620 BUG_ON(ret);
2621 dir->i_sb->s_dirt = 1;
2623 btrfs_free_path(path);
2624 return 0;
2627 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2629 struct inode *inode = dentry->d_inode;
2630 int err = 0;
2631 int ret;
2632 struct btrfs_root *root = BTRFS_I(dir)->root;
2633 struct btrfs_trans_handle *trans;
2634 unsigned long nr = 0;
2636 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2637 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2638 return -ENOTEMPTY;
2640 ret = btrfs_reserve_metadata_space(root, 5);
2641 if (ret)
2642 return ret;
2644 trans = btrfs_start_transaction(root, 1);
2645 if (IS_ERR(trans)) {
2646 btrfs_unreserve_metadata_space(root, 5);
2647 return PTR_ERR(trans);
2650 btrfs_set_trans_block_group(trans, dir);
2652 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2653 err = btrfs_unlink_subvol(trans, root, dir,
2654 BTRFS_I(inode)->location.objectid,
2655 dentry->d_name.name,
2656 dentry->d_name.len);
2657 goto out;
2660 err = btrfs_orphan_add(trans, inode);
2661 if (err)
2662 goto out;
2664 /* now the directory is empty */
2665 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2666 dentry->d_name.name, dentry->d_name.len);
2667 if (!err)
2668 btrfs_i_size_write(inode, 0);
2669 out:
2670 nr = trans->blocks_used;
2671 ret = btrfs_end_transaction_throttle(trans, root);
2672 btrfs_unreserve_metadata_space(root, 5);
2673 btrfs_btree_balance_dirty(root, nr);
2675 if (ret && !err)
2676 err = ret;
2677 return err;
2680 #if 0
2682 * when truncating bytes in a file, it is possible to avoid reading
2683 * the leaves that contain only checksum items. This can be the
2684 * majority of the IO required to delete a large file, but it must
2685 * be done carefully.
2687 * The keys in the level just above the leaves are checked to make sure
2688 * the lowest key in a given leaf is a csum key, and starts at an offset
2689 * after the new size.
2691 * Then the key for the next leaf is checked to make sure it also has
2692 * a checksum item for the same file. If it does, we know our target leaf
2693 * contains only checksum items, and it can be safely freed without reading
2694 * it.
2696 * This is just an optimization targeted at large files. It may do
2697 * nothing. It will return 0 unless things went badly.
2699 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2700 struct btrfs_root *root,
2701 struct btrfs_path *path,
2702 struct inode *inode, u64 new_size)
2704 struct btrfs_key key;
2705 int ret;
2706 int nritems;
2707 struct btrfs_key found_key;
2708 struct btrfs_key other_key;
2709 struct btrfs_leaf_ref *ref;
2710 u64 leaf_gen;
2711 u64 leaf_start;
2713 path->lowest_level = 1;
2714 key.objectid = inode->i_ino;
2715 key.type = BTRFS_CSUM_ITEM_KEY;
2716 key.offset = new_size;
2717 again:
2718 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2719 if (ret < 0)
2720 goto out;
2722 if (path->nodes[1] == NULL) {
2723 ret = 0;
2724 goto out;
2726 ret = 0;
2727 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2728 nritems = btrfs_header_nritems(path->nodes[1]);
2730 if (!nritems)
2731 goto out;
2733 if (path->slots[1] >= nritems)
2734 goto next_node;
2736 /* did we find a key greater than anything we want to delete? */
2737 if (found_key.objectid > inode->i_ino ||
2738 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2739 goto out;
2741 /* we check the next key in the node to make sure the leaf contains
2742 * only checksum items. This comparison doesn't work if our
2743 * leaf is the last one in the node
2745 if (path->slots[1] + 1 >= nritems) {
2746 next_node:
2747 /* search forward from the last key in the node, this
2748 * will bring us into the next node in the tree
2750 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2752 /* unlikely, but we inc below, so check to be safe */
2753 if (found_key.offset == (u64)-1)
2754 goto out;
2756 /* search_forward needs a path with locks held, do the
2757 * search again for the original key. It is possible
2758 * this will race with a balance and return a path that
2759 * we could modify, but this drop is just an optimization
2760 * and is allowed to miss some leaves.
2762 btrfs_release_path(root, path);
2763 found_key.offset++;
2765 /* setup a max key for search_forward */
2766 other_key.offset = (u64)-1;
2767 other_key.type = key.type;
2768 other_key.objectid = key.objectid;
2770 path->keep_locks = 1;
2771 ret = btrfs_search_forward(root, &found_key, &other_key,
2772 path, 0, 0);
2773 path->keep_locks = 0;
2774 if (ret || found_key.objectid != key.objectid ||
2775 found_key.type != key.type) {
2776 ret = 0;
2777 goto out;
2780 key.offset = found_key.offset;
2781 btrfs_release_path(root, path);
2782 cond_resched();
2783 goto again;
2786 /* we know there's one more slot after us in the tree,
2787 * read that key so we can verify it is also a checksum item
2789 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2791 if (found_key.objectid < inode->i_ino)
2792 goto next_key;
2794 if (found_key.type != key.type || found_key.offset < new_size)
2795 goto next_key;
2798 * if the key for the next leaf isn't a csum key from this objectid,
2799 * we can't be sure there aren't good items inside this leaf.
2800 * Bail out
2802 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2803 goto out;
2805 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2806 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2808 * it is safe to delete this leaf: it contains only
2809 * csum items from this inode at an offset >= new_size
2811 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2812 BUG_ON(ret);
2814 if (root->ref_cows && leaf_gen < trans->transid) {
2815 ref = btrfs_alloc_leaf_ref(root, 0);
2816 if (ref) {
2817 ref->root_gen = root->root_key.offset;
2818 ref->bytenr = leaf_start;
2819 ref->owner = 0;
2820 ref->generation = leaf_gen;
2821 ref->nritems = 0;
2823 btrfs_sort_leaf_ref(ref);
2825 ret = btrfs_add_leaf_ref(root, ref, 0);
2826 WARN_ON(ret);
2827 btrfs_free_leaf_ref(root, ref);
2828 } else {
2829 WARN_ON(1);
2832 next_key:
2833 btrfs_release_path(root, path);
2835 if (other_key.objectid == inode->i_ino &&
2836 other_key.type == key.type && other_key.offset > key.offset) {
2837 key.offset = other_key.offset;
2838 cond_resched();
2839 goto again;
2841 ret = 0;
2842 out:
2843 /* fixup any changes we've made to the path */
2844 path->lowest_level = 0;
2845 path->keep_locks = 0;
2846 btrfs_release_path(root, path);
2847 return ret;
2850 #endif
2853 * this can truncate away extent items, csum items and directory items.
2854 * It starts at a high offset and removes keys until it can't find
2855 * any higher than new_size
2857 * csum items that cross the new i_size are truncated to the new size
2858 * as well.
2860 * min_type is the minimum key type to truncate down to. If set to 0, this
2861 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2863 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2864 struct btrfs_root *root,
2865 struct inode *inode,
2866 u64 new_size, u32 min_type)
2868 struct btrfs_path *path;
2869 struct extent_buffer *leaf;
2870 struct btrfs_file_extent_item *fi;
2871 struct btrfs_key key;
2872 struct btrfs_key found_key;
2873 u64 extent_start = 0;
2874 u64 extent_num_bytes = 0;
2875 u64 extent_offset = 0;
2876 u64 item_end = 0;
2877 u64 mask = root->sectorsize - 1;
2878 u32 found_type = (u8)-1;
2879 int found_extent;
2880 int del_item;
2881 int pending_del_nr = 0;
2882 int pending_del_slot = 0;
2883 int extent_type = -1;
2884 int encoding;
2885 int ret;
2886 int err = 0;
2888 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
2890 if (root->ref_cows)
2891 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2893 path = btrfs_alloc_path();
2894 BUG_ON(!path);
2895 path->reada = -1;
2897 key.objectid = inode->i_ino;
2898 key.offset = (u64)-1;
2899 key.type = (u8)-1;
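/*
 * (u64)-1 / (u8)-1 act as "largest possible" sentinels: the search
 * lands just past this inode's final item and the slots[0]-- below
 * walks us backwards from the highest key down toward new_size.
 */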
2901 search_again:
2902 path->leave_spinning = 1;
2903 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2904 if (ret < 0) {
2905 err = ret;
2906 goto out;
2909 if (ret > 0) {
2910 /* there are no items in the tree for us to truncate, we're
2911 * done
2913 if (path->slots[0] == 0)
2914 goto out;
2915 path->slots[0]--;
2918 while (1) {
2919 fi = NULL;
2920 leaf = path->nodes[0];
2921 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2922 found_type = btrfs_key_type(&found_key);
2923 encoding = 0;
2925 if (found_key.objectid != inode->i_ino)
2926 break;
2928 if (found_type < min_type)
2929 break;
2931 item_end = found_key.offset;
2932 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2933 fi = btrfs_item_ptr(leaf, path->slots[0],
2934 struct btrfs_file_extent_item);
2935 extent_type = btrfs_file_extent_type(leaf, fi);
2936 encoding = btrfs_file_extent_compression(leaf, fi);
2937 encoding |= btrfs_file_extent_encryption(leaf, fi);
2938 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2940 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2941 item_end +=
2942 btrfs_file_extent_num_bytes(leaf, fi);
2943 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2944 item_end += btrfs_file_extent_inline_len(leaf,
2945 fi);
2947 item_end--;
2949 if (found_type > min_type) {
2950 del_item = 1;
2951 } else {
2952 if (item_end < new_size)
2953 break;
2954 if (found_key.offset >= new_size)
2955 del_item = 1;
2956 else
2957 del_item = 0;
2959 found_extent = 0;
2960 /* FIXME, shrink the extent if the ref count is only 1 */
2961 if (found_type != BTRFS_EXTENT_DATA_KEY)
2962 goto delete;
2964 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2965 u64 num_dec;
2966 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2967 if (!del_item && !encoding) {
2968 u64 orig_num_bytes =
2969 btrfs_file_extent_num_bytes(leaf, fi);
2970 extent_num_bytes = new_size -
2971 found_key.offset + root->sectorsize - 1;
2972 extent_num_bytes = extent_num_bytes &
2973 ~((u64)root->sectorsize - 1);
2974 btrfs_set_file_extent_num_bytes(leaf, fi,
2975 extent_num_bytes);
2976 num_dec = (orig_num_bytes -
2977 extent_num_bytes);
2978 if (root->ref_cows && extent_start != 0)
2979 inode_sub_bytes(inode, num_dec);
2980 btrfs_mark_buffer_dirty(leaf);
2981 } else {
2982 extent_num_bytes =
2983 btrfs_file_extent_disk_num_bytes(leaf,
2984 fi);
2985 extent_offset = found_key.offset -
2986 btrfs_file_extent_offset(leaf, fi);
2988 /* FIXME blocksize != 4096 */
2989 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2990 if (extent_start != 0) {
2991 found_extent = 1;
2992 if (root->ref_cows)
2993 inode_sub_bytes(inode, num_dec);
2996 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2998 * we can't truncate inline items that have had
2999 * special encodings
3001 if (!del_item &&
3002 btrfs_file_extent_compression(leaf, fi) == 0 &&
3003 btrfs_file_extent_encryption(leaf, fi) == 0 &&
3004 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3005 u32 size = new_size - found_key.offset;
3007 if (root->ref_cows) {
3008 inode_sub_bytes(inode, item_end + 1 -
3009 new_size);
3011 size =
3012 btrfs_file_extent_calc_inline_size(size);
3013 ret = btrfs_truncate_item(trans, root, path,
3014 size, 1);
3015 BUG_ON(ret);
3016 } else if (root->ref_cows) {
3017 inode_sub_bytes(inode, item_end + 1 -
3018 found_key.offset);
3021 delete:
3022 if (del_item) {
3023 if (!pending_del_nr) {
3024 /* no pending yet, add ourselves */
3025 pending_del_slot = path->slots[0];
3026 pending_del_nr = 1;
3027 } else if (pending_del_nr &&
3028 path->slots[0] + 1 == pending_del_slot) {
3029 /* hop on the pending chunk */
3030 pending_del_nr++;
3031 pending_del_slot = path->slots[0];
3032 } else {
3033 BUG();
3035 } else {
3036 break;
3038 if (found_extent && root->ref_cows) {
3039 btrfs_set_path_blocking(path);
3040 ret = btrfs_free_extent(trans, root, extent_start,
3041 extent_num_bytes, 0,
3042 btrfs_header_owner(leaf),
3043 inode->i_ino, extent_offset);
3044 BUG_ON(ret);
3047 if (found_type == BTRFS_INODE_ITEM_KEY)
3048 break;
3050 if (path->slots[0] == 0 ||
3051 path->slots[0] != pending_del_slot) {
3052 if (root->ref_cows) {
3053 err = -EAGAIN;
3054 goto out;
3056 if (pending_del_nr) {
3057 ret = btrfs_del_items(trans, root, path,
3058 pending_del_slot,
3059 pending_del_nr);
3060 BUG_ON(ret);
3061 pending_del_nr = 0;
3063 btrfs_release_path(root, path);
3064 goto search_again;
3065 } else {
3066 path->slots[0]--;
3069 out:
3070 if (pending_del_nr) {
3071 ret = btrfs_del_items(trans, root, path, pending_del_slot,
3072 pending_del_nr);
3074 btrfs_free_path(path);
3075 return err;
3079 * taken from block_truncate_page, but does cow as it zeros out
3080 * any bytes left in the last page in the file.
3082 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3084 struct inode *inode = mapping->host;
3085 struct btrfs_root *root = BTRFS_I(inode)->root;
3086 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3087 struct btrfs_ordered_extent *ordered;
3088 char *kaddr;
3089 u32 blocksize = root->sectorsize;
3090 pgoff_t index = from >> PAGE_CACHE_SHIFT;
3091 unsigned offset = from & (PAGE_CACHE_SIZE-1);
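/*
 * e.g. from == 10000 with 4k pages: index == 2, offset == 1808, and
 * the memset below zeros bytes 1808..4095 of that page.
 */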
3092 struct page *page;
3093 int ret = 0;
3094 u64 page_start;
3095 u64 page_end;
3097 if ((offset & (blocksize - 1)) == 0)
3098 goto out;
3099 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3100 if (ret)
3101 goto out;
3103 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3104 if (ret)
3105 goto out;
3107 ret = -ENOMEM;
3108 again:
3109 page = grab_cache_page(mapping, index);
3110 if (!page) {
3111 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3112 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3113 goto out;
3116 page_start = page_offset(page);
3117 page_end = page_start + PAGE_CACHE_SIZE - 1;
3119 if (!PageUptodate(page)) {
3120 ret = btrfs_readpage(NULL, page);
3121 lock_page(page);
3122 if (page->mapping != mapping) {
3123 unlock_page(page);
3124 page_cache_release(page);
3125 goto again;
3127 if (!PageUptodate(page)) {
3128 ret = -EIO;
3129 goto out_unlock;
3132 wait_on_page_writeback(page);
3134 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3135 set_page_extent_mapped(page);
3137 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3138 if (ordered) {
3139 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3140 unlock_page(page);
3141 page_cache_release(page);
3142 btrfs_start_ordered_extent(inode, ordered, 1);
3143 btrfs_put_ordered_extent(ordered);
3144 goto again;
3147 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
3148 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3149 GFP_NOFS);
3151 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3152 if (ret) {
3153 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3154 goto out_unlock;
3157 ret = 0;
3158 if (offset != PAGE_CACHE_SIZE) {
3159 kaddr = kmap(page);
3160 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3161 flush_dcache_page(page);
3162 kunmap(page);
3164 ClearPageChecked(page);
3165 set_page_dirty(page);
3166 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3168 out_unlock:
3169 if (ret)
3170 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3171 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3172 unlock_page(page);
3173 page_cache_release(page);
3174 out:
3175 return ret;
3178 int btrfs_cont_expand(struct inode *inode, loff_t size)
3180 struct btrfs_trans_handle *trans;
3181 struct btrfs_root *root = BTRFS_I(inode)->root;
3182 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3183 struct extent_map *em;
3184 u64 mask = root->sectorsize - 1;
3185 u64 hole_start = (inode->i_size + mask) & ~mask;
3186 u64 block_end = (size + mask) & ~mask;
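/*
 * hole_start/block_end round the old and new sizes up to a sector
 * boundary, e.g. with 4k sectors an expand from 10000 to 20000 bytes
 * fills the hole [12288, 20480).
 */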
3187 u64 last_byte;
3188 u64 cur_offset;
3189 u64 hole_size;
3190 int err = 0;
3192 if (size <= hole_start)
3193 return 0;
3195 while (1) {
3196 struct btrfs_ordered_extent *ordered;
3197 btrfs_wait_ordered_range(inode, hole_start,
3198 block_end - hole_start);
3199 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3200 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3201 if (!ordered)
3202 break;
3203 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3204 btrfs_put_ordered_extent(ordered);
3207 cur_offset = hole_start;
3208 while (1) {
3209 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3210 block_end - cur_offset, 0);
3211 BUG_ON(IS_ERR(em) || !em);
3212 last_byte = min(extent_map_end(em), block_end);
3213 last_byte = (last_byte + mask) & ~mask;
3214 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3215 u64 hint_byte = 0;
3216 hole_size = last_byte - cur_offset;
3218 err = btrfs_reserve_metadata_space(root, 2);
3219 if (err)
3220 break;
3222 trans = btrfs_start_transaction(root, 1);
3223 btrfs_set_trans_block_group(trans, inode);
3225 err = btrfs_drop_extents(trans, inode, cur_offset,
3226 cur_offset + hole_size,
3227 &hint_byte, 1);
3228 BUG_ON(err);
3230 err = btrfs_insert_file_extent(trans, root,
3231 inode->i_ino, cur_offset, 0,
3232 0, hole_size, 0, hole_size,
3233 0, 0, 0);
3234 BUG_ON(err);
3236 btrfs_drop_extent_cache(inode, hole_start,
3237 last_byte - 1, 0);
3239 btrfs_end_transaction(trans, root);
3240 btrfs_unreserve_metadata_space(root, 2);
3242 free_extent_map(em);
3243 cur_offset = last_byte;
3244 if (cur_offset >= block_end)
3245 break;
3248 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3249 return err;
3252 static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3254 struct btrfs_root *root = BTRFS_I(inode)->root;
3255 struct btrfs_trans_handle *trans;
3256 unsigned long nr;
3257 int ret;
3259 if (attr->ia_size == inode->i_size)
3260 return 0;
3262 if (attr->ia_size > inode->i_size) {
3263 unsigned long limit;
3264 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
3265 if (attr->ia_size > inode->i_sb->s_maxbytes)
3266 return -EFBIG;
3267 if (limit != RLIM_INFINITY && attr->ia_size > limit) {
3268 send_sig(SIGXFSZ, current, 0);
3269 return -EFBIG;
3273 ret = btrfs_reserve_metadata_space(root, 1);
3274 if (ret)
3275 return ret;
3277 trans = btrfs_start_transaction(root, 1);
3278 btrfs_set_trans_block_group(trans, inode);
3280 ret = btrfs_orphan_add(trans, inode);
3281 BUG_ON(ret);
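/*
 * the orphan item goes in before any size change reaches disk; if
 * we crash mid-truncate, btrfs_orphan_cleanup() finishes the job on
 * the next mount.
 */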
3283 nr = trans->blocks_used;
3284 btrfs_end_transaction(trans, root);
3285 btrfs_unreserve_metadata_space(root, 1);
3286 btrfs_btree_balance_dirty(root, nr);
3288 if (attr->ia_size > inode->i_size) {
3289 ret = btrfs_cont_expand(inode, attr->ia_size);
3290 if (ret) {
3291 btrfs_truncate(inode);
3292 return ret;
3295 i_size_write(inode, attr->ia_size);
3296 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
3298 trans = btrfs_start_transaction(root, 1);
3299 btrfs_set_trans_block_group(trans, inode);
3301 ret = btrfs_update_inode(trans, root, inode);
3302 BUG_ON(ret);
3303 if (inode->i_nlink > 0) {
3304 ret = btrfs_orphan_del(trans, inode);
3305 BUG_ON(ret);
3307 nr = trans->blocks_used;
3308 btrfs_end_transaction(trans, root);
3309 btrfs_btree_balance_dirty(root, nr);
3310 return 0;
3314 * We're truncating a file that used to have good data down to
3315 * zero. Make sure it gets into the ordered flush list so that
3316 * any new writes get down to disk quickly.
3318 if (attr->ia_size == 0)
3319 BTRFS_I(inode)->ordered_data_close = 1;
3321 /* we don't support swapfiles, so vmtruncate shouldn't fail */
3322 ret = vmtruncate(inode, attr->ia_size);
3323 BUG_ON(ret);
3325 return 0;
3328 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3330 struct inode *inode = dentry->d_inode;
3331 int err;
3333 err = inode_change_ok(inode, attr);
3334 if (err)
3335 return err;
3337 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3338 err = btrfs_setattr_size(inode, attr);
3339 if (err)
3340 return err;
3342 attr->ia_valid &= ~ATTR_SIZE;
3344 if (attr->ia_valid)
3345 err = inode_setattr(inode, attr);
3347 if (!err && ((attr->ia_valid & ATTR_MODE)))
3348 err = btrfs_acl_chmod(inode);
3349 return err;
3352 void btrfs_delete_inode(struct inode *inode)
3354 struct btrfs_trans_handle *trans;
3355 struct btrfs_root *root = BTRFS_I(inode)->root;
3356 unsigned long nr;
3357 int ret;
3359 truncate_inode_pages(&inode->i_data, 0);
3360 if (is_bad_inode(inode)) {
3361 btrfs_orphan_del(NULL, inode);
3362 goto no_delete;
3364 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3366 if (root->fs_info->log_root_recovering) {
3367 BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3368 goto no_delete;
3371 if (inode->i_nlink > 0) {
3372 BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3373 goto no_delete;
3376 btrfs_i_size_write(inode, 0);
3378 while (1) {
3379 trans = btrfs_start_transaction(root, 1);
3380 btrfs_set_trans_block_group(trans, inode);
3381 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3383 if (ret != -EAGAIN)
3384 break;
3386 nr = trans->blocks_used;
3387 btrfs_end_transaction(trans, root);
3388 trans = NULL;
3389 btrfs_btree_balance_dirty(root, nr);
3392 if (ret == 0) {
3393 ret = btrfs_orphan_del(trans, inode);
3394 BUG_ON(ret);
3397 nr = trans->blocks_used;
3398 btrfs_end_transaction(trans, root);
3399 btrfs_btree_balance_dirty(root, nr);
3400 no_delete:
3401 clear_inode(inode);
3402 return;
3406 * this returns the key found in the dir entry in the location pointer.
3407 * If no dir entries were found, location->objectid is 0.
3409 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3410 struct btrfs_key *location)
3412 const char *name = dentry->d_name.name;
3413 int namelen = dentry->d_name.len;
3414 struct btrfs_dir_item *di;
3415 struct btrfs_path *path;
3416 struct btrfs_root *root = BTRFS_I(dir)->root;
3417 int ret = 0;
3419 path = btrfs_alloc_path();
3420 BUG_ON(!path);
3422 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3423 namelen, 0);
3424 if (IS_ERR(di))
3425 ret = PTR_ERR(di);
3427 if (!di || IS_ERR(di))
3428 goto out_err;
3430 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3431 out:
3432 btrfs_free_path(path);
3433 return ret;
3434 out_err:
3435 location->objectid = 0;
3436 goto out;
3440 * when we hit a tree root in a directory, the btrfs part of the inode
3441 * needs to be changed to reflect the root directory of the tree root. This
3442 * is kind of like crossing a mount point.
3444 static int fixup_tree_root_location(struct btrfs_root *root,
3445 struct inode *dir,
3446 struct dentry *dentry,
3447 struct btrfs_key *location,
3448 struct btrfs_root **sub_root)
3450 struct btrfs_path *path;
3451 struct btrfs_root *new_root;
3452 struct btrfs_root_ref *ref;
3453 struct extent_buffer *leaf;
3454 int ret;
3455 int err = 0;
3457 path = btrfs_alloc_path();
3458 if (!path) {
3459 err = -ENOMEM;
3460 goto out;
3463 err = -ENOENT;
3464 ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3465 BTRFS_I(dir)->root->root_key.objectid,
3466 location->objectid);
3467 if (ret) {
3468 if (ret < 0)
3469 err = ret;
3470 goto out;
3473 leaf = path->nodes[0];
3474 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3475 if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3476 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3477 goto out;
3479 ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3480 (unsigned long)(ref + 1),
3481 dentry->d_name.len);
3482 if (ret)
3483 goto out;
3485 btrfs_release_path(root->fs_info->tree_root, path);
3487 new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3488 if (IS_ERR(new_root)) {
3489 err = PTR_ERR(new_root);
3490 goto out;
3493 if (btrfs_root_refs(&new_root->root_item) == 0) {
3494 err = -ENOENT;
3495 goto out;
3498 *sub_root = new_root;
3499 location->objectid = btrfs_root_dirid(&new_root->root_item);
3500 location->type = BTRFS_INODE_ITEM_KEY;
3501 location->offset = 0;
3502 err = 0;
3503 out:
3504 btrfs_free_path(path);
3505 return err;
3508 static void inode_tree_add(struct inode *inode)
3510 struct btrfs_root *root = BTRFS_I(inode)->root;
3511 struct btrfs_inode *entry;
3512 struct rb_node **p;
3513 struct rb_node *parent;
3514 again:
3515 p = &root->inode_tree.rb_node;
3516 parent = NULL;
3518 if (hlist_unhashed(&inode->i_hash))
3519 return;
3521 spin_lock(&root->inode_lock);
3522 while (*p) {
3523 parent = *p;
3524 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3526 if (inode->i_ino < entry->vfs_inode.i_ino)
3527 p = &parent->rb_left;
3528 else if (inode->i_ino > entry->vfs_inode.i_ino)
3529 p = &parent->rb_right;
3530 else {
3531 WARN_ON(!(entry->vfs_inode.i_state &
3532 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3533 rb_erase(parent, &root->inode_tree);
3534 RB_CLEAR_NODE(parent);
3535 spin_unlock(&root->inode_lock);
3536 goto again;
3539 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3540 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3541 spin_unlock(&root->inode_lock);
3544 static void inode_tree_del(struct inode *inode)
3546 struct btrfs_root *root = BTRFS_I(inode)->root;
3547 int empty = 0;
3549 spin_lock(&root->inode_lock);
3550 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3551 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3552 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3553 empty = RB_EMPTY_ROOT(&root->inode_tree);
3555 spin_unlock(&root->inode_lock);
3557 if (empty && btrfs_root_refs(&root->root_item) == 0) {
3558 synchronize_srcu(&root->fs_info->subvol_srcu);
3559 spin_lock(&root->inode_lock);
3560 empty = RB_EMPTY_ROOT(&root->inode_tree);
3561 spin_unlock(&root->inode_lock);
3562 if (empty)
3563 btrfs_add_dead_root(root);
3567 int btrfs_invalidate_inodes(struct btrfs_root *root)
3569 struct rb_node *node;
3570 struct rb_node *prev;
3571 struct btrfs_inode *entry;
3572 struct inode *inode;
3573 u64 objectid = 0;
3575 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3577 spin_lock(&root->inode_lock);
3578 again:
3579 node = root->inode_tree.rb_node;
3580 prev = NULL;
3581 while (node) {
3582 prev = node;
3583 entry = rb_entry(node, struct btrfs_inode, rb_node);
3585 if (objectid < entry->vfs_inode.i_ino)
3586 node = node->rb_left;
3587 else if (objectid > entry->vfs_inode.i_ino)
3588 node = node->rb_right;
3589 else
3590 break;
3592 if (!node) {
3593 while (prev) {
3594 entry = rb_entry(prev, struct btrfs_inode, rb_node);
3595 if (objectid <= entry->vfs_inode.i_ino) {
3596 node = prev;
3597 break;
3599 prev = rb_next(prev);
3602 while (node) {
3603 entry = rb_entry(node, struct btrfs_inode, rb_node);
3604 objectid = entry->vfs_inode.i_ino + 1;
3605 inode = igrab(&entry->vfs_inode);
3606 if (inode) {
3607 spin_unlock(&root->inode_lock);
3608 if (atomic_read(&inode->i_count) > 1)
3609 d_prune_aliases(inode);
3611 * btrfs_drop_inode will remove it from
3612 * the inode cache when its usage count
3613 * hits zero.
3615 iput(inode);
3616 cond_resched();
3617 spin_lock(&root->inode_lock);
3618 goto again;
3621 if (cond_resched_lock(&root->inode_lock))
3622 goto again;
3624 node = rb_next(node);
3626 spin_unlock(&root->inode_lock);
3627 return 0;
3630 static noinline void init_btrfs_i(struct inode *inode)
3632 struct btrfs_inode *bi = BTRFS_I(inode);
3634 bi->generation = 0;
3635 bi->sequence = 0;
3636 bi->last_trans = 0;
3637 bi->last_sub_trans = 0;
3638 bi->logged_trans = 0;
3639 bi->delalloc_bytes = 0;
3640 bi->reserved_bytes = 0;
3641 bi->disk_i_size = 0;
3642 bi->flags = 0;
3643 bi->index_cnt = (u64)-1;
3644 bi->last_unlink_trans = 0;
3645 bi->ordered_data_close = 0;
3646 bi->force_compress = 0;
3647 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3648 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3649 inode->i_mapping, GFP_NOFS);
3650 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3651 inode->i_mapping, GFP_NOFS);
3652 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3653 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3654 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3655 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3656 mutex_init(&BTRFS_I(inode)->log_mutex);
3659 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3661 struct btrfs_iget_args *args = p;
3662 inode->i_ino = args->ino;
3663 init_btrfs_i(inode);
3664 BTRFS_I(inode)->root = args->root;
3665 btrfs_set_inode_space_info(args->root, inode);
3666 return 0;
3669 static int btrfs_find_actor(struct inode *inode, void *opaque)
3671 struct btrfs_iget_args *args = opaque;
3672 return args->ino == inode->i_ino &&
3673 args->root == BTRFS_I(inode)->root;
3676 static struct inode *btrfs_iget_locked(struct super_block *s,
3677 u64 objectid,
3678 struct btrfs_root *root)
3680 struct inode *inode;
3681 struct btrfs_iget_args args;
3682 args.ino = objectid;
3683 args.root = root;
3685 inode = iget5_locked(s, objectid, btrfs_find_actor,
3686 btrfs_init_locked_inode,
3687 (void *)&args);
3688 return inode;
3691 /* Get an inode object given its location and corresponding root.
3692 * Returns in *new whether the inode was read from disk
3694 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3695 struct btrfs_root *root, int *new)
3697 struct inode *inode;
3699 inode = btrfs_iget_locked(s, location->objectid, root);
3700 if (!inode)
3701 return ERR_PTR(-ENOMEM);
3703 if (inode->i_state & I_NEW) {
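/*
 * iget5_locked() either found a cached inode or allocated a fresh
 * one flagged I_NEW; only the latter needs to be read from the
 * btree and unlocked here.
 */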
3704 BTRFS_I(inode)->root = root;
3705 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3706 btrfs_read_locked_inode(inode);
3708 inode_tree_add(inode);
3709 unlock_new_inode(inode);
3710 if (new)
3711 *new = 1;
3714 return inode;
3717 static struct inode *new_simple_dir(struct super_block *s,
3718 struct btrfs_key *key,
3719 struct btrfs_root *root)
3721 struct inode *inode = new_inode(s);
3723 if (!inode)
3724 return ERR_PTR(-ENOMEM);
3726 init_btrfs_i(inode);
3728 BTRFS_I(inode)->root = root;
3729 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3730 BTRFS_I(inode)->dummy_inode = 1;
3732 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3733 inode->i_op = &simple_dir_inode_operations;
3734 inode->i_fop = &simple_dir_operations;
3735 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3736 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3738 return inode;
3741 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3743 struct inode *inode;
3744 struct btrfs_root *root = BTRFS_I(dir)->root;
3745 struct btrfs_root *sub_root = root;
3746 struct btrfs_key location;
3747 int index;
3748 int ret;
3750 dentry->d_op = &btrfs_dentry_operations;
3752 if (dentry->d_name.len > BTRFS_NAME_LEN)
3753 return ERR_PTR(-ENAMETOOLONG);
3755 ret = btrfs_inode_by_name(dir, dentry, &location);
3757 if (ret < 0)
3758 return ERR_PTR(ret);
3760 if (location.objectid == 0)
3761 return NULL;
3763 if (location.type == BTRFS_INODE_ITEM_KEY) {
3764 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
3765 return inode;
3768 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3770 index = srcu_read_lock(&root->fs_info->subvol_srcu);
3771 ret = fixup_tree_root_location(root, dir, dentry,
3772 &location, &sub_root);
3773 if (ret < 0) {
3774 if (ret != -ENOENT)
3775 inode = ERR_PTR(ret);
3776 else
3777 inode = new_simple_dir(dir->i_sb, &location, sub_root);
3778 } else {
3779 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
3781 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3783 if (root != sub_root) {
3784 down_read(&root->fs_info->cleanup_work_sem);
3785 if (!(inode->i_sb->s_flags & MS_RDONLY))
3786 btrfs_orphan_cleanup(sub_root);
3787 up_read(&root->fs_info->cleanup_work_sem);
3790 return inode;
3793 static int btrfs_dentry_delete(struct dentry *dentry)
3795 struct btrfs_root *root;
3797 if (!dentry->d_inode && !IS_ROOT(dentry))
3798 dentry = dentry->d_parent;
3800 if (dentry->d_inode) {
3801 root = BTRFS_I(dentry->d_inode)->root;
3802 if (btrfs_root_refs(&root->root_item) == 0)
3803 return 1;
3805 return 0;
3808 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3809 struct nameidata *nd)
3811 struct inode *inode;
3813 inode = btrfs_lookup_dentry(dir, dentry);
3814 if (IS_ERR(inode))
3815 return ERR_CAST(inode);
3817 return d_splice_alias(inode, dentry);
3820 static unsigned char btrfs_filetype_table[] = {
3821 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
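/*
 * indexed by the on-disk BTRFS_FT_* values (BTRFS_FT_UNKNOWN == 0
 * through BTRFS_FT_SYMLINK == 7); the inverse of the
 * btrfs_type_by_mode table at the top of this file.
 */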
3824 static int btrfs_real_readdir(struct file *filp, void *dirent,
3825 filldir_t filldir)
3827 struct inode *inode = filp->f_dentry->d_inode;
3828 struct btrfs_root *root = BTRFS_I(inode)->root;
3829 struct btrfs_item *item;
3830 struct btrfs_dir_item *di;
3831 struct btrfs_key key;
3832 struct btrfs_key found_key;
3833 struct btrfs_path *path;
3834 int ret;
3835 u32 nritems;
3836 struct extent_buffer *leaf;
3837 int slot;
3838 int advance;
3839 unsigned char d_type;
3840 int over = 0;
3841 u32 di_cur;
3842 u32 di_total;
3843 u32 di_len;
3844 int key_type = BTRFS_DIR_INDEX_KEY;
3845 char tmp_name[32];
3846 char *name_ptr;
3847 int name_len;
3849 /* FIXME, use a real flag for deciding about the key type */
3850 if (root->fs_info->tree_root == root)
3851 key_type = BTRFS_DIR_ITEM_KEY;
3853 /* special case for "." */
3854 if (filp->f_pos == 0) {
3855 over = filldir(dirent, ".", 1,
3856 1, inode->i_ino,
3857 DT_DIR);
3858 if (over)
3859 return 0;
3860 filp->f_pos = 1;
3862 /* special case for .., just use the back ref */
3863 if (filp->f_pos == 1) {
3864 u64 pino = parent_ino(filp->f_path.dentry);
3865 over = filldir(dirent, "..", 2,
3866 2, pino, DT_DIR);
3867 if (over)
3868 return 0;
3869 filp->f_pos = 2;
3871 path = btrfs_alloc_path();
3872 path->reada = 2;
3874 btrfs_set_key_type(&key, key_type);
3875 key.offset = filp->f_pos;
3876 key.objectid = inode->i_ino;
3878 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3879 if (ret < 0)
3880 goto err;
3881 advance = 0;
3883 while (1) {
3884 leaf = path->nodes[0];
3885 nritems = btrfs_header_nritems(leaf);
3886 slot = path->slots[0];
3887 if (advance || slot >= nritems) {
3888 if (slot >= nritems - 1) {
3889 ret = btrfs_next_leaf(root, path);
3890 if (ret)
3891 break;
3892 leaf = path->nodes[0];
3893 nritems = btrfs_header_nritems(leaf);
3894 slot = path->slots[0];
3895 } else {
3896 slot++;
3897 path->slots[0]++;
3901 advance = 1;
3902 item = btrfs_item_nr(leaf, slot);
3903 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3905 if (found_key.objectid != key.objectid)
3906 break;
3907 if (btrfs_key_type(&found_key) != key_type)
3908 break;
3909 if (found_key.offset < filp->f_pos)
3910 continue;
3912 filp->f_pos = found_key.offset;
3914 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3915 di_cur = 0;
3916 di_total = btrfs_item_size(leaf, item);
3918 while (di_cur < di_total) {
3919 struct btrfs_key location;
3921 name_len = btrfs_dir_name_len(leaf, di);
3922 if (name_len <= sizeof(tmp_name)) {
3923 name_ptr = tmp_name;
3924 } else {
3925 name_ptr = kmalloc(name_len, GFP_NOFS);
3926 if (!name_ptr) {
3927 ret = -ENOMEM;
3928 goto err;
3931 read_extent_buffer(leaf, name_ptr,
3932 (unsigned long)(di + 1), name_len);
3934 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3935 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3937 /* is this a reference to our own snapshot? If so
3938 * skip it
3940 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3941 location.objectid == root->root_key.objectid) {
3942 over = 0;
3943 goto skip;
3945 over = filldir(dirent, name_ptr, name_len,
3946 found_key.offset, location.objectid,
3947 d_type);
3949 skip:
3950 if (name_ptr != tmp_name)
3951 kfree(name_ptr);
3953 if (over)
3954 goto nopos;
3955 di_len = btrfs_dir_name_len(leaf, di) +
3956 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3957 di_cur += di_len;
3958 di = (struct btrfs_dir_item *)((char *)di + di_len);
3962 /* Reached end of directory/root. Bump pos past the last item. */
3963 if (key_type == BTRFS_DIR_INDEX_KEY)
3965 * 32-bit glibc will use getdents64, but then strtol -
3966 * so the last number we can serve is this.
3968 filp->f_pos = 0x7fffffff;
3969 else
3970 filp->f_pos++;
3971 nopos:
3972 ret = 0;
3973 err:
3974 btrfs_free_path(path);
3975 return ret;
3978 int btrfs_write_inode(struct inode *inode, int wait)
3980 struct btrfs_root *root = BTRFS_I(inode)->root;
3981 struct btrfs_trans_handle *trans;
3982 int ret = 0;
3984 if (root->fs_info->btree_inode == inode)
3985 return 0;
3987 if (wait) {
3988 trans = btrfs_join_transaction(root, 1);
3989 btrfs_set_trans_block_group(trans, inode);
3990 ret = btrfs_commit_transaction(trans, root);
3991 }
3992 return ret;
3993 }
3995 /*
3996 * This is somewhat expensive, updating the tree every time the
3997 * inode changes. But, it is most likely to find the inode in cache.
3998 * FIXME, needs more benchmarking... there are no reasons other than performance
3999 * to keep or drop this code.
4000 */
4001 void btrfs_dirty_inode(struct inode *inode)
4003 struct btrfs_root *root = BTRFS_I(inode)->root;
4004 struct btrfs_trans_handle *trans;
4006 trans = btrfs_join_transaction(root, 1);
4007 btrfs_set_trans_block_group(trans, inode);
4008 btrfs_update_inode(trans, root, inode);
4009 btrfs_end_transaction(trans, root);
4010 }
4012 /*
4013 * find the highest existing sequence number in a directory
4014 * and then set the in-memory index_cnt variable to reflect
4015 * free sequence numbers
4016 */
4017 static int btrfs_set_inode_index_count(struct inode *inode)
4019 struct btrfs_root *root = BTRFS_I(inode)->root;
4020 struct btrfs_key key, found_key;
4021 struct btrfs_path *path;
4022 struct extent_buffer *leaf;
4023 int ret;
4025 key.objectid = inode->i_ino;
4026 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4027 key.offset = (u64)-1;
4029 path = btrfs_alloc_path();
4030 if (!path)
4031 return -ENOMEM;
4033 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4034 if (ret < 0)
4035 goto out;
4036 /* FIXME: we should be able to handle this */
4037 if (ret == 0)
4038 goto out;
4039 ret = 0;
4041 /*
4042 * MAGIC NUMBER EXPLANATION:
4043 * since we search a directory based on f_pos we have to start at 2
4044 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
4045 * else has to start at 2
4046 */
4047 if (path->slots[0] == 0) {
4048 BTRFS_I(inode)->index_cnt = 2;
4049 goto out;
4050 }
4052 path->slots[0]--;
4054 leaf = path->nodes[0];
4055 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4057 if (found_key.objectid != inode->i_ino ||
4058 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4059 BTRFS_I(inode)->index_cnt = 2;
4060 goto out;
4061 }
4063 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4064 out:
4065 btrfs_free_path(path);
4066 return ret;
4067 }
4069 /*
4070 * helper to find a free sequence number in a given directory. This current
4071 * code is very simple; later versions will do smarter things in the btree
4072 */
4073 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4075 int ret = 0;
4077 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4078 ret = btrfs_set_inode_index_count(dir);
4079 if (ret)
4080 return ret;
4081 }
4083 *index = BTRFS_I(dir)->index_cnt;
4084 BTRFS_I(dir)->index_cnt++;
4086 return ret;
4087 }
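4088 /* allocate a new in-memory inode and insert its item and back ref into the tree */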
4089 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4090 struct btrfs_root *root,
4091 struct inode *dir,
4092 const char *name, int name_len,
4093 u64 ref_objectid, u64 objectid,
4094 u64 alloc_hint, int mode, u64 *index)
4096 struct inode *inode;
4097 struct btrfs_inode_item *inode_item;
4098 struct btrfs_key *location;
4099 struct btrfs_path *path;
4100 struct btrfs_inode_ref *ref;
4101 struct btrfs_key key[2];
4102 u32 sizes[2];
4103 unsigned long ptr;
4104 int ret;
4105 int owner;
4107 path = btrfs_alloc_path();
4108 BUG_ON(!path);
4110 inode = new_inode(root->fs_info->sb);
4111 if (!inode)
4112 return ERR_PTR(-ENOMEM);
4114 if (dir) {
4115 ret = btrfs_set_inode_index(dir, index);
4116 if (ret) {
4117 iput(inode);
4118 return ERR_PTR(ret);
4119 }
4120 }
4121 /*
4122 * index_cnt is ignored for everything but a dir,
4123 * btrfs_set_inode_index_count has an explanation for the magic
4124 * number
4125 */
4126 init_btrfs_i(inode);
4127 BTRFS_I(inode)->index_cnt = 2;
4128 BTRFS_I(inode)->root = root;
4129 BTRFS_I(inode)->generation = trans->transid;
4130 btrfs_set_inode_space_info(root, inode);
4132 if (mode & S_IFDIR)
4133 owner = 0;
4134 else
4135 owner = 1;
4136 BTRFS_I(inode)->block_group =
4137 btrfs_find_block_group(root, 0, alloc_hint, owner);
4139 key[0].objectid = objectid;
4140 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4141 key[0].offset = 0;
4143 key[1].objectid = objectid;
4144 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4145 key[1].offset = ref_objectid;
4147 sizes[0] = sizeof(struct btrfs_inode_item);
4148 sizes[1] = name_len + sizeof(*ref);
4150 path->leave_spinning = 1;
4151 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4152 if (ret != 0)
4153 goto fail;
4155 inode->i_uid = current_fsuid();
4157 if (dir && (dir->i_mode & S_ISGID)) {
4158 inode->i_gid = dir->i_gid;
4159 if (S_ISDIR(mode))
4160 mode |= S_ISGID;
4161 } else
4162 inode->i_gid = current_fsgid();
4164 inode->i_mode = mode;
4165 inode->i_ino = objectid;
4166 inode_set_bytes(inode, 0);
4167 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4168 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4169 struct btrfs_inode_item);
4170 fill_inode_item(trans, path->nodes[0], inode_item, inode);
4172 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4173 struct btrfs_inode_ref);
4174 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4175 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4176 ptr = (unsigned long)(ref + 1);
4177 write_extent_buffer(path->nodes[0], name, ptr, name_len);
4179 btrfs_mark_buffer_dirty(path->nodes[0]);
4180 btrfs_free_path(path);
4182 location = &BTRFS_I(inode)->location;
4183 location->objectid = objectid;
4184 location->offset = 0;
4185 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4187 btrfs_inherit_iflags(inode, dir);
4189 if ((mode & S_IFREG)) {
4190 if (btrfs_test_opt(root, NODATASUM))
4191 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4192 if (btrfs_test_opt(root, NODATACOW))
4193 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4194 }
4196 insert_inode_hash(inode);
4197 inode_tree_add(inode);
4198 return inode;
4199 fail:
4200 if (dir)
4201 BTRFS_I(dir)->index_cnt--;
4202 btrfs_free_path(path);
4203 iput(inode);
4204 return ERR_PTR(ret);
4205 }
4207 static inline u8 btrfs_inode_type(struct inode *inode)
4208 {
4209 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4210 }
4212 /*
4213 * utility function to add 'inode' into 'parent_inode' with
4214 * a given name and a given sequence number.
4215 * if 'add_backref' is true, also insert a backref from the
4216 * inode to the parent directory.
4217 */
4218 int btrfs_add_link(struct btrfs_trans_handle *trans,
4219 struct inode *parent_inode, struct inode *inode,
4220 const char *name, int name_len, int add_backref, u64 index)
4222 int ret = 0;
4223 struct btrfs_key key;
4224 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4226 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4227 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4228 } else {
4229 key.objectid = inode->i_ino;
4230 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4231 key.offset = 0;
4232 }
4234 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4235 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4236 key.objectid, root->root_key.objectid,
4237 parent_inode->i_ino,
4238 index, name, name_len);
4239 } else if (add_backref) {
4240 ret = btrfs_insert_inode_ref(trans, root,
4241 name, name_len, inode->i_ino,
4242 parent_inode->i_ino, index);
4243 }
4245 if (ret == 0) {
4246 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4247 parent_inode->i_ino, &key,
4248 btrfs_inode_type(inode), index);
4249 BUG_ON(ret);
4251 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4252 name_len * 2);
4253 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4254 ret = btrfs_update_inode(trans, root, parent_inode);
4255 }
4256 return ret;
4257 }
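4258 /* link a non-directory inode under a dentry and instantiate it on success */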
4259 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4260 struct dentry *dentry, struct inode *inode,
4261 int backref, u64 index)
4263 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4264 inode, dentry->d_name.name,
4265 dentry->d_name.len, backref, index);
4266 if (!err) {
4267 d_instantiate(dentry, inode);
4268 return 0;
4269 }
4270 if (err > 0)
4271 err = -EEXIST;
4272 return err;
4273 }
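4274 /* mknod: create a device node, fifo or socket in 'dir' */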
4275 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4276 int mode, dev_t rdev)
4278 struct btrfs_trans_handle *trans;
4279 struct btrfs_root *root = BTRFS_I(dir)->root;
4280 struct inode *inode = NULL;
4281 int err;
4282 int drop_inode = 0;
4283 u64 objectid;
4284 unsigned long nr = 0;
4285 u64 index = 0;
4287 if (!new_valid_dev(rdev))
4288 return -EINVAL;
4290 /*
4291 * 2 for inode item and ref
4292 * 2 for dir items
4293 * 1 for xattr if selinux is on
4294 */
4295 err = btrfs_reserve_metadata_space(root, 5);
4296 if (err)
4297 return err;
4299 trans = btrfs_start_transaction(root, 1);
4300 if (!trans)
4301 goto fail;
4302 btrfs_set_trans_block_group(trans, dir);
4304 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4305 if (err) {
4306 err = -ENOSPC;
4307 goto out_unlock;
4308 }
4310 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4311 dentry->d_name.len,
4312 dentry->d_parent->d_inode->i_ino, objectid,
4313 BTRFS_I(dir)->block_group, mode, &index);
4314 err = PTR_ERR(inode);
4315 if (IS_ERR(inode))
4316 goto out_unlock;
4318 err = btrfs_init_inode_security(trans, inode, dir);
4319 if (err) {
4320 drop_inode = 1;
4321 goto out_unlock;
4322 }
4324 btrfs_set_trans_block_group(trans, inode);
4325 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4326 if (err)
4327 drop_inode = 1;
4328 else {
4329 inode->i_op = &btrfs_special_inode_operations;
4330 init_special_inode(inode, inode->i_mode, rdev);
4331 btrfs_update_inode(trans, root, inode);
4332 }
4333 btrfs_update_inode_block_group(trans, inode);
4334 btrfs_update_inode_block_group(trans, dir);
4335 out_unlock:
4336 nr = trans->blocks_used;
4337 btrfs_end_transaction_throttle(trans, root);
4338 fail:
4339 btrfs_unreserve_metadata_space(root, 5);
4340 if (drop_inode) {
4341 inode_dec_link_count(inode);
4342 iput(inode);
4343 }
4344 btrfs_btree_balance_dirty(root, nr);
4345 return err;
4346 }
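4347 /* create: make a new regular file in 'dir' and wire up its operations */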
4348 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4349 int mode, struct nameidata *nd)
4351 struct btrfs_trans_handle *trans;
4352 struct btrfs_root *root = BTRFS_I(dir)->root;
4353 struct inode *inode = NULL;
4354 int err;
4355 int drop_inode = 0;
4356 unsigned long nr = 0;
4357 u64 objectid;
4358 u64 index = 0;
4360 /*
4361 * 2 for inode item and ref
4362 * 2 for dir items
4363 * 1 for xattr if selinux is on
4364 */
4365 err = btrfs_reserve_metadata_space(root, 5);
4366 if (err)
4367 return err;
4369 trans = btrfs_start_transaction(root, 1);
4370 if (!trans)
4371 goto fail;
4372 btrfs_set_trans_block_group(trans, dir);
4374 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4375 if (err) {
4376 err = -ENOSPC;
4377 goto out_unlock;
4378 }
4380 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4381 dentry->d_name.len,
4382 dentry->d_parent->d_inode->i_ino,
4383 objectid, BTRFS_I(dir)->block_group, mode,
4384 &index);
4385 err = PTR_ERR(inode);
4386 if (IS_ERR(inode))
4387 goto out_unlock;
4389 err = btrfs_init_inode_security(trans, inode, dir);
4390 if (err) {
4391 drop_inode = 1;
4392 goto out_unlock;
4393 }
4395 btrfs_set_trans_block_group(trans, inode);
4396 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4397 if (err)
4398 drop_inode = 1;
4399 else {
4400 inode->i_mapping->a_ops = &btrfs_aops;
4401 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4402 inode->i_fop = &btrfs_file_operations;
4403 inode->i_op = &btrfs_file_inode_operations;
4404 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4405 }
4406 btrfs_update_inode_block_group(trans, inode);
4407 btrfs_update_inode_block_group(trans, dir);
4408 out_unlock:
4409 nr = trans->blocks_used;
4410 btrfs_end_transaction_throttle(trans, root);
4411 fail:
4412 btrfs_unreserve_metadata_space(root, 5);
4413 if (drop_inode) {
4414 inode_dec_link_count(inode);
4415 iput(inode);
4416 }
4417 btrfs_btree_balance_dirty(root, nr);
4418 return err;
4419 }
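4420 /* link: add a new name for an existing inode within the same subvolume */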
4421 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4422 struct dentry *dentry)
4424 struct btrfs_trans_handle *trans;
4425 struct btrfs_root *root = BTRFS_I(dir)->root;
4426 struct inode *inode = old_dentry->d_inode;
4427 u64 index;
4428 unsigned long nr = 0;
4429 int err;
4430 int drop_inode = 0;
4432 if (inode->i_nlink == 0)
4433 return -ENOENT;
4435 /* do not allow sys_link's with other subvols of the same device */
4436 if (root->objectid != BTRFS_I(inode)->root->objectid)
4437 return -EPERM;
4439 /*
4440 * 1 item for inode ref
4441 * 2 items for dir items
4442 */
4443 err = btrfs_reserve_metadata_space(root, 3);
4444 if (err)
4445 return err;
4447 btrfs_inc_nlink(inode);
4449 err = btrfs_set_inode_index(dir, &index);
4450 if (err)
4451 goto fail;
4453 trans = btrfs_start_transaction(root, 1);
4455 btrfs_set_trans_block_group(trans, dir);
4456 atomic_inc(&inode->i_count);
4458 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4460 if (err) {
4461 drop_inode = 1;
4462 } else {
4463 btrfs_update_inode_block_group(trans, dir);
4464 err = btrfs_update_inode(trans, root, inode);
4465 BUG_ON(err);
4466 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4467 }
4469 nr = trans->blocks_used;
4470 btrfs_end_transaction_throttle(trans, root);
4471 fail:
4472 btrfs_unreserve_metadata_space(root, 3);
4473 if (drop_inode) {
4474 inode_dec_link_count(inode);
4475 iput(inode);
4476 }
4477 btrfs_btree_balance_dirty(root, nr);
4478 return err;
4479 }
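4480 /* mkdir: create a new directory inode and link it into 'dir' */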
4481 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4483 struct inode *inode = NULL;
4484 struct btrfs_trans_handle *trans;
4485 struct btrfs_root *root = BTRFS_I(dir)->root;
4486 int err = 0;
4487 int drop_on_err = 0;
4488 u64 objectid = 0;
4489 u64 index = 0;
4490 unsigned long nr = 1;
4492 /*
4493 * 2 items for inode and ref
4494 * 2 items for dir items
4495 * 1 for xattr if selinux is on
4496 */
4497 err = btrfs_reserve_metadata_space(root, 5);
4498 if (err)
4499 return err;
4501 trans = btrfs_start_transaction(root, 1);
4502 if (!trans) {
4503 err = -ENOMEM;
4504 goto out_unlock;
4505 }
4506 btrfs_set_trans_block_group(trans, dir);
4508 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4509 if (err) {
4510 err = -ENOSPC;
4511 goto out_fail;
4512 }
4514 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4515 dentry->d_name.len,
4516 dentry->d_parent->d_inode->i_ino, objectid,
4517 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4518 &index);
4519 if (IS_ERR(inode)) {
4520 err = PTR_ERR(inode);
4521 goto out_fail;
4522 }
4524 drop_on_err = 1;
4526 err = btrfs_init_inode_security(trans, inode, dir);
4527 if (err)
4528 goto out_fail;
4530 inode->i_op = &btrfs_dir_inode_operations;
4531 inode->i_fop = &btrfs_dir_file_operations;
4532 btrfs_set_trans_block_group(trans, inode);
4534 btrfs_i_size_write(inode, 0);
4535 err = btrfs_update_inode(trans, root, inode);
4536 if (err)
4537 goto out_fail;
4539 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4540 inode, dentry->d_name.name,
4541 dentry->d_name.len, 0, index);
4542 if (err)
4543 goto out_fail;
4545 d_instantiate(dentry, inode);
4546 drop_on_err = 0;
4547 btrfs_update_inode_block_group(trans, inode);
4548 btrfs_update_inode_block_group(trans, dir);
4550 out_fail:
4551 nr = trans->blocks_used;
4552 btrfs_end_transaction_throttle(trans, root);
4554 out_unlock:
4555 btrfs_unreserve_metadata_space(root, 5);
4556 if (drop_on_err)
4557 iput(inode);
4558 btrfs_btree_balance_dirty(root, nr);
4559 return err;
4560 }
4562 /* helper for btrfs_get_extent. Given an existing extent in the tree,
4563 * and an extent that you want to insert, deal with overlap and insert
4564 * the new extent into the tree.
4565 */
4566 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4567 struct extent_map *existing,
4568 struct extent_map *em,
4569 u64 map_start, u64 map_len)
4571 u64 start_diff;
4573 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4574 start_diff = map_start - em->start;
4575 em->start = map_start;
4576 em->len = map_len;
4577 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4578 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4579 em->block_start += start_diff;
4580 em->block_len -= start_diff;
4581 }
4582 return add_extent_mapping(em_tree, em);
4583 }
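4584 /* decompress a zlib-compressed inline extent into the page, zeroing the tail */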
4585 static noinline int uncompress_inline(struct btrfs_path *path,
4586 struct inode *inode, struct page *page,
4587 size_t pg_offset, u64 extent_offset,
4588 struct btrfs_file_extent_item *item)
4590 int ret;
4591 struct extent_buffer *leaf = path->nodes[0];
4592 char *tmp;
4593 size_t max_size;
4594 unsigned long inline_size;
4595 unsigned long ptr;
4597 WARN_ON(pg_offset != 0);
4598 max_size = btrfs_file_extent_ram_bytes(leaf, item);
4599 inline_size = btrfs_file_extent_inline_item_len(leaf,
4600 btrfs_item_nr(leaf, path->slots[0]));
4601 tmp = kmalloc(inline_size, GFP_NOFS);
4602 ptr = btrfs_file_extent_inline_start(item);
4604 read_extent_buffer(leaf, tmp, ptr, inline_size);
4606 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4607 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4608 inline_size, max_size);
4609 if (ret) {
4610 char *kaddr = kmap_atomic(page, KM_USER0);
4611 unsigned long copy_size = min_t(u64,
4612 PAGE_CACHE_SIZE - pg_offset,
4613 max_size - extent_offset);
4614 memset(kaddr + pg_offset, 0, copy_size);
4615 kunmap_atomic(kaddr, KM_USER0);
4616 }
4617 kfree(tmp);
4618 return 0;
4619 }
4621 /*
4622 * a bit scary, this does extent mapping from logical file offset to the disk.
4623 * the ugly parts come from merging extents from the disk with the in-ram
4624 * representation. This gets more complex because of the data=ordered code,
4625 * where the in-ram extents might be locked pending data=ordered completion.
4626 *
4627 * This also copies inline extents directly into the page.
4628 */
4630 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4631 size_t pg_offset, u64 start, u64 len,
4632 int create)
4634 int ret;
4635 int err = 0;
4636 u64 bytenr;
4637 u64 extent_start = 0;
4638 u64 extent_end = 0;
4639 u64 objectid = inode->i_ino;
4640 u32 found_type;
4641 struct btrfs_path *path = NULL;
4642 struct btrfs_root *root = BTRFS_I(inode)->root;
4643 struct btrfs_file_extent_item *item;
4644 struct extent_buffer *leaf;
4645 struct btrfs_key found_key;
4646 struct extent_map *em = NULL;
4647 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4648 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4649 struct btrfs_trans_handle *trans = NULL;
4650 int compressed;
4652 again:
4653 read_lock(&em_tree->lock);
4654 em = lookup_extent_mapping(em_tree, start, len);
4655 if (em)
4656 em->bdev = root->fs_info->fs_devices->latest_bdev;
4657 read_unlock(&em_tree->lock);
4659 if (em) {
4660 if (em->start > start || em->start + em->len <= start)
4661 free_extent_map(em);
4662 else if (em->block_start == EXTENT_MAP_INLINE && page)
4663 free_extent_map(em);
4664 else
4665 goto out;
4666 }
4667 em = alloc_extent_map(GFP_NOFS);
4668 if (!em) {
4669 err = -ENOMEM;
4670 goto out;
4671 }
4672 em->bdev = root->fs_info->fs_devices->latest_bdev;
4673 em->start = EXTENT_MAP_HOLE;
4674 em->orig_start = EXTENT_MAP_HOLE;
4675 em->len = (u64)-1;
4676 em->block_len = (u64)-1;
4678 if (!path) {
4679 path = btrfs_alloc_path();
4680 BUG_ON(!path);
4681 }
4683 ret = btrfs_lookup_file_extent(trans, root, path,
4684 objectid, start, trans != NULL);
4685 if (ret < 0) {
4686 err = ret;
4687 goto out;
4688 }
4690 if (ret != 0) {
4691 if (path->slots[0] == 0)
4692 goto not_found;
4693 path->slots[0]--;
4694 }
4696 leaf = path->nodes[0];
4697 item = btrfs_item_ptr(leaf, path->slots[0],
4698 struct btrfs_file_extent_item);
4699 /* are we inside the extent that was found? */
4700 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4701 found_type = btrfs_key_type(&found_key);
4702 if (found_key.objectid != objectid ||
4703 found_type != BTRFS_EXTENT_DATA_KEY) {
4704 goto not_found;
4705 }
4707 found_type = btrfs_file_extent_type(leaf, item);
4708 extent_start = found_key.offset;
4709 compressed = btrfs_file_extent_compression(leaf, item);
4710 if (found_type == BTRFS_FILE_EXTENT_REG ||
4711 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4712 extent_end = extent_start +
4713 btrfs_file_extent_num_bytes(leaf, item);
4714 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4715 size_t size;
4716 size = btrfs_file_extent_inline_len(leaf, item);
4717 extent_end = (extent_start + size + root->sectorsize - 1) &
4718 ~((u64)root->sectorsize - 1);
4719 }
4721 if (start >= extent_end) {
4722 path->slots[0]++;
4723 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4724 ret = btrfs_next_leaf(root, path);
4725 if (ret < 0) {
4726 err = ret;
4727 goto out;
4728 }
4729 if (ret > 0)
4730 goto not_found;
4731 leaf = path->nodes[0];
4732 }
4733 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4734 if (found_key.objectid != objectid ||
4735 found_key.type != BTRFS_EXTENT_DATA_KEY)
4736 goto not_found;
4737 if (start + len <= found_key.offset)
4738 goto not_found;
4739 em->start = start;
4740 em->len = found_key.offset - start;
4741 goto not_found_em;
4742 }
4744 if (found_type == BTRFS_FILE_EXTENT_REG ||
4745 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4746 em->start = extent_start;
4747 em->len = extent_end - extent_start;
4748 em->orig_start = extent_start -
4749 btrfs_file_extent_offset(leaf, item);
4750 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4751 if (bytenr == 0) {
4752 em->block_start = EXTENT_MAP_HOLE;
4753 goto insert;
4754 }
4755 if (compressed) {
4756 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4757 em->block_start = bytenr;
4758 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4759 item);
4760 } else {
4761 bytenr += btrfs_file_extent_offset(leaf, item);
4762 em->block_start = bytenr;
4763 em->block_len = em->len;
4764 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4765 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4766 }
4767 goto insert;
4768 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4769 unsigned long ptr;
4770 char *map;
4771 size_t size;
4772 size_t extent_offset;
4773 size_t copy_size;
4775 em->block_start = EXTENT_MAP_INLINE;
4776 if (!page || create) {
4777 em->start = extent_start;
4778 em->len = extent_end - extent_start;
4779 goto out;
4780 }
4782 size = btrfs_file_extent_inline_len(leaf, item);
4783 extent_offset = page_offset(page) + pg_offset - extent_start;
4784 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4785 size - extent_offset);
4786 em->start = extent_start + extent_offset;
4787 em->len = (copy_size + root->sectorsize - 1) &
4788 ~((u64)root->sectorsize - 1);
4789 em->orig_start = EXTENT_MAP_INLINE;
4790 if (compressed)
4791 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4792 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4793 if (create == 0 && !PageUptodate(page)) {
4794 if (btrfs_file_extent_compression(leaf, item) ==
4795 BTRFS_COMPRESS_ZLIB) {
4796 ret = uncompress_inline(path, inode, page,
4797 pg_offset,
4798 extent_offset, item);
4799 BUG_ON(ret);
4800 } else {
4801 map = kmap(page);
4802 read_extent_buffer(leaf, map + pg_offset, ptr,
4803 copy_size);
4804 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4805 memset(map + pg_offset + copy_size, 0,
4806 PAGE_CACHE_SIZE - pg_offset -
4807 copy_size);
4808 }
4809 kunmap(page);
4810 }
4811 flush_dcache_page(page);
4812 } else if (create && PageUptodate(page)) {
4813 if (!trans) {
4814 kunmap(page);
4815 free_extent_map(em);
4816 em = NULL;
4817 btrfs_release_path(root, path);
4818 trans = btrfs_join_transaction(root, 1);
4819 goto again;
4820 }
4821 map = kmap(page);
4822 write_extent_buffer(leaf, map + pg_offset, ptr,
4823 copy_size);
4824 kunmap(page);
4825 btrfs_mark_buffer_dirty(leaf);
4826 }
4827 set_extent_uptodate(io_tree, em->start,
4828 extent_map_end(em) - 1, GFP_NOFS);
4829 goto insert;
4830 } else {
4831 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4832 WARN_ON(1);
4833 }
4834 not_found:
4835 em->start = start;
4836 em->len = len;
4837 not_found_em:
4838 em->block_start = EXTENT_MAP_HOLE;
4839 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4840 insert:
4841 btrfs_release_path(root, path);
4842 if (em->start > start || extent_map_end(em) <= start) {
4843 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4844 "[%llu %llu]\n", (unsigned long long)em->start,
4845 (unsigned long long)em->len,
4846 (unsigned long long)start,
4847 (unsigned long long)len);
4848 err = -EIO;
4849 goto out;
4850 }
4852 err = 0;
4853 write_lock(&em_tree->lock);
4854 ret = add_extent_mapping(em_tree, em);
4855 /* it is possible that someone inserted the extent into the tree
4856 * while we had the lock dropped. It is also possible that
4857 * an overlapping map exists in the tree
4858 */
4859 if (ret == -EEXIST) {
4860 struct extent_map *existing;
4862 ret = 0;
4864 existing = lookup_extent_mapping(em_tree, start, len);
4865 if (existing && (existing->start > start ||
4866 existing->start + existing->len <= start)) {
4867 free_extent_map(existing);
4868 existing = NULL;
4869 }
4870 if (!existing) {
4871 existing = lookup_extent_mapping(em_tree, em->start,
4872 em->len);
4873 if (existing) {
4874 err = merge_extent_mapping(em_tree, existing,
4875 em, start,
4876 root->sectorsize);
4877 free_extent_map(existing);
4878 if (err) {
4879 free_extent_map(em);
4880 em = NULL;
4881 }
4882 } else {
4883 err = -EIO;
4884 free_extent_map(em);
4885 em = NULL;
4886 }
4887 } else {
4888 free_extent_map(em);
4889 em = existing;
4890 err = 0;
4891 }
4892 }
4893 write_unlock(&em_tree->lock);
4894 out:
4895 if (path)
4896 btrfs_free_path(path);
4897 if (trans) {
4898 ret = btrfs_end_transaction(trans, root);
4899 if (!err)
4900 err = ret;
4901 }
4902 if (err) {
4903 free_extent_map(em);
4904 return ERR_PTR(err);
4905 }
4906 return em;
4907 }
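4908 /* direct IO is not supported, so refuse it outright */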
4909 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4910 const struct iovec *iov, loff_t offset,
4911 unsigned long nr_segs)
4913 return -EINVAL;
4914 }
4916 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4917 __u64 start, __u64 len)
4919 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4920 }
4922 int btrfs_readpage(struct file *file, struct page *page)
4924 struct extent_io_tree *tree;
4925 tree = &BTRFS_I(page->mapping->host)->io_tree;
4926 return extent_read_full_page(tree, page, btrfs_get_extent);
4927 }
4929 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4931 struct extent_io_tree *tree;
4934 if (current->flags & PF_MEMALLOC) {
4935 redirty_page_for_writepage(wbc, page);
4936 unlock_page(page);
4937 return 0;
4938 }
4939 tree = &BTRFS_I(page->mapping->host)->io_tree;
4940 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4941 }
4943 int btrfs_writepages(struct address_space *mapping,
4944 struct writeback_control *wbc)
4946 struct extent_io_tree *tree;
4948 tree = &BTRFS_I(mapping->host)->io_tree;
4949 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4950 }
4952 static int
4953 btrfs_readpages(struct file *file, struct address_space *mapping,
4954 struct list_head *pages, unsigned nr_pages)
4956 struct extent_io_tree *tree;
4957 tree = &BTRFS_I(mapping->host)->io_tree;
4958 return extent_readpages(tree, mapping, pages, nr_pages,
4959 btrfs_get_extent);
4960 }
4961 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4963 struct extent_io_tree *tree;
4964 struct extent_map_tree *map;
4965 int ret;
4967 tree = &BTRFS_I(page->mapping->host)->io_tree;
4968 map = &BTRFS_I(page->mapping->host)->extent_tree;
4969 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4970 if (ret == 1) {
4971 ClearPagePrivate(page);
4972 set_page_private(page, 0);
4973 page_cache_release(page);
4974 }
4975 return ret;
4976 }
4978 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4980 if (PageWriteback(page) || PageDirty(page))
4981 return 0;
4982 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4983 }
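4984 /* invalidatepage: wait for writeback and settle any pending ordered extent */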
4985 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4987 struct extent_io_tree *tree;
4988 struct btrfs_ordered_extent *ordered;
4989 u64 page_start = page_offset(page);
4990 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4993 /*
4994 * we have the page locked, so new writeback can't start,
4995 * and the dirty bit won't be cleared while we are here.
4996 *
4997 * Wait for IO on this page so that we can safely clear
4998 * the PagePrivate2 bit and do ordered accounting
4999 */
5000 wait_on_page_writeback(page);
5002 tree = &BTRFS_I(page->mapping->host)->io_tree;
5003 if (offset) {
5004 btrfs_releasepage(page, GFP_NOFS);
5005 return;
5006 }
5007 lock_extent(tree, page_start, page_end, GFP_NOFS);
5008 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
5009 page_offset(page));
5010 if (ordered) {
5011 /*
5012 * IO on this page will never be started, so we need
5013 * to account for any ordered extents now
5014 */
5015 clear_extent_bit(tree, page_start, page_end,
5016 EXTENT_DIRTY | EXTENT_DELALLOC |
5017 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
5018 NULL, GFP_NOFS);
5019 /*
5020 * whoever cleared the private bit is responsible
5021 * for the finish_ordered_io
5022 */
5023 if (TestClearPagePrivate2(page)) {
5024 btrfs_finish_ordered_io(page->mapping->host,
5025 page_start, page_end);
5026 }
5027 btrfs_put_ordered_extent(ordered);
5028 lock_extent(tree, page_start, page_end, GFP_NOFS);
5029 }
5030 clear_extent_bit(tree, page_start, page_end,
5031 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
5032 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
5033 __btrfs_releasepage(page, GFP_NOFS);
5035 ClearPageChecked(page);
5036 if (PagePrivate(page)) {
5037 ClearPagePrivate(page);
5038 set_page_private(page, 0);
5039 page_cache_release(page);
5040 }
5041 }
5043 /*
5044 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
5045 * called from a page fault handler when a page is first dirtied. Hence we must
5046 * be careful to check for EOF conditions here. We set the page up correctly
5047 * for a written page which means we get ENOSPC checking when writing into
5048 * holes and correct delalloc and unwritten extent mapping on filesystems that
5049 * support these features.
5050 *
5051 * We are not allowed to take the i_mutex here so we have to play games to
5052 * protect against truncate races as the page could now be beyond EOF. Because
5053 * vmtruncate() writes the inode size before removing pages, once we have the
5054 * page lock we can determine safely if the page is beyond EOF. If it is not
5055 * beyond EOF, then the page is guaranteed safe against truncation until we
5056 * unlock the page.
5057 */
5058 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5060 struct page *page = vmf->page;
5061 struct inode *inode = fdentry(vma->vm_file)->d_inode;
5062 struct btrfs_root *root = BTRFS_I(inode)->root;
5063 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5064 struct btrfs_ordered_extent *ordered;
5065 char *kaddr;
5066 unsigned long zero_start;
5067 loff_t size;
5068 int ret;
5069 u64 page_start;
5070 u64 page_end;
5072 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
5073 if (ret) {
5074 if (ret == -ENOMEM)
5075 ret = VM_FAULT_OOM;
5076 else /* -ENOSPC, -EIO, etc */
5077 ret = VM_FAULT_SIGBUS;
5078 goto out;
5079 }
5081 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
5082 if (ret) {
5083 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5084 ret = VM_FAULT_SIGBUS;
5085 goto out;
5086 }
5088 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
5089 again:
5090 lock_page(page);
5091 size = i_size_read(inode);
5092 page_start = page_offset(page);
5093 page_end = page_start + PAGE_CACHE_SIZE - 1;
5095 if ((page->mapping != inode->i_mapping) ||
5096 (page_start >= size)) {
5097 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5098 /* page got truncated out from underneath us */
5099 goto out_unlock;
5100 }
5101 wait_on_page_writeback(page);
5103 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5104 set_page_extent_mapped(page);
5106 /*
5107 * we can't set the delalloc bits if there are pending ordered
5108 * extents. Drop our locks and wait for them to finish
5109 */
5110 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5111 if (ordered) {
5112 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5113 unlock_page(page);
5114 btrfs_start_ordered_extent(inode, ordered, 1);
5115 btrfs_put_ordered_extent(ordered);
5116 goto again;
5117 }
5119 /*
5120 * XXX - page_mkwrite gets called every time the page is dirtied, even
5121 * if it was already dirty, so for space accounting reasons we need to
5122 * clear any delalloc bits for the range we are fixing to save. There
5123 * is probably a better way to do this, but for now keep consistent with
5124 * prepare_pages in the normal write path.
5125 */
5126 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
5127 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5128 GFP_NOFS);
5130 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
5131 if (ret) {
5132 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5133 ret = VM_FAULT_SIGBUS;
5134 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5135 goto out_unlock;
5136 }
5137 ret = 0;
5139 /* page is wholly or partially inside EOF */
5140 if (page_start + PAGE_CACHE_SIZE > size)
5141 zero_start = size & ~PAGE_CACHE_MASK;
5142 else
5143 zero_start = PAGE_CACHE_SIZE;
5145 if (zero_start != PAGE_CACHE_SIZE) {
5146 kaddr = kmap(page);
5147 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
5148 flush_dcache_page(page);
5149 kunmap(page);
5150 }
5151 ClearPageChecked(page);
5152 set_page_dirty(page);
5153 SetPageUptodate(page);
5155 BTRFS_I(inode)->last_trans = root->fs_info->generation;
5156 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5158 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5160 out_unlock:
5161 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
5162 if (!ret)
5163 return VM_FAULT_LOCKED;
5164 unlock_page(page);
5165 out:
5166 return ret;
5167 }
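5168 /* truncate a regular file to i_size, dropping extent items past the new end */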
5169 static void btrfs_truncate(struct inode *inode)
5171 struct btrfs_root *root = BTRFS_I(inode)->root;
5172 int ret;
5173 struct btrfs_trans_handle *trans;
5174 unsigned long nr;
5175 u64 mask = root->sectorsize - 1;
5177 if (!S_ISREG(inode->i_mode)) {
5178 WARN_ON(1);
5179 return;
5180 }
5182 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
5183 if (ret)
5184 return;
5186 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
5187 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
5189 trans = btrfs_start_transaction(root, 1);
5190 btrfs_set_trans_block_group(trans, inode);
5192 /*
5193 * setattr is responsible for setting the ordered_data_close flag,
5194 * but that is only tested during the last file release. That
5195 * could happen well after the next commit, leaving a great big
5196 * window where new writes may get lost if someone chooses to write
5197 * to this file after truncating to zero
5198 *
5199 * The inode doesn't have any dirty data here, and so if we commit
5200 * this is a noop. If someone immediately starts writing to the inode
5201 * it is very likely we'll catch some of their writes in this
5202 * transaction, and the commit will find this file on the ordered
5203 * data list with good things to send down.
5204 *
5205 * This is a best effort solution, there is still a window where
5206 * using truncate to replace the contents of the file will
5207 * end up with a zero length file after a crash.
5208 */
5209 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
5210 btrfs_add_ordered_operation(trans, root, inode);
5212 while (1) {
5213 ret = btrfs_truncate_inode_items(trans, root, inode,
5214 inode->i_size,
5215 BTRFS_EXTENT_DATA_KEY);
5216 if (ret != -EAGAIN)
5217 break;
5219 ret = btrfs_update_inode(trans, root, inode);
5220 BUG_ON(ret);
5222 nr = trans->blocks_used;
5223 btrfs_end_transaction(trans, root);
5224 btrfs_btree_balance_dirty(root, nr);
5226 trans = btrfs_start_transaction(root, 1);
5227 btrfs_set_trans_block_group(trans, inode);
5228 }
5230 if (ret == 0 && inode->i_nlink > 0) {
5231 ret = btrfs_orphan_del(trans, inode);
5232 BUG_ON(ret);
5233 }
5235 ret = btrfs_update_inode(trans, root, inode);
5236 BUG_ON(ret);
5238 nr = trans->blocks_used;
5239 ret = btrfs_end_transaction_throttle(trans, root);
5240 BUG_ON(ret);
5241 btrfs_btree_balance_dirty(root, nr);
5242 }
5244 /*
5245 * create a new subvolume directory/inode (helper for the ioctl).
5246 */
5247 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
5248 struct btrfs_root *new_root,
5249 u64 new_dirid, u64 alloc_hint)
5251 struct inode *inode;
5252 int err;
5253 u64 index = 0;
5255 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
5256 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
5257 if (IS_ERR(inode))
5258 return PTR_ERR(inode);
5259 inode->i_op = &btrfs_dir_inode_operations;
5260 inode->i_fop = &btrfs_dir_file_operations;
5262 inode->i_nlink = 1;
5263 btrfs_i_size_write(inode, 0);
5265 err = btrfs_update_inode(trans, new_root, inode);
5266 BUG_ON(err);
5268 iput(inode);
5269 return 0;
5270 }
5272 /* helper function for file defrag and space balancing. This
5273 * forces readahead on a given range of bytes in an inode
5274 */
5275 unsigned long btrfs_force_ra(struct address_space *mapping,
5276 struct file_ra_state *ra, struct file *file,
5277 pgoff_t offset, pgoff_t last_index)
5279 pgoff_t req_size = last_index - offset + 1;
5281 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
5282 return offset + req_size;
5283 }
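5284 /* allocate an in-memory btrfs inode from the slab and reset its bookkeeping */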
5285 struct inode *btrfs_alloc_inode(struct super_block *sb)
5287 struct btrfs_inode *ei;
5289 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5290 if (!ei)
5291 return NULL;
5292 ei->last_trans = 0;
5293 ei->last_sub_trans = 0;
5294 ei->logged_trans = 0;
5295 ei->outstanding_extents = 0;
5296 ei->reserved_extents = 0;
5297 ei->root = NULL;
5298 spin_lock_init(&ei->accounting_lock);
5299 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5300 INIT_LIST_HEAD(&ei->i_orphan);
5301 INIT_LIST_HEAD(&ei->ordered_operations);
5302 return &ei->vfs_inode;
5303 }
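5304 /* final inode teardown: drop ordered extents, orphan entries and extent cache */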
5305 void btrfs_destroy_inode(struct inode *inode)
5307 struct btrfs_ordered_extent *ordered;
5308 struct btrfs_root *root = BTRFS_I(inode)->root;
5310 WARN_ON(!list_empty(&inode->i_dentry));
5311 WARN_ON(inode->i_data.nrpages);
5313 /*
5314 * This can happen where we create an inode, but somebody else also
5315 * created the same inode and we need to destroy the one we already
5316 * created.
5317 */
5318 if (!root)
5319 goto free;
5321 /*
5322 * Make sure we're properly removed from the ordered operation
5323 * lists.
5324 */
5325 smp_mb();
5326 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
5327 spin_lock(&root->fs_info->ordered_extent_lock);
5328 list_del_init(&BTRFS_I(inode)->ordered_operations);
5329 spin_unlock(&root->fs_info->ordered_extent_lock);
5330 }
5332 spin_lock(&root->list_lock);
5333 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5334 printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
5335 inode->i_ino);
5336 list_del_init(&BTRFS_I(inode)->i_orphan);
5337 }
5338 spin_unlock(&root->list_lock);
5340 while (1) {
5341 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
5342 if (!ordered)
5343 break;
5344 else {
5345 printk(KERN_ERR "btrfs found ordered "
5346 "extent %llu %llu on inode cleanup\n",
5347 (unsigned long long)ordered->file_offset,
5348 (unsigned long long)ordered->len);
5349 btrfs_remove_ordered_extent(inode, ordered);
5350 btrfs_put_ordered_extent(ordered);
5351 btrfs_put_ordered_extent(ordered);
5352 }
5353 }
5354 inode_tree_del(inode);
5355 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5356 free:
5357 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5358 }
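5359 /* at final iput, delete inodes of dead subvolumes instead of caching them */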
5360 void btrfs_drop_inode(struct inode *inode)
5362 struct btrfs_root *root = BTRFS_I(inode)->root;
5364 if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
5365 generic_delete_inode(inode);
5366 else
5367 generic_drop_inode(inode);
5368 }
5370 static void init_once(void *foo)
5372 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
5374 inode_init_once(&ei->vfs_inode);
5375 }
5377 void btrfs_destroy_cachep(void)
5379 if (btrfs_inode_cachep)
5380 kmem_cache_destroy(btrfs_inode_cachep);
5381 if (btrfs_trans_handle_cachep)
5382 kmem_cache_destroy(btrfs_trans_handle_cachep);
5383 if (btrfs_transaction_cachep)
5384 kmem_cache_destroy(btrfs_transaction_cachep);
5385 if (btrfs_path_cachep)
5386 kmem_cache_destroy(btrfs_path_cachep);
5387 }
5389 int btrfs_init_cachep(void)
5391 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
5392 sizeof(struct btrfs_inode), 0,
5393 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
5394 if (!btrfs_inode_cachep)
5395 goto fail;
5397 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
5398 sizeof(struct btrfs_trans_handle), 0,
5399 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5400 if (!btrfs_trans_handle_cachep)
5401 goto fail;
5403 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
5404 sizeof(struct btrfs_transaction), 0,
5405 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5406 if (!btrfs_transaction_cachep)
5407 goto fail;
5409 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
5410 sizeof(struct btrfs_path), 0,
5411 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5412 if (!btrfs_path_cachep)
5413 goto fail;
5415 return 0;
5416 fail:
5417 btrfs_destroy_cachep();
5418 return -ENOMEM;
5419 }
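5420 /* getattr: report the subvolume's anon device and count delalloc bytes in st_blocks */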
5421 static int btrfs_getattr(struct vfsmount *mnt,
5422 struct dentry *dentry, struct kstat *stat)
5424 struct inode *inode = dentry->d_inode;
5425 generic_fillattr(inode, stat);
5426 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
5427 stat->blksize = PAGE_CACHE_SIZE;
5428 stat->blocks = (inode_get_bytes(inode) +
5429 BTRFS_I(inode)->delalloc_bytes) >> 9;
5430 return 0;
5431 }
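5432 /* rename: move old_dentry to new_dentry, unlinking any existing target first */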
5433 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5434 struct inode *new_dir, struct dentry *new_dentry)
5436 struct btrfs_trans_handle *trans;
5437 struct btrfs_root *root = BTRFS_I(old_dir)->root;
5438 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
5439 struct inode *new_inode = new_dentry->d_inode;
5440 struct inode *old_inode = old_dentry->d_inode;
5441 struct timespec ctime = CURRENT_TIME;
5442 u64 index = 0;
5443 u64 root_objectid;
5444 int ret;
5446 if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5447 return -EPERM;
5449 /* we only allow rename subvolume link between subvolumes */
5450 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
5451 return -EXDEV;
5453 if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
5454 (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
5455 return -ENOTEMPTY;
5457 if (S_ISDIR(old_inode->i_mode) && new_inode &&
5458 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5459 return -ENOTEMPTY;
5461 /*
5462 * We want to reserve the absolute worst case amount of items. So if
5463 * both inodes are subvols and we need to unlink them then that would
5464 * require 4 item modifications, but if they are both normal inodes it
5465 * would require 5 item modifications, so we'll assume they're normal
5466 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
5467 * should cover the worst case number of items we'll modify.
5468 */
5469 ret = btrfs_reserve_metadata_space(root, 11);
5470 if (ret)
5471 return ret;
5473 /*
5474 * we're using rename to replace one file with another,
5475 * and the replacement file is large. Start IO on it now so
5476 * we don't add too much work to the end of the transaction
5477 */
5478 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5479 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
5480 filemap_flush(old_inode->i_mapping);
5482 /* close the racy window with snapshot create/destroy ioctl */
5483 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5484 down_read(&root->fs_info->subvol_sem);
5486 trans = btrfs_start_transaction(root, 1);
5487 btrfs_set_trans_block_group(trans, new_dir);
5489 if (dest != root)
5490 btrfs_record_root_in_trans(trans, dest);
5492 ret = btrfs_set_inode_index(new_dir, &index);
5493 if (ret)
5494 goto out_fail;
5496 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5497 /* force full log commit if subvolume involved. */
5498 root->fs_info->last_trans_log_full_commit = trans->transid;
5499 } else {
5500 ret = btrfs_insert_inode_ref(trans, dest,
5501 new_dentry->d_name.name,
5502 new_dentry->d_name.len,
5503 old_inode->i_ino,
5504 new_dir->i_ino, index);
5505 if (ret)
5506 goto out_fail;
5507 /*
5508 * this is an ugly little race, but the rename is required
5509 * to make sure that if we crash, the inode is either at the
5510 * old name or the new one. pinning the log transaction lets
5511 * us make sure we don't allow a log commit to come in after
5512 * we unlink the name but before we add the new name back in.
5513 */
5514 btrfs_pin_log_trans(root);
5515 }
5516 /*
5517 * make sure the inode gets flushed if it is replacing
5518 * something.
5519 */
5520 if (new_inode && new_inode->i_size &&
5521 old_inode && S_ISREG(old_inode->i_mode)) {
5522 btrfs_add_ordered_operation(trans, root, old_inode);
5523 }
5525 old_dir->i_ctime = old_dir->i_mtime = ctime;
5526 new_dir->i_ctime = new_dir->i_mtime = ctime;
5527 old_inode->i_ctime = ctime;
5529 if (old_dentry->d_parent != new_dentry->d_parent)
5530 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
5532 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5533 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
5534 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
5535 old_dentry->d_name.name,
5536 old_dentry->d_name.len);
5537 } else {
5538 btrfs_inc_nlink(old_dentry->d_inode);
5539 ret = btrfs_unlink_inode(trans, root, old_dir,
5540 old_dentry->d_inode,
5541 old_dentry->d_name.name,
5542 old_dentry->d_name.len);
5543 }
5544 BUG_ON(ret);
5546 if (new_inode) {
5547 new_inode->i_ctime = CURRENT_TIME;
5548 if (unlikely(new_inode->i_ino ==
5549 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
5550 root_objectid = BTRFS_I(new_inode)->location.objectid;
5551 ret = btrfs_unlink_subvol(trans, dest, new_dir,
5552 root_objectid,
5553 new_dentry->d_name.name,
5554 new_dentry->d_name.len);
5555 BUG_ON(new_inode->i_nlink == 0);
5556 } else {
5557 ret = btrfs_unlink_inode(trans, dest, new_dir,
5558 new_dentry->d_inode,
5559 new_dentry->d_name.name,
5560 new_dentry->d_name.len);
5561 }
5562 BUG_ON(ret);
5563 if (new_inode->i_nlink == 0) {
5564 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
5565 BUG_ON(ret);
5566 }
5567 }
5569 ret = btrfs_add_link(trans, new_dir, old_inode,
5570 new_dentry->d_name.name,
5571 new_dentry->d_name.len, 0, index);
5572 BUG_ON(ret);
5574 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
5575 btrfs_log_new_name(trans, old_inode, old_dir,
5576 new_dentry->d_parent);
5577 btrfs_end_log_trans(root);
5578 }
5579 out_fail:
5580 btrfs_end_transaction_throttle(trans, root);
5582 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5583 up_read(&root->fs_info->subvol_sem);
5585 btrfs_unreserve_metadata_space(root, 11);
5586 return ret;
5587 }
5589 /*
5590 * some fairly slow code that needs optimization. This walks the list
5591 * of all the inodes with pending delalloc and forces them to disk.
5592 */
5593 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
5595 struct list_head *head = &root->fs_info->delalloc_inodes;
5596 struct btrfs_inode *binode;
5597 struct inode *inode;
5599 if (root->fs_info->sb->s_flags & MS_RDONLY)
5600 return -EROFS;
5602 spin_lock(&root->fs_info->delalloc_lock);
5603 while (!list_empty(head)) {
5604 binode = list_entry(head->next, struct btrfs_inode,
5605 delalloc_inodes);
5606 inode = igrab(&binode->vfs_inode);
5607 if (!inode)
5608 list_del_init(&binode->delalloc_inodes);
5609 spin_unlock(&root->fs_info->delalloc_lock);
5610 if (inode) {
5611 filemap_flush(inode->i_mapping);
5612 if (delay_iput)
5613 btrfs_add_delayed_iput(inode);
5614 else
5615 iput(inode);
5616 }
5617 cond_resched();
5618 spin_lock(&root->fs_info->delalloc_lock);
5619 }
5620 spin_unlock(&root->fs_info->delalloc_lock);
5622 /* the filemap_flush will queue IO into the worker threads, but
5623 * we have to make sure the IO is actually started and that
5624 * ordered extents get created before we return
5625 */
5626 atomic_inc(&root->fs_info->async_submit_draining);
5627 while (atomic_read(&root->fs_info->nr_async_submits) ||
5628 atomic_read(&root->fs_info->async_delalloc_pages)) {
5629 wait_event(root->fs_info->async_submit_wait,
5630 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
5631 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
5632 }
5633 atomic_dec(&root->fs_info->async_submit_draining);
5634 return 0;
5635 }
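5636 /* symlink: the target path is stored as an inline file extent */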
5637 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5638 const char *symname)
5640 struct btrfs_trans_handle *trans;
5641 struct btrfs_root *root = BTRFS_I(dir)->root;
5642 struct btrfs_path *path;
5643 struct btrfs_key key;
5644 struct inode *inode = NULL;
5645 int err;
5646 int drop_inode = 0;
5647 u64 objectid;
5648 u64 index = 0;
5649 int name_len;
5650 int datasize;
5651 unsigned long ptr;
5652 struct btrfs_file_extent_item *ei;
5653 struct extent_buffer *leaf;
5654 unsigned long nr = 0;
5656 name_len = strlen(symname) + 1;
5657 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5658 return -ENAMETOOLONG;
5660 /*
5661 * 2 items for inode item and ref
5662 * 2 items for dir items
5663 * 1 item for xattr if selinux is on
5664 */
5665 err = btrfs_reserve_metadata_space(root, 5);
5666 if (err)
5667 return err;
5669 trans = btrfs_start_transaction(root, 1);
5670 if (!trans)
5671 goto out_fail;
5672 btrfs_set_trans_block_group(trans, dir);
5674 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
5675 if (err) {
5676 err = -ENOSPC;
5677 goto out_unlock;
5678 }
5680 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5681 dentry->d_name.len,
5682 dentry->d_parent->d_inode->i_ino, objectid,
5683 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
5684 &index);
5685 err = PTR_ERR(inode);
5686 if (IS_ERR(inode))
5687 goto out_unlock;
5689 err = btrfs_init_inode_security(trans, inode, dir);
5690 if (err) {
5691 drop_inode = 1;
5692 goto out_unlock;
5693 }
5695 btrfs_set_trans_block_group(trans, inode);
5696 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
5697 if (err)
5698 drop_inode = 1;
5699 else {
5700 inode->i_mapping->a_ops = &btrfs_aops;
5701 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5702 inode->i_fop = &btrfs_file_operations;
5703 inode->i_op = &btrfs_file_inode_operations;
5704 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5705 }
5706 btrfs_update_inode_block_group(trans, inode);
5707 btrfs_update_inode_block_group(trans, dir);
5708 if (drop_inode)
5709 goto out_unlock;
5711 path = btrfs_alloc_path();
5712 BUG_ON(!path);
5713 key.objectid = inode->i_ino;
5714 key.offset = 0;
5715 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5716 datasize = btrfs_file_extent_calc_inline_size(name_len);
5717 err = btrfs_insert_empty_item(trans, root, path, &key,
5718 datasize);
5719 if (err) {
5720 drop_inode = 1;
5721 goto out_unlock;
5722 }
5723 leaf = path->nodes[0];
5724 ei = btrfs_item_ptr(leaf, path->slots[0],
5725 struct btrfs_file_extent_item);
5726 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5727 btrfs_set_file_extent_type(leaf, ei,
5728 BTRFS_FILE_EXTENT_INLINE);
5729 btrfs_set_file_extent_encryption(leaf, ei, 0);
5730 btrfs_set_file_extent_compression(leaf, ei, 0);
5731 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5732 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5734 ptr = btrfs_file_extent_inline_start(ei);
5735 write_extent_buffer(leaf, symname, ptr, name_len);
5736 btrfs_mark_buffer_dirty(leaf);
5737 btrfs_free_path(path);
5739 inode->i_op = &btrfs_symlink_inode_operations;
5740 inode->i_mapping->a_ops = &btrfs_symlink_aops;
5741 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5742 inode_set_bytes(inode, name_len);
5743 btrfs_i_size_write(inode, name_len - 1);
5744 err = btrfs_update_inode(trans, root, inode);
5745 if (err)
5746 drop_inode = 1;
5748 out_unlock:
5749 nr = trans->blocks_used;
5750 btrfs_end_transaction_throttle(trans, root);
5751 out_fail:
5752 btrfs_unreserve_metadata_space(root, 5);
5753 if (drop_inode) {
5754 inode_dec_link_count(inode);
5755 iput(inode);
5756 }
5757 btrfs_btree_balance_dirty(root, nr);
5758 return err;
5759 }
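5760 /* reserve and insert PREALLOC file extents covering [start, end) */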
5761 static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
5762 u64 alloc_hint, int mode, loff_t actual_len)
5764 struct btrfs_trans_handle *trans;
5765 struct btrfs_root *root = BTRFS_I(inode)->root;
5766 struct btrfs_key ins;
5767 u64 alloc_size;
5768 u64 cur_offset = start;
5769 u64 num_bytes = end - start;
5770 int ret = 0;
5771 u64 i_size;
5773 while (num_bytes > 0) {
5774 alloc_size = min(num_bytes, root->fs_info->max_extent);
5776 trans = btrfs_start_transaction(root, 1);
5778 ret = btrfs_reserve_extent(trans, root, alloc_size,
5779 root->sectorsize, 0, alloc_hint,
5780 (u64)-1, &ins, 1);
5781 if (ret) {
5782 WARN_ON(1);
5783 goto stop_trans;
5784 }
5786 ret = btrfs_reserve_metadata_space(root, 3);
5787 if (ret) {
5788 btrfs_free_reserved_extent(root, ins.objectid,
5789 ins.offset);
5790 goto stop_trans;
5791 }
5793 ret = insert_reserved_file_extent(trans, inode,
5794 cur_offset, ins.objectid,
5795 ins.offset, ins.offset,
5796 ins.offset, 0, 0, 0,
5797 BTRFS_FILE_EXTENT_PREALLOC);
5798 BUG_ON(ret);
5799 btrfs_drop_extent_cache(inode, cur_offset,
5800 cur_offset + ins.offset - 1, 0);
5802 num_bytes -= ins.offset;
5803 cur_offset += ins.offset;
5804 alloc_hint = ins.objectid + ins.offset;
5806 inode->i_ctime = CURRENT_TIME;
5807 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5808 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5809 (actual_len > inode->i_size) &&
5810 (cur_offset > inode->i_size)) {
5812 if (cur_offset > actual_len)
5813 i_size = actual_len;
5814 else
5815 i_size = cur_offset;
5816 i_size_write(inode, i_size);
5817 btrfs_ordered_update_i_size(inode, i_size, NULL);
5818 }
5820 ret = btrfs_update_inode(trans, root, inode);
5821 BUG_ON(ret);
5823 btrfs_end_transaction(trans, root);
5824 btrfs_unreserve_metadata_space(root, 3);
5825 }
5826 return ret;
5828 stop_trans:
5829 btrfs_end_transaction(trans, root);
5830 return ret;
5831 }
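5832 /* fallocate: preallocate the range; i_size grows unless FALLOC_FL_KEEP_SIZE */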
5834 static long btrfs_fallocate(struct inode *inode, int mode,
5835 loff_t offset, loff_t len)
5837 u64 cur_offset;
5838 u64 last_byte;
5839 u64 alloc_start;
5840 u64 alloc_end;
5841 u64 alloc_hint = 0;
5842 u64 locked_end;
5843 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5844 struct extent_map *em;
5845 int ret;
5847 alloc_start = offset & ~mask;
5848 alloc_end = (offset + len + mask) & ~mask;
5850 /*
5851 * wait for ordered IO before we have any locks. We'll loop again
5852 * below with the locks held.
5853 */
5854 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5856 mutex_lock(&inode->i_mutex);
5857 if (alloc_start > inode->i_size) {
5858 ret = btrfs_cont_expand(inode, alloc_start);
5859 if (ret)
5860 goto out;
5861 }
5863 ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
5864 alloc_end - alloc_start);
5865 if (ret)
5866 goto out;
5868 locked_end = alloc_end - 1;
5869 while (1) {
5870 struct btrfs_ordered_extent *ordered;
5872 /* the extent lock is ordered inside the running
5873 * transaction
5874 */
5875 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5876 GFP_NOFS);
5877 ordered = btrfs_lookup_first_ordered_extent(inode,
5878 alloc_end - 1);
5879 if (ordered &&
5880 ordered->file_offset + ordered->len > alloc_start &&
5881 ordered->file_offset < alloc_end) {
5882 btrfs_put_ordered_extent(ordered);
5883 unlock_extent(&BTRFS_I(inode)->io_tree,
5884 alloc_start, locked_end, GFP_NOFS);
5885 /*
5886 * we can't wait on the range with the transaction
5887 * running or with the extent lock held
5888 */
5889 btrfs_wait_ordered_range(inode, alloc_start,
5890 alloc_end - alloc_start);
5891 } else {
5892 if (ordered)
5893 btrfs_put_ordered_extent(ordered);
5894 break;
5895 }
5896 }
5898 cur_offset = alloc_start;
5899 while (1) {
5900 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5901 alloc_end - cur_offset, 0);
5902 BUG_ON(IS_ERR(em) || !em);
5903 last_byte = min(extent_map_end(em), alloc_end);
5904 last_byte = (last_byte + mask) & ~mask;
5905 if (em->block_start == EXTENT_MAP_HOLE ||
5906 (cur_offset >= inode->i_size &&
5907 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5908 ret = prealloc_file_range(inode,
5909 cur_offset, last_byte,
5910 alloc_hint, mode, offset+len);
5911 if (ret < 0) {
5912 free_extent_map(em);
5913 break;
5914 }
5915 }
5916 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5917 alloc_hint = em->block_start;
5918 free_extent_map(em);
5920 cur_offset = last_byte;
5921 if (cur_offset >= alloc_end) {
5922 ret = 0;
5923 break;
5924 }
5925 }
5926 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5927 GFP_NOFS);
5929 btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
5930 alloc_end - alloc_start);
5931 out:
5932 mutex_unlock(&inode->i_mutex);
5933 return ret;
5934 }
5936 static int btrfs_set_page_dirty(struct page *page)
5938 return __set_page_dirty_nobuffers(page);
5939 }
5941 static int btrfs_permission(struct inode *inode, int mask)
5943 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
5944 return -EACCES;
5945 return generic_permission(inode, mask, btrfs_check_acl);
5946 }

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};

static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
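
/*
 * hooks that plug btrfs into the generic extent_io code: filling
 * delalloc ranges, submitting bios, and per-page end_io processing
 */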
static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.truncate	= btrfs_truncate,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fallocate	= btrfs_fallocate,
	.fiemap		= btrfs_fiemap,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
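
/*
 * btrfs_dentry_delete keeps dentries that point into a deleted
 * subvolume from lingering in the dcache
 */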
const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};