Btrfs: implement FS_IOC_GETFLAGS/SETFLAGS/GETVERSION
fs/btrfs/inode.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"

struct btrfs_iget_args {
        u64 ino;
        struct btrfs_root *root;
};

static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};

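/*
 * Worked example of the table above (a sketch; the actual lookup is done
 * by the dir-entry helpers elsewhere in this file): the on-disk directory
 * entry type for an inode is derived from its mode bits, e.g.
 *
 *      umode_t mode = S_IFDIR | 0755;
 *      u8 type = btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 *      --> type == BTRFS_FT_DIR
 *
 * S_SHIFT is 12 because the S_IFMT format bits occupy bits 12-15 of the
 * mode, so shifting them down yields a small, dense array index.
 */
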
static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);

static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
{
        int err;

        err = btrfs_init_acl(inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(inode, dir);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                struct page **compressed_pages)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        size_t datasize;
        unsigned long offset;
        int use_compress = 0;

        if (compressed_size && compressed_pages) {
                use_compress = 1;
                cur_size = compressed_size;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;
        btrfs_set_trans_block_group(trans, inode);

        key.objectid = inode->i_ino;
        key.offset = start;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(cur_size);

        inode_add_bytes(inode, size);
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        BUG_ON(ret);
        if (ret) {
                err = ret;
                goto fail;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (use_compress) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                         PAGE_CACHE_SIZE);

                        kaddr = kmap_atomic(cpage, KM_USER0);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr, KM_USER0);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  BTRFS_COMPRESS_ZLIB);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page, KM_USER0);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr, KM_USER0);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);

        BTRFS_I(inode)->disk_i_size = inode->i_size;
        btrfs_update_inode(trans, root, inode);
        return 0;
fail:
        btrfs_free_path(path);
        return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct inode *inode, u64 start, u64 end,
                                 size_t compressed_size,
                                 struct page **compressed_pages)
{
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = (end + root->sectorsize - 1) &
                        ~((u64)root->sectorsize - 1);
        u64 hint_byte;
        u64 data_len = inline_len;
        int ret;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end >= PAGE_CACHE_SIZE ||
            data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        ret = btrfs_drop_extents(trans, root, inode, start,
                                 aligned_end, aligned_end, start, &hint_byte);
        BUG_ON(ret);

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, root, inode, start,
                                   inline_len, compressed_size,
                                   compressed_pages);
        BUG_ON(ret);
        btrfs_drop_extent_cache(inode, start, aligned_end, 0);
        return 0;
}

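/*
 * Worked example of the aligned_end math above (a sketch, assuming a
 * 4096 byte sectorsize): for end == 6000,
 *
 *      aligned_end = (6000 + 4095) & ~4095 == 8192
 *
 * i.e. the byte range handed to btrfs_drop_extents is rounded up to the
 * next sector boundary, so a partial tail sector is fully covered.
 */
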
struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        struct list_head list;
};

struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 num_bytes;
        u64 orig_start;
        u64 disk_num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = 128 * 1024;
        unsigned long max_uncompressed = 128 * 1024;
        int i;
        int will_compress;

        orig_start = start;

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
        nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /* we want to make sure that amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
        num_bytes = max(blocksize, num_bytes);
        disk_num_bytes = num_bytes;
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
            btrfs_test_opt(root, COMPRESS)) {
                WARN_ON(pages);
                pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

                ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
                                                total_compressed, pages,
                                                nr_pages, &nr_pages_ret,
                                                &total_in,
                                                &total_compressed,
                                                max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_CACHE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page, KM_USER0);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        will_compress = 1;
                }
        }
        if (start == 0) {
                trans = btrfs_join_transaction(root, 1);
                BUG_ON(!trans);
                btrfs_set_trans_block_group(trans, inode);

                /* let's try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(trans, root, inode,
                                                    start, end, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(trans, root, inode,
                                                    start, end,
                                                    total_compressed, pages);
                }
                btrfs_end_transaction(trans, root);
                if (ret == 0) {
                        /*
                         * inline extent creation worked, we don't need
                         * to create any more async work items.  Unlock
                         * and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode,
                                                     &BTRFS_I(inode)->io_tree,
                                                     start, end, NULL, 1, 0,
                                                     0, 1, 1, 1);
                        ret = 0;
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent; round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = (total_compressed + blocksize - 1) &
                        ~(blocksize - 1);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = (total_in + PAGE_CACHE_SIZE - 1) &
                        ~(PAGE_CACHE_SIZE - 1);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        disk_num_bytes = total_compressed;
                        num_bytes = total_in;
                }
        }
        if (!will_compress && pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        page_cache_release(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
        }
        if (will_compress) {
                *num_added += 1;

                /* the async work queues will take care of doing actual
                 * allocation on disk for these compressed pages,
                 * and will submit them to the elevator.
                 */
                add_async_extent(async_cow, start, num_bytes,
                                 total_compressed, pages, nr_pages_ret);

                if (start + num_bytes < end && start + num_bytes < actual_end) {
                        start += num_bytes;
                        pages = NULL;
                        cond_resched();
                        goto again;
                }
        } else {
cleanup_and_bail_uncompressed:
                /*
                 * No compression, but we still need to write the pages in
                 * the file we've been given so far.  redirty the locked
                 * page if it corresponds to our extent and set things up
                 * for the async work queue to run cow_file_range to do
                 * the normal delalloc dance
                 */
                if (page_offset(locked_page) >= start &&
                    page_offset(locked_page) <= end) {
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
                add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
                *num_added += 1;
        }

out:
        return 0;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                page_cache_release(pages[i]);
        }
        kfree(pages);

        goto out;
}

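/*
 * Worked example of the sizing logic in compress_file_range (a sketch,
 * assuming PAGE_CACHE_SIZE == 4096): nr_pages is capped at
 * (128 * 1024) / 4096 == 32 pages, so a single compression pass never
 * maps more than 128k of input, matching the max_uncompressed limit on
 * the ram footprint of one compressed extent.
 */
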
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_trans_handle *trans;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret;

        if (list_empty(&async_cow->extents))
                return 0;

        trans = btrfs_join_transaction(root, 1);

        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                    async_extent->start +
                                    async_extent->ram_size - 1, GFP_NOFS);

                        /* allocate blocks */
                        cow_file_range(inode, async_cow->locked_page,
                                       async_extent->start,
                                       async_extent->start +
                                       async_extent->ram_size - 1,
                                       &page_started, &nr_written, 0);

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1,
                            GFP_NOFS);
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                ret = btrfs_reserve_extent(trans, root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint,
                                           (u64)-1, &ins, 1);
                BUG_ON(ret);
                em = alloc_extent_map(GFP_NOFS);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

                while (1) {
                        spin_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em);
                        spin_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                ret = btrfs_add_ordered_extent(inode, async_extent->start,
                                               ins.objectid,
                                               async_extent->ram_size,
                                               ins.offset,
                                               BTRFS_ORDERED_COMPRESSED);
                BUG_ON(ret);

                btrfs_end_transaction(trans, root);

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode,
                                             &BTRFS_I(inode)->io_tree,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             NULL, 1, 1, 0, 1, 1, 0);

                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);

                BUG_ON(ret);
                trans = btrfs_join_transaction(root, 1);
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }

        btrfs_end_transaction(trans, root);
        return 0;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written,
                                   int unlock)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        trans = btrfs_join_transaction(root, 1);
        BUG_ON(!trans);
        btrfs_set_trans_block_group(trans, inode);

        actual_end = min_t(u64, isize, end + 1);

        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
        num_bytes = max(blocksize, num_bytes);
        disk_num_bytes = num_bytes;
        ret = 0;

        if (start == 0) {
                /* let's try to make an inline extent */
                ret = cow_file_range_inline(trans, root, inode,
                                            start, end, 0, NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode,
                                                     &BTRFS_I(inode)->io_tree,
                                                     start, end, NULL, 1, 1,
                                                     1, 1, 1, 1);
                        *nr_written = *nr_written +
                             (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
                        ret = 0;
                        goto out;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(&root->fs_info->super_copy));

        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
                ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           (u64)-1, &ins, 1);
                BUG_ON(ret);

                em = alloc_extent_map(GFP_NOFS);
                em->start = start;
                em->orig_start = em->start;

                ram_size = ins.offset;
                em->len = ins.offset;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);

                while (1) {
                        spin_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em);
                        spin_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                BUG_ON(ret);

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        BUG_ON(ret);
                }

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 */
                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                             start, start + ram_size - 1,
                                             locked_page, unlock, 1,
                                             1, 0, 0, 0);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        ret = 0;
        btrfs_end_transaction(trans, root);

        return ret;
}

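/*
 * Worked example of the rounding at the top of cow_file_range (a sketch,
 * assuming a 4096 byte blocksize): for start == 0 and end == 5000,
 *
 *      num_bytes = (5000 - 0 + 4096) & ~4095 == 8192
 *
 * so the delalloc range is extended to whole blocks before disk extents
 * are reserved for it.
 */
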
/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0)
                async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;

        atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

        /* 5MB threshold; the original had a "1042" typo here */
        if (atomic_read(&root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * 1024 * 1024; /* the original had a "1042" typo here */

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
                         EXTENT_DELALLOC, 1, 0, GFP_NOFS);
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                async_cow->inode = inode;
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                        cur_end = end;
                else
                        cur_end = min(end, start + 512 * 1024 - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                async_cow->work.func = async_cow_start;
                async_cow->work.ordered_func = async_cow_submit;
                async_cow->work.ordered_free = async_cow_free;
                async_cow->work.flags = 0;

                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_worker(&root->fs_info->delalloc_workers,
                                   &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                      atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}

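/*
 * The btrfs_work wiring above is what gives compressed writeback its two
 * phases: the worker pool runs .func (async_cow_start, the parallel
 * compression step) in any order, while .ordered_func (async_cow_submit,
 * the IO submission step) runs strictly in the order the work items were
 * queued, and .ordered_free releases the struct last.  The 512k cap on
 * cur_end keeps individual work items small enough to spread across cpus.
 */
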
static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}

/*
 * called during the nocow writeback call back.  This checks for snapshots
 * or COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                              struct page *locked_page,
                              u64 start, u64 end, int *page_started, int force,
                              unsigned long *nr_written)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 extent_offset;
        u64 disk_bytenr;
        u64 num_bytes;
        int extent_type;
        int ret;
        int type;
        int nocow;
        int check_prev = 1;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        trans = btrfs_join_transaction(root, 1);
        BUG_ON(!trans);

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               cur_offset, 0);
                BUG_ON(ret < 0);
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == inode->i_ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                BUG_ON(1);
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > inode->i_ino ||
                    found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
                            btrfs_file_extent_encryption(leaf, fi) ||
                            btrfs_file_extent_other_encoding(leaf, fi))
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
                                goto out_check;
                        if (btrfs_extent_readonly(root, disk_bytenr))
                                goto out_check;
                        if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
                                                  found_key.offset -
                                                  extent_offset, disk_bytenr))
                                goto out_check;
                        disk_bytenr += extent_offset;
                        disk_bytenr += cur_offset - found_key.offset;
                        num_bytes = min(end + 1, extent_end) - cur_offset;
                        /*
                         * force cow if csum exists in the range.
                         * this ensures that csums for a given extent are
                         * either valid or do not exist.
                         */
                        if (csum_exist_in_range(root, disk_bytenr, num_bytes))
                                goto out_check;
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                        extent_end = ALIGN(extent_end, root->sectorsize);
                } else {
                        BUG_ON(1);
                }
out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
                        goto next_slot;
                }
                if (!nocow) {
                        if (cow_start == (u64)-1)
                                cow_start = cur_offset;
                        cur_offset = extent_end;
                        if (cur_offset > end)
                                break;
                        path->slots[0]++;
                        goto next_slot;
                }

                btrfs_release_path(root, path);
                if (cow_start != (u64)-1) {
                        ret = cow_file_range(inode, locked_page, cow_start,
                                        found_key.offset - 1, page_started,
                                        nr_written, 1);
                        BUG_ON(ret);
                        cow_start = (u64)-1;
                }

                if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        struct extent_map *em;
                        struct extent_map_tree *em_tree;
                        em_tree = &BTRFS_I(inode)->extent_tree;
                        em = alloc_extent_map(GFP_NOFS);
                        em->start = cur_offset;
                        em->orig_start = em->start;
                        em->len = num_bytes;
                        em->block_len = num_bytes;
                        em->block_start = disk_bytenr;
                        em->bdev = root->fs_info->fs_devices->latest_bdev;
                        set_bit(EXTENT_FLAG_PINNED, &em->flags);
                        while (1) {
                                spin_lock(&em_tree->lock);
                                ret = add_extent_mapping(em_tree, em);
                                spin_unlock(&em_tree->lock);
                                if (ret != -EEXIST) {
                                        free_extent_map(em);
                                        break;
                                }
                                btrfs_drop_extent_cache(inode, em->start,
                                                em->start + em->len - 1, 0);
                        }
                        type = BTRFS_ORDERED_PREALLOC;
                } else {
                        type = BTRFS_ORDERED_NOCOW;
                }

                ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
                                               num_bytes, num_bytes, type);
                BUG_ON(ret);

                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                        cur_offset, cur_offset + num_bytes - 1,
                                        locked_page, 1, 1, 1, 0, 0, 0);
                cur_offset = extent_end;
                if (cur_offset > end)
                        break;
        }
        btrfs_release_path(root, path);

        if (cur_offset <= end && cow_start == (u64)-1)
                cow_start = cur_offset;
        if (cow_start != (u64)-1) {
                ret = cow_file_range(inode, locked_page, cow_start, end,
                                     page_started, nr_written, 1);
                BUG_ON(ret);
        }

        ret = btrfs_end_transaction(trans, root);
        BUG_ON(ret);
        btrfs_free_path(path);
        return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
                              u64 start, u64 end, int *page_started,
                              unsigned long *nr_written)
{
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;

        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 1, nr_written);
        else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
        else if (!btrfs_test_opt(root, COMPRESS))
                ret = cow_file_range(inode, locked_page, start, end,
                                     page_started, nr_written, 1);
        else
                ret = cow_file_range_async(inode, locked_page, start, end,
                                           page_started, nr_written);
        return ret;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
                       unsigned long old, unsigned long bits)
{
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
        if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
                btrfs_delalloc_reserve_space(root, inode, end - start + 1);
                spin_lock(&root->fs_info->delalloc_lock);
                BTRFS_I(inode)->delalloc_bytes += end - start + 1;
                root->fs_info->delalloc_bytes += end - start + 1;
                if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                        list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
                                      &root->fs_info->delalloc_inodes);
                }
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
                                unsigned long old, unsigned long bits)
{
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
        if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;

                spin_lock(&root->fs_info->delalloc_lock);
                if (end - start + 1 > root->fs_info->delalloc_bytes) {
                        printk(KERN_INFO "btrfs warning: delalloc account "
                               "%llu %llu\n",
                               (unsigned long long)(end - start + 1),
                               (unsigned long long)
                               root->fs_info->delalloc_bytes);
                        btrfs_delalloc_free_space(root, inode, (u64)-1);
                        root->fs_info->delalloc_bytes = 0;
                        BTRFS_I(inode)->delalloc_bytes = 0;
                } else {
                        btrfs_delalloc_free_space(root, inode,
                                                  end - start + 1);
                        root->fs_info->delalloc_bytes -= end - start + 1;
                        BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
                }
                if (BTRFS_I(inode)->delalloc_bytes == 0 &&
                    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                        list_del_init(&BTRFS_I(inode)->delalloc_inodes);
                }
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
                         size_t size, struct bio *bio,
                         unsigned long bio_flags)
{
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct btrfs_mapping_tree *map_tree;
        u64 logical = (u64)bio->bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        int ret;

        if (bio_flags & EXTENT_BIO_COMPRESSED)
                return 0;

        length = bio->bi_size;
        map_tree = &root->fs_info->mapping_tree;
        map_length = length;
        ret = btrfs_map_block(map_tree, READ, logical,
                              &map_length, NULL, 0);

        if (map_length < length + size)
                return 1;
        return 0;
}

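/*
 * Note on the math above: bi_sector counts 512 byte sectors, so
 * (u64)bio->bi_sector << 9 converts the bio's position into the byte
 * offset that btrfs_map_block expects, e.g. sector 8 -> byte 4096.
 */
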
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;

        ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
        BUG_ON(ret);
        return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                          int mirror_num, unsigned long bio_flags)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                          int mirror_num, unsigned long bio_flags)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        int skip_sum;

        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
        BUG_ON(ret);

        if (!(rw & (1 << BIO_RW))) {
                if (bio_flags & EXTENT_BIO_COMPRESSED) {
                        return btrfs_submit_compressed_read(inode, bio,
                                                    mirror_num, bio_flags);
                } else if (!skip_sum)
                        btrfs_lookup_bio_sums(root, inode, bio, NULL);
                goto mapit;
        } else if (!skip_sum) {
                /* csum items have already been cloned */
                if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
                        goto mapit;
                /* we're doing a write, do the async checksumming */
                return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num,
                                   bio_flags, __btrfs_submit_bio_start,
                                   __btrfs_submit_bio_done);
        }

mapit:
        return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
                             struct inode *inode, u64 file_offset,
                             struct list_head *list)
{
        struct btrfs_ordered_sum *sum;

        btrfs_set_trans_block_group(trans, inode);

        list_for_each_entry(sum, list, list) {
                btrfs_csum_file_blocks(trans,
                       BTRFS_I(inode)->root->fs_info->csum_root, sum);
        }
        return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
        if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
                WARN_ON(1);
        return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
                                   GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
        struct page *page;
        struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
        struct btrfs_writepage_fixup *fixup;
        struct btrfs_ordered_extent *ordered;
        struct page *page;
        struct inode *inode;
        u64 page_start;
        u64 page_end;

        fixup = container_of(work, struct btrfs_writepage_fixup, work);
        page = fixup->page;
again:
        lock_page(page);
        if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
                ClearPageChecked(page);
                goto out_page;
        }

        inode = page->mapping->host;
        page_start = page_offset(page);
        page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

        lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

        /* already ordered? We're done */
        if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
                           EXTENT_ORDERED, 0)) {
                goto out;
        }

        ordered = btrfs_lookup_ordered_extent(inode, page_start);
        if (ordered) {
                unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
                              page_end, GFP_NOFS);
                unlock_page(page);
                btrfs_start_ordered_extent(inode, ordered, 1);
                goto again;
        }

        btrfs_set_extent_delalloc(inode, page_start, page_end);
        ClearPageChecked(page);
out:
        unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
        unlock_page(page);
        page_cache_release(page);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
        struct inode *inode = page->mapping->host;
        struct btrfs_writepage_fixup *fixup;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;

        ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
                             EXTENT_ORDERED, 0);
        if (ret)
                return 0;

        if (PageChecked(page))
                return -EAGAIN;

        fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
        if (!fixup)
                return -EAGAIN;

        SetPageChecked(page);
        page_cache_get(page);
        fixup->work.func = btrfs_writepage_fixup_worker;
        fixup->page = page;
        btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
        return -EAGAIN;
}

static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
                                       struct inode *inode, u64 file_pos,
                                       u64 disk_bytenr, u64 disk_num_bytes,
                                       u64 num_bytes, u64 ram_bytes,
                                       u64 locked_end,
                                       u8 compression, u8 encryption,
                                       u16 other_encoding, int extent_type)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_file_extent_item *fi;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key ins;
        u64 hint;
        int ret;

        path = btrfs_alloc_path();
        BUG_ON(!path);

        path->leave_spinning = 1;
        ret = btrfs_drop_extents(trans, root, inode, file_pos,
                                 file_pos + num_bytes, locked_end,
                                 file_pos, &hint);
        BUG_ON(ret);

        ins.objectid = inode->i_ino;
        ins.offset = file_pos;
        ins.type = BTRFS_EXTENT_DATA_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
        BUG_ON(ret);
        leaf = path->nodes[0];
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, fi, trans->transid);
        btrfs_set_file_extent_type(leaf, fi, extent_type);
        btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
        btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
        btrfs_set_file_extent_offset(leaf, fi, 0);
        btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
        btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
        btrfs_set_file_extent_compression(leaf, fi, compression);
        btrfs_set_file_extent_encryption(leaf, fi, encryption);
        btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

        btrfs_unlock_up_safe(path, 1);
        btrfs_set_lock_blocking(leaf);

        btrfs_mark_buffer_dirty(leaf);

        inode_add_bytes(inode, num_bytes);
        btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

        ins.objectid = disk_bytenr;
        ins.offset = disk_num_bytes;
        ins.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_alloc_reserved_file_extent(trans, root,
                                               root->root_key.objectid,
                                               inode->i_ino, file_pos, &ins);
        BUG_ON(ret);
        btrfs_free_path(path);

        return 0;
}

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
static noinline void reada_csum(struct btrfs_root *root,
                                struct btrfs_path *path,
                                struct btrfs_ordered_extent *ordered_extent)
{
        struct btrfs_ordered_sum *sum;
        u64 bytenr;

        sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
                         list);
        bytenr = sum->sums[0].bytenr;

        /*
         * we don't care about the results, the point of this search is
         * just to get the btree leaves into ram
         */
        btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
}

/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct btrfs_ordered_extent *ordered_extent = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_path *path;
        int compressed = 0;
        int ret;

        ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
        if (!ret)
                return 0;

        /*
         * before we join the transaction, try to do some of our IO.
         * This will limit the amount of IO that we have to do with
         * the transaction running.  We're unlikely to need to do any
         * IO if the file extents are new, the disk_i_size checks
         * covers the most common case.
         */
        if (start < BTRFS_I(inode)->disk_i_size) {
                path = btrfs_alloc_path();
                if (path) {
                        ret = btrfs_lookup_file_extent(NULL, root, path,
                                                       inode->i_ino,
                                                       start, 0);
                        ordered_extent = btrfs_lookup_ordered_extent(inode,
                                                                     start);
                        if (!list_empty(&ordered_extent->list)) {
                                btrfs_release_path(root, path);
                                reada_csum(root, path, ordered_extent);
                        }
                        btrfs_free_path(path);
                }
        }

        trans = btrfs_join_transaction(root, 1);

        if (!ordered_extent)
                ordered_extent = btrfs_lookup_ordered_extent(inode, start);
        BUG_ON(!ordered_extent);
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
                goto nocow;

        lock_extent(io_tree, ordered_extent->file_offset,
                    ordered_extent->file_offset + ordered_extent->len - 1,
                    GFP_NOFS);

        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
                compressed = 1;
        if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
                BUG_ON(compressed);
                ret = btrfs_mark_extent_written(trans, root, inode,
                                                ordered_extent->file_offset,
                                                ordered_extent->file_offset +
                                                ordered_extent->len);
                BUG_ON(ret);
        } else {
                ret = insert_reserved_file_extent(trans, inode,
                                                ordered_extent->file_offset,
                                                ordered_extent->start,
                                                ordered_extent->disk_len,
                                                ordered_extent->len,
                                                ordered_extent->len,
                                                ordered_extent->file_offset +
                                                ordered_extent->len,
                                                compressed, 0, 0,
                                                BTRFS_FILE_EXTENT_REG);
                BUG_ON(ret);
        }
        unlock_extent(io_tree, ordered_extent->file_offset,
                      ordered_extent->file_offset + ordered_extent->len - 1,
                      GFP_NOFS);
nocow:
        add_pending_csums(trans, inode, ordered_extent->file_offset,
                          &ordered_extent->list);

        mutex_lock(&BTRFS_I(inode)->extent_mutex);
        btrfs_ordered_update_i_size(inode, ordered_extent);
        btrfs_update_inode(trans, root, inode);
        btrfs_remove_ordered_extent(inode, ordered_extent);
        mutex_unlock(&BTRFS_I(inode)->extent_mutex);

        /* once for us */
        btrfs_put_ordered_extent(ordered_extent);
        /* once for the tree */
        btrfs_put_ordered_extent(ordered_extent);

        btrfs_end_transaction(trans, root);
        return 0;
}

static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
                                struct extent_state *state, int uptodate)
{
        return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
        struct page *page;
        u64 start;
        u64 len;
        u64 logical;
        unsigned long bio_flags;
        int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
                         struct page *page, u64 start, u64 end,
                         struct extent_state *state)
{
        struct io_failure_record *failrec = NULL;
        u64 private;
        struct extent_map *em;
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct bio *bio;
        int num_copies;
        int ret;
        int rw;
        u64 logical;

        ret = get_state_private(failure_tree, start, &private);
        if (ret) {
                failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
                if (!failrec)
                        return -ENOMEM;
                failrec->start = start;
                failrec->len = end - start + 1;
                failrec->last_mirror = 0;
                failrec->bio_flags = 0;

                spin_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, failrec->len);
                if (em->start > start || em->start + em->len < start) {
                        free_extent_map(em);
                        em = NULL;
                }
                spin_unlock(&em_tree->lock);

                if (!em || IS_ERR(em)) {
                        kfree(failrec);
                        return -EIO;
                }
                logical = start - em->start;
                logical = em->block_start + logical;
                if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                        logical = em->block_start;
                        failrec->bio_flags = EXTENT_BIO_COMPRESSED;
                }
                failrec->logical = logical;
                free_extent_map(em);
                set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
                                EXTENT_DIRTY, GFP_NOFS);
                set_state_private(failure_tree, start,
                                  (u64)(unsigned long)failrec);
        } else {
                failrec = (struct io_failure_record *)(unsigned long)private;
        }
        num_copies = btrfs_num_copies(
                              &BTRFS_I(inode)->root->fs_info->mapping_tree,
                              failrec->logical, failrec->len);
        failrec->last_mirror++;
        if (!state) {
                spin_lock(&BTRFS_I(inode)->io_tree.lock);
                state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
                                                    failrec->start,
                                                    EXTENT_LOCKED);
                if (state && state->start != failrec->start)
                        state = NULL;
                spin_unlock(&BTRFS_I(inode)->io_tree.lock);
        }
        if (!state || failrec->last_mirror > num_copies) {
                set_state_private(failure_tree, failrec->start, 0);
                clear_extent_bits(failure_tree, failrec->start,
                                  failrec->start + failrec->len - 1,
                                  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
                kfree(failrec);
                return -EIO;
        }
        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_private = state;
        bio->bi_end_io = failed_bio->bi_end_io;
        bio->bi_sector = failrec->logical >> 9;
        bio->bi_bdev = failed_bio->bi_bdev;
        bio->bi_size = 0;

        bio_add_page(bio, page, failrec->len, start - page_offset(page));
        if (failed_bio->bi_rw & (1 << BIO_RW))
                rw = WRITE;
        else
                rw = READ;

        BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
                                                      failrec->last_mirror,
                                                      failrec->bio_flags);
        return 0;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
        u64 private;
        u64 private_failure;
        struct io_failure_record *failure;
        int ret;

        private = 0;
        if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
                             (u64)-1, 1, EXTENT_DIRTY)) {
                ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
                                        start, &private_failure);
                if (ret == 0) {
                        failure = (struct io_failure_record *)(unsigned long)
                                   private_failure;
                        set_state_private(&BTRFS_I(inode)->io_failure_tree,
                                          failure->start, 0);
                        clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
                                          failure->start,
                                          failure->start + failure->len - 1,
                                          EXTENT_DIRTY | EXTENT_LOCKED,
                                          GFP_NOFS);
                        kfree(failure);
                }
        }
        return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
{
        size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        char *kaddr;
        u64 private = ~(u32)0;
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u32 csum = ~(u32)0;

        if (PageChecked(page)) {
                ClearPageChecked(page);
                goto good;
        }

        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
                return 0;

        if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
            test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
                clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
                                  GFP_NOFS);
                return 0;
        }

        if (state && state->start == start) {
                private = state->private;
                ret = 0;
        } else {
                ret = get_state_private(io_tree, start, &private);
        }
        kaddr = kmap_atomic(page, KM_USER0);
        if (ret)
                goto zeroit;

        csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
        btrfs_csum_final(csum, (char *)&csum);
        if (csum != private)
                goto zeroit;

        kunmap_atomic(kaddr, KM_USER0);
good:
        /* if the io failure tree for this inode is non-empty,
         * check to see if we've recovered from a failed IO
         */
        btrfs_clean_io_failures(inode, start);
        return 0;

zeroit:
        if (printk_ratelimit()) {
                printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
                       "private %llu\n", page->mapping->host->i_ino,
                       (unsigned long long)start, csum,
                       (unsigned long long)private);
        }
        memset(kaddr + offset, 1, end - start + 1);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        if (private == 0)
                return 0;
        return -EIO;
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;

        spin_lock(&root->list_lock);

        /* already on the orphan list, we're good */
        if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
                spin_unlock(&root->list_lock);
                return 0;
        }

        list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);

        spin_unlock(&root->list_lock);

        /*
         * insert an orphan item to track this unlinked/truncated file
         */
        ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);

        return ret;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;

        spin_lock(&root->list_lock);

        if (list_empty(&BTRFS_I(inode)->i_orphan)) {
                spin_unlock(&root->list_lock);
                return 0;
        }

        list_del_init(&BTRFS_I(inode)->i_orphan);
        if (!trans) {
                spin_unlock(&root->list_lock);
                return 0;
        }

        spin_unlock(&root->list_lock);

        ret = btrfs_del_orphan_item(trans, root, inode->i_ino);

        return ret;
}

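/*
 * Typical orphan usage (a sketch, not a verbatim caller from this file):
 *
 *      trans = btrfs_start_transaction(root, 1);
 *      btrfs_orphan_add(trans, inode);   /- pin the inode across a crash -/
 *      ...truncate or unlink the file...
 *      btrfs_orphan_del(trans, inode);   /- all done, drop the marker -/
 *      btrfs_end_transaction(trans, root);
 *
 * any orphan items still on disk at mount time are reaped by
 * btrfs_orphan_cleanup() below.
 */
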
1901 * this cleans up any orphans that may be left on the list from the last use
1902 * of this root.
1904 void btrfs_orphan_cleanup(struct btrfs_root *root)
1906 struct btrfs_path *path;
1907 struct extent_buffer *leaf;
1908 struct btrfs_item *item;
1909 struct btrfs_key key, found_key;
1910 struct btrfs_trans_handle *trans;
1911 struct inode *inode;
1912 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1914 path = btrfs_alloc_path();
1915 if (!path)
1916 return;
1917 path->reada = -1;
1919 key.objectid = BTRFS_ORPHAN_OBJECTID;
1920 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1921 key.offset = (u64)-1;
1924 while (1) {
1925 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1926 if (ret < 0) {
1927 printk(KERN_ERR "Error searching slot for orphan: %d\n", ret);
1929 break;
1933 * if ret == 0, we found what we were searching for, which
1934 * is weird, but possible, so only screw with the path if we didn't
1935 * find the key and see if we have stuff that matches
1937 if (ret > 0) {
1938 if (path->slots[0] == 0)
1939 break;
1940 path->slots[0]--;
1943 /* pull out the item */
1944 leaf = path->nodes[0];
1945 item = btrfs_item_nr(leaf, path->slots[0]);
1946 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1948 /* make sure the item matches what we want */
1949 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1950 break;
1951 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1952 break;
1954 /* release the path since we're done with it */
1955 btrfs_release_path(root, path);
1958 * this is basically btrfs_lookup, without the
1959 * crossing-root part. we store the inode number in the
1960 * offset of the orphan item.
1962 found_key.objectid = found_key.offset;
1963 found_key.type = BTRFS_INODE_ITEM_KEY;
1964 found_key.offset = 0;
1965 inode = btrfs_iget(root->fs_info->sb, &found_key, root);
1966 if (IS_ERR(inode))
1967 break;
1970 * add this inode to the orphan list so btrfs_orphan_del does
1971 * the proper thing when we hit it
1973 spin_lock(&root->list_lock);
1974 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1975 spin_unlock(&root->list_lock);
1978 * if this is a bad inode, it means we actually succeeded in
1979 * removing the inode, but not the orphan record, which means
1980 * we need to manually delete the orphan since iput will just
1981 * do a destroy_inode
1983 if (is_bad_inode(inode)) {
1984 trans = btrfs_start_transaction(root, 1);
1985 btrfs_orphan_del(trans, inode);
1986 btrfs_end_transaction(trans, root);
1987 iput(inode);
1988 continue;
1991 /* if we have links, this was a truncate, let's do that */
1992 if (inode->i_nlink) {
1993 nr_truncate++;
1994 btrfs_truncate(inode);
1995 } else {
1996 nr_unlink++;
1999 /* this will do delete_inode and everything for us */
2000 iput(inode);
2003 if (nr_unlink)
2004 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2005 if (nr_truncate)
2006 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2008 btrfs_free_path(path);
2012 * very simple check to peek ahead in the leaf looking for xattrs. If we
2013 * don't find any xattrs, we know there can't be any acls.
2015 * slot is the slot the inode is in, objectid is the objectid of the inode
2017 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2018 int slot, u64 objectid)
2020 u32 nritems = btrfs_header_nritems(leaf);
2021 struct btrfs_key found_key;
2022 int scanned = 0;
2024 slot++;
2025 while (slot < nritems) {
2026 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2028 /* we found a different objectid, there must not be acls */
2029 if (found_key.objectid != objectid)
2030 return 0;
2032 /* we found an xattr, assume we've got an acl */
2033 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2034 return 1;
2037 * we found a key greater than an xattr key; there can't
2038 * be any acls later on
2040 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2041 return 0;
2043 slot++;
2044 scanned++;
2047 * it goes inode, inode backrefs, xattrs, extents,
2048 * so if there are a ton of hard links to an inode there can
2049 * be a lot of backrefs. Don't waste time searching too hard,
2050 * this is just an optimization
2052 if (scanned >= 8)
2053 break;
2055 /* we hit the end of the leaf before we found an xattr or
2056 * something larger than an xattr. We have to assume the inode
2057 * has acls
2059 return 1;
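/*
 * Note on why the peek above is safe: for a single objectid, items are
 * laid out in key-type order (inode item, then inode backrefs, then
 * xattrs, then extents, per the comment above), so once a key type
 * greater than BTRFS_XATTR_ITEM_KEY is seen, no xattr and therefore no
 * acl can follow for that inode.
 */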
2063 * read an inode from the btree into the in-memory inode
2065 static void btrfs_read_locked_inode(struct inode *inode)
2067 struct btrfs_path *path;
2068 struct extent_buffer *leaf;
2069 struct btrfs_inode_item *inode_item;
2070 struct btrfs_timespec *tspec;
2071 struct btrfs_root *root = BTRFS_I(inode)->root;
2072 struct btrfs_key location;
2073 int maybe_acls;
2074 u64 alloc_group_block;
2075 u32 rdev;
2076 int ret;
2078 path = btrfs_alloc_path();
2079 BUG_ON(!path);
2080 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2082 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2083 if (ret)
2084 goto make_bad;
2086 leaf = path->nodes[0];
2087 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2088 struct btrfs_inode_item);
2090 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2091 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2092 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2093 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2094 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2096 tspec = btrfs_inode_atime(inode_item);
2097 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2098 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2100 tspec = btrfs_inode_mtime(inode_item);
2101 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2102 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2104 tspec = btrfs_inode_ctime(inode_item);
2105 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2106 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2108 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2109 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2110 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2111 inode->i_generation = BTRFS_I(inode)->generation;
2112 inode->i_rdev = 0;
2113 rdev = btrfs_inode_rdev(leaf, inode_item);
2115 BTRFS_I(inode)->index_cnt = (u64)-1;
2116 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2118 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2121 * try to precache a NULL acl entry for files that don't have
2122 * any xattrs or acls
2124 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2125 if (!maybe_acls) {
2126 BTRFS_I(inode)->i_acl = NULL;
2127 BTRFS_I(inode)->i_default_acl = NULL;
2130 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2131 alloc_group_block, 0);
2132 btrfs_free_path(path);
2133 inode_item = NULL;
2135 switch (inode->i_mode & S_IFMT) {
2136 case S_IFREG:
2137 inode->i_mapping->a_ops = &btrfs_aops;
2138 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2139 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2140 inode->i_fop = &btrfs_file_operations;
2141 inode->i_op = &btrfs_file_inode_operations;
2142 break;
2143 case S_IFDIR:
2144 inode->i_fop = &btrfs_dir_file_operations;
2145 if (root == root->fs_info->tree_root)
2146 inode->i_op = &btrfs_dir_ro_inode_operations;
2147 else
2148 inode->i_op = &btrfs_dir_inode_operations;
2149 break;
2150 case S_IFLNK:
2151 inode->i_op = &btrfs_symlink_inode_operations;
2152 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2153 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2154 break;
2155 default:
2156 inode->i_op = &btrfs_special_inode_operations;
2157 init_special_inode(inode, inode->i_mode, rdev);
2158 break;
2161 btrfs_update_iflags(inode);
2162 return;
2164 make_bad:
2165 btrfs_free_path(path);
2166 make_bad_inode(inode);
2170 * given a leaf and an inode, copy the inode fields into the leaf
2172 static void fill_inode_item(struct btrfs_trans_handle *trans,
2173 struct extent_buffer *leaf,
2174 struct btrfs_inode_item *item,
2175 struct inode *inode)
2177 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2178 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2179 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2180 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2181 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2183 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2184 inode->i_atime.tv_sec);
2185 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2186 inode->i_atime.tv_nsec);
2188 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2189 inode->i_mtime.tv_sec);
2190 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2191 inode->i_mtime.tv_nsec);
2193 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2194 inode->i_ctime.tv_sec);
2195 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2196 inode->i_ctime.tv_nsec);
2198 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2199 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2200 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2201 btrfs_set_inode_transid(leaf, item, trans->transid);
2202 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2203 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2204 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2208 * copy everything in the in-memory inode into the btree.
2210 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2211 struct btrfs_root *root, struct inode *inode)
2213 struct btrfs_inode_item *inode_item;
2214 struct btrfs_path *path;
2215 struct extent_buffer *leaf;
2216 int ret;
2218 path = btrfs_alloc_path();
2219 BUG_ON(!path);
2220 path->leave_spinning = 1;
2221 ret = btrfs_lookup_inode(trans, root, path,
2222 &BTRFS_I(inode)->location, 1);
2223 if (ret) {
2224 if (ret > 0)
2225 ret = -ENOENT;
2226 goto failed;
2229 btrfs_unlock_up_safe(path, 1);
2230 leaf = path->nodes[0];
2231 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2232 struct btrfs_inode_item);
2234 fill_inode_item(trans, leaf, inode_item, inode);
2235 btrfs_mark_buffer_dirty(leaf);
2236 btrfs_set_inode_last_trans(trans, inode);
2237 ret = 0;
2238 failed:
2239 btrfs_free_path(path);
2240 return ret;
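/*
 * Illustrative sketch of the usual calling pattern: any persistent
 * change to an in-memory inode is followed by btrfs_update_inode()
 * inside the same transaction.  example_touch_inode is a hypothetical
 * name, not a real caller.
 */
#if 0
static int example_touch_inode(struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	int ret;

	trans = btrfs_start_transaction(root, 1);
	inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	return ret;
}
#endif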
2245 * unlink helper that gets used here in inode.c and in the tree logging
2246 * recovery code. It removes a link in a directory with a given name, and
2247 * also drops the back refs in the inode to the directory
2249 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2250 struct btrfs_root *root,
2251 struct inode *dir, struct inode *inode,
2252 const char *name, int name_len)
2254 struct btrfs_path *path;
2255 int ret = 0;
2256 struct extent_buffer *leaf;
2257 struct btrfs_dir_item *di;
2258 struct btrfs_key key;
2259 u64 index;
2261 path = btrfs_alloc_path();
2262 if (!path) {
2263 ret = -ENOMEM;
2264 goto err;
2267 path->leave_spinning = 1;
2268 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2269 name, name_len, -1);
2270 if (IS_ERR(di)) {
2271 ret = PTR_ERR(di);
2272 goto err;
2274 if (!di) {
2275 ret = -ENOENT;
2276 goto err;
2278 leaf = path->nodes[0];
2279 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2280 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2281 if (ret)
2282 goto err;
2283 btrfs_release_path(root, path);
2285 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2286 inode->i_ino,
2287 dir->i_ino, &index);
2288 if (ret) {
2289 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2290 "inode %lu parent %lu\n", name_len, name,
2291 inode->i_ino, dir->i_ino);
2292 goto err;
2295 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2296 index, name, name_len, -1);
2297 if (IS_ERR(di)) {
2298 ret = PTR_ERR(di);
2299 goto err;
2301 if (!di) {
2302 ret = -ENOENT;
2303 goto err;
2305 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2306 btrfs_release_path(root, path);
2308 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2309 inode, dir->i_ino);
2310 BUG_ON(ret != 0 && ret != -ENOENT);
2312 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2313 dir, index);
2314 BUG_ON(ret);
2315 err:
2316 btrfs_free_path(path);
2317 if (ret)
2318 goto out;
2320 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2321 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2322 btrfs_update_inode(trans, root, dir);
2323 btrfs_drop_nlink(inode);
2324 ret = btrfs_update_inode(trans, root, inode);
2325 dir->i_sb->s_dirt = 1;
2326 out:
2327 return ret;
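/*
 * Note on the i_size accounting above: each name is stored in the
 * directory twice, once under BTRFS_DIR_ITEM_KEY (hashed by name) and
 * once under BTRFS_DIR_INDEX_KEY (ordered by sequence number), so the
 * directory size shrinks by name_len * 2 here and grows by the same
 * amount in btrfs_add_link() below.
 */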
2330 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2332 struct btrfs_root *root;
2333 struct btrfs_trans_handle *trans;
2334 struct inode *inode = dentry->d_inode;
2335 int ret;
2336 unsigned long nr = 0;
2338 root = BTRFS_I(dir)->root;
2340 trans = btrfs_start_transaction(root, 1);
2342 btrfs_set_trans_block_group(trans, dir);
2344 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2346 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2347 dentry->d_name.name, dentry->d_name.len);
2349 if (inode->i_nlink == 0)
2350 ret = btrfs_orphan_add(trans, inode);
2352 nr = trans->blocks_used;
2354 btrfs_end_transaction_throttle(trans, root);
2355 btrfs_btree_balance_dirty(root, nr);
2356 return ret;
2359 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2361 struct inode *inode = dentry->d_inode;
2362 int err = 0;
2363 int ret;
2364 struct btrfs_root *root = BTRFS_I(dir)->root;
2365 struct btrfs_trans_handle *trans;
2366 unsigned long nr = 0;
2369 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2370 * the root of a subvolume or snapshot
2372 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2373 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2374 return -ENOTEMPTY;
2377 trans = btrfs_start_transaction(root, 1);
2378 btrfs_set_trans_block_group(trans, dir);
2380 err = btrfs_orphan_add(trans, inode);
2381 if (err)
2382 goto fail_trans;
2384 /* now the directory is empty */
2385 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2386 dentry->d_name.name, dentry->d_name.len);
2387 if (!err)
2388 btrfs_i_size_write(inode, 0);
2390 fail_trans:
2391 nr = trans->blocks_used;
2392 ret = btrfs_end_transaction_throttle(trans, root);
2393 btrfs_btree_balance_dirty(root, nr);
2395 if (ret && !err)
2396 err = ret;
2397 return err;
2400 #if 0
2402 * when truncating bytes in a file, it is possible to avoid reading
2403 * the leaves that contain only checksum items. This can be the
2404 * majority of the IO required to delete a large file, but it must
2405 * be done carefully.
2407 * The keys in the level just above the leaves are checked to make sure
2408 * the lowest key in a given leaf is a csum key, and starts at an offset
2409 * after the new size.
2411 * Then the key for the next leaf is checked to make sure it also has
2412 * a checksum item for the same file. If it does, we know our target leaf
2413 * contains only checksum items, and it can be safely freed without reading
2414 * it.
2416 * This is just an optimization targeted at large files. It may do
2417 * nothing. It will return 0 unless things went badly.
2419 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2420 struct btrfs_root *root,
2421 struct btrfs_path *path,
2422 struct inode *inode, u64 new_size)
2424 struct btrfs_key key;
2425 int ret;
2426 int nritems;
2427 struct btrfs_key found_key;
2428 struct btrfs_key other_key;
2429 struct btrfs_leaf_ref *ref;
2430 u64 leaf_gen;
2431 u64 leaf_start;
2433 path->lowest_level = 1;
2434 key.objectid = inode->i_ino;
2435 key.type = BTRFS_CSUM_ITEM_KEY;
2436 key.offset = new_size;
2437 again:
2438 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2439 if (ret < 0)
2440 goto out;
2442 if (path->nodes[1] == NULL) {
2443 ret = 0;
2444 goto out;
2446 ret = 0;
2447 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2448 nritems = btrfs_header_nritems(path->nodes[1]);
2450 if (!nritems)
2451 goto out;
2453 if (path->slots[1] >= nritems)
2454 goto next_node;
2456 /* did we find a key greater than anything we want to delete? */
2457 if (found_key.objectid > inode->i_ino ||
2458 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2459 goto out;
2461 /* we check the next key in the node to make sure the leaf contains
2462 * only checksum items. This comparison doesn't work if our
2463 * leaf is the last one in the node
2465 if (path->slots[1] + 1 >= nritems) {
2466 next_node:
2467 /* search forward from the last key in the node, this
2468 * will bring us into the next node in the tree
2470 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2472 /* unlikely, but we inc below, so check to be safe */
2473 if (found_key.offset == (u64)-1)
2474 goto out;
2476 /* search_forward needs a path with locks held, do the
2477 * search again for the original key. It is possible
2478 * this will race with a balance and return a path that
2479 * we could modify, but this drop is just an optimization
2480 * and is allowed to miss some leaves.
2482 btrfs_release_path(root, path);
2483 found_key.offset++;
2485 /* setup a max key for search_forward */
2486 other_key.offset = (u64)-1;
2487 other_key.type = key.type;
2488 other_key.objectid = key.objectid;
2490 path->keep_locks = 1;
2491 ret = btrfs_search_forward(root, &found_key, &other_key,
2492 path, 0, 0);
2493 path->keep_locks = 0;
2494 if (ret || found_key.objectid != key.objectid ||
2495 found_key.type != key.type) {
2496 ret = 0;
2497 goto out;
2500 key.offset = found_key.offset;
2501 btrfs_release_path(root, path);
2502 cond_resched();
2503 goto again;
2506 /* we know there's one more slot after us in the tree,
2507 * read that key so we can verify it is also a checksum item
2509 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2511 if (found_key.objectid < inode->i_ino)
2512 goto next_key;
2514 if (found_key.type != key.type || found_key.offset < new_size)
2515 goto next_key;
2518 * if the key for the next leaf isn't a csum key from this objectid,
2519 * we can't be sure there aren't good items inside this leaf.
2520 * Bail out
2522 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2523 goto out;
2525 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2526 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2528 * it is safe to delete this leaf, it contains only
2529 * csum items from this inode at an offset >= new_size
2531 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2532 BUG_ON(ret);
2534 if (root->ref_cows && leaf_gen < trans->transid) {
2535 ref = btrfs_alloc_leaf_ref(root, 0);
2536 if (ref) {
2537 ref->root_gen = root->root_key.offset;
2538 ref->bytenr = leaf_start;
2539 ref->owner = 0;
2540 ref->generation = leaf_gen;
2541 ref->nritems = 0;
2543 btrfs_sort_leaf_ref(ref);
2545 ret = btrfs_add_leaf_ref(root, ref, 0);
2546 WARN_ON(ret);
2547 btrfs_free_leaf_ref(root, ref);
2548 } else {
2549 WARN_ON(1);
2552 next_key:
2553 btrfs_release_path(root, path);
2555 if (other_key.objectid == inode->i_ino &&
2556 other_key.type == key.type && other_key.offset > key.offset) {
2557 key.offset = other_key.offset;
2558 cond_resched();
2559 goto again;
2561 ret = 0;
2562 out:
2563 /* fixup any changes we've made to the path */
2564 path->lowest_level = 0;
2565 path->keep_locks = 0;
2566 btrfs_release_path(root, path);
2567 return ret;
2570 #endif
2573 * this can truncate away extent items, csum items and directory items.
2574 * It starts at a high offset and removes keys until it can't find
2575 * any higher than new_size
2577 * csum items that cross the new i_size are truncated to the new size
2578 * as well.
2580 * min_type is the minimum key type to truncate down to. If set to 0, this
2581 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2583 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2584 struct btrfs_root *root,
2585 struct inode *inode,
2586 u64 new_size, u32 min_type)
2588 int ret;
2589 struct btrfs_path *path;
2590 struct btrfs_key key;
2591 struct btrfs_key found_key;
2592 u32 found_type = (u8)-1;
2593 struct extent_buffer *leaf;
2594 struct btrfs_file_extent_item *fi;
2595 u64 extent_start = 0;
2596 u64 extent_num_bytes = 0;
2597 u64 extent_offset = 0;
2598 u64 item_end = 0;
2599 int found_extent;
2600 int del_item;
2601 int pending_del_nr = 0;
2602 int pending_del_slot = 0;
2603 int extent_type = -1;
2604 int encoding;
2605 u64 mask = root->sectorsize - 1;
2607 if (root->ref_cows)
2608 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
path = btrfs_alloc_path();
BUG_ON(!path);
path->reada = -1;
2613 /* FIXME, add redo link to tree so we don't leak on crash */
2614 key.objectid = inode->i_ino;
2615 key.offset = (u64)-1;
2616 key.type = (u8)-1;
2618 search_again:
2619 path->leave_spinning = 1;
2620 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2621 if (ret < 0)
2622 goto error;
2624 if (ret > 0) {
2625 /* there are no items in the tree for us to truncate, we're
2626 * done
2628 if (path->slots[0] == 0) {
2629 ret = 0;
2630 goto error;
2632 path->slots[0]--;
2635 while (1) {
2636 fi = NULL;
2637 leaf = path->nodes[0];
2638 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2639 found_type = btrfs_key_type(&found_key);
2640 encoding = 0;
2642 if (found_key.objectid != inode->i_ino)
2643 break;
2645 if (found_type < min_type)
2646 break;
2648 item_end = found_key.offset;
2649 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2650 fi = btrfs_item_ptr(leaf, path->slots[0],
2651 struct btrfs_file_extent_item);
2652 extent_type = btrfs_file_extent_type(leaf, fi);
2653 encoding = btrfs_file_extent_compression(leaf, fi);
2654 encoding |= btrfs_file_extent_encryption(leaf, fi);
2655 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2657 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2658 item_end +=
2659 btrfs_file_extent_num_bytes(leaf, fi);
2660 } else { /* BTRFS_FILE_EXTENT_INLINE */
2661 item_end += btrfs_file_extent_inline_len(leaf,
2662 fi);
2664 item_end--;
2666 if (item_end < new_size) {
2667 if (found_type == BTRFS_DIR_ITEM_KEY)
2668 found_type = BTRFS_INODE_ITEM_KEY;
2669 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2670 found_type = BTRFS_EXTENT_DATA_KEY;
2671 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2672 found_type = BTRFS_XATTR_ITEM_KEY;
2673 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2674 found_type = BTRFS_INODE_REF_KEY;
2675 else if (found_type)
2676 found_type--;
2677 else
2678 break;
2679 btrfs_set_key_type(&key, found_type);
2680 goto next;
2682 if (found_key.offset >= new_size)
2683 del_item = 1;
2684 else
2685 del_item = 0;
2686 found_extent = 0;
2688 /* FIXME, shrink the extent if the ref count is only 1 */
2689 if (found_type != BTRFS_EXTENT_DATA_KEY)
2690 goto delete;
2692 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2693 u64 num_dec;
2694 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2695 if (!del_item && !encoding) {
2696 u64 orig_num_bytes =
2697 btrfs_file_extent_num_bytes(leaf, fi);
2698 extent_num_bytes = new_size -
2699 found_key.offset + root->sectorsize - 1;
2700 extent_num_bytes = extent_num_bytes &
2701 ~((u64)root->sectorsize - 1);
2702 btrfs_set_file_extent_num_bytes(leaf, fi,
2703 extent_num_bytes);
2704 num_dec = (orig_num_bytes -
2705 extent_num_bytes);
2706 if (root->ref_cows && extent_start != 0)
2707 inode_sub_bytes(inode, num_dec);
2708 btrfs_mark_buffer_dirty(leaf);
2709 } else {
2710 extent_num_bytes =
2711 btrfs_file_extent_disk_num_bytes(leaf,
2712 fi);
2713 extent_offset = found_key.offset -
2714 btrfs_file_extent_offset(leaf, fi);
2716 /* FIXME blocksize != 4096 */
2717 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2718 if (extent_start != 0) {
2719 found_extent = 1;
2720 if (root->ref_cows)
2721 inode_sub_bytes(inode, num_dec);
2724 } else { /* BTRFS_FILE_EXTENT_INLINE */
2726 * we can't truncate inline items that have had
2727 * special encodings
2729 if (!del_item &&
2730 btrfs_file_extent_compression(leaf, fi) == 0 &&
2731 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2732 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2733 u32 size = new_size - found_key.offset;
2735 if (root->ref_cows) {
2736 inode_sub_bytes(inode, item_end + 1 -
2737 new_size);
2739 size =
2740 btrfs_file_extent_calc_inline_size(size);
2741 ret = btrfs_truncate_item(trans, root, path,
2742 size, 1);
2743 BUG_ON(ret);
2744 } else if (root->ref_cows) {
2745 inode_sub_bytes(inode, item_end + 1 -
2746 found_key.offset);
2749 delete:
2750 if (del_item) {
2751 if (!pending_del_nr) {
2752 /* no pending yet, add ourselves */
2753 pending_del_slot = path->slots[0];
2754 pending_del_nr = 1;
2755 } else if (pending_del_nr &&
2756 path->slots[0] + 1 == pending_del_slot) {
2757 /* hop on the pending chunk */
2758 pending_del_nr++;
2759 pending_del_slot = path->slots[0];
2760 } else {
2761 BUG();
2763 } else {
2764 break;
2766 if (found_extent && root->ref_cows) {
2767 btrfs_set_path_blocking(path);
2768 ret = btrfs_free_extent(trans, root, extent_start,
2769 extent_num_bytes, 0,
2770 btrfs_header_owner(leaf),
2771 inode->i_ino, extent_offset);
2772 BUG_ON(ret);
2774 next:
2775 if (path->slots[0] == 0) {
2776 if (pending_del_nr)
2777 goto del_pending;
2778 btrfs_release_path(root, path);
2779 if (found_type == BTRFS_INODE_ITEM_KEY)
2780 break;
2781 goto search_again;
2784 path->slots[0]--;
2785 if (pending_del_nr &&
2786 path->slots[0] + 1 != pending_del_slot) {
2787 struct btrfs_key debug;
2788 del_pending:
2789 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2790 pending_del_slot);
2791 ret = btrfs_del_items(trans, root, path,
2792 pending_del_slot,
2793 pending_del_nr);
2794 BUG_ON(ret);
2795 pending_del_nr = 0;
2796 btrfs_release_path(root, path);
2797 if (found_type == BTRFS_INODE_ITEM_KEY)
2798 break;
2799 goto search_again;
2802 ret = 0;
2803 error:
2804 if (pending_del_nr) {
2805 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2806 pending_del_nr);
2808 btrfs_free_path(path);
2809 inode->i_sb->s_dirt = 1;
2810 return ret;
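/*
 * Illustrative sketch of how min_type shapes the loop above: a plain
 * size change keeps the inode item and drops file extents past the new
 * size, while full deletion passes 0 so every item goes, including the
 * INODE_ITEM_KEY.  Hypothetical example calls, not real callers.
 */
#if 0
	/* shrink a file but keep the inode itself */
	btrfs_truncate_inode_items(trans, root, inode, new_size,
				   BTRFS_EXTENT_DATA_KEY);

	/* remove everything for inode deletion */
	btrfs_truncate_inode_items(trans, root, inode, 0, 0);
#endif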
2814 * taken from block_truncate_page, but does cow as it zeros out
2815 * any bytes left in the last page in the file.
2817 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2819 struct inode *inode = mapping->host;
2820 struct btrfs_root *root = BTRFS_I(inode)->root;
2821 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2822 struct btrfs_ordered_extent *ordered;
2823 char *kaddr;
2824 u32 blocksize = root->sectorsize;
2825 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2826 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2827 struct page *page;
2828 int ret = 0;
2829 u64 page_start;
2830 u64 page_end;
2832 if ((offset & (blocksize - 1)) == 0)
2833 goto out;
2835 ret = -ENOMEM;
2836 again:
2837 page = grab_cache_page(mapping, index);
2838 if (!page)
2839 goto out;
2841 page_start = page_offset(page);
2842 page_end = page_start + PAGE_CACHE_SIZE - 1;
2844 if (!PageUptodate(page)) {
2845 ret = btrfs_readpage(NULL, page);
2846 lock_page(page);
2847 if (page->mapping != mapping) {
2848 unlock_page(page);
2849 page_cache_release(page);
2850 goto again;
2852 if (!PageUptodate(page)) {
2853 ret = -EIO;
2854 goto out_unlock;
2857 wait_on_page_writeback(page);
2859 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2860 set_page_extent_mapped(page);
2862 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2863 if (ordered) {
2864 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2865 unlock_page(page);
2866 page_cache_release(page);
2867 btrfs_start_ordered_extent(inode, ordered, 1);
2868 btrfs_put_ordered_extent(ordered);
2869 goto again;
2872 btrfs_set_extent_delalloc(inode, page_start, page_end);
2873 ret = 0;
2874 if (offset != PAGE_CACHE_SIZE) {
2875 kaddr = kmap(page);
2876 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2877 flush_dcache_page(page);
2878 kunmap(page);
2880 ClearPageChecked(page);
2881 set_page_dirty(page);
2882 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2884 out_unlock:
2885 unlock_page(page);
2886 page_cache_release(page);
2887 out:
2888 return ret;
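/*
 * Worked example of the alignment math above, assuming 4096 byte pages
 * and a 4096 byte sectorsize: for from == 10000, offset is
 * 10000 & 4095 == 1808, so bytes 1808..4095 of that page are zeroed;
 * for from == 8192 the offset is already block aligned and the
 * function returns without touching the page.
 */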
2891 int btrfs_cont_expand(struct inode *inode, loff_t size)
2893 struct btrfs_trans_handle *trans;
2894 struct btrfs_root *root = BTRFS_I(inode)->root;
2895 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2896 struct extent_map *em;
2897 u64 mask = root->sectorsize - 1;
2898 u64 hole_start = (inode->i_size + mask) & ~mask;
2899 u64 block_end = (size + mask) & ~mask;
2900 u64 last_byte;
2901 u64 cur_offset;
2902 u64 hole_size;
2903 int err;
2905 if (size <= hole_start)
2906 return 0;
2908 err = btrfs_check_metadata_free_space(root);
2909 if (err)
2910 return err;
2912 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2914 while (1) {
2915 struct btrfs_ordered_extent *ordered;
2916 btrfs_wait_ordered_range(inode, hole_start,
2917 block_end - hole_start);
2918 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2919 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2920 if (!ordered)
2921 break;
2922 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2923 btrfs_put_ordered_extent(ordered);
2926 trans = btrfs_start_transaction(root, 1);
2927 btrfs_set_trans_block_group(trans, inode);
2929 cur_offset = hole_start;
2930 while (1) {
2931 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2932 block_end - cur_offset, 0);
2933 BUG_ON(IS_ERR(em) || !em);
2934 last_byte = min(extent_map_end(em), block_end);
2935 last_byte = (last_byte + mask) & ~mask;
2936 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2937 u64 hint_byte = 0;
2938 hole_size = last_byte - cur_offset;
2939 err = btrfs_drop_extents(trans, root, inode,
2940 cur_offset,
2941 cur_offset + hole_size,
2942 block_end,
2943 cur_offset, &hint_byte);
2944 if (err)
2945 break;
2946 err = btrfs_insert_file_extent(trans, root,
2947 inode->i_ino, cur_offset, 0,
2948 0, hole_size, 0, hole_size,
2949 0, 0, 0);
2950 btrfs_drop_extent_cache(inode, hole_start,
2951 last_byte - 1, 0);
2953 free_extent_map(em);
2954 cur_offset = last_byte;
2955 if (err || cur_offset >= block_end)
2956 break;
2959 btrfs_end_transaction(trans, root);
2960 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2961 return err;
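/*
 * Illustrative sketch of what drives this path: growing a file with
 * ftruncate() from user space reaches btrfs_cont_expand() through
 * btrfs_setattr() below, and the range between the old and new size is
 * filled with hole extents instead of allocated blocks.  A hypothetical
 * user space fragment, shown only for illustration:
 */
#if 0
	int fd = open("/mnt/btrfs/file", O_RDWR | O_CREAT, 0644);
	ftruncate(fd, 1 << 20);	/* grow to 1MiB; the tail becomes a hole */
	close(fd);
#endif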
2964 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2966 struct inode *inode = dentry->d_inode;
2967 int err;
2969 err = inode_change_ok(inode, attr);
2970 if (err)
2971 return err;
2973 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
2974 if (attr->ia_size > inode->i_size) {
2975 err = btrfs_cont_expand(inode, attr->ia_size);
2976 if (err)
2977 return err;
2978 } else if (inode->i_size > 0 &&
2979 attr->ia_size == 0) {
2981 /* we're truncating a file that used to have good
2982 * data down to zero. Make sure it gets into
2983 * the ordered flush list so that any new writes
2984 * get down to disk quickly.
2986 BTRFS_I(inode)->ordered_data_close = 1;
2990 err = inode_setattr(inode, attr);
2992 if (!err && ((attr->ia_valid & ATTR_MODE)))
2993 err = btrfs_acl_chmod(inode);
2994 return err;
2997 void btrfs_delete_inode(struct inode *inode)
2999 struct btrfs_trans_handle *trans;
3000 struct btrfs_root *root = BTRFS_I(inode)->root;
3001 unsigned long nr;
3002 int ret;
3004 truncate_inode_pages(&inode->i_data, 0);
3005 if (is_bad_inode(inode)) {
3006 btrfs_orphan_del(NULL, inode);
3007 goto no_delete;
3009 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3011 btrfs_i_size_write(inode, 0);
3012 trans = btrfs_join_transaction(root, 1);
3014 btrfs_set_trans_block_group(trans, inode);
3015 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
3016 if (ret) {
3017 btrfs_orphan_del(NULL, inode);
3018 goto no_delete_lock;
3021 btrfs_orphan_del(trans, inode);
3023 nr = trans->blocks_used;
3024 clear_inode(inode);
3026 btrfs_end_transaction(trans, root);
3027 btrfs_btree_balance_dirty(root, nr);
3028 return;
3030 no_delete_lock:
3031 nr = trans->blocks_used;
3032 btrfs_end_transaction(trans, root);
3033 btrfs_btree_balance_dirty(root, nr);
3034 no_delete:
3035 clear_inode(inode);
3039 * this returns the key found in the dir entry in the location pointer.
3040 * If no dir entries were found, location->objectid is 0.
3042 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3043 struct btrfs_key *location)
3045 const char *name = dentry->d_name.name;
3046 int namelen = dentry->d_name.len;
3047 struct btrfs_dir_item *di;
3048 struct btrfs_path *path;
3049 struct btrfs_root *root = BTRFS_I(dir)->root;
3050 int ret = 0;
3052 path = btrfs_alloc_path();
3053 BUG_ON(!path);
3055 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3056 namelen, 0);
3057 if (IS_ERR(di))
3058 ret = PTR_ERR(di);
3060 if (!di || IS_ERR(di))
3061 goto out_err;
3063 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3064 out:
3065 btrfs_free_path(path);
3066 return ret;
3067 out_err:
3068 location->objectid = 0;
3069 goto out;
3073 * when we hit a tree root in a directory, the btrfs part of the inode
3074 * needs to be changed to reflect the root directory of the tree root. This
3075 * is kind of like crossing a mount point.
3077 static int fixup_tree_root_location(struct btrfs_root *root,
3078 struct btrfs_key *location,
3079 struct btrfs_root **sub_root,
3080 struct dentry *dentry)
3082 struct btrfs_root_item *ri;
3084 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
3085 return 0;
3086 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
3087 return 0;
3089 *sub_root = btrfs_read_fs_root(root->fs_info, location,
3090 dentry->d_name.name,
3091 dentry->d_name.len);
3092 if (IS_ERR(*sub_root))
3093 return PTR_ERR(*sub_root);
3095 ri = &(*sub_root)->root_item;
3096 location->objectid = btrfs_root_dirid(ri);
3097 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3098 location->offset = 0;
3100 return 0;
3103 static void inode_tree_add(struct inode *inode)
3105 struct btrfs_root *root = BTRFS_I(inode)->root;
3106 struct btrfs_inode *entry;
3107 struct rb_node **p = &root->inode_tree.rb_node;
3108 struct rb_node *parent = NULL;
3110 spin_lock(&root->inode_lock);
3111 while (*p) {
3112 parent = *p;
3113 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3115 if (inode->i_ino < entry->vfs_inode.i_ino)
3116 p = &(*p)->rb_left;
3117 else if (inode->i_ino > entry->vfs_inode.i_ino)
3118 p = &(*p)->rb_right;
3119 else {
3120 WARN_ON(!(entry->vfs_inode.i_state &
3121 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3122 break;
3125 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3126 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3127 spin_unlock(&root->inode_lock);
3130 static void inode_tree_del(struct inode *inode)
3132 struct btrfs_root *root = BTRFS_I(inode)->root;
3134 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3135 spin_lock(&root->inode_lock);
3136 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3137 spin_unlock(&root->inode_lock);
3138 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3142 static noinline void init_btrfs_i(struct inode *inode)
3144 struct btrfs_inode *bi = BTRFS_I(inode);
3146 bi->i_acl = BTRFS_ACL_NOT_CACHED;
3147 bi->i_default_acl = BTRFS_ACL_NOT_CACHED;
3149 bi->generation = 0;
3150 bi->sequence = 0;
3151 bi->last_trans = 0;
3152 bi->logged_trans = 0;
3153 bi->delalloc_bytes = 0;
3154 bi->reserved_bytes = 0;
3155 bi->disk_i_size = 0;
3156 bi->flags = 0;
3157 bi->index_cnt = (u64)-1;
3158 bi->last_unlink_trans = 0;
3159 bi->ordered_data_close = 0;
3160 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3161 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3162 inode->i_mapping, GFP_NOFS);
3163 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3164 inode->i_mapping, GFP_NOFS);
3165 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3166 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3167 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3168 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3169 mutex_init(&BTRFS_I(inode)->extent_mutex);
3170 mutex_init(&BTRFS_I(inode)->log_mutex);
3173 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3175 struct btrfs_iget_args *args = p;
3176 inode->i_ino = args->ino;
3177 init_btrfs_i(inode);
3178 BTRFS_I(inode)->root = args->root;
3179 btrfs_set_inode_space_info(args->root, inode);
3180 return 0;
3183 static int btrfs_find_actor(struct inode *inode, void *opaque)
3185 struct btrfs_iget_args *args = opaque;
3186 return args->ino == inode->i_ino &&
3187 args->root == BTRFS_I(inode)->root;
3190 static struct inode *btrfs_iget_locked(struct super_block *s,
3191 u64 objectid,
3192 struct btrfs_root *root)
3194 struct inode *inode;
3195 struct btrfs_iget_args args;
3196 args.ino = objectid;
3197 args.root = root;
3199 inode = iget5_locked(s, objectid, btrfs_find_actor,
3200 btrfs_init_locked_inode,
3201 (void *)&args);
3202 return inode;
3205 /* Get an inode object given its location and corresponding root.
3206 * Reads the inode from disk if it is not already cached.
3208 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3209 struct btrfs_root *root)
3211 struct inode *inode;
3213 inode = btrfs_iget_locked(s, location->objectid, root);
3214 if (!inode)
3215 return ERR_PTR(-ENOMEM);
3217 if (inode->i_state & I_NEW) {
3218 BTRFS_I(inode)->root = root;
3219 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3220 btrfs_read_locked_inode(inode);
3222 inode_tree_add(inode);
3223 unlock_new_inode(inode);
3226 return inode;
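/*
 * Illustrative sketch of building the location key for btrfs_iget();
 * this mirrors what btrfs_orphan_cleanup() does above.  The helper name
 * example_iget_by_ino is hypothetical.
 */
#if 0
static struct inode *example_iget_by_ino(struct super_block *sb,
					 struct btrfs_root *root, u64 ino)
{
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	return btrfs_iget(sb, &key, root);
}
#endif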
3229 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3231 struct inode *inode;
3232 struct btrfs_inode *bi = BTRFS_I(dir);
3233 struct btrfs_root *root = bi->root;
3234 struct btrfs_root *sub_root = root;
3235 struct btrfs_key location;
3236 int ret;
3238 if (dentry->d_name.len > BTRFS_NAME_LEN)
3239 return ERR_PTR(-ENAMETOOLONG);
3241 ret = btrfs_inode_by_name(dir, dentry, &location);
3243 if (ret < 0)
3244 return ERR_PTR(ret);
3246 inode = NULL;
3247 if (location.objectid) {
3248 ret = fixup_tree_root_location(root, &location, &sub_root,
3249 dentry);
3250 if (ret < 0)
3251 return ERR_PTR(ret);
3252 if (ret > 0)
3253 return ERR_PTR(-ENOENT);
3254 inode = btrfs_iget(dir->i_sb, &location, sub_root);
3255 if (IS_ERR(inode))
3256 return ERR_CAST(inode);
3258 return inode;
3261 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3262 struct nameidata *nd)
3264 struct inode *inode;
3266 if (dentry->d_name.len > BTRFS_NAME_LEN)
3267 return ERR_PTR(-ENAMETOOLONG);
3269 inode = btrfs_lookup_dentry(dir, dentry);
3270 if (IS_ERR(inode))
3271 return ERR_CAST(inode);
3273 return d_splice_alias(inode, dentry);
3276 static unsigned char btrfs_filetype_table[] = {
3277 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3280 static int btrfs_real_readdir(struct file *filp, void *dirent,
3281 filldir_t filldir)
3283 struct inode *inode = filp->f_dentry->d_inode;
3284 struct btrfs_root *root = BTRFS_I(inode)->root;
3285 struct btrfs_item *item;
3286 struct btrfs_dir_item *di;
3287 struct btrfs_key key;
3288 struct btrfs_key found_key;
3289 struct btrfs_path *path;
3290 int ret;
3291 u32 nritems;
3292 struct extent_buffer *leaf;
3293 int slot;
3294 int advance;
3295 unsigned char d_type;
3296 int over = 0;
3297 u32 di_cur;
3298 u32 di_total;
3299 u32 di_len;
3300 int key_type = BTRFS_DIR_INDEX_KEY;
3301 char tmp_name[32];
3302 char *name_ptr;
3303 int name_len;
3305 /* FIXME, use a real flag for deciding about the key type */
3306 if (root->fs_info->tree_root == root)
3307 key_type = BTRFS_DIR_ITEM_KEY;
3309 /* special case for "." */
3310 if (filp->f_pos == 0) {
3311 over = filldir(dirent, ".", 1,
3312 1, inode->i_ino,
3313 DT_DIR);
3314 if (over)
3315 return 0;
3316 filp->f_pos = 1;
3318 /* special case for .., just use the back ref */
3319 if (filp->f_pos == 1) {
3320 u64 pino = parent_ino(filp->f_path.dentry);
3321 over = filldir(dirent, "..", 2,
3322 2, pino, DT_DIR);
3323 if (over)
3324 return 0;
3325 filp->f_pos = 2;
3327 path = btrfs_alloc_path();
3328 path->reada = 2;
3330 btrfs_set_key_type(&key, key_type);
3331 key.offset = filp->f_pos;
3332 key.objectid = inode->i_ino;
3334 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3335 if (ret < 0)
3336 goto err;
3337 advance = 0;
3339 while (1) {
3340 leaf = path->nodes[0];
3341 nritems = btrfs_header_nritems(leaf);
3342 slot = path->slots[0];
3343 if (advance || slot >= nritems) {
3344 if (slot >= nritems - 1) {
3345 ret = btrfs_next_leaf(root, path);
3346 if (ret)
3347 break;
3348 leaf = path->nodes[0];
3349 nritems = btrfs_header_nritems(leaf);
3350 slot = path->slots[0];
3351 } else {
3352 slot++;
3353 path->slots[0]++;
3357 advance = 1;
3358 item = btrfs_item_nr(leaf, slot);
3359 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3361 if (found_key.objectid != key.objectid)
3362 break;
3363 if (btrfs_key_type(&found_key) != key_type)
3364 break;
3365 if (found_key.offset < filp->f_pos)
3366 continue;
3368 filp->f_pos = found_key.offset;
3370 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3371 di_cur = 0;
3372 di_total = btrfs_item_size(leaf, item);
3374 while (di_cur < di_total) {
3375 struct btrfs_key location;
3377 name_len = btrfs_dir_name_len(leaf, di);
3378 if (name_len <= sizeof(tmp_name)) {
3379 name_ptr = tmp_name;
3380 } else {
3381 name_ptr = kmalloc(name_len, GFP_NOFS);
3382 if (!name_ptr) {
3383 ret = -ENOMEM;
3384 goto err;
3387 read_extent_buffer(leaf, name_ptr,
3388 (unsigned long)(di + 1), name_len);
3390 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3391 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3393 /* is this a reference to our own snapshot? If so
3394 * skip it
3396 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3397 location.objectid == root->root_key.objectid) {
3398 over = 0;
3399 goto skip;
3401 over = filldir(dirent, name_ptr, name_len,
3402 found_key.offset, location.objectid,
3403 d_type);
3405 skip:
3406 if (name_ptr != tmp_name)
3407 kfree(name_ptr);
3409 if (over)
3410 goto nopos;
3411 di_len = btrfs_dir_name_len(leaf, di) +
3412 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3413 di_cur += di_len;
3414 di = (struct btrfs_dir_item *)((char *)di + di_len);
3418 /* Reached end of directory/root. Bump pos past the last item. */
3419 if (key_type == BTRFS_DIR_INDEX_KEY)
3420 filp->f_pos = INT_LIMIT(off_t);
3421 else
3422 filp->f_pos++;
3423 nopos:
3424 ret = 0;
3425 err:
3426 btrfs_free_path(path);
3427 return ret;
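/*
 * Summary of the f_pos convention used above: positions 0 and 1 are
 * synthesized for "." and "..", positions from 2 on map directly to
 * DIR_INDEX (or DIR_ITEM for the tree root) key offsets, and
 * INT_LIMIT(off_t) marks the end for index based directories so a
 * later readdir call returns nothing.
 */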
3430 int btrfs_write_inode(struct inode *inode, int wait)
3432 struct btrfs_root *root = BTRFS_I(inode)->root;
3433 struct btrfs_trans_handle *trans;
3434 int ret = 0;
3436 if (root->fs_info->btree_inode == inode)
3437 return 0;
3439 if (wait) {
3440 trans = btrfs_join_transaction(root, 1);
3441 btrfs_set_trans_block_group(trans, inode);
3442 ret = btrfs_commit_transaction(trans, root);
3444 return ret;
3448 * This is somewhat expensive, updating the tree every time the
3449 * inode changes. But, it is most likely to find the inode in cache.
3450 * FIXME: needs more benchmarking; performance is the only reason
3451 * to keep or drop this code.
3453 void btrfs_dirty_inode(struct inode *inode)
3455 struct btrfs_root *root = BTRFS_I(inode)->root;
3456 struct btrfs_trans_handle *trans;
3458 trans = btrfs_join_transaction(root, 1);
3459 btrfs_set_trans_block_group(trans, inode);
3460 btrfs_update_inode(trans, root, inode);
3461 btrfs_end_transaction(trans, root);
3465 * find the highest existing sequence number in a directory
3466 * and then set the in-memory index_cnt variable to reflect
3467 * free sequence numbers
3469 static int btrfs_set_inode_index_count(struct inode *inode)
3471 struct btrfs_root *root = BTRFS_I(inode)->root;
3472 struct btrfs_key key, found_key;
3473 struct btrfs_path *path;
3474 struct extent_buffer *leaf;
3475 int ret;
3477 key.objectid = inode->i_ino;
3478 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3479 key.offset = (u64)-1;
3481 path = btrfs_alloc_path();
3482 if (!path)
3483 return -ENOMEM;
3485 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3486 if (ret < 0)
3487 goto out;
3488 /* FIXME: we should be able to handle this */
3489 if (ret == 0)
3490 goto out;
3491 ret = 0;
3494 * MAGIC NUMBER EXPLANATION:
3495 * since we search a directory based on f_pos, and '.' and '..'
3496 * have f_pos of 0 and 1 respectively, everybody else
3497 * has to start at 2
3499 if (path->slots[0] == 0) {
3500 BTRFS_I(inode)->index_cnt = 2;
3501 goto out;
3504 path->slots[0]--;
3506 leaf = path->nodes[0];
3507 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3509 if (found_key.objectid != inode->i_ino ||
3510 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3511 BTRFS_I(inode)->index_cnt = 2;
3512 goto out;
3515 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3516 out:
3517 btrfs_free_path(path);
3518 return ret;
3522 * helper to find a free sequence number in a given directory. The current
3523 * code is very simple; later versions will do smarter things in the btree
3525 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3527 int ret = 0;
3529 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3530 ret = btrfs_set_inode_index_count(dir);
3531 if (ret)
3532 return ret;
3535 *index = BTRFS_I(dir)->index_cnt;
3536 BTRFS_I(dir)->index_cnt++;
3538 return ret;
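/*
 * The sequence number handed out here becomes the offset of the new
 * name's BTRFS_DIR_INDEX_KEY (see btrfs_add_link() below), which is
 * what keeps readdir ordering stable without a search per entry.
 */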
3541 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3542 struct btrfs_root *root,
3543 struct inode *dir,
3544 const char *name, int name_len,
3545 u64 ref_objectid, u64 objectid,
3546 u64 alloc_hint, int mode, u64 *index)
3548 struct inode *inode;
3549 struct btrfs_inode_item *inode_item;
3550 struct btrfs_key *location;
3551 struct btrfs_path *path;
3552 struct btrfs_inode_ref *ref;
3553 struct btrfs_key key[2];
3554 u32 sizes[2];
3555 unsigned long ptr;
3556 int ret;
3557 int owner;
3559 path = btrfs_alloc_path();
3560 BUG_ON(!path);
inode = new_inode(root->fs_info->sb);
if (!inode) {
btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
}
3566 if (dir) {
3567 ret = btrfs_set_inode_index(dir, index);
3568 if (ret) {
3569 iput(inode);
3570 return ERR_PTR(ret);
3574 * index_cnt is ignored for everything but a dir,
3575 * btrfs_set_inode_index_count has an explanation for the magic
3576 * number
3578 init_btrfs_i(inode);
3579 BTRFS_I(inode)->index_cnt = 2;
3580 BTRFS_I(inode)->root = root;
3581 BTRFS_I(inode)->generation = trans->transid;
3582 btrfs_set_inode_space_info(root, inode);
3584 if (mode & S_IFDIR)
3585 owner = 0;
3586 else
3587 owner = 1;
3588 BTRFS_I(inode)->block_group =
3589 btrfs_find_block_group(root, 0, alloc_hint, owner);
3590 if ((mode & S_IFREG)) {
3591 if (btrfs_test_opt(root, NODATASUM))
3592 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
3593 if (btrfs_test_opt(root, NODATACOW))
3594 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
3597 key[0].objectid = objectid;
3598 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3599 key[0].offset = 0;
3601 key[1].objectid = objectid;
3602 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3603 key[1].offset = ref_objectid;
3605 sizes[0] = sizeof(struct btrfs_inode_item);
3606 sizes[1] = name_len + sizeof(*ref);
3608 path->leave_spinning = 1;
3609 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3610 if (ret != 0)
3611 goto fail;
3613 if (objectid > root->highest_inode)
3614 root->highest_inode = objectid;
3616 inode->i_uid = current_fsuid();
3618 if (dir && (dir->i_mode & S_ISGID)) {
3619 inode->i_gid = dir->i_gid;
3620 if (S_ISDIR(mode))
3621 mode |= S_ISGID;
3622 } else
3623 inode->i_gid = current_fsgid();
3625 inode->i_mode = mode;
3626 inode->i_ino = objectid;
3627 inode_set_bytes(inode, 0);
3628 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3629 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3630 struct btrfs_inode_item);
3631 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3633 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3634 struct btrfs_inode_ref);
3635 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3636 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3637 ptr = (unsigned long)(ref + 1);
3638 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3640 btrfs_mark_buffer_dirty(path->nodes[0]);
3641 btrfs_free_path(path);
3643 location = &BTRFS_I(inode)->location;
3644 location->objectid = objectid;
3645 location->offset = 0;
3646 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3648 btrfs_inherit_iflags(inode, dir);
3650 insert_inode_hash(inode);
3651 inode_tree_add(inode);
3652 return inode;
3653 fail:
3654 if (dir)
3655 BTRFS_I(dir)->index_cnt--;
3656 btrfs_free_path(path);
3657 iput(inode);
3658 return ERR_PTR(ret);
3661 static inline u8 btrfs_inode_type(struct inode *inode)
3663 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3667 * utility function to add 'inode' into 'parent_inode' with
3668 * a given name and a given sequence number.
3669 * if 'add_backref' is true, also insert a backref from the
3670 * inode to the parent directory.
3672 int btrfs_add_link(struct btrfs_trans_handle *trans,
3673 struct inode *parent_inode, struct inode *inode,
3674 const char *name, int name_len, int add_backref, u64 index)
3676 int ret;
3677 struct btrfs_key key;
3678 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3680 key.objectid = inode->i_ino;
3681 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3682 key.offset = 0;
3684 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3685 parent_inode->i_ino,
3686 &key, btrfs_inode_type(inode),
3687 index);
3688 if (ret == 0) {
3689 if (add_backref) {
3690 ret = btrfs_insert_inode_ref(trans, root,
3691 name, name_len,
3692 inode->i_ino,
3693 parent_inode->i_ino,
3694 index);
3696 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3697 name_len * 2);
3698 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3699 ret = btrfs_update_inode(trans, root, parent_inode);
3701 return ret;
3704 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3705 struct dentry *dentry, struct inode *inode,
3706 int backref, u64 index)
3708 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3709 inode, dentry->d_name.name,
3710 dentry->d_name.len, backref, index);
3711 if (!err) {
3712 d_instantiate(dentry, inode);
3713 return 0;
3715 if (err > 0)
3716 err = -EEXIST;
3717 return err;
3720 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3721 int mode, dev_t rdev)
3723 struct btrfs_trans_handle *trans;
3724 struct btrfs_root *root = BTRFS_I(dir)->root;
3725 struct inode *inode = NULL;
3726 int err;
3727 int drop_inode = 0;
3728 u64 objectid;
3729 unsigned long nr = 0;
3730 u64 index = 0;
3732 if (!new_valid_dev(rdev))
3733 return -EINVAL;
3735 err = btrfs_check_metadata_free_space(root);
3736 if (err)
3737 goto fail;
3739 trans = btrfs_start_transaction(root, 1);
3740 btrfs_set_trans_block_group(trans, dir);
3742 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3743 if (err) {
3744 err = -ENOSPC;
3745 goto out_unlock;
3748 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3749 dentry->d_name.len,
3750 dentry->d_parent->d_inode->i_ino, objectid,
3751 BTRFS_I(dir)->block_group, mode, &index);
3752 err = PTR_ERR(inode);
3753 if (IS_ERR(inode))
3754 goto out_unlock;
3756 err = btrfs_init_inode_security(inode, dir);
3757 if (err) {
3758 drop_inode = 1;
3759 goto out_unlock;
3762 btrfs_set_trans_block_group(trans, inode);
3763 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3764 if (err)
3765 drop_inode = 1;
3766 else {
3767 inode->i_op = &btrfs_special_inode_operations;
3768 init_special_inode(inode, inode->i_mode, rdev);
3769 btrfs_update_inode(trans, root, inode);
3771 dir->i_sb->s_dirt = 1;
3772 btrfs_update_inode_block_group(trans, inode);
3773 btrfs_update_inode_block_group(trans, dir);
3774 out_unlock:
3775 nr = trans->blocks_used;
3776 btrfs_end_transaction_throttle(trans, root);
3777 fail:
3778 if (drop_inode) {
3779 inode_dec_link_count(inode);
3780 iput(inode);
3782 btrfs_btree_balance_dirty(root, nr);
3783 return err;
3786 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3787 int mode, struct nameidata *nd)
3789 struct btrfs_trans_handle *trans;
3790 struct btrfs_root *root = BTRFS_I(dir)->root;
3791 struct inode *inode = NULL;
3792 int err;
3793 int drop_inode = 0;
3794 unsigned long nr = 0;
3795 u64 objectid;
3796 u64 index = 0;
3798 err = btrfs_check_metadata_free_space(root);
3799 if (err)
3800 goto fail;
3801 trans = btrfs_start_transaction(root, 1);
3802 btrfs_set_trans_block_group(trans, dir);
3804 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3805 if (err) {
3806 err = -ENOSPC;
3807 goto out_unlock;
3810 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3811 dentry->d_name.len,
3812 dentry->d_parent->d_inode->i_ino,
3813 objectid, BTRFS_I(dir)->block_group, mode,
3814 &index);
3815 err = PTR_ERR(inode);
3816 if (IS_ERR(inode))
3817 goto out_unlock;
3819 err = btrfs_init_inode_security(inode, dir);
3820 if (err) {
3821 drop_inode = 1;
3822 goto out_unlock;
3825 btrfs_set_trans_block_group(trans, inode);
3826 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3827 if (err)
3828 drop_inode = 1;
3829 else {
3830 inode->i_mapping->a_ops = &btrfs_aops;
3831 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3832 inode->i_fop = &btrfs_file_operations;
3833 inode->i_op = &btrfs_file_inode_operations;
3834 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3836 dir->i_sb->s_dirt = 1;
3837 btrfs_update_inode_block_group(trans, inode);
3838 btrfs_update_inode_block_group(trans, dir);
3839 out_unlock:
3840 nr = trans->blocks_used;
3841 btrfs_end_transaction_throttle(trans, root);
3842 fail:
3843 if (drop_inode) {
3844 inode_dec_link_count(inode);
3845 iput(inode);
3847 btrfs_btree_balance_dirty(root, nr);
3848 return err;
3851 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3852 struct dentry *dentry)
3854 struct btrfs_trans_handle *trans;
3855 struct btrfs_root *root = BTRFS_I(dir)->root;
3856 struct inode *inode = old_dentry->d_inode;
3857 u64 index;
3858 unsigned long nr = 0;
3859 int err;
3860 int drop_inode = 0;
3862 if (inode->i_nlink == 0)
3863 return -ENOENT;
3865 btrfs_inc_nlink(inode);
3866 err = btrfs_check_metadata_free_space(root);
3867 if (err)
3868 goto fail;
3869 err = btrfs_set_inode_index(dir, &index);
3870 if (err)
3871 goto fail;
3873 trans = btrfs_start_transaction(root, 1);
3875 btrfs_set_trans_block_group(trans, dir);
3876 atomic_inc(&inode->i_count);
3878 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3880 if (err)
3881 drop_inode = 1;
3883 dir->i_sb->s_dirt = 1;
3884 btrfs_update_inode_block_group(trans, dir);
3885 err = btrfs_update_inode(trans, root, inode);
3887 if (err)
3888 drop_inode = 1;
3890 nr = trans->blocks_used;
3892 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
3893 btrfs_end_transaction_throttle(trans, root);
3894 fail:
3895 if (drop_inode) {
3896 inode_dec_link_count(inode);
3897 iput(inode);
3899 btrfs_btree_balance_dirty(root, nr);
3900 return err;
3903 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3905 struct inode *inode = NULL;
3906 struct btrfs_trans_handle *trans;
3907 struct btrfs_root *root = BTRFS_I(dir)->root;
3908 int err = 0;
3909 int drop_on_err = 0;
3910 u64 objectid = 0;
3911 u64 index = 0;
3912 unsigned long nr = 1;
3914 err = btrfs_check_metadata_free_space(root);
3915 if (err)
3916 goto out_unlock;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto out_unlock;
}
btrfs_set_trans_block_group(trans, dir);
3926 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3927 if (err) {
3928 err = -ENOSPC;
3929 goto out_unlock;
3932 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3933 dentry->d_name.len,
3934 dentry->d_parent->d_inode->i_ino, objectid,
3935 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3936 &index);
3937 if (IS_ERR(inode)) {
3938 err = PTR_ERR(inode);
3939 goto out_fail;
3942 drop_on_err = 1;
3944 err = btrfs_init_inode_security(inode, dir);
3945 if (err)
3946 goto out_fail;
3948 inode->i_op = &btrfs_dir_inode_operations;
3949 inode->i_fop = &btrfs_dir_file_operations;
3950 btrfs_set_trans_block_group(trans, inode);
3952 btrfs_i_size_write(inode, 0);
3953 err = btrfs_update_inode(trans, root, inode);
3954 if (err)
3955 goto out_fail;
3957 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3958 inode, dentry->d_name.name,
3959 dentry->d_name.len, 0, index);
3960 if (err)
3961 goto out_fail;
3963 d_instantiate(dentry, inode);
3964 drop_on_err = 0;
3965 dir->i_sb->s_dirt = 1;
3966 btrfs_update_inode_block_group(trans, inode);
3967 btrfs_update_inode_block_group(trans, dir);
3969 out_fail:
3970 nr = trans->blocks_used;
3971 btrfs_end_transaction_throttle(trans, root);
3973 out_unlock:
3974 if (drop_on_err)
3975 iput(inode);
3976 btrfs_btree_balance_dirty(root, nr);
3977 return err;
3980 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3981 * and an extent that you want to insert, deal with overlap and insert
3982 * the new extent into the tree.
3984 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3985 struct extent_map *existing,
3986 struct extent_map *em,
3987 u64 map_start, u64 map_len)
3988 {
3989 u64 start_diff;
3991 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3992 start_diff = map_start - em->start;
3993 em->start = map_start;
3994 em->len = map_len;
3995 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3996 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3997 em->block_start += start_diff;
3998 em->block_len -= start_diff;
3999 }
4000 return add_extent_mapping(em_tree, em);
4001 }
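/*
 * decompress a zlib-compressed inline extent straight into the given
 * page. If decompression fails, the affected part of the page is
 * zeroed instead so readers never see stale data.
 */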
4003 static noinline int uncompress_inline(struct btrfs_path *path,
4004 struct inode *inode, struct page *page,
4005 size_t pg_offset, u64 extent_offset,
4006 struct btrfs_file_extent_item *item)
4007 {
4008 int ret;
4009 struct extent_buffer *leaf = path->nodes[0];
4010 char *tmp;
4011 size_t max_size;
4012 unsigned long inline_size;
4013 unsigned long ptr;
4015 WARN_ON(pg_offset != 0);
4016 max_size = btrfs_file_extent_ram_bytes(leaf, item);
4017 inline_size = btrfs_file_extent_inline_item_len(leaf,
4018 btrfs_item_nr(leaf, path->slots[0]));
4019 tmp = kmalloc(inline_size, GFP_NOFS);
/* bail out instead of oopsing if the allocation fails */
if (!tmp)
return -ENOMEM;
4020 ptr = btrfs_file_extent_inline_start(item);
4022 read_extent_buffer(leaf, tmp, ptr, inline_size);
4024 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4025 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4026 inline_size, max_size);
4027 if (ret) {
4028 char *kaddr = kmap_atomic(page, KM_USER0);
4029 unsigned long copy_size = min_t(u64,
4030 PAGE_CACHE_SIZE - pg_offset,
4031 max_size - extent_offset);
4032 memset(kaddr + pg_offset, 0, copy_size);
4033 kunmap_atomic(kaddr, KM_USER0);
4034 }
4035 kfree(tmp);
4036 return 0;
4037 }
4039 /*
4040 * a bit scary, this does extent mapping from logical file offset to the disk.
4041 * the ugly parts come from merging extents from the disk with the in-ram
4042 * representation. This gets more complex because of the data=ordered code,
4043 * where the in-ram extents might be locked pending data=ordered completion.
4044 *
4045 * This also copies inline extents directly into the page.
4046 */
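/*
 * Typical read-side usage (a sketch; it mirrors the call made from
 * btrfs_fallocate() below): look up the mapping for a range without
 * creating anything, then drop the reference:
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
 *	if (!IS_ERR(em)) {
 *		if (em->block_start == EXTENT_MAP_HOLE)
 *			... the range is a hole ...
 *		free_extent_map(em);
 *	}
 */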
4048 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4049 size_t pg_offset, u64 start, u64 len,
4050 int create)
4051 {
4052 int ret;
4053 int err = 0;
4054 u64 bytenr;
4055 u64 extent_start = 0;
4056 u64 extent_end = 0;
4057 u64 objectid = inode->i_ino;
4058 u32 found_type;
4059 struct btrfs_path *path = NULL;
4060 struct btrfs_root *root = BTRFS_I(inode)->root;
4061 struct btrfs_file_extent_item *item;
4062 struct extent_buffer *leaf;
4063 struct btrfs_key found_key;
4064 struct extent_map *em = NULL;
4065 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4066 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4067 struct btrfs_trans_handle *trans = NULL;
4068 int compressed;
4070 again:
4071 spin_lock(&em_tree->lock);
4072 em = lookup_extent_mapping(em_tree, start, len);
4073 if (em)
4074 em->bdev = root->fs_info->fs_devices->latest_bdev;
4075 spin_unlock(&em_tree->lock);
4077 if (em) {
4078 if (em->start > start || em->start + em->len <= start)
4079 free_extent_map(em);
4080 else if (em->block_start == EXTENT_MAP_INLINE && page)
4081 free_extent_map(em);
4082 else
4083 goto out;
4084 }
4085 em = alloc_extent_map(GFP_NOFS);
4086 if (!em) {
4087 err = -ENOMEM;
4088 goto out;
4089 }
4090 em->bdev = root->fs_info->fs_devices->latest_bdev;
4091 em->start = EXTENT_MAP_HOLE;
4092 em->orig_start = EXTENT_MAP_HOLE;
4093 em->len = (u64)-1;
4094 em->block_len = (u64)-1;
4096 if (!path) {
4097 path = btrfs_alloc_path();
4098 BUG_ON(!path);
4099 }
4101 ret = btrfs_lookup_file_extent(trans, root, path,
4102 objectid, start, trans != NULL);
4103 if (ret < 0) {
4104 err = ret;
4105 goto out;
4106 }
4108 if (ret != 0) {
4109 if (path->slots[0] == 0)
4110 goto not_found;
4111 path->slots[0]--;
4112 }
4114 leaf = path->nodes[0];
4115 item = btrfs_item_ptr(leaf, path->slots[0],
4116 struct btrfs_file_extent_item);
4117 /* are we inside the extent that was found? */
4118 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4119 found_type = btrfs_key_type(&found_key);
4120 if (found_key.objectid != objectid ||
4121 found_type != BTRFS_EXTENT_DATA_KEY) {
4122 goto not_found;
4123 }
4125 found_type = btrfs_file_extent_type(leaf, item);
4126 extent_start = found_key.offset;
4127 compressed = btrfs_file_extent_compression(leaf, item);
4128 if (found_type == BTRFS_FILE_EXTENT_REG ||
4129 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4130 extent_end = extent_start +
4131 btrfs_file_extent_num_bytes(leaf, item);
4132 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4133 size_t size;
4134 size = btrfs_file_extent_inline_len(leaf, item);
4135 extent_end = (extent_start + size + root->sectorsize - 1) &
4136 ~((u64)root->sectorsize - 1);
4137 }
4139 if (start >= extent_end) {
4140 path->slots[0]++;
4141 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4142 ret = btrfs_next_leaf(root, path);
4143 if (ret < 0) {
4144 err = ret;
4145 goto out;
4146 }
4147 if (ret > 0)
4148 goto not_found;
4149 leaf = path->nodes[0];
4150 }
4151 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4152 if (found_key.objectid != objectid ||
4153 found_key.type != BTRFS_EXTENT_DATA_KEY)
4154 goto not_found;
4155 if (start + len <= found_key.offset)
4156 goto not_found;
4157 em->start = start;
4158 em->len = found_key.offset - start;
4159 goto not_found_em;
4160 }
4162 if (found_type == BTRFS_FILE_EXTENT_REG ||
4163 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4164 em->start = extent_start;
4165 em->len = extent_end - extent_start;
4166 em->orig_start = extent_start -
4167 btrfs_file_extent_offset(leaf, item);
4168 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4169 if (bytenr == 0) {
4170 em->block_start = EXTENT_MAP_HOLE;
4171 goto insert;
4172 }
4173 if (compressed) {
4174 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4175 em->block_start = bytenr;
4176 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4177 item);
4178 } else {
4179 bytenr += btrfs_file_extent_offset(leaf, item);
4180 em->block_start = bytenr;
4181 em->block_len = em->len;
4182 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4183 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4184 }
4185 goto insert;
4186 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4187 unsigned long ptr;
4188 char *map;
4189 size_t size;
4190 size_t extent_offset;
4191 size_t copy_size;
4193 em->block_start = EXTENT_MAP_INLINE;
4194 if (!page || create) {
4195 em->start = extent_start;
4196 em->len = extent_end - extent_start;
4197 goto out;
4198 }
4200 size = btrfs_file_extent_inline_len(leaf, item);
4201 extent_offset = page_offset(page) + pg_offset - extent_start;
4202 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4203 size - extent_offset);
4204 em->start = extent_start + extent_offset;
4205 em->len = (copy_size + root->sectorsize - 1) &
4206 ~((u64)root->sectorsize - 1);
4207 em->orig_start = EXTENT_MAP_INLINE;
4208 if (compressed)
4209 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4210 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4211 if (create == 0 && !PageUptodate(page)) {
4212 if (btrfs_file_extent_compression(leaf, item) ==
4213 BTRFS_COMPRESS_ZLIB) {
4214 ret = uncompress_inline(path, inode, page,
4215 pg_offset,
4216 extent_offset, item);
4217 BUG_ON(ret);
4218 } else {
4219 map = kmap(page);
4220 read_extent_buffer(leaf, map + pg_offset, ptr,
4221 copy_size);
4222 kunmap(page);
4223 }
4224 flush_dcache_page(page);
4225 } else if (create && PageUptodate(page)) {
4226 if (!trans) {
4227 kunmap(page);
4228 free_extent_map(em);
4229 em = NULL;
4230 btrfs_release_path(root, path);
4231 trans = btrfs_join_transaction(root, 1);
4232 goto again;
4233 }
4234 map = kmap(page);
4235 write_extent_buffer(leaf, map + pg_offset, ptr,
4236 copy_size);
4237 kunmap(page);
4238 btrfs_mark_buffer_dirty(leaf);
4240 set_extent_uptodate(io_tree, em->start,
4241 extent_map_end(em) - 1, GFP_NOFS);
4242 goto insert;
4243 } else {
4244 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4245 WARN_ON(1);
4246 }
4247 not_found:
4248 em->start = start;
4249 em->len = len;
4250 not_found_em:
4251 em->block_start = EXTENT_MAP_HOLE;
4252 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4253 insert:
4254 btrfs_release_path(root, path);
4255 if (em->start > start || extent_map_end(em) <= start) {
4256 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4257 "[%llu %llu]\n", (unsigned long long)em->start,
4258 (unsigned long long)em->len,
4259 (unsigned long long)start,
4260 (unsigned long long)len);
4261 err = -EIO;
4262 goto out;
4263 }
4265 err = 0;
4266 spin_lock(&em_tree->lock);
4267 ret = add_extent_mapping(em_tree, em);
4268 /* it is possible that someone inserted the extent into the tree
4269 * while we had the lock dropped. It is also possible that
4270 * an overlapping map exists in the tree
4271 */
4272 if (ret == -EEXIST) {
4273 struct extent_map *existing;
4275 ret = 0;
4277 existing = lookup_extent_mapping(em_tree, start, len);
4278 if (existing && (existing->start > start ||
4279 existing->start + existing->len <= start)) {
4280 free_extent_map(existing);
4281 existing = NULL;
4282 }
4283 if (!existing) {
4284 existing = lookup_extent_mapping(em_tree, em->start,
4285 em->len);
4286 if (existing) {
4287 err = merge_extent_mapping(em_tree, existing,
4288 em, start,
4289 root->sectorsize);
4290 free_extent_map(existing);
4291 if (err) {
4292 free_extent_map(em);
4293 em = NULL;
4294 }
4295 } else {
4296 err = -EIO;
4297 free_extent_map(em);
4298 em = NULL;
4299 }
4300 } else {
4301 free_extent_map(em);
4302 em = existing;
4303 err = 0;
4304 }
4305 }
4306 spin_unlock(&em_tree->lock);
4307 out:
4308 if (path)
4309 btrfs_free_path(path);
4310 if (trans) {
4311 ret = btrfs_end_transaction(trans, root);
4312 if (!err)
4313 err = ret;
4314 }
4315 if (err) {
4316 free_extent_map(em);
4317 return ERR_PTR(err);
4318 }
4319 return em;
4320 }
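/*
 * O_DIRECT is not supported yet, but a ->direct_IO method has to exist
 * for O_DIRECT opens to succeed at all, so stub it out with -EINVAL.
 */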
4322 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4323 const struct iovec *iov, loff_t offset,
4324 unsigned long nr_segs)
4325 {
4326 return -EINVAL;
4327 }
4329 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4330 __u64 start, __u64 len)
4331 {
4332 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4333 }
4335 int btrfs_readpage(struct file *file, struct page *page)
4336 {
4337 struct extent_io_tree *tree;
4338 tree = &BTRFS_I(page->mapping->host)->io_tree;
4339 return extent_read_full_page(tree, page, btrfs_get_extent);
4340 }
4342 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4343 {
4344 struct extent_io_tree *tree;
4347 if (current->flags & PF_MEMALLOC) {
4348 redirty_page_for_writepage(wbc, page);
4349 unlock_page(page);
4350 return 0;
4351 }
4352 tree = &BTRFS_I(page->mapping->host)->io_tree;
4353 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4354 }
4356 int btrfs_writepages(struct address_space *mapping,
4357 struct writeback_control *wbc)
4358 {
4359 struct extent_io_tree *tree;
4361 tree = &BTRFS_I(mapping->host)->io_tree;
4362 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4363 }
4365 static int
4366 btrfs_readpages(struct file *file, struct address_space *mapping,
4367 struct list_head *pages, unsigned nr_pages)
4368 {
4369 struct extent_io_tree *tree;
4370 tree = &BTRFS_I(mapping->host)->io_tree;
4371 return extent_readpages(tree, mapping, pages, nr_pages,
4372 btrfs_get_extent);
4373 }
4374 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4375 {
4376 struct extent_io_tree *tree;
4377 struct extent_map_tree *map;
4378 int ret;
4380 tree = &BTRFS_I(page->mapping->host)->io_tree;
4381 map = &BTRFS_I(page->mapping->host)->extent_tree;
4382 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4383 if (ret == 1) {
4384 ClearPagePrivate(page);
4385 set_page_private(page, 0);
4386 page_cache_release(page);
4387 }
4388 return ret;
4389 }
4391 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4392 {
4393 if (PageWriteback(page) || PageDirty(page))
4394 return 0;
4395 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4396 }
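/*
 * invalidatepage is called when a page is being dropped from the page
 * cache. If an ordered extent is still pending for this range, the IO
 * it is waiting for will never be started, so finish the ordered
 * accounting here before tearing down the page's private state.
 */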
4398 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4399 {
4400 struct extent_io_tree *tree;
4401 struct btrfs_ordered_extent *ordered;
4402 u64 page_start = page_offset(page);
4403 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4405 wait_on_page_writeback(page);
4406 tree = &BTRFS_I(page->mapping->host)->io_tree;
4407 if (offset) {
4408 btrfs_releasepage(page, GFP_NOFS);
4409 return;
4410 }
4412 lock_extent(tree, page_start, page_end, GFP_NOFS);
4413 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4414 page_offset(page));
4415 if (ordered) {
4416 /*
4417 * IO on this page will never be started, so we need
4418 * to account for any ordered extents now
4419 */
4420 clear_extent_bit(tree, page_start, page_end,
4421 EXTENT_DIRTY | EXTENT_DELALLOC |
4422 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4423 btrfs_finish_ordered_io(page->mapping->host,
4424 page_start, page_end);
4425 btrfs_put_ordered_extent(ordered);
4426 lock_extent(tree, page_start, page_end, GFP_NOFS);
4427 }
4428 clear_extent_bit(tree, page_start, page_end,
4429 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4430 EXTENT_ORDERED,
4431 1, 1, GFP_NOFS);
4432 __btrfs_releasepage(page, GFP_NOFS);
4434 ClearPageChecked(page);
4435 if (PagePrivate(page)) {
4436 ClearPagePrivate(page);
4437 set_page_private(page, 0);
4438 page_cache_release(page);
4439 }
4440 }
4442 /*
4443 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4444 * called from a page fault handler when a page is first dirtied. Hence we must
4445 * be careful to check for EOF conditions here. We set the page up correctly
4446 * for a written page which means we get ENOSPC checking when writing into
4447 * holes and correct delalloc and unwritten extent mapping on filesystems that
4448 * support these features.
4449 *
4450 * We are not allowed to take the i_mutex here so we have to play games to
4451 * protect against truncate races as the page could now be beyond EOF. Because
4452 * vmtruncate() writes the inode size before removing pages, once we have the
4453 * page lock we can determine safely if the page is beyond EOF. If it is not
4454 * beyond EOF, then the page is guaranteed safe against truncation until we
4455 * unlock the page.
4456 */
4457 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4458 {
4459 struct page *page = vmf->page;
4460 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4461 struct btrfs_root *root = BTRFS_I(inode)->root;
4462 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4463 struct btrfs_ordered_extent *ordered;
4464 char *kaddr;
4465 unsigned long zero_start;
4466 loff_t size;
4467 int ret;
4468 u64 page_start;
4469 u64 page_end;
4471 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4472 if (ret) {
4473 if (ret == -ENOMEM)
4474 ret = VM_FAULT_OOM;
4475 else /* -ENOSPC, -EIO, etc */
4476 ret = VM_FAULT_SIGBUS;
4477 goto out;
4478 }
4480 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4481 again:
4482 lock_page(page);
4483 size = i_size_read(inode);
4484 page_start = page_offset(page);
4485 page_end = page_start + PAGE_CACHE_SIZE - 1;
4487 if ((page->mapping != inode->i_mapping) ||
4488 (page_start >= size)) {
4489 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4490 /* page got truncated out from underneath us */
4491 goto out_unlock;
4492 }
4493 wait_on_page_writeback(page);
4495 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4496 set_page_extent_mapped(page);
4498 /*
4499 * we can't set the delalloc bits if there are pending ordered
4500 * extents. Drop our locks and wait for them to finish
4501 */
4502 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4503 if (ordered) {
4504 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4505 unlock_page(page);
4506 btrfs_start_ordered_extent(inode, ordered, 1);
4507 btrfs_put_ordered_extent(ordered);
4508 goto again;
4509 }
4511 btrfs_set_extent_delalloc(inode, page_start, page_end);
4512 ret = 0;
4514 /* page is wholly or partially inside EOF */
4515 if (page_start + PAGE_CACHE_SIZE > size)
4516 zero_start = size & ~PAGE_CACHE_MASK;
4517 else
4518 zero_start = PAGE_CACHE_SIZE;
4520 if (zero_start != PAGE_CACHE_SIZE) {
4521 kaddr = kmap(page);
4522 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4523 flush_dcache_page(page);
4524 kunmap(page);
4525 }
4526 ClearPageChecked(page);
4527 set_page_dirty(page);
4529 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
4530 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4532 out_unlock:
4533 unlock_page(page);
4534 out:
4535 return ret;
4536 }
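/*
 * ->truncate inode operation: i_size has already been updated, so zero
 * the partial tail page, wait for ordered IO on the truncated range,
 * then drop the items beyond the new size inside a transaction. The
 * orphan item protects against leaking extents if we crash mid-way.
 */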
4538 static void btrfs_truncate(struct inode *inode)
4539 {
4540 struct btrfs_root *root = BTRFS_I(inode)->root;
4541 int ret;
4542 struct btrfs_trans_handle *trans;
4543 unsigned long nr;
4544 u64 mask = root->sectorsize - 1;
4546 if (!S_ISREG(inode->i_mode))
4547 return;
4548 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4549 return;
4551 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4552 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4554 trans = btrfs_start_transaction(root, 1);
4556 /*
4557 * setattr is responsible for setting the ordered_data_close flag,
4558 * but that is only tested during the last file release. That
4559 * could happen well after the next commit, leaving a great big
4560 * window where new writes may get lost if someone chooses to write
4561 * to this file after truncating to zero
4562 *
4563 * The inode doesn't have any dirty data here, and so if we commit
4564 * this is a noop. If someone immediately starts writing to the inode
4565 * it is very likely we'll catch some of their writes in this
4566 * transaction, and the commit will find this file on the ordered
4567 * data list with good things to send down.
4568 *
4569 * This is a best effort solution, there is still a window where
4570 * using truncate to replace the contents of the file will
4571 * end up with a zero length file after a crash.
4572 */
4573 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
4574 btrfs_add_ordered_operation(trans, root, inode);
4576 btrfs_set_trans_block_group(trans, inode);
4577 btrfs_i_size_write(inode, inode->i_size);
4579 ret = btrfs_orphan_add(trans, inode);
4580 if (ret)
4581 goto out;
4582 /* FIXME, add redo link to tree so we don't leak on crash */
4583 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4584 BTRFS_EXTENT_DATA_KEY);
4585 btrfs_update_inode(trans, root, inode);
4587 ret = btrfs_orphan_del(trans, inode);
4588 BUG_ON(ret);
4590 out:
4591 nr = trans->blocks_used;
4592 ret = btrfs_end_transaction_throttle(trans, root);
4593 BUG_ON(ret);
4594 btrfs_btree_balance_dirty(root, nr);
4595 }
4597 /*
4598 * create a new subvolume directory/inode (helper for the ioctl).
4599 */
4600 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4601 struct btrfs_root *new_root, struct dentry *dentry,
4602 u64 new_dirid, u64 alloc_hint)
4603 {
4604 struct inode *inode;
4605 int error;
4606 u64 index = 0;
4608 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4609 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4610 if (IS_ERR(inode))
4611 return PTR_ERR(inode);
4612 inode->i_op = &btrfs_dir_inode_operations;
4613 inode->i_fop = &btrfs_dir_file_operations;
4615 inode->i_nlink = 1;
4616 btrfs_i_size_write(inode, 0);
4618 error = btrfs_update_inode(trans, new_root, inode);
4619 if (error)
4620 return error;
4622 d_instantiate(dentry, inode);
4623 return 0;
4624 }
4626 /* helper function for file defrag and space balancing. This
4627 * forces readahead on a given range of bytes in an inode
4628 */
4629 unsigned long btrfs_force_ra(struct address_space *mapping,
4630 struct file_ra_state *ra, struct file *file,
4631 pgoff_t offset, pgoff_t last_index)
4632 {
4633 pgoff_t req_size = last_index - offset + 1;
4635 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4636 return offset + req_size;
4637 }
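/*
 * inode allocation and teardown hooks: the VFS inode is embedded in
 * struct btrfs_inode, so both live in our private slab cache.
 */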
4639 struct inode *btrfs_alloc_inode(struct super_block *sb)
4640 {
4641 struct btrfs_inode *ei;
4643 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4644 if (!ei)
4645 return NULL;
4646 ei->last_trans = 0;
4647 ei->logged_trans = 0;
4648 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4649 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4650 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4651 INIT_LIST_HEAD(&ei->i_orphan);
4652 INIT_LIST_HEAD(&ei->ordered_operations);
4653 return &ei->vfs_inode;
4654 }
4656 void btrfs_destroy_inode(struct inode *inode)
4657 {
4658 struct btrfs_ordered_extent *ordered;
4659 struct btrfs_root *root = BTRFS_I(inode)->root;
4661 WARN_ON(!list_empty(&inode->i_dentry));
4662 WARN_ON(inode->i_data.nrpages);
4664 if (BTRFS_I(inode)->i_acl &&
4665 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4666 posix_acl_release(BTRFS_I(inode)->i_acl);
4667 if (BTRFS_I(inode)->i_default_acl &&
4668 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4669 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4671 /*
4672 * Make sure we're properly removed from the ordered operation
4673 * lists.
4674 */
4675 smp_mb();
4676 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
4677 spin_lock(&root->fs_info->ordered_extent_lock);
4678 list_del_init(&BTRFS_I(inode)->ordered_operations);
4679 spin_unlock(&root->fs_info->ordered_extent_lock);
4680 }
4682 spin_lock(&root->list_lock);
4683 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4684 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4685 " list\n", inode->i_ino);
4686 dump_stack();
4687 }
4688 spin_unlock(&root->list_lock);
4690 while (1) {
4691 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4692 if (!ordered)
4693 break;
4694 else {
4695 printk(KERN_ERR "btrfs found ordered "
4696 "extent %llu %llu on inode cleanup\n",
4697 (unsigned long long)ordered->file_offset,
4698 (unsigned long long)ordered->len);
4699 btrfs_remove_ordered_extent(inode, ordered);
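/* one reference for the lookup above, one for the ordered tree */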
4700 btrfs_put_ordered_extent(ordered);
4701 btrfs_put_ordered_extent(ordered);
4702 }
4703 }
4704 inode_tree_del(inode);
4705 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4706 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4707 }
4709 static void init_once(void *foo)
4710 {
4711 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4713 inode_init_once(&ei->vfs_inode);
4714 }
4716 void btrfs_destroy_cachep(void)
4717 {
4718 if (btrfs_inode_cachep)
4719 kmem_cache_destroy(btrfs_inode_cachep);
4720 if (btrfs_trans_handle_cachep)
4721 kmem_cache_destroy(btrfs_trans_handle_cachep);
4722 if (btrfs_transaction_cachep)
4723 kmem_cache_destroy(btrfs_transaction_cachep);
4724 if (btrfs_path_cachep)
4725 kmem_cache_destroy(btrfs_path_cachep);
4726 }
4728 int btrfs_init_cachep(void)
4729 {
4730 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
4731 sizeof(struct btrfs_inode), 0,
4732 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
4733 if (!btrfs_inode_cachep)
4734 goto fail;
4736 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
4737 sizeof(struct btrfs_trans_handle), 0,
4738 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4739 if (!btrfs_trans_handle_cachep)
4740 goto fail;
4742 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
4743 sizeof(struct btrfs_transaction), 0,
4744 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4745 if (!btrfs_transaction_cachep)
4746 goto fail;
4748 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
4749 sizeof(struct btrfs_path), 0,
4750 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4751 if (!btrfs_path_cachep)
4752 goto fail;
4754 return 0;
4755 fail:
4756 btrfs_destroy_cachep();
4757 return -ENOMEM;
4758 }
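/*
 * note that stat->blocks includes delalloc_bytes, so space consumed by
 * dirty but not yet flushed data already shows up in stat(2).
 */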
4760 static int btrfs_getattr(struct vfsmount *mnt,
4761 struct dentry *dentry, struct kstat *stat)
4762 {
4763 struct inode *inode = dentry->d_inode;
4764 generic_fillattr(inode, stat);
4765 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
4766 stat->blksize = PAGE_CACHE_SIZE;
4767 stat->blocks = (inode_get_bytes(inode) +
4768 BTRFS_I(inode)->delalloc_bytes) >> 9;
4769 return 0;
4770 }
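/*
 * rename only works inside a single subvolume: crossing subvolumes or
 * renaming a subvolume/snapshot root would require juggling backrefs,
 * so both cases are rejected with -EXDEV below.
 */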
4772 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4773 struct inode *new_dir, struct dentry *new_dentry)
4774 {
4775 struct btrfs_trans_handle *trans;
4776 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4777 struct inode *new_inode = new_dentry->d_inode;
4778 struct inode *old_inode = old_dentry->d_inode;
4779 struct timespec ctime = CURRENT_TIME;
4780 u64 index = 0;
4781 int ret;
4783 /* we're not allowed to rename between subvolumes */
4784 if (BTRFS_I(old_inode)->root->root_key.objectid !=
4785 BTRFS_I(new_dir)->root->root_key.objectid)
4786 return -EXDEV;
4788 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4789 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4790 return -ENOTEMPTY;
4791 }
4793 /* to rename a snapshot or subvolume, we need to juggle the
4794 * backrefs. This isn't coded yet
4795 */
4796 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4797 return -EXDEV;
4799 ret = btrfs_check_metadata_free_space(root);
4800 if (ret)
4801 goto out_unlock;
4803 /*
4804 * we're using rename to replace one file with another.
4805 * and the replacement file is large. Start IO on it now so
4806 * we don't add too much work to the end of the transaction
4807 */
4808 if (new_inode && old_inode && S_ISREG(old_inode->i_mode) &&
4809 new_inode->i_size &&
4810 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
4811 filemap_flush(old_inode->i_mapping);
4813 trans = btrfs_start_transaction(root, 1);
4815 /*
4816 * make sure the inode gets flushed if it is replacing
4817 * something.
4818 */
4819 if (new_inode && new_inode->i_size &&
4820 old_inode && S_ISREG(old_inode->i_mode)) {
4821 btrfs_add_ordered_operation(trans, root, old_inode);
4822 }
4824 /*
4825 * this is an ugly little race, but the rename is required to make
4826 * sure that if we crash, the inode is either at the old name
4827 * or the new one. pinning the log transaction lets us make sure
4828 * we don't allow a log commit to come in after we unlink the
4829 * name but before we add the new name back in.
4830 */
4831 btrfs_pin_log_trans(root);
4833 btrfs_set_trans_block_group(trans, new_dir);
4835 btrfs_inc_nlink(old_dentry->d_inode);
4836 old_dir->i_ctime = old_dir->i_mtime = ctime;
4837 new_dir->i_ctime = new_dir->i_mtime = ctime;
4838 old_inode->i_ctime = ctime;
4840 if (old_dentry->d_parent != new_dentry->d_parent)
4841 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
4843 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4844 old_dentry->d_name.name,
4845 old_dentry->d_name.len);
4846 if (ret)
4847 goto out_fail;
4849 if (new_inode) {
4850 new_inode->i_ctime = CURRENT_TIME;
4851 ret = btrfs_unlink_inode(trans, root, new_dir,
4852 new_dentry->d_inode,
4853 new_dentry->d_name.name,
4854 new_dentry->d_name.len);
4855 if (ret)
4856 goto out_fail;
4857 if (new_inode->i_nlink == 0) {
4858 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4859 if (ret)
4860 goto out_fail;
4861 }
4862 }
4864 ret = btrfs_set_inode_index(new_dir, &index);
4865 if (ret)
4866 goto out_fail;
4868 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4869 old_inode, new_dentry->d_name.name,
4870 new_dentry->d_name.len, 1, index);
4871 if (ret)
4872 goto out_fail;
4874 btrfs_log_new_name(trans, old_inode, old_dir,
4875 new_dentry->d_parent);
4876 out_fail:
4878 /* this btrfs_end_log_trans just allows the current
4879 * log-sub transaction to complete
4880 */
4881 btrfs_end_log_trans(root);
4882 btrfs_end_transaction_throttle(trans, root);
4883 out_unlock:
4884 return ret;
4885 }
4887 /*
4888 * some fairly slow code that needs optimization. This walks the list
4889 * of all the inodes with pending delalloc and forces them to disk.
4890 */
4891 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4892 {
4893 struct list_head *head = &root->fs_info->delalloc_inodes;
4894 struct btrfs_inode *binode;
4895 struct inode *inode;
4897 if (root->fs_info->sb->s_flags & MS_RDONLY)
4898 return -EROFS;
4900 spin_lock(&root->fs_info->delalloc_lock);
4901 while (!list_empty(head)) {
4902 binode = list_entry(head->next, struct btrfs_inode,
4903 delalloc_inodes);
4904 inode = igrab(&binode->vfs_inode);
4905 if (!inode)
4906 list_del_init(&binode->delalloc_inodes);
4907 spin_unlock(&root->fs_info->delalloc_lock);
4908 if (inode) {
4909 filemap_flush(inode->i_mapping);
4910 iput(inode);
4911 }
4912 cond_resched();
4913 spin_lock(&root->fs_info->delalloc_lock);
4914 }
4915 spin_unlock(&root->fs_info->delalloc_lock);
4917 /* the filemap_flush will queue IO into the worker threads, but
4918 * we have to make sure the IO is actually started and that
4919 * ordered extents get created before we return
4920 */
4921 atomic_inc(&root->fs_info->async_submit_draining);
4922 while (atomic_read(&root->fs_info->nr_async_submits) ||
4923 atomic_read(&root->fs_info->async_delalloc_pages)) {
4924 wait_event(root->fs_info->async_submit_wait,
4925 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
4926 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
4927 }
4928 atomic_dec(&root->fs_info->async_submit_draining);
4929 return 0;
4930 }
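/*
 * symlink targets are stored as inline file extent items, so the
 * target must fit in BTRFS_MAX_INLINE_DATA_SIZE: create the inode,
 * link it in, then write the target string into a freshly inserted
 * inline extent.
 */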
4932 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4933 const char *symname)
4934 {
4935 struct btrfs_trans_handle *trans;
4936 struct btrfs_root *root = BTRFS_I(dir)->root;
4937 struct btrfs_path *path;
4938 struct btrfs_key key;
4939 struct inode *inode = NULL;
4940 int err;
4941 int drop_inode = 0;
4942 u64 objectid;
4943 u64 index = 0;
4944 int name_len;
4945 int datasize;
4946 unsigned long ptr;
4947 struct btrfs_file_extent_item *ei;
4948 struct extent_buffer *leaf;
4949 unsigned long nr = 0;
4951 name_len = strlen(symname) + 1;
4952 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4953 return -ENAMETOOLONG;
4955 err = btrfs_check_metadata_free_space(root);
4956 if (err)
4957 goto out_fail;
4959 trans = btrfs_start_transaction(root, 1);
4960 btrfs_set_trans_block_group(trans, dir);
4962 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4963 if (err) {
4964 err = -ENOSPC;
4965 goto out_unlock;
4966 }
4968 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4969 dentry->d_name.len,
4970 dentry->d_parent->d_inode->i_ino, objectid,
4971 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4972 &index);
4973 err = PTR_ERR(inode);
4974 if (IS_ERR(inode))
4975 goto out_unlock;
4977 err = btrfs_init_inode_security(inode, dir);
4978 if (err) {
4979 drop_inode = 1;
4980 goto out_unlock;
4981 }
4983 btrfs_set_trans_block_group(trans, inode);
4984 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4985 if (err)
4986 drop_inode = 1;
4987 else {
4988 inode->i_mapping->a_ops = &btrfs_aops;
4989 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4990 inode->i_fop = &btrfs_file_operations;
4991 inode->i_op = &btrfs_file_inode_operations;
4992 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4993 }
4994 dir->i_sb->s_dirt = 1;
4995 btrfs_update_inode_block_group(trans, inode);
4996 btrfs_update_inode_block_group(trans, dir);
4997 if (drop_inode)
4998 goto out_unlock;
5000 path = btrfs_alloc_path();
5001 BUG_ON(!path);
5002 key.objectid = inode->i_ino;
5003 key.offset = 0;
5004 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5005 datasize = btrfs_file_extent_calc_inline_size(name_len);
5006 err = btrfs_insert_empty_item(trans, root, path, &key,
5007 datasize);
5008 if (err) {
5009 drop_inode = 1;
5010 goto out_unlock;
5011 }
5012 leaf = path->nodes[0];
5013 ei = btrfs_item_ptr(leaf, path->slots[0],
5014 struct btrfs_file_extent_item);
5015 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5016 btrfs_set_file_extent_type(leaf, ei,
5017 BTRFS_FILE_EXTENT_INLINE);
5018 btrfs_set_file_extent_encryption(leaf, ei, 0);
5019 btrfs_set_file_extent_compression(leaf, ei, 0);
5020 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5021 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5023 ptr = btrfs_file_extent_inline_start(ei);
5024 write_extent_buffer(leaf, symname, ptr, name_len);
5025 btrfs_mark_buffer_dirty(leaf);
5026 btrfs_free_path(path);
5028 inode->i_op = &btrfs_symlink_inode_operations;
5029 inode->i_mapping->a_ops = &btrfs_symlink_aops;
5030 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5031 inode_set_bytes(inode, name_len);
5032 btrfs_i_size_write(inode, name_len - 1);
5033 err = btrfs_update_inode(trans, root, inode);
5034 if (err)
5035 drop_inode = 1;
5037 out_unlock:
5038 nr = trans->blocks_used;
5039 btrfs_end_transaction_throttle(trans, root);
5040 out_fail:
5041 if (drop_inode) {
5042 inode_dec_link_count(inode);
5043 iput(inode);
5044 }
5045 btrfs_btree_balance_dirty(root, nr);
5046 return err;
5047 }
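/*
 * fallocate helper: reserve extents covering [start, end) in chunks of
 * at most max_extent bytes and insert them as preallocated
 * (BTRFS_FILE_EXTENT_PREALLOC) file extents. i_size is only pushed out
 * when FALLOC_FL_KEEP_SIZE is not set.
 */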
5049 static int prealloc_file_range(struct btrfs_trans_handle *trans,
5050 struct inode *inode, u64 start, u64 end,
5051 u64 locked_end, u64 alloc_hint, int mode)
5052 {
5053 struct btrfs_root *root = BTRFS_I(inode)->root;
5054 struct btrfs_key ins;
5055 u64 alloc_size;
5056 u64 cur_offset = start;
5057 u64 num_bytes = end - start;
5058 int ret = 0;
5060 while (num_bytes > 0) {
5061 alloc_size = min(num_bytes, root->fs_info->max_extent);
5062 ret = btrfs_reserve_extent(trans, root, alloc_size,
5063 root->sectorsize, 0, alloc_hint,
5064 (u64)-1, &ins, 1);
5065 if (ret) {
5066 WARN_ON(1);
5067 goto out;
5068 }
5069 ret = insert_reserved_file_extent(trans, inode,
5070 cur_offset, ins.objectid,
5071 ins.offset, ins.offset,
5072 ins.offset, locked_end,
5073 0, 0, 0,
5074 BTRFS_FILE_EXTENT_PREALLOC);
5075 BUG_ON(ret);
5076 num_bytes -= ins.offset;
5077 cur_offset += ins.offset;
5078 alloc_hint = ins.objectid + ins.offset;
5079 }
5080 out:
5081 if (cur_offset > start) {
5082 inode->i_ctime = CURRENT_TIME;
5083 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5084 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5085 cur_offset > i_size_read(inode))
5086 btrfs_i_size_write(inode, cur_offset);
5087 ret = btrfs_update_inode(trans, root, inode);
5088 BUG_ON(ret);
5089 }
5091 return ret;
5092 }
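/*
 * fallocate: after waiting out pending ordered IO and locking the
 * range, walk it with btrfs_get_extent() and preallocate every hole
 * that is found.
 */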
5094 static long btrfs_fallocate(struct inode *inode, int mode,
5095 loff_t offset, loff_t len)
5096 {
5097 u64 cur_offset;
5098 u64 last_byte;
5099 u64 alloc_start;
5100 u64 alloc_end;
5101 u64 alloc_hint = 0;
5102 u64 locked_end;
5103 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5104 struct extent_map *em;
5105 struct btrfs_trans_handle *trans;
5106 int ret;
5108 alloc_start = offset & ~mask;
5109 alloc_end = (offset + len + mask) & ~mask;
5111 /*
5112 * wait for ordered IO before we have any locks. We'll loop again
5113 * below with the locks held.
5114 */
5115 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5117 mutex_lock(&inode->i_mutex);
5118 if (alloc_start > inode->i_size) {
5119 ret = btrfs_cont_expand(inode, alloc_start);
5120 if (ret)
5121 goto out;
5122 }
5124 locked_end = alloc_end - 1;
5125 while (1) {
5126 struct btrfs_ordered_extent *ordered;
5128 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
5129 if (!trans) {
5130 ret = -EIO;
5131 goto out;
5132 }
5134 /* the extent lock is ordered inside the running
5135 * transaction
5136 */
5137 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5138 GFP_NOFS);
5139 ordered = btrfs_lookup_first_ordered_extent(inode,
5140 alloc_end - 1);
5141 if (ordered &&
5142 ordered->file_offset + ordered->len > alloc_start &&
5143 ordered->file_offset < alloc_end) {
5144 btrfs_put_ordered_extent(ordered);
5145 unlock_extent(&BTRFS_I(inode)->io_tree,
5146 alloc_start, locked_end, GFP_NOFS);
5147 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5149 /*
5150 * we can't wait on the range with the transaction
5151 * running or with the extent lock held
5152 */
5153 btrfs_wait_ordered_range(inode, alloc_start,
5154 alloc_end - alloc_start);
5155 } else {
5156 if (ordered)
5157 btrfs_put_ordered_extent(ordered);
5158 break;
5159 }
5160 }
5162 cur_offset = alloc_start;
5163 while (1) {
5164 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5165 alloc_end - cur_offset, 0);
5166 BUG_ON(IS_ERR(em) || !em);
5167 last_byte = min(extent_map_end(em), alloc_end);
5168 last_byte = (last_byte + mask) & ~mask;
5169 if (em->block_start == EXTENT_MAP_HOLE) {
5170 ret = prealloc_file_range(trans, inode, cur_offset,
5171 last_byte, locked_end + 1,
5172 alloc_hint, mode);
5173 if (ret < 0) {
5174 free_extent_map(em);
5175 break;
5176 }
5177 }
5178 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5179 alloc_hint = em->block_start;
5180 free_extent_map(em);
5182 cur_offset = last_byte;
5183 if (cur_offset >= alloc_end) {
5184 ret = 0;
5185 break;
5186 }
5187 }
5188 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5189 GFP_NOFS);
5191 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5192 out:
5193 mutex_unlock(&inode->i_mutex);
5194 return ret;
5195 }
5197 static int btrfs_set_page_dirty(struct page *page)
5198 {
5199 return __set_page_dirty_nobuffers(page);
5200 }
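/*
 * an inode flagged BTRFS_INODE_READONLY refuses MAY_WRITE here before
 * handing the remaining checks to generic_permission().
 */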
5202 static int btrfs_permission(struct inode *inode, int mask)
5203 {
5204 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
5205 return -EACCES;
5206 return generic_permission(inode, mask, btrfs_check_acl);
5207 }
5209 static struct inode_operations btrfs_dir_inode_operations = {
5210 .getattr = btrfs_getattr,
5211 .lookup = btrfs_lookup,
5212 .create = btrfs_create,
5213 .unlink = btrfs_unlink,
5214 .link = btrfs_link,
5215 .mkdir = btrfs_mkdir,
5216 .rmdir = btrfs_rmdir,
5217 .rename = btrfs_rename,
5218 .symlink = btrfs_symlink,
5219 .setattr = btrfs_setattr,
5220 .mknod = btrfs_mknod,
5221 .setxattr = btrfs_setxattr,
5222 .getxattr = btrfs_getxattr,
5223 .listxattr = btrfs_listxattr,
5224 .removexattr = btrfs_removexattr,
5225 .permission = btrfs_permission,
5226 };
5227 static struct inode_operations btrfs_dir_ro_inode_operations = {
5228 .lookup = btrfs_lookup,
5229 .permission = btrfs_permission,
5230 };
5231 static struct file_operations btrfs_dir_file_operations = {
5232 .llseek = generic_file_llseek,
5233 .read = generic_read_dir,
5234 .readdir = btrfs_real_readdir,
5235 .unlocked_ioctl = btrfs_ioctl,
5236 #ifdef CONFIG_COMPAT
5237 .compat_ioctl = btrfs_ioctl,
5238 #endif
5239 .release = btrfs_release_file,
5240 .fsync = btrfs_sync_file,
5241 };
5243 static struct extent_io_ops btrfs_extent_io_ops = {
5244 .fill_delalloc = run_delalloc_range,
5245 .submit_bio_hook = btrfs_submit_bio_hook,
5246 .merge_bio_hook = btrfs_merge_bio_hook,
5247 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
5248 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
5249 .writepage_start_hook = btrfs_writepage_start_hook,
5250 .readpage_io_failed_hook = btrfs_io_failed_hook,
5251 .set_bit_hook = btrfs_set_bit_hook,
5252 .clear_bit_hook = btrfs_clear_bit_hook,
5253 };
5255 /*
5256 * btrfs doesn't support the bmap operation because swapfiles
5257 * use bmap to make a mapping of extents in the file. They assume
5258 * these extents won't change over the life of the file and they
5259 * use the bmap result to do IO directly to the drive.
5260 *
5261 * the btrfs bmap call would return logical addresses that aren't
5262 * suitable for IO and they also will change frequently as COW
5263 * operations happen. So, swapfile + btrfs == corruption.
5264 *
5265 * For now we're avoiding this by dropping bmap.
5266 */
5267 static struct address_space_operations btrfs_aops = {
5268 .readpage = btrfs_readpage,
5269 .writepage = btrfs_writepage,
5270 .writepages = btrfs_writepages,
5271 .readpages = btrfs_readpages,
5272 .sync_page = block_sync_page,
5273 .direct_IO = btrfs_direct_IO,
5274 .invalidatepage = btrfs_invalidatepage,
5275 .releasepage = btrfs_releasepage,
5276 .set_page_dirty = btrfs_set_page_dirty,
5277 };
5279 static struct address_space_operations btrfs_symlink_aops = {
5280 .readpage = btrfs_readpage,
5281 .writepage = btrfs_writepage,
5282 .invalidatepage = btrfs_invalidatepage,
5283 .releasepage = btrfs_releasepage,
5284 };
5286 static struct inode_operations btrfs_file_inode_operations = {
5287 .truncate = btrfs_truncate,
5288 .getattr = btrfs_getattr,
5289 .setattr = btrfs_setattr,
5290 .setxattr = btrfs_setxattr,
5291 .getxattr = btrfs_getxattr,
5292 .listxattr = btrfs_listxattr,
5293 .removexattr = btrfs_removexattr,
5294 .permission = btrfs_permission,
5295 .fallocate = btrfs_fallocate,
5296 .fiemap = btrfs_fiemap,
5297 };
5298 static struct inode_operations btrfs_special_inode_operations = {
5299 .getattr = btrfs_getattr,
5300 .setattr = btrfs_setattr,
5301 .permission = btrfs_permission,
5302 .setxattr = btrfs_setxattr,
5303 .getxattr = btrfs_getxattr,
5304 .listxattr = btrfs_listxattr,
5305 .removexattr = btrfs_removexattr,
5306 };
5307 static struct inode_operations btrfs_symlink_inode_operations = {
5308 .readlink = generic_readlink,
5309 .follow_link = page_follow_link_light,
5310 .put_link = page_put_link,
5311 .permission = btrfs_permission,
5312 .setxattr = btrfs_setxattr,
5313 .getxattr = btrfs_getxattr,
5314 .listxattr = btrfs_listxattr,
5315 .removexattr = btrfs_removexattr,
5316 };