Btrfs: Optimize compressed writeback and reads
[linux-2.6/kvm.git] / fs / btrfs / file.c
blob 337221ecca27ea2e05d4d9351a5e1dda8463ff30
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
19 #include <linux/fs.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/time.h>
23 #include <linux/init.h>
24 #include <linux/string.h>
25 #include <linux/smp_lock.h>
26 #include <linux/backing-dev.h>
27 #include <linux/mpage.h>
28 #include <linux/swap.h>
29 #include <linux/writeback.h>
30 #include <linux/statfs.h>
31 #include <linux/compat.h>
32 #include <linux/version.h>
33 #include "ctree.h"
34 #include "disk-io.h"
35 #include "transaction.h"
36 #include "btrfs_inode.h"
37 #include "ioctl.h"
38 #include "print-tree.h"
39 #include "tree-log.h"
40 #include "locking.h"
41 #include "compat.h"
44 /* simple helper to fault in pages and copy. This should go away
45 * and be replaced with calls into generic code.
46 */
47 static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
48 int write_bytes,
49 struct page **prepared_pages,
50 const char __user * buf)
52 long page_fault = 0;
53 int i;
54 int offset = pos & (PAGE_CACHE_SIZE - 1);
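/*
 * copy page by page: only the first page starts at a non-zero offset,
 * every following page is filled from offset 0
 */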
56 for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
57 size_t count = min_t(size_t,
58 PAGE_CACHE_SIZE - offset, write_bytes);
59 struct page *page = prepared_pages[i];
60 fault_in_pages_readable(buf, count);
62 /* Copy data from userspace to the current page */
63 kmap(page);
64 page_fault = __copy_from_user(page_address(page) + offset,
65 buf, count);
66 /* Flush processor's dcache for this page */
67 flush_dcache_page(page);
68 kunmap(page);
69 buf += count;
70 write_bytes -= count;
72 if (page_fault)
73 break;
75 return page_fault ? -EFAULT : 0;
78 /*
79 * unlocks pages after btrfs_file_write is done with them
80 */
81 static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
83 size_t i;
84 for (i = 0; i < num_pages; i++) {
85 if (!pages[i])
86 break;
87 /* page checked is some magic around finding pages that
88 * have been modified without going through btrfs_set_page_dirty
89 * clear it here
90 */
91 ClearPageChecked(pages[i]);
92 unlock_page(pages[i]);
93 mark_page_accessed(pages[i]);
94 page_cache_release(pages[i]);
98 /*
99 * after copy_from_user, pages need to be dirtied and we need to make
100 * sure holes are created between the current EOF and the start of
101 * any next extents (if required).
102 *
103 * this also makes the decision about creating an inline extent vs
104 * doing real data extents, marking pages dirty and delalloc as required.
105 */
106 static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root,
108 struct file *file,
109 struct page **pages,
110 size_t num_pages,
111 loff_t pos,
112 size_t write_bytes)
114 int err = 0;
115 int i;
116 struct inode *inode = fdentry(file)->d_inode;
117 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
118 u64 hint_byte;
119 u64 num_bytes;
120 u64 start_pos;
121 u64 end_of_last_block;
122 u64 end_pos = pos + write_bytes;
123 loff_t isize = i_size_read(inode);
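/*
 * round the range out to full sectors; everything from start_pos to
 * end_of_last_block gets locked and marked delalloc below
 */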
125 start_pos = pos & ~((u64)root->sectorsize - 1);
126 num_bytes = (write_bytes + pos - start_pos +
127 root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
129 end_of_last_block = start_pos + num_bytes - 1;
131 lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
132 trans = btrfs_join_transaction(root, 1);
133 if (!trans) {
134 err = -ENOMEM;
135 goto out_unlock;
137 btrfs_set_trans_block_group(trans, inode);
138 hint_byte = 0;
140 if ((end_of_last_block & 4095) == 0) {
141 printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
142 }
143 set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
145 /* check for reserved extents on each page, we don't want
146 * to reset the delalloc bit on things that already have
147 * extents reserved.
148 */
149 btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
150 for (i = 0; i < num_pages; i++) {
151 struct page *p = pages[i];
152 SetPageUptodate(p);
153 ClearPageChecked(p);
154 set_page_dirty(p);
156 if (end_pos > isize) {
157 i_size_write(inode, end_pos);
158 btrfs_update_inode(trans, root, inode);
160 err = btrfs_end_transaction(trans, root);
161 out_unlock:
162 unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
163 return err;
166 /*
167 * this drops all the extents in the cache that intersect the range
168 * [start, end]. Existing extents are split as required.
169 */
170 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
171 int skip_pinned)
173 struct extent_map *em;
174 struct extent_map *split = NULL;
175 struct extent_map *split2 = NULL;
176 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
177 u64 len = end - start + 1;
178 int ret;
179 int testend = 1;
180 unsigned long flags;
181 int compressed = 0;
183 WARN_ON(end < start);
184 if (end == (u64)-1) {
185 len = (u64)-1;
186 testend = 0;
188 while(1) {
189 if (!split)
190 split = alloc_extent_map(GFP_NOFS);
191 if (!split2)
192 split2 = alloc_extent_map(GFP_NOFS);
194 spin_lock(&em_tree->lock);
195 em = lookup_extent_mapping(em_tree, start, len);
196 if (!em) {
197 spin_unlock(&em_tree->lock);
198 break;
200 flags = em->flags;
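/*
 * pinned extents are still under IO; when skip_pinned is set, leave
 * them in the cache and trim the drop range around them instead
 */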
201 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
202 spin_unlock(&em_tree->lock);
203 if (em->start <= start &&
204 (!testend || em->start + em->len >= start + len)) {
205 free_extent_map(em);
206 break;
208 if (start < em->start) {
209 len = em->start - start;
210 } else {
211 len = start + len - (em->start + em->len);
212 start = em->start + em->len;
214 free_extent_map(em);
215 continue;
217 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
218 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
219 remove_extent_mapping(em_tree, em);
221 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
222 em->start < start) {
223 split->start = em->start;
224 split->len = start - em->start;
225 split->block_start = em->block_start;
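/*
 * compressed extents always reference the entire on-disk extent, so
 * the split pieces keep the full block_len; uncompressed pieces only
 * cover their own part of the block range
 */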
227 if (compressed)
228 split->block_len = em->block_len;
229 else
230 split->block_len = split->len;
232 split->bdev = em->bdev;
233 split->flags = flags;
234 ret = add_extent_mapping(em_tree, split);
235 BUG_ON(ret);
236 free_extent_map(split);
237 split = split2;
238 split2 = NULL;
240 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
241 testend && em->start + em->len > start + len) {
242 u64 diff = start + len - em->start;
244 split->start = start + len;
245 split->len = em->start + em->len - (start + len);
246 split->bdev = em->bdev;
247 split->flags = flags;
249 if (compressed) {
250 split->block_len = em->block_len;
251 split->block_start = em->block_start;
252 } else {
253 split->block_len = split->len;
254 split->block_start = em->block_start + diff;
257 ret = add_extent_mapping(em_tree, split);
258 BUG_ON(ret);
259 free_extent_map(split);
260 split = NULL;
262 spin_unlock(&em_tree->lock);
264 /* once for us */
265 free_extent_map(em);
266 /* once for the tree*/
267 free_extent_map(em);
269 if (split)
270 free_extent_map(split);
271 if (split2)
272 free_extent_map(split2);
273 return 0;
276 int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
278 return 0;
279 #if 0
280 struct btrfs_path *path;
281 struct btrfs_key found_key;
282 struct extent_buffer *leaf;
283 struct btrfs_file_extent_item *extent;
284 u64 last_offset = 0;
285 int nritems;
286 int slot;
287 int found_type;
288 int ret;
289 int err = 0;
290 u64 extent_end = 0;
292 path = btrfs_alloc_path();
293 ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
294 last_offset, 0);
295 while(1) {
296 nritems = btrfs_header_nritems(path->nodes[0]);
297 if (path->slots[0] >= nritems) {
298 ret = btrfs_next_leaf(root, path);
299 if (ret)
300 goto out;
301 nritems = btrfs_header_nritems(path->nodes[0]);
303 slot = path->slots[0];
304 leaf = path->nodes[0];
305 btrfs_item_key_to_cpu(leaf, &found_key, slot);
306 if (found_key.objectid != inode->i_ino)
307 break;
308 if (found_key.type != BTRFS_EXTENT_DATA_KEY)
309 goto out;
311 if (found_key.offset < last_offset) {
312 WARN_ON(1);
313 btrfs_print_leaf(root, leaf);
314 printk("inode %lu found offset %Lu expected %Lu\n",
315 inode->i_ino, found_key.offset, last_offset);
316 err = 1;
317 goto out;
319 extent = btrfs_item_ptr(leaf, slot,
320 struct btrfs_file_extent_item);
321 found_type = btrfs_file_extent_type(leaf, extent);
322 if (found_type == BTRFS_FILE_EXTENT_REG) {
323 extent_end = found_key.offset +
324 btrfs_file_extent_num_bytes(leaf, extent);
325 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
326 struct btrfs_item *item;
327 item = btrfs_item_nr(leaf, slot);
328 extent_end = found_key.offset +
329 btrfs_file_extent_inline_len(leaf, extent);
330 extent_end = (extent_end + root->sectorsize - 1) &
331 ~((u64)root->sectorsize -1 );
333 last_offset = extent_end;
334 path->slots[0]++;
336 if (0 && last_offset < inode->i_size) {
337 WARN_ON(1);
338 btrfs_print_leaf(root, leaf);
339 printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
340 last_offset, inode->i_size);
341 err = 1;
344 out:
345 btrfs_free_path(path);
346 return err;
347 #endif
350 /*
351 * this is very complex, but the basic idea is to drop all extents
352 * in the range start - end. hint_block is filled in with a block number
353 * that would be a good hint to the block allocator for this file.
354 *
355 * If an extent intersects the range but is not entirely inside the range
356 * it is either truncated or split. Anything entirely inside the range
357 * is deleted from the tree.
358 *
359 * inline_limit is used to tell this code which offsets in the file to keep
360 * if they contain inline extents.
361 */
362 int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
363 struct btrfs_root *root, struct inode *inode,
364 u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
366 u64 extent_end = 0;
367 u64 locked_end = end;
368 u64 search_start = start;
369 u64 leaf_start;
370 u64 ram_bytes = 0;
371 u64 orig_parent = 0;
372 u64 disk_bytenr = 0;
373 u8 compression;
374 u8 encryption;
375 u16 other_encoding = 0;
376 u64 root_gen;
377 u64 root_owner;
378 struct extent_buffer *leaf;
379 struct btrfs_file_extent_item *extent;
380 struct btrfs_path *path;
381 struct btrfs_key key;
382 struct btrfs_file_extent_item old;
383 int keep;
384 int slot;
385 int bookend;
386 int found_type = 0;
387 int found_extent;
388 int found_inline;
389 int recow;
390 int ret;
392 inline_limit = 0;
393 btrfs_drop_extent_cache(inode, start, end - 1, 0);
395 path = btrfs_alloc_path();
396 if (!path)
397 return -ENOMEM;
398 while(1) {
399 recow = 0;
400 btrfs_release_path(root, path);
401 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
402 search_start, -1);
403 if (ret < 0)
404 goto out;
405 if (ret > 0) {
406 if (path->slots[0] == 0) {
407 ret = 0;
408 goto out;
410 path->slots[0]--;
412 next_slot:
413 keep = 0;
414 bookend = 0;
415 found_extent = 0;
416 found_inline = 0;
417 leaf_start = 0;
418 root_gen = 0;
419 root_owner = 0;
420 compression = 0;
421 encryption = 0;
422 extent = NULL;
423 leaf = path->nodes[0];
424 slot = path->slots[0];
425 ret = 0;
426 btrfs_item_key_to_cpu(leaf, &key, slot);
427 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
428 key.offset >= end) {
429 goto out;
431 if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
432 key.objectid != inode->i_ino) {
433 goto out;
435 if (recow) {
436 search_start = key.offset;
437 continue;
439 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
440 extent = btrfs_item_ptr(leaf, slot,
441 struct btrfs_file_extent_item);
442 found_type = btrfs_file_extent_type(leaf, extent);
443 compression = btrfs_file_extent_compression(leaf,
444 extent);
445 encryption = btrfs_file_extent_encryption(leaf,
446 extent);
447 other_encoding = btrfs_file_extent_other_encoding(leaf,
448 extent);
449 if (found_type == BTRFS_FILE_EXTENT_REG ||
450 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
451 extent_end =
452 btrfs_file_extent_disk_bytenr(leaf,
453 extent);
454 if (extent_end)
455 *hint_byte = extent_end;
457 extent_end = key.offset +
458 btrfs_file_extent_num_bytes(leaf, extent);
459 ram_bytes = btrfs_file_extent_ram_bytes(leaf,
460 extent);
461 found_extent = 1;
462 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
463 found_inline = 1;
464 extent_end = key.offset +
465 btrfs_file_extent_inline_len(leaf, extent);
467 } else {
468 extent_end = search_start;
471 /* we found nothing we can drop */
472 if ((!found_extent && !found_inline) ||
473 search_start >= extent_end) {
474 int nextret;
475 u32 nritems;
476 nritems = btrfs_header_nritems(leaf);
477 if (slot >= nritems - 1) {
478 nextret = btrfs_next_leaf(root, path);
479 if (nextret)
480 goto out;
481 recow = 1;
482 } else {
483 path->slots[0]++;
485 goto next_slot;
488 if (end <= extent_end && start >= key.offset && found_inline)
489 *hint_byte = EXTENT_MAP_INLINE;
491 if (found_extent) {
492 read_extent_buffer(leaf, &old, (unsigned long)extent,
493 sizeof(old));
494 root_gen = btrfs_header_generation(leaf);
495 root_owner = btrfs_header_owner(leaf);
496 leaf_start = leaf->start;
499 if (end < extent_end && end >= key.offset) {
500 bookend = 1;
501 if (found_inline && start <= key.offset)
502 keep = 1;
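/*
 * a bookend tail extends past 'end', so the io_tree lock has to cover
 * it as well; if try_lock_extent fails, drop the path, wait for the
 * lock and restart the search at the same offset
 */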
505 if (bookend && found_extent) {
506 if (locked_end < extent_end) {
507 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
508 locked_end, extent_end - 1,
509 GFP_NOFS);
510 if (!ret) {
511 btrfs_release_path(root, path);
512 lock_extent(&BTRFS_I(inode)->io_tree,
513 locked_end, extent_end - 1,
514 GFP_NOFS);
515 locked_end = extent_end;
516 continue;
518 locked_end = extent_end;
520 orig_parent = path->nodes[0]->start;
521 disk_bytenr = le64_to_cpu(old.disk_bytenr);
522 if (disk_bytenr != 0) {
523 ret = btrfs_inc_extent_ref(trans, root,
524 disk_bytenr,
525 le64_to_cpu(old.disk_num_bytes),
526 orig_parent, root->root_key.objectid,
527 trans->transid, inode->i_ino);
528 BUG_ON(ret);
532 if (found_inline) {
533 u64 mask = root->sectorsize - 1;
534 search_start = (extent_end + mask) & ~mask;
535 } else
536 search_start = extent_end;
538 /* truncate existing extent */
539 if (start > key.offset) {
540 u64 new_num;
541 u64 old_num;
542 keep = 1;
543 WARN_ON(start & (root->sectorsize - 1));
544 if (found_extent) {
545 new_num = start - key.offset;
546 old_num = btrfs_file_extent_num_bytes(leaf,
547 extent);
548 *hint_byte =
549 btrfs_file_extent_disk_bytenr(leaf,
550 extent);
551 if (btrfs_file_extent_disk_bytenr(leaf,
552 extent)) {
553 inode_sub_bytes(inode, old_num -
554 new_num);
556 if (!compression && !encryption) {
557 btrfs_set_file_extent_ram_bytes(leaf,
558 extent, new_num);
560 btrfs_set_file_extent_num_bytes(leaf,
561 extent, new_num);
562 btrfs_mark_buffer_dirty(leaf);
563 } else if (key.offset < inline_limit &&
564 (end > extent_end) &&
565 (inline_limit < extent_end)) {
566 u32 new_size;
567 new_size = btrfs_file_extent_calc_inline_size(
568 inline_limit - key.offset);
569 inode_sub_bytes(inode, extent_end -
570 inline_limit);
571 btrfs_set_file_extent_ram_bytes(leaf, extent,
572 new_size);
573 if (!compression && !encryption) {
574 btrfs_truncate_item(trans, root, path,
575 new_size, 1);
579 /* delete the entire extent */
580 if (!keep) {
581 if (found_inline)
582 inode_sub_bytes(inode, extent_end -
583 key.offset);
584 ret = btrfs_del_item(trans, root, path);
585 /* TODO update progress marker and return */
586 BUG_ON(ret);
587 extent = NULL;
588 btrfs_release_path(root, path);
589 /* the extent will be freed later */
591 if (bookend && found_inline && start <= key.offset) {
592 u32 new_size;
593 new_size = btrfs_file_extent_calc_inline_size(
594 extent_end - end);
595 inode_sub_bytes(inode, end - key.offset);
596 btrfs_set_file_extent_ram_bytes(leaf, extent,
597 new_size);
598 if (!compression && !encryption)
599 ret = btrfs_truncate_item(trans, root, path,
600 new_size, 0);
601 BUG_ON(ret);
603 /* create bookend, splitting the extent in two */
604 if (bookend && found_extent) {
605 struct btrfs_key ins;
606 ins.objectid = inode->i_ino;
607 ins.offset = end;
608 btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
610 btrfs_release_path(root, path);
611 ret = btrfs_insert_empty_item(trans, root, path, &ins,
612 sizeof(*extent));
613 BUG_ON(ret);
615 leaf = path->nodes[0];
616 extent = btrfs_item_ptr(leaf, path->slots[0],
617 struct btrfs_file_extent_item);
618 write_extent_buffer(leaf, &old,
619 (unsigned long)extent, sizeof(old));
621 btrfs_set_file_extent_compression(leaf, extent,
622 compression);
623 btrfs_set_file_extent_encryption(leaf, extent,
624 encryption);
625 btrfs_set_file_extent_other_encoding(leaf, extent,
626 other_encoding);
627 btrfs_set_file_extent_offset(leaf, extent,
628 le64_to_cpu(old.offset) + end - key.offset);
629 WARN_ON(le64_to_cpu(old.num_bytes) <
630 (extent_end - end));
631 btrfs_set_file_extent_num_bytes(leaf, extent,
632 extent_end - end);
634 /*
635 * set the ram bytes to the size of the full extent
636 * before splitting. This is a worst case flag,
637 * but it's the best we can do because we don't know
638 * how splitting affects compression
639 */
640 btrfs_set_file_extent_ram_bytes(leaf, extent,
641 ram_bytes);
642 btrfs_set_file_extent_type(leaf, extent, found_type);
644 btrfs_mark_buffer_dirty(path->nodes[0]);
646 if (disk_bytenr != 0) {
647 ret = btrfs_update_extent_ref(trans, root,
648 disk_bytenr, orig_parent,
649 leaf->start,
650 root->root_key.objectid,
651 trans->transid, ins.objectid);
653 BUG_ON(ret);
655 btrfs_release_path(root, path);
656 if (disk_bytenr != 0) {
657 inode_add_bytes(inode, extent_end - end);
661 if (found_extent && !keep) {
662 u64 disk_bytenr = le64_to_cpu(old.disk_bytenr);
664 if (disk_bytenr != 0) {
665 inode_sub_bytes(inode,
666 le64_to_cpu(old.num_bytes));
667 ret = btrfs_free_extent(trans, root,
668 disk_bytenr,
669 le64_to_cpu(old.disk_num_bytes),
670 leaf_start, root_owner,
671 root_gen, key.objectid, 0);
672 BUG_ON(ret);
673 *hint_byte = disk_bytenr;
677 if (search_start >= end) {
678 ret = 0;
679 goto out;
682 out:
683 btrfs_free_path(path);
684 if (locked_end > end) {
685 unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
686 GFP_NOFS);
688 btrfs_check_file(root, inode);
689 return ret;
692 static int extent_mergeable(struct extent_buffer *leaf, int slot,
693 u64 objectid, u64 bytenr, u64 *start, u64 *end)
695 struct btrfs_file_extent_item *fi;
696 struct btrfs_key key;
697 u64 extent_end;
699 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
700 return 0;
702 btrfs_item_key_to_cpu(leaf, &key, slot);
703 if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
704 return 0;
706 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
707 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
708 btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
709 btrfs_file_extent_compression(leaf, fi) ||
710 btrfs_file_extent_encryption(leaf, fi) ||
711 btrfs_file_extent_other_encoding(leaf, fi))
712 return 0;
714 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
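/*
 * callers pass the expected start/end (or zero for "don't care"); only
 * report a match when the neighbouring extent lines up exactly
 */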
715 if ((*start && *start != key.offset) || (*end && *end != extent_end))
716 return 0;
718 *start = key.offset;
719 *end = extent_end;
720 return 1;
723 /*
724 * Mark extent in the range start - end as written.
725 *
726 * This changes the extent type from 'pre-allocated' to 'regular'. If only
727 * part of the extent is marked as written, the extent will be split into
728 * two or three.
729 */
730 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
731 struct btrfs_root *root,
732 struct inode *inode, u64 start, u64 end)
734 struct extent_buffer *leaf;
735 struct btrfs_path *path;
736 struct btrfs_file_extent_item *fi;
737 struct btrfs_key key;
738 u64 bytenr;
739 u64 num_bytes;
740 u64 extent_end;
741 u64 extent_offset;
742 u64 other_start;
743 u64 other_end;
744 u64 split = start;
745 u64 locked_end = end;
746 int extent_type;
747 int split_end = 1;
748 int ret;
750 btrfs_drop_extent_cache(inode, start, end - 1, 0);
752 path = btrfs_alloc_path();
753 BUG_ON(!path);
754 again:
755 key.objectid = inode->i_ino;
756 key.type = BTRFS_EXTENT_DATA_KEY;
757 if (split == start)
758 key.offset = split;
759 else
760 key.offset = split - 1;
762 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
763 if (ret > 0 && path->slots[0] > 0)
764 path->slots[0]--;
766 leaf = path->nodes[0];
767 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
768 BUG_ON(key.objectid != inode->i_ino ||
769 key.type != BTRFS_EXTENT_DATA_KEY);
770 fi = btrfs_item_ptr(leaf, path->slots[0],
771 struct btrfs_file_extent_item);
772 extent_type = btrfs_file_extent_type(leaf, fi);
773 BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
774 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
775 BUG_ON(key.offset > start || extent_end < end);
777 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
778 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
779 extent_offset = btrfs_file_extent_offset(leaf, fi);
781 if (key.offset == start)
782 split = end;
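/*
 * when the whole prealloc extent is being written, convert it in place
 * and try to merge with neighbouring extents that point at the same
 * disk bytenr
 */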
784 if (key.offset == start && extent_end == end) {
785 int del_nr = 0;
786 int del_slot = 0;
787 u64 leaf_owner = btrfs_header_owner(leaf);
788 u64 leaf_gen = btrfs_header_generation(leaf);
789 other_start = end;
790 other_end = 0;
791 if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
792 bytenr, &other_start, &other_end)) {
793 extent_end = other_end;
794 del_slot = path->slots[0] + 1;
795 del_nr++;
796 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
797 leaf->start, leaf_owner,
798 leaf_gen, inode->i_ino, 0);
799 BUG_ON(ret);
801 other_start = 0;
802 other_end = start;
803 if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
804 bytenr, &other_start, &other_end)) {
805 key.offset = other_start;
806 del_slot = path->slots[0];
807 del_nr++;
808 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
809 leaf->start, leaf_owner,
810 leaf_gen, inode->i_ino, 0);
811 BUG_ON(ret);
813 split_end = 0;
814 if (del_nr == 0) {
815 btrfs_set_file_extent_type(leaf, fi,
816 BTRFS_FILE_EXTENT_REG);
817 goto done;
820 fi = btrfs_item_ptr(leaf, del_slot - 1,
821 struct btrfs_file_extent_item);
822 btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
823 btrfs_set_file_extent_num_bytes(leaf, fi,
824 extent_end - key.offset);
825 btrfs_mark_buffer_dirty(leaf);
827 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
828 BUG_ON(ret);
829 goto done;
830 } else if (split == start) {
831 if (locked_end < extent_end) {
832 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
833 locked_end, extent_end - 1, GFP_NOFS);
834 if (!ret) {
835 btrfs_release_path(root, path);
836 lock_extent(&BTRFS_I(inode)->io_tree,
837 locked_end, extent_end - 1, GFP_NOFS);
838 locked_end = extent_end;
839 goto again;
841 locked_end = extent_end;
843 btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
844 extent_offset += split - key.offset;
845 } else {
846 BUG_ON(key.offset != start);
847 btrfs_set_file_extent_offset(leaf, fi, extent_offset +
848 split - key.offset);
849 btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
850 key.offset = split;
851 btrfs_set_item_key_safe(trans, root, path, &key);
852 extent_end = split;
855 if (extent_end == end) {
856 split_end = 0;
857 extent_type = BTRFS_FILE_EXTENT_REG;
859 if (extent_end == end && split == start) {
860 other_start = end;
861 other_end = 0;
862 if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
863 bytenr, &other_start, &other_end)) {
864 path->slots[0]++;
865 fi = btrfs_item_ptr(leaf, path->slots[0],
866 struct btrfs_file_extent_item);
867 key.offset = split;
868 btrfs_set_item_key_safe(trans, root, path, &key);
869 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
870 btrfs_set_file_extent_num_bytes(leaf, fi,
871 other_end - split);
872 goto done;
875 if (extent_end == end && split == end) {
876 other_start = 0;
877 other_end = start;
878 if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino,
879 bytenr, &other_start, &other_end)) {
880 path->slots[0]--;
881 fi = btrfs_item_ptr(leaf, path->slots[0],
882 struct btrfs_file_extent_item);
883 btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
884 other_start);
885 goto done;
889 btrfs_mark_buffer_dirty(leaf);
890 btrfs_release_path(root, path);
892 key.offset = start;
893 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
894 BUG_ON(ret);
896 leaf = path->nodes[0];
897 fi = btrfs_item_ptr(leaf, path->slots[0],
898 struct btrfs_file_extent_item);
899 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
900 btrfs_set_file_extent_type(leaf, fi, extent_type);
901 btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
902 btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
903 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
904 btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
905 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
906 btrfs_set_file_extent_compression(leaf, fi, 0);
907 btrfs_set_file_extent_encryption(leaf, fi, 0);
908 btrfs_set_file_extent_other_encoding(leaf, fi, 0);
910 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
911 leaf->start, root->root_key.objectid,
912 trans->transid, inode->i_ino);
913 BUG_ON(ret);
914 done:
915 btrfs_mark_buffer_dirty(leaf);
916 btrfs_release_path(root, path);
917 if (split_end && split == start) {
918 split = end;
919 goto again;
921 if (locked_end > end) {
922 unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
923 GFP_NOFS);
925 btrfs_free_path(path);
926 return 0;
929 /*
930 * this gets pages into the page cache and locks them down, it also properly
931 * waits for data=ordered extents to finish before allowing the pages to be
932 * modified.
933 */
934 static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
935 struct page **pages, size_t num_pages,
936 loff_t pos, unsigned long first_index,
937 unsigned long last_index, size_t write_bytes)
939 int i;
940 unsigned long index = pos >> PAGE_CACHE_SHIFT;
941 struct inode *inode = fdentry(file)->d_inode;
942 int err = 0;
943 u64 start_pos;
944 u64 last_pos;
946 start_pos = pos & ~((u64)root->sectorsize - 1);
947 last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
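/*
 * writing beyond the current i_size: expand the file first so the
 * range between the old EOF and start_pos is dealt with before any
 * pages are grabbed
 */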
949 if (start_pos > inode->i_size) {
950 err = btrfs_cont_expand(inode, start_pos);
951 if (err)
952 return err;
955 memset(pages, 0, num_pages * sizeof(struct page *));
956 again:
957 for (i = 0; i < num_pages; i++) {
958 pages[i] = grab_cache_page(inode->i_mapping, index + i);
959 if (!pages[i]) {
960 err = -ENOMEM;
961 BUG_ON(1);
963 wait_on_page_writeback(pages[i]);
965 if (start_pos < inode->i_size) {
966 struct btrfs_ordered_extent *ordered;
967 lock_extent(&BTRFS_I(inode)->io_tree,
968 start_pos, last_pos - 1, GFP_NOFS);
969 ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1);
970 if (ordered &&
971 ordered->file_offset + ordered->len > start_pos &&
972 ordered->file_offset < last_pos) {
973 btrfs_put_ordered_extent(ordered);
974 unlock_extent(&BTRFS_I(inode)->io_tree,
975 start_pos, last_pos - 1, GFP_NOFS);
976 for (i = 0; i < num_pages; i++) {
977 unlock_page(pages[i]);
978 page_cache_release(pages[i]);
980 btrfs_wait_ordered_range(inode, start_pos,
981 last_pos - start_pos);
982 goto again;
984 if (ordered)
985 btrfs_put_ordered_extent(ordered);
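/*
 * clear any stale dirty/delalloc bits in the io_tree for this range so
 * the pages start clean before the new write dirties them again
 */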
987 clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
988 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
989 GFP_NOFS);
990 unlock_extent(&BTRFS_I(inode)->io_tree,
991 start_pos, last_pos - 1, GFP_NOFS);
993 for (i = 0; i < num_pages; i++) {
994 clear_page_dirty_for_io(pages[i]);
995 set_page_extent_mapped(pages[i]);
996 WARN_ON(!PageLocked(pages[i]));
998 return 0;
1001 static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1002 size_t count, loff_t *ppos)
1004 loff_t pos;
1005 loff_t start_pos;
1006 ssize_t num_written = 0;
1007 ssize_t err = 0;
1008 int ret = 0;
1009 struct inode *inode = fdentry(file)->d_inode;
1010 struct btrfs_root *root = BTRFS_I(inode)->root;
1011 struct page **pages = NULL;
1012 int nrptrs;
1013 struct page *pinned[2];
1014 unsigned long first_index;
1015 unsigned long last_index;
1016 int will_write;
1018 will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
1019 (file->f_flags & O_DIRECT));
1021 nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
1022 PAGE_CACHE_SIZE / (sizeof(struct page *)));
1023 pinned[0] = NULL;
1024 pinned[1] = NULL;
1026 pos = *ppos;
1027 start_pos = pos;
1029 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1030 current->backing_dev_info = inode->i_mapping->backing_dev_info;
1031 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1032 if (err)
1033 goto out_nolock;
1034 if (count == 0)
1035 goto out_nolock;
1037 err = file_remove_suid(file);
1038 if (err)
1039 goto out_nolock;
1040 file_update_time(file);
1042 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1044 mutex_lock(&inode->i_mutex);
1045 first_index = pos >> PAGE_CACHE_SHIFT;
1046 last_index = (pos + count) >> PAGE_CACHE_SHIFT;
1048 /*
1049 * if this is a nodatasum mount, force summing off for the inode
1050 * all the time. That way a later mount with summing on won't
1051 * get confused
1052 */
1053 if (btrfs_test_opt(root, NODATASUM))
1054 btrfs_set_flag(inode, NODATASUM);
1056 /*
1057 * there are lots of better ways to do this, but this code
1058 * makes sure the first and last page in the file range are
1059 * up to date and ready for cow
1060 */
1061 if ((pos & (PAGE_CACHE_SIZE - 1))) {
1062 pinned[0] = grab_cache_page(inode->i_mapping, first_index);
1063 if (!PageUptodate(pinned[0])) {
1064 ret = btrfs_readpage(NULL, pinned[0]);
1065 BUG_ON(ret);
1066 wait_on_page_locked(pinned[0]);
1067 } else {
1068 unlock_page(pinned[0]);
1071 if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
1072 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
1073 if (!PageUptodate(pinned[1])) {
1074 ret = btrfs_readpage(NULL, pinned[1]);
1075 BUG_ON(ret);
1076 wait_on_page_locked(pinned[1]);
1077 } else {
1078 unlock_page(pinned[1]);
1082 while(count > 0) {
1083 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1084 size_t write_bytes = min(count, nrptrs *
1085 (size_t)PAGE_CACHE_SIZE -
1086 offset);
1087 size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
1088 PAGE_CACHE_SHIFT;
1090 WARN_ON(num_pages > nrptrs);
1091 memset(pages, 0, nrptrs * sizeof(struct page *));
1093 ret = btrfs_check_free_space(root, write_bytes, 0);
1094 if (ret)
1095 goto out;
1097 ret = prepare_pages(root, file, pages, num_pages,
1098 pos, first_index, last_index,
1099 write_bytes);
1100 if (ret)
1101 goto out;
1103 ret = btrfs_copy_from_user(pos, num_pages,
1104 write_bytes, pages, buf);
1105 if (ret) {
1106 btrfs_drop_pages(pages, num_pages);
1107 goto out;
1110 ret = dirty_and_release_pages(NULL, root, file, pages,
1111 num_pages, pos, write_bytes);
1112 btrfs_drop_pages(pages, num_pages);
1113 if (ret)
1114 goto out;
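/*
 * O_SYNC, O_DIRECT and sync-mounted writes are pushed to disk right
 * away; buffered writes just balance dirty pages and throttle the
 * btree so delalloc doesn't pile up
 */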
1116 if (will_write) {
1117 btrfs_fdatawrite_range(inode->i_mapping, pos,
1118 pos + write_bytes - 1,
1119 WB_SYNC_NONE);
1120 } else {
1121 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1122 num_pages);
1123 if (num_pages <
1124 (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1125 btrfs_btree_balance_dirty(root, 1);
1126 btrfs_throttle(root);
1129 buf += write_bytes;
1130 count -= write_bytes;
1131 pos += write_bytes;
1132 num_written += write_bytes;
1134 cond_resched();
1136 out:
1137 mutex_unlock(&inode->i_mutex);
1139 out_nolock:
1140 kfree(pages);
1141 if (pinned[0])
1142 page_cache_release(pinned[0]);
1143 if (pinned[1])
1144 page_cache_release(pinned[1]);
1145 *ppos = pos;
1147 if (num_written > 0 && will_write) {
1148 struct btrfs_trans_handle *trans;
1150 err = btrfs_wait_ordered_range(inode, start_pos, num_written);
1151 if (err)
1152 num_written = err;
1154 if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
1155 trans = btrfs_start_transaction(root, 1);
1156 ret = btrfs_log_dentry_safe(trans, root,
1157 file->f_dentry);
1158 if (ret == 0) {
1159 btrfs_sync_log(trans, root);
1160 btrfs_end_transaction(trans, root);
1161 } else {
1162 btrfs_commit_transaction(trans, root);
1165 if (file->f_flags & O_DIRECT) {
1166 invalidate_mapping_pages(inode->i_mapping,
1167 start_pos >> PAGE_CACHE_SHIFT,
1168 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
1171 current->backing_dev_info = NULL;
1172 return num_written ? num_written : err;
1175 int btrfs_release_file(struct inode * inode, struct file * filp)
1177 if (filp->private_data)
1178 btrfs_ioctl_trans_end(filp);
1179 return 0;
1182 /*
1183 * fsync call for both files and directories. This logs the inode into
1184 * the tree log instead of forcing full commits whenever possible.
1185 *
1186 * It needs to call filemap_fdatawait so that all ordered extent updates
1187 * in the metadata btree are up to date for copying to the log.
1188 *
1189 * It drops the inode mutex before doing the tree log commit. This is an
1190 * important optimization for directories because holding the mutex prevents
1191 * new operations on the dir while we write to disk.
1192 */
1193 int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1195 struct inode *inode = dentry->d_inode;
1196 struct btrfs_root *root = BTRFS_I(inode)->root;
1197 int ret = 0;
1198 struct btrfs_trans_handle *trans;
1200 /*
1201 * check the transaction that last modified this inode
1202 * and see if it's already been committed
1203 */
1204 if (!BTRFS_I(inode)->last_trans)
1205 goto out;
1207 mutex_lock(&root->fs_info->trans_mutex);
1208 if (BTRFS_I(inode)->last_trans <=
1209 root->fs_info->last_trans_committed) {
1210 BTRFS_I(inode)->last_trans = 0;
1211 mutex_unlock(&root->fs_info->trans_mutex);
1212 goto out;
1214 mutex_unlock(&root->fs_info->trans_mutex);
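/*
 * tree_log_batch is bumped before and after waiting for data writes;
 * btrfs_sync_log uses it to batch concurrent fsyncs into a single
 * tree log commit
 */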
1216 root->fs_info->tree_log_batch++;
1217 filemap_fdatawait(inode->i_mapping);
1218 root->fs_info->tree_log_batch++;
1220 /*
1221 * ok we haven't committed the transaction yet, let's do a commit
1222 */
1223 if (file->private_data)
1224 btrfs_ioctl_trans_end(file);
1226 trans = btrfs_start_transaction(root, 1);
1227 if (!trans) {
1228 ret = -ENOMEM;
1229 goto out;
1232 ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
1233 if (ret < 0) {
1234 goto out;
1237 /* we've logged all the items and now have a consistent
1238 * version of the file in the log. It is possible that
1239 * someone will come in and modify the file, but that's
1240 * fine because the log is consistent on disk, and we
1241 * have references to all of the file's extents
1242 *
1243 * It is possible that someone will come in and log the
1244 * file again, but that will end up using the synchronization
1245 * inside btrfs_sync_log to keep things safe.
1246 */
1247 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
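/*
 * a positive return from btrfs_log_dentry_safe means this inode can't
 * go through the tree log, so fall back to a full transaction commit;
 * otherwise write the log and end the transaction
 */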
1249 if (ret > 0) {
1250 ret = btrfs_commit_transaction(trans, root);
1251 } else {
1252 btrfs_sync_log(trans, root);
1253 ret = btrfs_end_transaction(trans, root);
1255 mutex_lock(&file->f_dentry->d_inode->i_mutex);
1256 out:
1257 return ret > 0 ? -EIO : ret;
1260 static struct vm_operations_struct btrfs_file_vm_ops = {
1261 .fault = filemap_fault,
1262 .page_mkwrite = btrfs_page_mkwrite,
1263 };
1265 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1267 vma->vm_ops = &btrfs_file_vm_ops;
1268 file_accessed(filp);
1269 return 0;
1272 struct file_operations btrfs_file_operations = {
1273 .llseek = generic_file_llseek,
1274 .read = do_sync_read,
1275 .aio_read = generic_file_aio_read,
1276 .splice_read = generic_file_splice_read,
1277 .write = btrfs_file_write,
1278 .mmap = btrfs_file_mmap,
1279 .open = generic_file_open,
1280 .release = btrfs_release_file,
1281 .fsync = btrfs_sync_file,
1282 .unlocked_ioctl = btrfs_ioctl,
1283 #ifdef CONFIG_COMPAT
1284 .compat_ioctl = btrfs_ioctl,
1285 #endif
1286 };