/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct file *file,
				struct page **pages,
				size_t num_pages,
				loff_t pos,
				size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 locked_end,
		       u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	u64 ram_bytes = 0;
	u64 disk_bytenr = 0;
	u64 orig_parent = 0;
	u64 orig_locked_end = locked_end;
	u64 leaf_start = 0;
	u64 root_gen = 0;
	u64 root_owner = 0;
	u8 compression = 0;
	u8 encryption = 0;
	u16 other_encoding = 0;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = max(key.offset, start);
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			leaf_start = leaf->start;
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			orig_parent = path->nodes[0]->start;
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					  disk_bytenr,
					  le64_to_cpu(old.disk_num_bytes),
					  orig_parent, root->root_key.objectid,
					  trans->transid, inode->i_ino);
				BUG_ON(ret);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			path->leave_spinning = 1;
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							  encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							  other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_unlock_up_safe(path, 1);
			btrfs_mark_buffer_dirty(path->nodes[0]);
			btrfs_set_lock_blocking(path->nodes[0]);

			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
					    disk_bytenr,
					    le64_to_cpu(old.disk_num_bytes),
					    orig_parent, leaf->start,
					    root->root_key.objectid,
					    trans->transid, ins.objectid);
				BUG_ON(ret);
			}
			path->leave_spinning = 0;
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				BUG_ON(ret);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > orig_locked_end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
			      locked_end - 1, GFP_NOFS);
	}
	return ret;
}
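/*
 * helper for btrfs_mark_extent_written: decide whether the file extent
 * item at 'slot' can be merged with the range being converted.  It only
 * says yes for an uncompressed, unencrypted regular extent pointing at
 * 'bytenr' whose boundaries line up with *start/*end, and fills in
 * *start and *end from the item it examined.
 */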
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * three extents.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 extent_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	u64 orig_parent;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		u64 leaf_owner = btrfs_header_owner(leaf);
		u64 leaf_gen = btrfs_header_generation(leaf);
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto done;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
		extent_offset += split - key.offset;
	} else {
		BUG_ON(key.offset != start);
		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
					     split - key.offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		key.offset = split;
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	orig_parent = leaf->start;
	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				   orig_parent, root->root_key.objectid,
				   trans->transid, inode->i_ino);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);

	if (orig_parent != leaf->start) {
		ret = btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
					      orig_parent, leaf->start,
					      root->root_key.objectid,
					      trans->transid, inode->i_ino);
		BUG_ON(ret);
	}
done:
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
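/*
 * buffered write path: reserve data space, prepare and lock the page
 * cache pages, copy the user buffer in, then mark the range dirty and
 * delalloc via dirty_and_release_pages.  O_SYNC/O_DIRECT writers also
 * get writeback (and possibly a tree log commit) before returning.
 */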
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					(size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_data_free_space(root, inode, write_bytes);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_ALL);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
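/*
 * called on the last fput of the file; ends any transaction still open
 * from an ioctl and may kick off a flush of recently written data (see
 * the ordered_data_close comment below).
 */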
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application was using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file && file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_sync_log(trans, root);
		if (ret == 0)
			ret = btrfs_end_transaction(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? EIO : ret;
}
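/*
 * mmap support: faults are served by the generic filemap code, while
 * btrfs_page_mkwrite is used so that pages dirtied through a mapping go
 * through the same delalloc accounting as ordinary writes.
 */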
static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}
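/*
 * file operations for regular btrfs files: reads use the generic paths,
 * writes go through btrfs_file_write above, and fsync logs the inode via
 * btrfs_sync_file instead of forcing a full transaction commit.
 */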
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};
,