/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);
	int total_copied = 0;

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];

		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
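
/*
 * Editorial sketch, not part of the original file: the caller-side
 * retry loop btrfs_copy_from_user() is designed for (compare
 * generic_perform_write() in mm/filemap.c).  ex_prepare() and
 * ex_release() are hypothetical stand-ins for prepare_pages() and
 * btrfs_drop_pages(); the iov_iter calls are the real interfaces
 * used above.
 */
#if 0
extern int ex_prepare(struct page **pages, int num_pages, loff_t pos);
extern void ex_release(struct page **pages, int num_pages);

static ssize_t ex_write_loop(struct iov_iter *i, loff_t pos,
			     struct page **pages, int num_pages)
{
	ssize_t total = 0;

	while (iov_iter_count(i) > 0) {
		size_t bytes = min_t(size_t, iov_iter_count(i),
				     (size_t)num_pages * PAGE_CACHE_SIZE);
		int copied;

		/* fault the user pages in before any page locks are taken */
		if (iov_iter_fault_in_readable(i, bytes))
			return total ? total : -EFAULT;

		if (ex_prepare(pages, num_pages, pos))
			break;
		copied = btrfs_copy_from_user(pos, num_pages, bytes, pages, i);
		ex_release(pages, num_pages);

		/* copied == 0 means a fault hit; loop to fault in and retry */
		pos += copied;
		total += copied;
	}
	return total;
}
#endif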
/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					NULL);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in ram, and we haven't updated
		 * the disk i_size.  There is no need to log the inode
		 * at this time.
		 */
	}
	return 0;
}
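
/*
 * Editorial worked example, not in the original file: with a 4096 byte
 * sectorsize, a 100 byte write at pos 5000 gives
 *
 *	start_pos         = 5000 & ~4095                       = 4096
 *	num_bytes         = (100 + 5000 - 4096 + 4095) & ~4095 = 4096
 *	end_of_last_block = 4096 + 4096 - 1                    = 8191
 *
 * i.e. the delalloc range is rounded out to whole sectors on both ends.
 */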
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
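
/*
 * Editorial sketch of the split performed above (not in the original
 * file).  Dropping [start, start + len - 1] out of a cached mapping
 * leaves at most two pieces, re-inserted as 'split' and 'split2':
 *
 *	em:      |--------------- cached extent ---------------|
 *	drop:              |---- start ... start+len ----|
 *	result:  |- split -|                             |-split2-|
 */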
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == inode->i_ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > inode->i_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);
			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
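
/*
 * Editorial usage note, not in the original file: *start and *end act
 * as in/out constraints, with zero meaning "don't care".  Callers
 * below seed them like this to ask whether the item in the next slot
 * begins exactly at 'end' (any end offset being acceptable):
 *
 *	other_start = end;
 *	other_end = 0;
 *	if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
 *			     bytenr, orig_offset,
 *			     &other_start, &other_end))
 *		extent_end = other_end;		(merge forward)
 */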
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   inode->i_ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}
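
/*
 * Editorial sketch, not in the original file: marking the middle of a
 * preallocated extent written splits it into three items; only the
 * middle one is converted to BTRFS_FILE_EXTENT_REG:
 *
 *	before:	|------------ prealloc ------------|
 *	write:	          |- start ... end -|
 *	after:	|prealloc-|---- regular ----|-prealloc|
 *
 * All three items still share one disk extent (bytenr/num_bytes),
 * which is why the split loop above takes an extra reference per
 * btrfs_duplicate_item() and the merge paths drop one.
 */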
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
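
/*
 * Editorial worked example, not in the original file: with 4K pages
 * and a 4K sectorsize, a write at pos 6144 spanning 3 pages gives
 *
 *	index     = 6144 >> PAGE_CACHE_SHIFT    = 1
 *	start_pos = 6144 & ~4095                = 4096
 *	last_pos  = (1 + 3) << PAGE_CACHE_SHIFT = 16384
 *
 * so the io_tree range locked above is [4096, 16383].
 */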
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *pinned[2];
	struct page **pages = NULL;
	struct iov_iter i;
	loff_t *ppos = &iocb->ki_pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count;
	size_t ocount;
	int ret = 0;
	int nrptrs;
	unsigned long first_index;
	unsigned long last_index;
	int will_write;
	int buffered = 0;
	int copied = 0;
	int dirty_pages = 0;

	will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	pinned[0] = NULL;
	pinned[1] = NULL;

	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		goto out;
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EROFS;
		goto out;
	}

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = generic_file_direct_write(iocb, iov, &nr_segs,
							pos, ppos, count,
							ocount);
		/*
		 * the generic O_DIRECT will update in-memory i_size after the
		 * DIOs are done.  But our endio handlers that update the on
		 * disk i_size never update past the in memory i_size.  So we
		 * need one more update here to catch any additions to the
		 * file
		 */
		if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
			btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
			mark_inode_dirty(inode);
		}

		if (num_written < 0) {
			ret = num_written;
			num_written = 0;
			goto out;
		} else if (num_written == count) {
			/* pick up pos changes done by the generic code */
			pos = *ppos;
			goto out;
		}
		/*
		 * We are going to do buffered for the rest of the range, so we
		 * need to make sure to invalidate the buffered pages when we're
		 * done.
		 */
		buffered = 1;
		pos += num_written;
	}

	iov_iter_init(&i, iov, nr_segs, count, num_written);
	nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	/* generic_write_checks can change our pos */
	start_pos = pos;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (iov_iter_count(&i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(&i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
			ret = -EFAULT;
			goto out;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			goto out;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, &i);
		dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
				PAGE_CACHE_SHIFT;

		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			dirty_and_release_pages(NULL, root, file, pages,
						dirty_pages, pos, copied);
		}

		btrfs_drop_pages(pages, num_pages);

		if (copied > 0) {
			if (will_write) {
				filemap_fdatawrite_range(inode->i_mapping, pos,
							 pos + copied - 1);
			} else {
				balance_dirty_pages_ratelimited_nr(
							inode->i_mapping,
							dirty_pages);
				if (dirty_pages <
				    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
					btrfs_btree_balance_dirty(root, 1);
				btrfs_throttle(root);
			}
		}

		pos += copied;
		num_written += copied;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				num_written = PTR_ERR(trans);
				goto done;
			}
			mutex_lock(&inode->i_mutex);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			mutex_unlock(&inode->i_mutex);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else if (ret != BTRFS_NO_LOG_SYNC) {
				btrfs_commit_transaction(trans, root);
			} else {
				btrfs_end_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT && buffered) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
done:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all the ordered extent
 * updates in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}

	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);

	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
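
/*
 * Editorial usage sketch, not part of the original file: this handler
 * backs fallocate(2) on btrfs, e.g. from userspace:
 *
 *	int fd = open("file", O_RDWR | O_CREAT, 0644);
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 5000, 3000);
 *
 * With a 4096 byte sectorsize the requested range is widened to whole
 * sectors before preallocation:
 *
 *	alloc_start = 5000 & ~4095                 = 4096
 *	alloc_end   = (5000 + 3000 + 4095) & ~4095 = 8192
 */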
const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};