/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
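
/*
 * copy the user data into the locked, prepared pages.  Only the first
 * page may start at a nonzero offset; a fault while copying aborts the
 * whole write with -EFAULT.
 */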
static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
				struct page **prepared_pages,
				const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
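
/*
 * unlock and release pages pinned by prepare_pages().  The array may
 * be only partially filled, so stop at the first NULL entry.
 */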
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
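
/*
 * store the data from a single buffer_head directly inside the btree
 * leaf as an inline extent, instead of pointing at allocated blocks.
 * Runs in its own transaction; the data must be smaller than one page.
 */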
static int insert_inline_extent(struct btrfs_root *root, struct inode *inode,
				u64 offset, ssize_t size,
				struct buffer_head *bh)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	char *ptr, *kaddr;
	struct btrfs_trans_handle *trans;
	struct btrfs_file_extent_item *ei;
	u32 datasize;
	int err = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = offset;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	BUG_ON(size >= PAGE_CACHE_SIZE);
	datasize = btrfs_file_extent_calc_inline_size(size);

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
			    path->slots[0], struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(ei, trans->transid);
	btrfs_set_file_extent_type(ei,
				   BTRFS_FILE_EXTENT_INLINE);
	ptr = btrfs_file_extent_inline_start(ei);

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	btrfs_memcpy(root, path->nodes[0]->b_data,
		     ptr, kaddr + bh_offset(bh),
		     size);
	kunmap_atomic(kaddr, KM_USER0);
	btrfs_mark_buffer_dirty(path->nodes[0]);
fail:
	btrfs_free_path(path);
	ret = btrfs_end_transaction(trans, root);
	if (ret && !err)
		err = ret;
	mutex_unlock(&root->fs_info->fs_mutex);
	return err;
}
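
/*
 * after the data is copied, push each page into the filesystem: a
 * buffer still mapped to block 0 becomes an inline extent, anything
 * else goes through btrfs_commit_write().
 */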
static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int i;
	int offset;
	int err = 0;
	int ret;
	int this_write;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct buffer_head *bh;

	for (i = 0; i < num_pages; i++) {
		offset = pos & (PAGE_CACHE_SIZE - 1);
		this_write = min((size_t)PAGE_CACHE_SIZE - offset, write_bytes);
		/* FIXME, one block at a time */
		bh = page_buffers(pages[i]);

		if (buffer_mapped(bh) && bh->b_blocknr == 0) {
			ret = insert_inline_extent(root, inode,
				pages[i]->index << PAGE_CACHE_SHIFT,
				offset + this_write, bh);
		} else {
			ret = btrfs_commit_write(file, pages[i], offset,
						 offset + this_write);
		}
		if (ret) {
			err = ret;
			goto failed;
		}
		WARN_ON(this_write > write_bytes);
		write_bytes -= this_write;
		pos += this_write;
	}
failed:
	return err;
}
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 *hint_block)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_leaf *leaf;
	int slot;
	struct btrfs_file_extent_item *extent;
	u64 extent_end = 0;
	int keep;
	struct btrfs_file_extent_item old;
	struct btrfs_path *path;
	u64 search_start = start;
	int bookend;
	int found_type;
	int found_extent;
	int found_inline;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		extent = NULL;

		leaf = btrfs_buffer_leaf(path->nodes[0]);
		slot = path->slots[0];
		ret = 0;
		btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
		if (key.offset >= end || key.objectid != inode->i_ino) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY) {
			goto out;
		}
		if (search_start < key.offset)
			search_start = key.offset;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end = key.offset +
					(btrfs_file_extent_num_blocks(extent) <<
					 inode->i_blkbits);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf->items +
								  slot);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(
					btrfs_buffer_header(path->nodes[0]));
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		/* FIXME, there's only one inline extent allowed right now */
		if (found_inline) {
			u64 mask = root->blocksize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		if (end < extent_end && end >= key.offset) {
			if (found_extent) {
				u64 disk_blocknr =
					btrfs_file_extent_disk_blocknr(extent);
				u64 disk_num_blocks =
				      btrfs_file_extent_disk_num_blocks(extent);
				memcpy(&old, extent, sizeof(old));
				if (disk_blocknr != 0) {
					ret = btrfs_inc_extent_ref(trans, root,
						       disk_blocknr,
						       disk_num_blocks);
					BUG_ON(ret);
				}
			}
			WARN_ON(found_inline);
			bookend = 1;
		}

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->blocksize - 1));
			if (found_extent) {
				new_num = (start - key.offset) >>
					inode->i_blkbits;
				old_num = btrfs_file_extent_num_blocks(extent);
				*hint_block =
					btrfs_file_extent_disk_blocknr(extent);
				if (btrfs_file_extent_disk_blocknr(extent)) {
					inode->i_blocks -=
						(old_num - new_num) << 3;
				}
				btrfs_set_file_extent_num_blocks(extent,
								 new_num);
				btrfs_mark_buffer_dirty(path->nodes[0]);
			} else {
				WARN_ON(1);
			}
		}

		/* delete the entire extent */
		if (!keep) {
			u64 disk_blocknr = 0;
			u64 disk_num_blocks = 0;
			u64 extent_num_blocks = 0;
			if (found_extent) {
				disk_blocknr =
				      btrfs_file_extent_disk_blocknr(extent);
				disk_num_blocks =
				      btrfs_file_extent_disk_num_blocks(extent);
				extent_num_blocks =
				      btrfs_file_extent_num_blocks(extent);
				*hint_block =
				      btrfs_file_extent_disk_blocknr(extent);
			}
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			btrfs_release_path(root, path);
			extent = NULL;
			if (found_extent && disk_blocknr != 0) {
				inode->i_blocks -= extent_num_blocks << 3;
				ret = btrfs_free_extent(trans, root,
							disk_blocknr,
							disk_num_blocks, 0);
			}
			BUG_ON(ret);
			if (!bookend && search_start >= end) {
				ret = 0;
				goto out;
			}
			if (!bookend)
				continue;
		}

		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			ins.flags = 0;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			if (ret) {
				btrfs_print_leaf(root,
					   btrfs_buffer_leaf(path->nodes[0]));
				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu\n", ret, ins.objectid, ins.flags, ins.offset, start, end, key.offset, extent_end);
			}
			BUG_ON(ret);
			extent = btrfs_item_ptr(
				    btrfs_buffer_leaf(path->nodes[0]),
				    path->slots[0],
				    struct btrfs_file_extent_item);
			btrfs_set_file_extent_disk_blocknr(extent,
				    btrfs_file_extent_disk_blocknr(&old));
			btrfs_set_file_extent_disk_num_blocks(extent,
				    btrfs_file_extent_disk_num_blocks(&old));

			btrfs_set_file_extent_offset(extent,
				    btrfs_file_extent_offset(&old) +
				    ((end - key.offset) >> inode->i_blkbits));
			WARN_ON(btrfs_file_extent_num_blocks(&old) <
				(extent_end - end) >> inode->i_blkbits);
			btrfs_set_file_extent_num_blocks(extent,
				    (extent_end - end) >> inode->i_blkbits);
			btrfs_set_file_extent_type(extent,
						   BTRFS_FILE_EXTENT_REG);
			btrfs_set_file_extent_generation(extent,
				    btrfs_file_extent_generation(&old));
			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (btrfs_file_extent_disk_blocknr(&old) != 0) {
				inode->i_blocks +=
				      btrfs_file_extent_num_blocks(extent) << 3;
			}
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * this gets pages into the page cache and locks them down
 */
static int prepare_pages(struct btrfs_root *root,
			 struct file *file,
			 struct page **pages,
			 size_t num_pages,
			 loff_t pos,
			 unsigned long first_index,
			 unsigned long last_index,
			 size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = file->f_path.dentry->d_inode;
	int offset;
	int err = 0;
	int this_write;
	struct buffer_head *bh;
	struct buffer_head *head;
	loff_t isize = i_size_read(inode);
	struct btrfs_trans_handle *trans;
	u64 hint_block;
	u64 num_blocks;
	u64 alloc_extent_start;
	u64 start_pos;
	struct btrfs_key ins;

	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
	num_blocks = (write_bytes + pos - start_pos + root->blocksize - 1) >>
			inode->i_blkbits;

	memset(pages, 0, num_pages * sizeof(struct page *));

	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			goto failed_release;
		}
		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
		wait_on_page_writeback(pages[i]);
	}

	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		mutex_unlock(&root->fs_info->fs_mutex);
		goto failed_release;
	}
	btrfs_set_trans_block_group(trans, inode);
	/* FIXME blocksize != 4096 */
	inode->i_blocks += num_blocks << 3;
	hint_block = 0;

	/* FIXME...EIEIO, ENOSPC and more */

	/* step one, delete the existing extents in this range */
	/* FIXME blocksize != pagesize */
	if (start_pos < inode->i_size) {
		err = btrfs_drop_extents(trans, root, inode,
			start_pos, (pos + write_bytes + root->blocksize - 1) &
			~((u64)root->blocksize - 1), &hint_block);
		if (err) {
			btrfs_end_transaction(trans, root);
			goto out_unlock;
		}
	}

	/* insert any holes we need to create */
	if (inode->i_size < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->blocksize - 1;
		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
		hole_size >>= inode->i_blkbits;
		if (last_pos_in_file < start_pos) {
			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size);
		}
		if (err) {
			btrfs_end_transaction(trans, root);
			goto out_unlock;
		}
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	if (isize >= PAGE_CACHE_SIZE || pos + write_bytes < inode->i_size ||
	    pos + write_bytes - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		err = btrfs_alloc_extent(trans, root, inode->i_ino,
					 num_blocks, hint_block, (u64)-1,
					 &ins, 1);
		if (err) {
			btrfs_end_transaction(trans, root);
			mutex_unlock(&root->fs_info->fs_mutex);
			goto failed_truncate;
		}
		err = btrfs_insert_file_extent(trans, root, inode->i_ino,
					       start_pos, ins.objectid,
					       ins.offset, ins.offset);
		if (err) {
			btrfs_end_transaction(trans, root);
			mutex_unlock(&root->fs_info->fs_mutex);
			goto failed_truncate;
		}
	} else {
		ins.offset = 0;
		ins.objectid = 0;
	}
	BUG_ON(err);
	alloc_extent_start = ins.objectid;
	err = btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);

	for (i = 0; i < num_pages; i++) {
		offset = pos & (PAGE_CACHE_SIZE - 1);
		this_write = min((size_t)PAGE_CACHE_SIZE - offset, write_bytes);
		if (!page_has_buffers(pages[i])) {
			create_empty_buffers(pages[i],
					     root->fs_info->sb->s_blocksize,
					     (1 << BH_Uptodate));
		}
		head = page_buffers(pages[i]);
		bh = head;
		do {
			err = btrfs_map_bh_to_logical(root, bh,
						      alloc_extent_start);
			if (err)
				goto failed_truncate;
			bh = bh->b_this_page;
			if (alloc_extent_start)
				alloc_extent_start++;
		} while (bh != head);
		pos += this_write;
		WARN_ON(this_write > write_bytes);
		write_bytes -= this_write;
	}
	return 0;

failed_release:
	btrfs_drop_pages(pages, num_pages);
	return err;

failed_truncate:
	btrfs_drop_pages(pages, num_pages);
	if (pos > isize)
		vmtruncate(inode, isize);
	return err;

out_unlock:
	mutex_unlock(&root->fs_info->fs_mutex);
	goto failed_release;
}
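
/*
 * buffered write entry point: pin the first and last partial pages in
 * the range, then loop over the rest a batch of pages at a time.  Each
 * pass prepares the pages (dropping old extents, allocating new ones),
 * copies the user data in and releases the pages again.
 */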
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	size_t num_written = 0;
	int err = 0;
	int ret = 0;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;
	if (file->f_flags & O_DIRECT)
		return -EINVAL;
	pos = *ppos;
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (count == 0)
		goto out;
	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, nrptrs * sizeof(struct page *));
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			err = ret;
			goto out_unlock;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			err = ret;
			goto out_unlock;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			err = ret;
			goto out_unlock;
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
		btrfs_btree_balance_dirty(root);
		cond_resched();
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;
	current->backing_dev_info = NULL;
	mark_inode_dirty(inode);
	return num_written ? num_written : err;
}
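
/*
 * fsync is heavyweight here: every call commits the entire running
 * transaction (see the FIXME inside about skipping unneeded commits).
 */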
static int btrfs_sync_file(struct file *file,
			   struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * FIXME, use inode generation number to check if we can skip the
	 * commit
	 */
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		mutex_unlock(&root->fs_info->fs_mutex);
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_commit_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
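
/*
 * mmap uses the generic filemap fault paths; page_mkwrite is hooked so
 * writable faults go through the normal btrfs write preparation.
 */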
static struct vm_operations_struct btrfs_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.ioctl		= btrfs_ioctl,
	.fsync		= btrfs_sync_file,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
};