/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_acl.h>
#include <linux/reiserfs_xattr.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
/*
** We pack the tails of files on file close, not at the time they are written.
** This implies an unnecessary copy of the tail and an unnecessary indirect item
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed. If
** this is the last open file descriptor, and the file is
** small enough to have a tail, and the tail is currently in an
** unformatted node, the tail is converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{

	struct reiserfs_transaction_handle th;
	int err;
	int jbegin_failure = 0;

	if (!S_ISREG(inode->i_mode))
		BUG();

	/* fast out for when nothing needs to be done */
	if ((atomic_read(&inode->i_count) > 1 ||
	     !(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
	     !tail_has_to_be_packed(inode)) &&
	    REISERFS_I(inode)->i_prealloc_count <= 0) {
		return 0;
	}

	reiserfs_write_lock(inode->i_sb);
	mutex_lock(&inode->i_mutex);
	/* freeing preallocation only involves relogging blocks that
	 * are already in the current transaction. preallocation gets
	 * freed at the end of each transaction, so it is impossible for
	 * us to log any additional blocks (including quota blocks)
	 */
	err = journal_begin(&th, inode->i_sb, 1);
	if (err) {
		/* uh oh, we can't allow the inode to go away while there
		 * is still preallocation blocks pending. Try to join the
		 * aborted transaction
		 */
		jbegin_failure = err;
		err = journal_join_abort(&th, inode->i_sb, 1);

		if (err) {
			/* hmpf, our choices here aren't good. We can pin the inode
			 * which will disallow unmount from ever happening, we can
			 * do nothing, which will corrupt random memory on unmount,
			 * or we can forcibly remove the file from the preallocation
			 * list, which will leak blocks on disk. Let's pin the inode
			 * and let the admin know what is going on.
			 */
			igrab(inode);
			reiserfs_warning(inode->i_sb,
					 "pinning inode %lu because the "
					 "preallocation can't be freed",
					 inode->i_ino);
			goto out;
		}
	}
	reiserfs_update_inode_transaction(inode);

#ifdef REISERFS_PREALLOCATE
	reiserfs_discard_prealloc(&th, inode);
#endif
	err = journal_end(&th, inode->i_sb, 1);

	/* copy back the error code from journal_begin */
	if (!err)
		err = jbegin_failure;

	if (!err && atomic_read(&inode->i_count) <= 1 &&
	    (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
	    tail_has_to_be_packed(inode)) {
		/* if regular file is released by last holder and it has been
		   appended (we append by unformatted node only) or its direct
		   item(s) had to be converted, then it may have to be
		   indirect2direct converted */
		err = reiserfs_truncate_file(inode, 0);
	}
      out:
	mutex_unlock(&inode->i_mutex);
	reiserfs_write_unlock(inode->i_sb);
	return err;
}
static void reiserfs_vfs_truncate_file(struct inode *inode)
{
	reiserfs_truncate_file(inode, 1);
}
/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync. Can
 * be removed...
 */
static int reiserfs_sync_file(struct file *p_s_filp,
			      struct dentry *p_s_dentry, int datasync)
{
	struct inode *p_s_inode = p_s_dentry->d_inode;
	int n_err;
	int barrier_done;

	if (!S_ISREG(p_s_inode->i_mode))
		BUG();
	n_err = sync_mapping_buffers(p_s_inode->i_mapping);
	reiserfs_write_lock(p_s_inode->i_sb);
	barrier_done = reiserfs_commit_for_inode(p_s_inode);
	reiserfs_write_unlock(p_s_inode->i_sb);
	if (barrier_done != 1)
		blkdev_issue_flush(p_s_inode->i_sb->s_bdev, NULL);
	if (barrier_done < 0)
		return barrier_done;
	return (n_err < 0) ? -EIO : 0;
}
/* I really do not want to play with memory shortage right now, so
   to simplify the code, we are not going to write more than this many pages at
   a time. This still should considerably improve performance compared to the
   4k-at-a-time case. This is 32 pages of 4k size. */
#define REISERFS_WRITE_PAGES_AT_A_TIME (128 * 1024) / PAGE_CACHE_SIZE
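
/* A worked example, assuming the common 4k page size: with
 * PAGE_CACHE_SIZE == 4096 the macro above evaluates to
 * (128 * 1024) / 4096 = 32, i.e. at most 32 pages (128k of data)
 * are handled per iteration of the write loop. */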
/* Allocates blocks for a file to fulfil write request.
   Maps all unmapped but prepared pages from the list.
   Updates metadata with newly allocated blocknumbers as needed */
static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handle *th,
					       struct inode *inode,	/* Inode we work with */
					       loff_t pos,	/* Writing position */
					       int num_pages,	/* number of pages the write is
								   going to touch */
					       int write_bytes,	/* amount of bytes to write */
					       struct page **prepared_pages,	/* array of
										   prepared pages */
					       int blocks_to_allocate	/* Amount of blocks we
									   need to allocate to
									   fit the data into the
									   file */
    )
{
	struct cpu_key key;	// cpu key of item that we are going to deal with
	struct item_head *ih;	// pointer to item head that we are going to deal with
	struct buffer_head *bh;	// Buffer head that contains items that we are going to deal with
	__le32 *item;		// pointer to item we are going to deal with
	INITIALIZE_PATH(path);	// path to item, that we are going to deal with.
	b_blocknr_t *allocated_blocks;	// Pointer to a place where allocated blocknumbers would be stored.
	reiserfs_blocknr_hint_t hint;	// hint structure for block allocator.
	size_t res;		// return value of various functions that we call.
	int curr_block;		// current block used to keep track of unmapped blocks.
	int i;			// loop counter
	int itempos;		// position in item
	unsigned int from = (pos & (PAGE_CACHE_SIZE - 1));	// writing position in
	// first page
	unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1;	/* last modified byte offset in last page */
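	/* For illustration (not from the original source), assuming 4k pages:
	 * for pos = 10 and write_bytes = 100 the write stays in one page, so
	 * from = 10 & 4095 = 10 and to = ((10 + 100 - 1) & 4095) + 1 = 110;
	 * `from` is the first and `to` is one past the last modified byte
	 * offset within the first and last page respectively. */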
	__u64 hole_size;	// amount of blocks for a file hole, if it needed to be created.
	int modifying_this_item = 0;	// Flag for items traversal code to keep track
	// of the fact that we already prepared
	// current block for journal
	int will_prealloc = 0;
	RFALSE(!blocks_to_allocate,
	       "green-9004: tried to allocate zero blocks?");

	/* only preallocate if this is a small write */
	if (REISERFS_I(inode)->i_prealloc_count ||
	    (!(write_bytes & (inode->i_sb->s_blocksize - 1)) &&
	     blocks_to_allocate <
	     REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize))
		will_prealloc =
		    REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize;

	allocated_blocks = kmalloc((blocks_to_allocate + will_prealloc) *
				   sizeof(b_blocknr_t), GFP_NOFS);
	if (!allocated_blocks)
		return -ENOMEM;

	/* First we compose a key to point at the writing position, we want to do
	   that outside of any locking region. */
	make_cpu_key(&key, inode, pos + 1, TYPE_ANY, 3 /*key length */ );

	/* If we came here, it means we absolutely need to open a transaction,
	   since we need to allocate some blocks */
	reiserfs_write_lock(inode->i_sb);	// Journaling stuff and we need that.
	res = journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1 +
			    2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb));	// Wish I knew if this number is enough
	if (res)
		goto error_exit;
	reiserfs_update_inode_transaction(inode);

	/* Look for the in-tree position of our write, need path for block allocator */
	res = search_for_position_by_key(inode->i_sb, &key, &path);
	if (res == IO_ERROR) {
		res = -EIO;
		goto error_exit;
	}
	/* Allocate blocks */
	/* First fill in "hint" structure for block allocator */
	hint.th = th;		// transaction handle.
	hint.path = &path;	// Path, so that block allocator can determine packing locality or whatever it needs to determine.
	hint.inode = inode;	// Inode is needed by block allocator too.
	hint.search_start = 0;	// We have no hint on where to search free blocks for block allocator.
	hint.key = key.on_disk_key;	// on disk key of file.
	hint.block = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);	// Number of disk blocks this file occupies already.
	hint.formatted_node = 0;	// We are allocating blocks for unformatted node.
	hint.preallocate = will_prealloc;
	/* Call block allocator to allocate blocks */
	res = reiserfs_allocate_blocknrs(&hint, allocated_blocks,
					 blocks_to_allocate,
					 blocks_to_allocate);
	if (res != CARRY_ON) {
		if (res == NO_DISK_SPACE) {
			/* We flush the transaction in case of no space. This way some
			   blocks might become free */
			SB_JOURNAL(inode->i_sb)->j_must_wait = 1;
			res = restart_transaction(th, inode, &path);
			if (res)
				goto error_exit;

			/* We might have scheduled, so search again */
			res = search_for_position_by_key(inode->i_sb, &key,
							 &path);
			if (res == IO_ERROR) {
				res = -EIO;
				goto error_exit;
			}

			/* update changed info for hint structure. */
			res = reiserfs_allocate_blocknrs(&hint, allocated_blocks,
							 blocks_to_allocate,
							 blocks_to_allocate);
			if (res != CARRY_ON) {
				res = res == QUOTA_EXCEEDED ? -EDQUOT : -ENOSPC;
				pathrelse(&path);
				goto error_exit;
			}
		} else {
			res = res == QUOTA_EXCEEDED ? -EDQUOT : -ENOSPC;
			pathrelse(&path);
			goto error_exit;
		}
	}

	// Too bad, I have not found any way to convert a given region from
	// cpu format to little endian format
	for (i = 0; i < blocks_to_allocate; i++)
		allocated_blocks[i] = cpu_to_le32(allocated_blocks[i]);
	/* Blocks allocating well might have scheduled and tree might have changed,
	   let's search the tree again */
	/* find where in the tree our write should go */
	res = search_for_position_by_key(inode->i_sb, &key, &path);
	if (res == IO_ERROR) {
		res = -EIO;
		goto error_exit_free_blocks;
	}

	bh = get_last_bh(&path);	// Get a bufferhead for last element in path.
	ih = get_ih(&path);	// Get a pointer to last item head in path.
	item = get_item(&path);	// Get a pointer to last item in path

	/* Let's see what we have found */
	if (res != POSITION_FOUND) {	/* position not found, this means that we
					   might need to append file with holes
					   first */
		// Since we are writing past the file's end, we need to find out if
		// there is a hole that needs to be inserted before our writing
		// position, and how many blocks it is going to cover (we need to
		// populate pointers to file blocks representing the hole with zeros)

		{
			int item_offset = 1;
			/*
			 * if ih is stat data, its offset is 0 and we don't want to
			 * add 1 to pos in the hole_size calculation
			 */
			if (is_statdata_le_ih(ih))
				item_offset = 0;
			hole_size = (pos + item_offset -
				     (le_key_k_offset
				      (get_inode_item_key_version(inode),
				       &(ih->ih_key)) +
				      op_bytes_number(ih, inode->i_sb->s_blocksize)))
			    >> inode->i_sb->s_blocksize_bits;
		}
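
		/* A worked example, assuming 4k blocks: if the file's last
		 * item covers bytes 1..8192 (key offset 1, op_bytes_number()
		 * == 8192) and we write at pos == 20480, then hole_size =
		 * (20480 + 1 - (1 + 8192)) >> 12 = 12288 >> 12 = 3 blocks of
		 * hole, covering bytes 8192..20479. */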
		if (hole_size > 0) {
			int to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize) / UNFM_P_SIZE);	// How much data to insert first time.
			/* area filled with zeroes, to supply as list of zero blocknumbers
			   We allocate it outside of loop just in case loop would spin for
			   several iterations. */
			char *zeros = kmalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC);	// We cannot insert more than MAX_ITEM_LEN bytes anyway.
			if (!zeros) {
				res = -ENOMEM;
				goto error_exit_free_blocks;
			}
			memset(zeros, 0, to_paste * UNFM_P_SIZE);
			do {
				to_paste = min_t(__u64, hole_size,
						 MAX_ITEM_LEN(inode->i_sb->
							      s_blocksize) /
						 UNFM_P_SIZE);
				if (is_indirect_le_ih(ih)) {
					/* Ok, there is existing indirect item already. Need to append it */
					/* Calculate position past inserted item */
					make_cpu_key(&key, inode,
						     le_key_k_offset
						     (get_inode_item_key_version
						      (inode), &(ih->ih_key)) +
						     op_bytes_number(ih,
								     inode->i_sb->
								     s_blocksize),
						     TYPE_INDIRECT, 3);
					res = reiserfs_paste_into_item(th, &path,
								       &key, inode,
								       (char *)zeros,
								       UNFM_P_SIZE *
								       to_paste);
					if (res) {
						kfree(zeros);
						goto error_exit_free_blocks;
					}
				} else if (is_statdata_le_ih(ih)) {
					/* No existing item, create it */
					/* item head for new item */
					struct item_head ins_ih;

					/* create a key for our new item */
					make_cpu_key(&key, inode, 1,
						     TYPE_INDIRECT, 3);

					/* Create new item head for our new item */
					make_le_item_head(&ins_ih, &key,
							  key.version, 1,
							  TYPE_INDIRECT,
							  to_paste * UNFM_P_SIZE,
							  0 /* free space */ );

					/* Find where such item should live in the tree */
					res = search_item(inode->i_sb, &key,
							  &path);
					if (res != ITEM_NOT_FOUND) {
						/* item should not exist, otherwise we have error */
						if (res != -ENOSPC) {
							reiserfs_warning(inode->i_sb,
									 "green-9008: search_by_key (%K) returned %d",
									 &key, res);
						}
						res = -EIO;
						kfree(zeros);
						goto error_exit_free_blocks;
					}
					res = reiserfs_insert_item(th, &path,
								   &key, &ins_ih,
								   inode,
								   (char *)zeros);
				} else {
					reiserfs_panic(inode->i_sb,
						       "green-9011: Unexpected key type %K\n",
						       &key);
				}
				if (res) {
					kfree(zeros);
					goto error_exit_free_blocks;
				}
				/* Now we want to check if transaction is too full, and if it is
				   we restart it. This will also free the path. */
				if (journal_transaction_should_end
				    (th, th->t_blocks_allocated)) {
					res = restart_transaction(th, inode,
								  &path);
					if (res) {
						pathrelse(&path);
						kfree(zeros);
						goto error_exit;
					}
				}

				/* Well, need to recalculate path and stuff */
				set_cpu_key_k_offset(&key,
						     cpu_key_k_offset(&key) +
						     (to_paste << inode->i_blkbits));
				res = search_for_position_by_key(inode->i_sb,
								 &key, &path);
				if (res == IO_ERROR) {
					res = -EIO;
					kfree(zeros);
					goto error_exit_free_blocks;
				}
				bh = get_last_bh(&path);
				ih = get_ih(&path);
				item = get_item(&path);
				hole_size -= to_paste;
			} while (hole_size);
			kfree(zeros);
		}
	}
	// Go through existing indirect items first,
	// replace all zeroes with blocknumbers from list.
	// Note that if no corresponding item was found by the previous search,
	// it means there is no existing in-tree representation for the file area
	// we are going to overwrite, so there is nothing to scan through for holes.
	for (curr_block = 0, itempos = path.pos_in_item;
	     curr_block < blocks_to_allocate && res == POSITION_FOUND;) {
	      retry:

		if (itempos >= ih_item_len(ih) / UNFM_P_SIZE) {
			/* We ran out of data in this indirect item, let's look for another
			   one. */
			/* First if we are already modifying current item, log it */
			if (modifying_this_item) {
				journal_mark_dirty(th, inode->i_sb, bh);
				modifying_this_item = 0;
			}
			/* Then set the key to look for a new indirect item (offset of old
			   item is added to old item length) */
			set_cpu_key_k_offset(&key,
					     le_key_k_offset
					     (get_inode_item_key_version(inode),
					      &(ih->ih_key)) +
					     op_bytes_number(ih,
							     inode->i_sb->
							     s_blocksize));
			/* Search for position of new key in the tree. */
			res = search_for_position_by_key(inode->i_sb, &key,
							 &path);
			if (res == IO_ERROR) {
				res = -EIO;
				goto error_exit_free_blocks;
			}
			bh = get_last_bh(&path);
			ih = get_ih(&path);
			item = get_item(&path);
			itempos = path.pos_in_item;
			continue;	// loop to check all kinds of conditions and so on.
		}
		/* Ok, we have correct position in item now, so let's see if it is
		   representing file hole (blocknumber is zero) and fill it if needed */
		if (!item[itempos]) {
			/* Ok, a hole. Now we need to check if we already prepared this
			   block to be journaled */
			while (!modifying_this_item) {	// loop until succeed
				/* Well, this item is not journaled yet, so we must prepare
				   it for journal first, before we can change it */
				struct item_head tmp_ih;	// We copy item head of found item,
				// here to detect if fs changed under
				// us while we were preparing for
				// journal.
				int fs_gen;	// We store fs generation here to find if someone
				// changes fs under our feet

				copy_item_head(&tmp_ih, ih);	// Remember itemhead
				fs_gen = get_generation(inode->i_sb);	// remember fs generation
				reiserfs_prepare_for_journal(inode->i_sb, bh, 1);	// Prepare a buffer within which indirect item is stored for changing.
				if (fs_changed(fs_gen, inode->i_sb)
				    && item_moved(&tmp_ih, &path)) {
					// Sigh, fs was changed under us, we need to look for new
					// location of item we are working with

					/* unmark prepared area as journaled and search for its
					   new position */
					reiserfs_restore_prepared_buffer(inode->i_sb,
									 bh);
					res = search_for_position_by_key(inode->i_sb,
									 &key,
									 &path);
					if (res == IO_ERROR) {
						res = -EIO;
						goto error_exit_free_blocks;
					}
					bh = get_last_bh(&path);
					ih = get_ih(&path);
					item = get_item(&path);
					itempos = path.pos_in_item;
					goto retry;
				}
				modifying_this_item = 1;
			}
			item[itempos] = allocated_blocks[curr_block];	// Assign new block
			curr_block++;
		}
		itempos++;
	}

	if (modifying_this_item) {	// We need to log last-accessed block, if it
		// was modified, but not logged yet.
		journal_mark_dirty(th, inode->i_sb, bh);
	}
	if (curr_block < blocks_to_allocate) {
		// Oh well, we need to append to the indirect item, or to create
		// an indirect item if there wasn't any
		if (is_indirect_le_ih(ih)) {
			// Existing indirect item - append. First calculate key for append
			// position. We do not need to recalculate path as it should
			// already point to correct place.
			make_cpu_key(&key, inode,
				     le_key_k_offset(get_inode_item_key_version
						     (inode),
						     &(ih->ih_key)) +
				     op_bytes_number(ih,
						     inode->i_sb->s_blocksize),
				     TYPE_INDIRECT, 3);
			res = reiserfs_paste_into_item(th, &path, &key, inode,
						       (char *)(allocated_blocks +
								curr_block),
						       UNFM_P_SIZE *
						       (blocks_to_allocate -
							curr_block));
			if (res) {
				goto error_exit_free_blocks;
			}
		} else if (is_statdata_le_ih(ih)) {
			// Last found item was statdata. That means we need to create indirect item.
			struct item_head ins_ih;	/* itemhead for new item */

			/* create a key for our new item */
			make_cpu_key(&key, inode, 1, TYPE_INDIRECT, 3);	// Position one,
			// because that's where the first
			// unformatted pointer will be.

			/* Create new item head for our new item */
			make_le_item_head(&ins_ih, &key, key.version, 1,
					  TYPE_INDIRECT,
					  (blocks_to_allocate -
					   curr_block) * UNFM_P_SIZE,
					  0 /* free space */ );
			/* Find where such item should live in the tree */
			res = search_item(inode->i_sb, &key, &path);
			if (res != ITEM_NOT_FOUND) {
				/* Well, if we have found such item already, or some error
				   occurred, we need to warn user and return error */
				if (res != -ENOSPC) {
					reiserfs_warning(inode->i_sb,
							 "green-9009: search_by_key (%K) "
							 "returned %d", &key,
							 res);
				}
				res = -EIO;
				goto error_exit_free_blocks;
			}
			/* Insert item into the tree with the data as its body */
			res = reiserfs_insert_item(th, &path, &key, &ins_ih,
						   inode,
						   (char *)(allocated_blocks +
							    curr_block));
		} else {
			reiserfs_panic(inode->i_sb,
				       "green-9010: unexpected item type for key %K\n",
				       &key);
		}
	}
	// the caller is responsible for closing the transaction
	// unless we return an error, they are also responsible for logging
	// the inode.
	//
	pathrelse(&path);
	/*
	 * cleanup preallocation from previous writes
	 * if this is a partial block write
	 */
	if (write_bytes & (inode->i_sb->s_blocksize - 1))
		reiserfs_discard_prealloc(th, inode);
	reiserfs_write_unlock(inode->i_sb);
	// go through all the pages/buffers and map the buffers to newly allocated
	// blocks (so that system knows where to write these pages later).
	curr_block = 0;
	for (i = 0; i < num_pages; i++) {
		struct page *page = prepared_pages[i];	// current page
		struct buffer_head *head = page_buffers(page);	// first buffer for a page
		int block_start, block_end;	// in-page offsets for buffers.

		if (!page_buffers(page))
			reiserfs_panic(inode->i_sb,
				       "green-9005: No buffers for prepared page???");

		/* For each buffer in page */
		for (bh = head, block_start = 0; bh != head || !block_start;
		     block_start = block_end, bh = bh->b_this_page) {
			if (!bh)
				reiserfs_panic(inode->i_sb,
					       "green-9006: Allocated but absent buffer for a page?");
			block_end = block_start + inode->i_sb->s_blocksize;
			if (i == 0 && block_end <= from)
				/* if this buffer is before requested data to map, skip it */
				continue;
			if (i == num_pages - 1 && block_start >= to)
				/* If this buffer is after requested data to map, abort
				   processing of current page */
				break;

			if (!buffer_mapped(bh)) {	// Ok, unmapped buffer, need to map it
				map_bh(bh, inode->i_sb,
				       le32_to_cpu(allocated_blocks
						   [curr_block]));
				curr_block++;
				set_buffer_new(bh);
			}
		}
	}

	RFALSE(curr_block > blocks_to_allocate,
	       "green-9007: Used too many blocks? weird");

	kfree(allocated_blocks);
	return 0;
// Need to deal with transaction here.
      error_exit_free_blocks:
	pathrelse(&path);
	// free blocks
	for (i = 0; i < blocks_to_allocate; i++)
		reiserfs_free_block(th, inode, le32_to_cpu(allocated_blocks[i]),
				    1);

      error_exit:
	if (th->t_trans_id) {
		int err;
		// update any changes we made to blk count
		mark_inode_dirty(inode);
		err = journal_end(th, inode->i_sb,
				  JOURNAL_PER_BALANCE_CNT * 3 + 1 +
				  2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb));
		if (err)
			res = err;
	}
	reiserfs_write_unlock(inode->i_sb);
	kfree(allocated_blocks);

	return res;
}
/* Unlock pages prepared by reiserfs_prepare_file_region_for_write */
static void reiserfs_unprepare_pages(struct page **prepared_pages,	/* list of locked pages */
				     size_t num_pages /* amount of pages */ )
{
	int i;			// loop counter

	for (i = 0; i < num_pages; i++) {
		struct page *page = prepared_pages[i];

		try_to_free_buffers(page);
		unlock_page(page);
		page_cache_release(page);
	}
}
/* This function will copy data from userspace to specified pages within
   supplied byte range */
static int reiserfs_copy_from_user_to_file_region(loff_t pos,	/* In-file position */
						  int num_pages,	/* Number of pages affected */
						  int write_bytes,	/* Amount of bytes to write */
						  struct page **prepared_pages,	/* pointer to
										   array of
										   prepared pages */
						  const char __user * buf	/* Pointer to user-supplied
										   data */
    )
{
	long page_fault = 0;	// status of copy_from_user.
	int i;			// loop counter.
	int offset;		// offset in page

	for (i = 0, offset = (pos & (PAGE_CACHE_SIZE - 1)); i < num_pages;
	     i++, offset = 0) {
		size_t count = min_t(size_t, PAGE_CACHE_SIZE - offset, write_bytes);	// How many bytes to write to this page
		struct page *page = prepared_pages[i];	// Current page we process.

		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset, buf, count);	// Copy the data.
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		write_bytes -= count;
		buf += count;
		if (page_fault)
			break;	// Was there a fault? abort.
	}

	return page_fault ? -EFAULT : 0;
}
/* taken from fs/buffer.c:__block_commit_write */
int reiserfs_commit_page(struct inode *inode, struct page *page,
			 unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;
	unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
	int new;
	int logit = reiserfs_file_data_log(inode);
	struct super_block *s = inode->i_sb;
	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
	struct reiserfs_transaction_handle th;
	int ret = 0;

	th.t_trans_id = 0;
	blocksize = 1 << inode->i_blkbits;

	if (logit) {
		reiserfs_write_lock(s);
		ret = journal_begin(&th, s, bh_per_page + 1);
		if (ret)
			goto drop_write_lock;
		reiserfs_update_inode_transaction(inode);
	}
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		new = buffer_new(bh);
		clear_buffer_new(bh);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			if (logit) {
				reiserfs_prepare_for_journal(s, bh, 1);
				journal_mark_dirty(&th, s, bh);
			} else if (!buffer_dirty(bh)) {
				mark_buffer_dirty(bh);
				/* do data=ordered on any page past the end
				 * of file and any buffer marked BH_New.
				 */
				if (reiserfs_data_ordered(inode->i_sb) &&
				    (new || page->index >= i_size_index)) {
					reiserfs_add_ordered_list(inode, bh);
				}
			}
		}
	}
	if (logit) {
		ret = journal_end(&th, s, bh_per_page + 1);
	      drop_write_lock:
		reiserfs_write_unlock(s);
	}
	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return ret;
}
/* Submit pages for write. This was separated from actual file copying
   because we might want to allocate block numbers in-between.
   This function assumes that caller will adjust file size to correct value. */
static int reiserfs_submit_file_region_for_write(struct reiserfs_transaction_handle *th,
						 struct inode *inode,
						 loff_t pos,	/* Writing position offset */
						 size_t num_pages,	/* Number of pages to write */
						 size_t write_bytes,	/* number of bytes to write */
						 struct page **prepared_pages	/* list of pages */
    )
{
	int status;		// return status of block_commit_write.
	int retval = 0;		// Return value we are going to return.
	int i;			// loop counter
	int offset;		// Writing offset in page.
	int orig_write_bytes = write_bytes;
	int sd_update = 0;

	for (i = 0, offset = (pos & (PAGE_CACHE_SIZE - 1)); i < num_pages;
	     i++, offset = 0) {
		int count = min_t(int, PAGE_CACHE_SIZE - offset, write_bytes);	// How many bytes to write to this page
		struct page *page = prepared_pages[i];	// Current page we process.

		status = reiserfs_commit_page(inode, page, offset, offset + count);
		if (status)
			retval = status;	// To not overcomplicate matters we are going to
		// submit all the pages even if there was an error.
		// We only remember the error status to report it on
		// exit.
		write_bytes -= count;
	}
	/* now that we've gotten all the ordered buffers marked dirty,
	 * we can safely update i_size and close any running transaction
	 */
	if (pos + orig_write_bytes > inode->i_size) {
		inode->i_size = pos + orig_write_bytes;	// Set new size
		/* If the file has grown so much that tail packing is no
		 * longer possible, reset "need to pack" flag */
		if ((have_large_tails(inode->i_sb) &&
		     inode->i_size > i_block_size(inode) * 4) ||
		    (have_small_tails(inode->i_sb) &&
		     inode->i_size > i_block_size(inode)))
			REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
		else if ((have_large_tails(inode->i_sb) &&
			  inode->i_size < i_block_size(inode) * 4) ||
			 (have_small_tails(inode->i_sb) &&
			  inode->i_size < i_block_size(inode)))
			REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;

		if (th->t_trans_id) {
			reiserfs_write_lock(inode->i_sb);
			// this sets the proper flags for O_SYNC to trigger a commit
			mark_inode_dirty(inode);
			reiserfs_write_unlock(inode->i_sb);
		} else {
			reiserfs_write_lock(inode->i_sb);
			reiserfs_update_inode_transaction(inode);
			mark_inode_dirty(inode);
			reiserfs_write_unlock(inode->i_sb);
		}

		sd_update = 1;
	}
	if (th->t_trans_id) {
		reiserfs_write_lock(inode->i_sb);
		if (!sd_update)
			mark_inode_dirty(inode);
		status = journal_end(th, th->t_super, th->t_blocks_allocated);
		if (status)
			retval = status;
		reiserfs_write_unlock(inode->i_sb);
	}
	th->t_trans_id = 0;

	/*
	 * we have to unlock the pages after updating i_size, otherwise
	 * we race with writepage
	 */
	for (i = 0; i < num_pages; i++) {
		struct page *page = prepared_pages[i];
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
	return retval;
}
/* Look if passed writing region is going to touch file's tail
   (if it is present). And if it is, convert the tail to unformatted node */
static int reiserfs_check_for_tail_and_convert(struct inode *inode,	/* inode to deal with */
					       loff_t pos,	/* Writing position */
					       int write_bytes	/* amount of bytes to write */
    )
{
	INITIALIZE_PATH(path);	// needed for search_for_position
	struct cpu_key key;	// Key that would represent last touched writing byte.
	struct item_head *ih;	// item header of found block;
	int res;		// Return value of various functions we call.
	int cont_expand_offset;	// We will put offset for generic_cont_expand here
	// This can be int just because tails are created
	// only for small files.

	/* this embodies a dependency on a particular tail policy */
	if (inode->i_size >= inode->i_sb->s_blocksize * 4) {
		/* such big files do not have tails, so we won't bother ourselves
		   to look for tails, simply return */
		return 0;
	}

	reiserfs_write_lock(inode->i_sb);
	/* find the item containing the last byte to be written, or if
	 * writing past the end of the file then the last item of the
	 * file (and then we check its type). */
	make_cpu_key(&key, inode, pos + write_bytes + 1, TYPE_ANY,
		     3 /*key length */ );
	res = search_for_position_by_key(inode->i_sb, &key, &path);
	if (res == IO_ERROR) {
		reiserfs_write_unlock(inode->i_sb);
		return -EIO;
	}

	res = 0;
	ih = get_ih(&path);
	if (is_direct_le_ih(ih)) {
		/* Ok, closest item is file tail (tails are stored in "direct"
		 * items), so we need to unpack it. */
		/* To not overcomplicate matters, we just call generic_cont_expand
		   which will in turn call other stuff and finally will boil down to
		   reiserfs_get_block() that would do necessary conversion. */
		cont_expand_offset =
		    le_key_k_offset(get_inode_item_key_version(inode),
				    &(ih->ih_key));
		pathrelse(&path);
		res = generic_cont_expand(inode, cont_expand_offset);
	} else
		pathrelse(&path);

	reiserfs_write_unlock(inode->i_sb);
	return res;
}
/* This function locks pages starting from @pos for @inode.
   @num_pages pages are locked and stored in
   @prepared_pages array. Also buffers are allocated for these pages.
   First and last page of the region is read if it is overwritten only
   partially. If the last page did not exist before the write (file hole or
   file append), it is zeroed.
   Returns number of unallocated blocks that should be allocated to cover
   new file data. */
static int reiserfs_prepare_file_region_for_write(struct inode *inode
						  /* Inode of the file */ ,
						  loff_t pos,	/* position in the file */
						  size_t num_pages,	/* number of pages to
									   prepare */
						  size_t write_bytes,	/* Amount of bytes to be
									   overwritten from @pos */
						  struct page **prepared_pages	/* pointer to array
										   where to store
										   prepared pages */
    )
{
	int res = 0;		// Return values of different functions we call.
	unsigned long index = pos >> PAGE_CACHE_SHIFT;	// Offset in file in pages.
	int from = (pos & (PAGE_CACHE_SIZE - 1));	// Writing offset in first page
	int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1;
	/* offset of last modified byte in last
	   page */
	struct address_space *mapping = inode->i_mapping;	// Pages are mapped here.
	int i;			// Simple counter
	int blocks = 0;		/* Return value (blocks that should be allocated) */
	struct buffer_head *bh, *head;	// Current bufferhead and first bufferhead
	// of a page.
	unsigned block_start, block_end;	// Starting and ending offsets of current
	// buffer in the page.
	struct buffer_head *wait[2], **wait_bh = wait;	// Buffers for page, if
	// page appeared to be not up
	// to date. Note how we have
	// at most 2 buffers, this is
	// because we at most may
	// partially overwrite two
	// buffers for one page. One at
	// the beginning of write area
	// and one at the end.
	// Everything in the middle gets
	// overwritten totally.

	struct cpu_key key;	// cpu key of item that we are going to deal with
	struct item_head *ih = NULL;	// pointer to item head that we are going to deal with
	struct buffer_head *itembuf = NULL;	// Buffer head that contains items that we are going to deal with
	INITIALIZE_PATH(path);	// path to item, that we are going to deal with.
	__le32 *item = NULL;	// pointer to item we are going to deal with
	int item_pos = -1;	/* Position in indirect item */
	if (num_pages < 1) {
		reiserfs_warning(inode->i_sb,
				 "green-9001: reiserfs_prepare_file_region_for_write "
				 "called with zero number of pages to process");
		return -EFAULT;
	}

	/* We have 2 loops for pages. In first loop we grab and lock the pages, so
	   that nobody would touch these until we release the pages. Then
	   we'd start to deal with mapping buffers to blocks. */
	for (i = 0; i < num_pages; i++) {
		prepared_pages[i] = grab_cache_page(mapping, index + i);	// locks the page
		if (!prepared_pages[i]) {
			res = -ENOMEM;
			goto failed_page_grabbing;
		}
		if (!page_has_buffers(prepared_pages[i]))
			create_empty_buffers(prepared_pages[i],
					     inode->i_sb->s_blocksize, 0);
	}
	/* Let's count amount of blocks for a case where all the blocks
	   overwritten are new (we will subtract already allocated blocks later) */
	if (num_pages > 2)
		/* These are full-overwritten pages, so all the blocks in
		   these pages are counted as needed to be allocated */
		blocks =
		    (num_pages - 2) << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	/* count blocks needed for first page (possibly partially written) */
	blocks += ((PAGE_CACHE_SIZE - from) >> inode->i_blkbits) + !!(from & (inode->i_sb->s_blocksize - 1));	/* roundup */

	/* Now we account for last page. If last page == first page (we
	   overwrite only one page), we subtract all the blocks past the
	   last writing position in a page out of already calculated number
	   of blocks */
	blocks += ((num_pages > 1) << (PAGE_CACHE_SHIFT - inode->i_blkbits)) -
	    ((PAGE_CACHE_SIZE - to) >> inode->i_blkbits);
	/* Note how we do not roundup here since partial blocks still
	   should be allocated */
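
	/* A worked example, assuming 4k pages and 4k blocks (one buffer per
	 * page): for num_pages = 3, from = 100 and to = 2000 we get
	 * blocks = (3 - 2) << 0 = 1 for the middle page, then
	 * blocks += ((4096 - 100) >> 12) + 1 = 1 for the partially written
	 * first page, and blocks += (1 << 0) - ((4096 - 2000) >> 12) = 1
	 * for the last page, i.e. 3 blocks for the 3 touched pages. */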
	/* Now if all the write area lies past the file end, no point in
	   mapping blocks, since there is none, so we just zero out remaining
	   parts of first and last pages in write area (if needed) */
	if ((pos & ~((loff_t) PAGE_CACHE_SIZE - 1)) > inode->i_size) {
		if (from != 0) {	/* First page needs to be partially zeroed */
			char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
			memset(kaddr, 0, from);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (to != PAGE_CACHE_SIZE) {	/* Last page needs to be partially zeroed */
			char *kaddr =
			    kmap_atomic(prepared_pages[num_pages - 1],
					KM_USER0);
			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
			kunmap_atomic(kaddr, KM_USER0);
		}

		/* Since all blocks are new - use already calculated value */
		return blocks;
	}
	/* Well, since we write somewhere into the middle of a file, there is
	   possibility we are writing over some already allocated blocks, so
	   let's map these blocks and subtract the number of such blocks out of
	   the blocks we need to allocate (calculated above) */
	/* Mask write position to start on blocksize, we do it out of the
	   loop for performance reasons */
	pos &= ~((loff_t) inode->i_sb->s_blocksize - 1);
	/* Set cpu key to the starting position in a file (on left block boundary) */
	make_cpu_key(&key, inode,
		     1 + ((pos) & ~((loff_t) inode->i_sb->s_blocksize - 1)),
		     TYPE_ANY, 3 /*key length */ );
	reiserfs_write_lock(inode->i_sb);	// We need that for at least search_by_key()
	for (i = 0; i < num_pages; i++) {

		head = page_buffers(prepared_pages[i]);
		/* For each buffer in the page */
		for (bh = head, block_start = 0; bh != head || !block_start;
		     block_start = block_end, bh = bh->b_this_page) {
			if (!bh)
				reiserfs_panic(inode->i_sb,
					       "green-9002: Allocated but absent buffer for a page?");
			/* Find where this buffer ends */
			block_end = block_start + inode->i_sb->s_blocksize;
			if (i == 0 && block_end <= from)
				/* if this buffer is before requested data to map, skip it */
				continue;

			if (i == num_pages - 1 && block_start >= to) {
				/* If this buffer is after requested data to map, abort
				   processing of current page */
				break;
			}

			if (buffer_mapped(bh) && bh->b_blocknr != 0) {
				/* This is optimisation for a case where buffer is mapped
				   and has a blocknumber assigned. In case a significant amount
				   of such buffers are present, we may avoid some amount
				   of search_by_key calls.
				   Probably it would be possible to move parts of this code
				   out of BKL, but I'm afraid that would overcomplicate the code
				   without any noticeable benefit.
				 */
				item_pos++;
				/* Update the key */
				set_cpu_key_k_offset(&key,
						     cpu_key_k_offset(&key) +
						     inode->i_sb->s_blocksize);
				blocks--;	// Decrease the amount of blocks that need to be
				// allocated
				continue;	// Go to the next buffer
			}

			if (!itembuf ||	/* if first iteration */
			    item_pos >= ih_item_len(ih) / UNFM_P_SIZE) {	/* or if we progressed past the
										   current unformatted_item */
				/* Try to find next item */
				res = search_for_position_by_key(inode->i_sb,
								 &key, &path);
				/* Abort if no more items */
				if (res != POSITION_FOUND) {
					/* make sure later loops don't use this item */
					itembuf = NULL;
					item = NULL;
					break;
				}

				/* Update information about current indirect item */
				itembuf = get_last_bh(&path);
				ih = get_ih(&path);
				item = get_item(&path);
				item_pos = path.pos_in_item;

				RFALSE(!is_indirect_le_ih(ih),
				       "green-9003: indirect item expected");
			}

			/* See if there is some block associated with the file
			   at that position, map the buffer to this block */
			if (get_block_num(item, item_pos)) {
				map_bh(bh, inode->i_sb,
				       get_block_num(item, item_pos));
				blocks--;	// Decrease the amount of blocks that need to be
				// allocated
			}
			item_pos++;
			/* Update the key */
			set_cpu_key_k_offset(&key,
					     cpu_key_k_offset(&key) +
					     inode->i_sb->s_blocksize);
		}
	}
	pathrelse(&path);	// Free the path
	reiserfs_write_unlock(inode->i_sb);
	/* Now zero out unmapped buffers for the first and last pages of
	   write area or issue read requests if page is mapped. */
	/* First page, see if it is not uptodate */
	if (!PageUptodate(prepared_pages[0])) {
		head = page_buffers(prepared_pages[0]);

		/* For each buffer in page */
		for (bh = head, block_start = 0; bh != head || !block_start;
		     block_start = block_end, bh = bh->b_this_page) {

			if (!bh)
				reiserfs_panic(inode->i_sb,
					       "green-9002: Allocated but absent buffer for a page?");
			/* Find where this buffer ends */
			block_end = block_start + inode->i_sb->s_blocksize;
			if (block_end <= from)
				/* if this buffer is before requested data to map, skip it */
				continue;
			if (block_start < from) {	/* Aha, our partial buffer */
				if (buffer_mapped(bh)) {	/* If it is mapped, we need to
								   issue READ request for it to
								   not lose data */
					ll_rw_block(READ, 1, &bh);
					*wait_bh++ = bh;
				} else {	/* Not mapped, zero it */
					char *kaddr =
					    kmap_atomic(prepared_pages[0],
							KM_USER0);
					memset(kaddr + block_start, 0,
					       from - block_start);
					kunmap_atomic(kaddr, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
		}
	}

	/* Last page, see if it is not uptodate, or if the last page is past the end of the file. */
	if (!PageUptodate(prepared_pages[num_pages - 1]) ||
	    ((pos + write_bytes) >> PAGE_CACHE_SHIFT) >
	    (inode->i_size >> PAGE_CACHE_SHIFT)) {
		head = page_buffers(prepared_pages[num_pages - 1]);

		/* for each buffer in page */
		for (bh = head, block_start = 0; bh != head || !block_start;
		     block_start = block_end, bh = bh->b_this_page) {

			if (!bh)
				reiserfs_panic(inode->i_sb,
					       "green-9002: Allocated but absent buffer for a page?");
			/* Find where this buffer ends */
			block_end = block_start + inode->i_sb->s_blocksize;
			if (block_start >= to)
				/* if this buffer is after requested data to map, skip it */
				break;
			if (block_end > to) {	/* Aha, our partial buffer */
				if (buffer_mapped(bh)) {	/* If it is mapped, we need to
								   issue READ request for it to
								   not lose data */
					ll_rw_block(READ, 1, &bh);
					*wait_bh++ = bh;
				} else {	/* Not mapped, zero it */
					char *kaddr =
					    kmap_atomic(prepared_pages
							[num_pages - 1],
							KM_USER0);
					memset(kaddr + to, 0, block_end - to);
					kunmap_atomic(kaddr, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
		}
	}

	/* Wait for read requests we made to happen, if necessary */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh)) {
			res = -EIO;
			goto failed_read;
		}
	}

	return blocks;
      failed_page_grabbing:
	num_pages = i;
      failed_read:
	reiserfs_unprepare_pages(prepared_pages, num_pages);
	return res;
}
/* Write @count bytes at position @ppos in a file indicated by @file
   from the buffer @buf.

   generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
   something simple that works. It is not for serious use by general purpose filesystems, excepting the one that it was
   written for (ext2/3). This is for several reasons:

   * It has no understanding of any filesystem specific optimizations.

   * It enters the filesystem repeatedly for each page that is written.

   * It depends on reiserfs_get_block() function which if implemented by reiserfs performs costly search_by_key
   * operation for each page it is supplied with. By contrast reiserfs_file_write() feeds as much as possible at a time
   * to reiserfs which allows for fewer tree traversals.

   * Each indirect pointer insertion takes a lot of cpu, because it involves memory moves inside of blocks.

   * Asking the block allocation code for blocks one at a time is slightly less efficient.

   All of these reasons for not using only generic file write were understood back when reiserfs was first miscoded to
   use it, but we were in a hurry to make code freeze, and so it couldn't be revised then. This new code should make
   things right finally.

   Future Features: providing search_by_key with hints.
*/
static ssize_t
reiserfs_file_write(struct file *file,	/* the file we are going to write into */
		    const char __user * buf,	/* pointer to user supplied data
						   (in userspace) */
		    size_t count,	/* amount of bytes to write */
		    loff_t * ppos	/* pointer to position in file that we start writing at. Should be updated to
					 * new current position before returning. */
		    )
{
	size_t already_written = 0;	// Number of bytes already written to the file.
	loff_t pos;		// Current position in the file.
	ssize_t res;		// return value of various functions that we call.
	int err = 0;
	struct inode *inode = file->f_dentry->d_inode;	// Inode of the file that we are writing to.
	/* To simplify coding at this time, we store
	   locked pages in array for now */
	struct page *prepared_pages[REISERFS_WRITE_PAGES_AT_A_TIME];
	struct reiserfs_transaction_handle th;
	th.t_trans_id = 0;
	/* If a filesystem is converted from 3.5 to 3.6, we'll have v3.5 items
	 * lying around (most of the disk, in fact). Despite the filesystem
	 * now being a v3.6 format, the old items still can't support large
	 * file sizes. Catch this case here, as the rest of the VFS layer is
	 * oblivious to the different limitations between old and new items.
	 * reiserfs_setattr catches this for truncates. This chunk is lifted
	 * from generic_write_checks. */
	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
	    *ppos + count > MAX_NON_LFS) {
		if (*ppos >= MAX_NON_LFS) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		if (count > MAX_NON_LFS - (unsigned long)*ppos)
			count = MAX_NON_LFS - (unsigned long)*ppos;
	}
	if (file->f_flags & O_DIRECT) {	// Direct IO needs treatment
		ssize_t result, after_file_end = 0;
		if ((*ppos + count >= inode->i_size)
		    || (file->f_flags & O_APPEND)) {
			/* If we are appending a file, we need to put this savelink in here.
			   If we will crash while doing direct io, finish_unfinished will
			   cut the garbage from the file end. */
			reiserfs_write_lock(inode->i_sb);
			err = journal_begin(&th, inode->i_sb,
					    JOURNAL_PER_BALANCE_CNT);
			if (err) {
				reiserfs_write_unlock(inode->i_sb);
				return err;
			}
			reiserfs_update_inode_transaction(inode);
			add_save_link(&th, inode, 1 /* Truncate */ );
			after_file_end = 1;
			err = journal_end(&th, inode->i_sb,
					  JOURNAL_PER_BALANCE_CNT);
			reiserfs_write_unlock(inode->i_sb);
			if (err)
				return err;
		}
		result = generic_file_write(file, buf, count, ppos);

		if (after_file_end) {	/* Now update i_size and remove the savelink */
			struct reiserfs_transaction_handle th;
			reiserfs_write_lock(inode->i_sb);
			err = journal_begin(&th, inode->i_sb, 1);
			if (err) {
				reiserfs_write_unlock(inode->i_sb);
				return err;
			}
			reiserfs_update_inode_transaction(inode);
			mark_inode_dirty(inode);
			err = journal_end(&th, inode->i_sb, 1);
			if (err) {
				reiserfs_write_unlock(inode->i_sb);
				return err;
			}
			err = remove_save_link(inode, 1 /* truncate */ );
			reiserfs_write_unlock(inode->i_sb);
			if (err)
				return err;
		}

		return result;
	}
	if (unlikely((ssize_t) count < 0))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);	// locks the entire file for just us

	pos = *ppos;

	/* Check if we can write to specified region of file, file
	   is not overly big and this kind of stuff. Adjust pos and
	   count, if needed */
	res = generic_write_checks(file, &pos, &count, 0);
	if (res)
		goto out;

	if (count == 0)
		goto out;

	res = remove_suid(file->f_dentry);
	if (res)
		goto out;

	file_update_time(file);

	// Ok, we are done with all the checks.

	// Now we should start real work

	/* If we are going to write past the file's packed tail or if we are going
	   to overwrite part of the tail, we need that tail to be converted into
	   unformatted node */
	res = reiserfs_check_for_tail_and_convert(inode, pos, count);
	if (res)
		goto out;
	while (count > 0) {
		/* This is the main loop in which we run until some error occurs
		   or until we write all of the data. */
		size_t num_pages;	/* amount of pages we are going to write this iteration */
		size_t write_bytes;	/* amount of bytes to write during this iteration */
		size_t blocks_to_allocate;	/* how many blocks we need to allocate for this iteration */

		/* (pos & (PAGE_CACHE_SIZE-1)) is an idiom for offset into a page of pos */
		num_pages = !!((pos + count) & (PAGE_CACHE_SIZE - 1)) +	/* round up partial
									   pages */
		    ((count +
		      (pos & (PAGE_CACHE_SIZE - 1))) >> PAGE_CACHE_SHIFT);
		/* convert size to amount of
		   pages */
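
		/* A worked example, assuming 4k pages: for pos = 1000 and
		 * count = 10000 the write covers bytes 1000..10999, so
		 * num_pages = !!(11000 & 4095) + (11000 >> 12) = 1 + 2 = 3
		 * pages. */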
		reiserfs_write_lock(inode->i_sb);
		if (num_pages > REISERFS_WRITE_PAGES_AT_A_TIME
		    || num_pages > reiserfs_can_fit_pages(inode->i_sb)) {
			/* If we were asked to write more data than we want to or if there
			   is not that much space, then we shorten amount of data to write
			   for this iteration. */
			num_pages =
			    min_t(size_t, REISERFS_WRITE_PAGES_AT_A_TIME,
				  reiserfs_can_fit_pages(inode->i_sb));
			/* Also we should not forget to set size in bytes accordingly */
			write_bytes = (num_pages << PAGE_CACHE_SHIFT) -
			    (pos & (PAGE_CACHE_SIZE - 1));
			/* If position is not on the
			   start of the page, we need
			   to subtract the offset
			   within page */
		} else
			write_bytes = count;

		/* reserve the blocks to be allocated later, so that later on
		   we still have the space to write the blocks to */
		reiserfs_claim_blocks_to_be_allocated(inode->i_sb,
						      num_pages <<
						      (PAGE_CACHE_SHIFT -
						       inode->i_blkbits));
		reiserfs_write_unlock(inode->i_sb);

		if (!num_pages) {	/* If we do not have enough space even for a single page... */
			if (pos >
			    inode->i_size + inode->i_sb->s_blocksize -
			    (pos & (inode->i_sb->s_blocksize - 1))) {
				res = -ENOSPC;
				break;	// In case we are writing past the end of the last file block, break.
			}
			// Otherwise we are possibly overwriting the file, so
			// let's set write size to be equal or less than blocksize.
			// This way we get it correctly for file holes.
			// But overwriting files on absolutely full volumes would not
			// be very efficient. Well, people are not supposed to fill
			// 100% of disk space anyway.
			write_bytes =
			    min_t(size_t, count,
				  inode->i_sb->s_blocksize -
				  (pos & (inode->i_sb->s_blocksize - 1)));
			num_pages = 1;
			// No blocks were claimed before, so do it now.
			reiserfs_claim_blocks_to_be_allocated(inode->i_sb,
							      1 <<
							      (PAGE_CACHE_SHIFT
							       -
							       inode->i_blkbits));
		}
		/* Prepare for writing into the region, read in all the
		   partially overwritten pages, if needed. And lock the pages,
		   so that nobody else can access these until we are done.
		   We get number of actual blocks needed as a result. */
		res = reiserfs_prepare_file_region_for_write(inode, pos,
							     num_pages,
							     write_bytes,
							     prepared_pages);
		if (res < 0) {
			reiserfs_release_claimed_blocks(inode->i_sb,
							num_pages <<
							(PAGE_CACHE_SHIFT -
							 inode->i_blkbits));
			break;
		}

		blocks_to_allocate = res;

		/* First we correct our estimate of how many blocks we need */
		reiserfs_release_claimed_blocks(inode->i_sb,
						(num_pages <<
						 (PAGE_CACHE_SHIFT -
						  inode->i_sb->
						  s_blocksize_bits)) -
						blocks_to_allocate);

		if (blocks_to_allocate > 0) {	/* We only allocate blocks if we need to */
			/* Fill in all the possible holes and append the file if needed */
			res =
			    reiserfs_allocate_blocks_for_region(&th, inode, pos,
								num_pages,
								write_bytes,
								prepared_pages,
								blocks_to_allocate);
		}

		/* well, we have allocated the blocks, so it is time to free
		   the reservation we made earlier. */
		reiserfs_release_claimed_blocks(inode->i_sb,
						blocks_to_allocate);
		if (res) {
			reiserfs_unprepare_pages(prepared_pages, num_pages);
			break;
		}
/* NOTE that allocating blocks and filling blocks can be done in reverse order
   and probably we would do that just to get rid of garbage in files after a
   crash */

		/* Copy data from user-supplied buffer to file's pages */
		res =
		    reiserfs_copy_from_user_to_file_region(pos, num_pages,
							   write_bytes,
							   prepared_pages, buf);
		if (res) {
			reiserfs_unprepare_pages(prepared_pages, num_pages);
			break;
		}

		/* Send the pages to disk and unlock them. */
		res =
		    reiserfs_submit_file_region_for_write(&th, inode, pos,
							  num_pages,
							  write_bytes,
							  prepared_pages);
		if (res)
			break;

		already_written += write_bytes;
		buf += write_bytes;
		*ppos = pos += write_bytes;
		count -= write_bytes;
		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
	}
	/* this is only true on error */
	if (th.t_trans_id) {
		reiserfs_write_lock(inode->i_sb);
		err = journal_end(&th, th.t_super, th.t_blocks_allocated);
		reiserfs_write_unlock(inode->i_sb);
		if (err) {
			res = err;
			goto out;
		}
	}

	if (likely(res >= 0) &&
	    (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))))
		res = generic_osync_inode(inode, file->f_mapping,
					  OSYNC_METADATA | OSYNC_DATA);

	mutex_unlock(&inode->i_mutex);
	reiserfs_async_progress_wait(inode->i_sb);
	return (already_written != 0) ? already_written : res;

      out:
	mutex_unlock(&inode->i_mutex);	// unlock the file on exit.
	return res;
}
const struct file_operations reiserfs_file_operations = {
	.read = generic_file_read,
	.write = reiserfs_file_write,
	.ioctl = reiserfs_ioctl,
	.mmap = generic_file_mmap,
	.release = reiserfs_file_release,
	.fsync = reiserfs_sync_file,
	.sendfile = generic_file_sendfile,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
};
struct inode_operations reiserfs_file_inode_operations = {
	.truncate = reiserfs_vfs_truncate_file,
	.setattr = reiserfs_setattr,
	.setxattr = reiserfs_setxattr,
	.getxattr = reiserfs_getxattr,
	.listxattr = reiserfs_listxattr,
	.removexattr = reiserfs_removexattr,
	.permission = reiserfs_permission,
};