/*
 * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2005 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <asm/page.h>
#include <asm/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32-bits. This is the most we can do for now without overflowing the page
 * cache page index. Doing it this way means we do not run into problems because
 * of existing too large files. It would be better to allow the user to read
 * the beginning of the file but I doubt very much anyone is going to hit this
 * check on a 32-bit architecture, so there is no point in adding the extra
 * complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EFBIG;
	}
	return generic_file_open(vi, filp);
}
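
/*
 * Illustrative note on the above limit (an explanatory sketch, not part of
 * the original source): with 4KiB pages and a 32-bit unsigned long page
 * index, MAX_LFS_FILESIZE evaluates to
 * (PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1, i.e. 2^43 - 1 bytes, the
 * largest i_size whose last byte still maps to a representable page cache
 * page index.
 */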
/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 * @cached_page:	store any allocated but unused page here
 * @lru_pvec:		lru-buffering pagevec of the caller
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes.  This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are zeroed in the page cache then these may
 * simply be marked dirty for later writeout).  There is one caveat and that is
 * that if any uptodate page cache pages between the old initialized size and
 * the smaller of @new_init_size and the file size (vfs inode->i_size) are in
 * memory, these need to be marked dirty without being zeroed since they could
 * be non-zero due to mmap() based writes.
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all.  This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped.  And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it.  And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all.  For a more detailed explanation see ntfs_truncate() which
 * is in fs/ntfs/inode.c.
 *
 * @cached_page and @lru_pvec are just optimisations for dealing with multiple
 * pages.
 *
 * Return 0 on success and -errno on error.  In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: This function locks the mft record of the base ntfs inode and
 * maintains the lock throughout execution of the function.  This is required
 * so that the initialized size of the attribute can be modified safely.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
		struct page **cached_page, struct pagevec *lru_pvec)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_CACHE_SHIFT;
	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	do {
		/*
		 * Read the page.  If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_cache_page(mapping, index,
				(filler_t*)mapping->a_ops->readpage, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page) || PageError(page))) {
			page_cache_release(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode.  This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (index + 1) << PAGE_CACHE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		page_cache_release(page);
		/*
		 * Play nice with the vm and the rest of the system.  This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or as in the above example more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages could optimize this workload by using
		 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit.  This
		 * would be set in readpage for sparse pages and here we would
		 * not need to mark dirty any pages which have this bit set.
		 * The only caveat is that we have to clear the bit everywhere
		 * where we allocate any clusters that lie in the page or that
		 * contain the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist.  This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed.  Returning error code %i.", err);
	return err;
}
/**
 * ntfs_fault_in_pages_readable -
 *
 * Fault a number of userspace pages into pagetables.
 *
 * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
 * with more than two userspace pages as well as handling the single page case
 * elegantly.
 *
 * If you find this difficult to understand, then think of the while loop being
 * the following code, except that we do without the integer variable ret:
 *
 *	do {
 *		ret = __get_user(c, uaddr);
 *		uaddr += PAGE_SIZE;
 *	} while (!ret && uaddr < end);
 *
 * Note, the final __get_user() may well run out-of-bounds of the user buffer,
 * but _not_ out-of-bounds of the page the user buffer belongs to, and since
 * this is only a read and not a write, and since it is still in the same page,
 * it should not matter and this makes the code much simpler.
 */
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
		int bytes)
{
	const char __user *end;
	volatile char c;

	/* Set @end to the first byte outside the last page we care about. */
	end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes);

	while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
		;
}
/**
 * ntfs_fault_in_pages_readable_iovec -
 *
 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
 */
static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
		size_t iov_ofs, int bytes)
{
	do {
		const char __user *buf;
		unsigned len;

		buf = iov->iov_base + iov_ofs;
		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		ntfs_fault_in_pages_readable(buf, len);
		bytes -= len;
		iov++;
		iov_ofs = 0;
	} while (bytes);
}
/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 * @lru_pvec:	lru-buffering pagevec of caller
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec @lru_pvec.
 *
 * This is the same as mm/filemap.c::__grab_cache_page(), except that @nr_pages
 * are obtained at once instead of just one page and that 0 is returned on
 * success and -errno on error.
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page, struct pagevec *lru_pvec)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_lock_page(mapping, index);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache(*cached_page, mapping, index,
					GFP_KERNEL);
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			page_cache_get(*cached_page);
			if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
				__pagevec_lru_add(lru_pvec);
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}
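
/*
 * Helper for read-before-write: lock the buffer, set up the standard
 * end_buffer_read_sync() completion handler and submit the buffer for
 * asynchronous read i/o.  Callers collect the submitted buffers (see the
 * wait array in ntfs_prepare_pages_for_non_resident_write() below) and wait
 * on them once all submissions have been issued.
 */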
static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}
/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_sem held on the inode (@pages[0]->mapping->host).  There are
 * @nr_pages pages in @pages which are locked but not kmap()ped.  The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	BOOL rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%x.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize_bits = vi->i_blkbits;
	blocksize = 1 << blocksize_bits;
	u = 0;
	do {
		struct page *page = pages[u];
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = FALSE;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = FALSE;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
	/*
	 * Loop over each page and for each page over each buffer.  Use goto to
	 * reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped.  If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate.  If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate.  If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					u8 *kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr + bh_offset(bh), 0,
							blocksize);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(page);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again.  The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer.  If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it.  On the other
			 * hand, if we allocated the buffer but we are not
			 * marking it dirty we set buffer_new so we can do
			 * proper error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated the buffer. */
					unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated.  If it is not
				 * uptodate and is only partially being written
				 * to, we need to read it in before the write,
				 * i.e. now.
				 */
				if (!buffer_uptodate(bh) && ((bh_pos < pos &&
						bh_end > pos) ||
						(bh_pos < end &&
						bh_end > end))) {
					/*
					 * If the buffer is fully or partially
					 * within the initialized size, do an
					 * actual read.  Otherwise, simply zero
					 * the buffer.
					 */
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						u8 *kaddr = kmap_atomic(page,
								KM_USER0);
						memset(kaddr + bh_offset(bh),
								0, blocksize);
						kunmap_atomic(kaddr, KM_USER0);
						flush_dcache_page(page);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated the buffer. */
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * If the buffer is fully outside the write, zero it,
			 * set it uptodate, and mark it dirty so it gets
			 * written out.  If it is partially being written to,
			 * zero region surrounding the write but leave it to
			 * commit write to do anything else.  Finally, if the
			 * buffer is fully being overwritten, do nothing.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					u8 *kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr + bh_offset(bh), 0,
							blocksize);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(page);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page, KM_USER0);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Slow path: this is the first buffer in the cluster.  If it
		 * is outside allocated size and is not uptodate, zero it and
		 * set it uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				u8 *kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + bh_offset(bh), 0, blocksize);
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = FALSE;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap, setup the map cache and
				 * use that to deal with the buffer.
				 */
				was_hole = FALSE;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the number of remaining clusters in the
				 * @pages is smaller or equal to the number of
				 * cached clusters, unlock the runlist as the
				 * map cache will be used from now on.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = FALSE;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * If it is not a hole and not out of bounds, the runlist is
		 * probably unmapped so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map runlist. */
				if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing, so if it is locked for
					 * reading relock it now and retry in
					 * case it changed whilst we dropped
					 * the lock.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = TRUE;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos);
				if (likely(!err)) {
					is_retry = TRUE;
					goto retry_remap;
				}
				/*
				 * If @vcn is out of bounds, pretend @lcn is
				 * LCN_ENOENT.  As long as the buffer is out
				 * of bounds this will work fine.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)(bh_pos &
					vol->cluster_size_mask),
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds.  We need to fill
		 * the hole, unless the buffer is in a cluster which is not
		 * touched by the write, in which case we just leave the buffer
		 * unmapped.  This can only happen when the cluster size is
		 * less than the page cache size.
		 */
		if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if ((bh_cend <= cpos || bh_cpos >= cend)) {
				bh->b_blocknr = -1;
				/*
				 * If the buffer is uptodate we skip it.  If it
				 * is not but the page is uptodate, we can set
				 * the buffer uptodate.  If the page is not
				 * uptodate, we can clear the buffer and set it
				 * uptodate.  Whether this is worthwhile is
				 * debatable and this could be removed.
				 */
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					u8 *kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr + bh_offset(bh), 0,
							blocksize);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(page);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * Out of bounds buffer is invalid if it was not really out of
		 * bounds.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * We need the runlist locked for writing, so if it is locked
		 * for reading relock it now and retry in case it changed
		 * whilst we dropped the lock.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = TRUE;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				FALSE);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.", lcn);
		/* Map and lock the mft record and get the attribute record. */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element with which the attribute extent
		 * starts.  Note, we cannot use the _attr_ version because we
		 * have mapped the mft record.  That is ok because we know the
		 * runlist fragment must be mapped already to have ever gotten
		 * here, so we can just use the _rl_ version.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If @highest_vcn is zero, calculate the real highest_vcn
		 * (which can really be zero).
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			// TODO: Deal with this by using the current attribute
			// and fill it with as much of the mapping pairs
			// array as possible.  Then loop over each attribute
			// extent rewriting the mapping pairs arrays as we go
			// along and if when we reach the end we have not
			// enough space, try to resize the last attribute
			// extent and if even that fails, add a new attribute
			// extent.
			// We could also try to resize at each step in the hope
			// that we will not need to rewrite every single extent.
			// Note, we may need to decompress some extents to fill
			// the runlist as we are walking the extents...
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record.  This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the attribute
		 * record.
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update the highest_vcn but only if it was not set. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * If the attribute is sparse/compressed, update the compressed
		 * size in the ntfs_inode structure and the attribute record.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If we are not in the first attribute extent, switch
			 * to it, but first ensure the changes will make it to
			 * disk later.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so do not set it. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Successfully filled the hole. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Setup the map cache and use that to deal with the buffer. */
		was_hole = TRUE;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * If the number of remaining clusters in the @pages is smaller
		 * or equal to the number of cached clusters, unlock the
		 * runlist as the map cache will be used from now on.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = FALSE;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if we took it. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = FALSE;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* If we issued read requests, let them complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, need
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				u8 *kaddr;
				unsigned ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + bh_offset(bh) + ofs, 0,
						blocksize - ofs);
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Clear buffer_new on all buffers. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path.  Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * The only thing that is now wrong is the compressed
			 * size of the base attribute extent which chkdsk
			 * should be able to fix.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * If the runlist has been modified, need to restore it by punching a
	 * hole into it and we then need to deallocate the on-disk cluster as
	 * well.  Note, we only modify the runlist if we are able to generate a
	 * new mapping pairs array, i.e. only when the mapped attribute extent
	 * is not switched.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse in the runlist. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path.  Run chkdsk to recover the "
					"lost cluster.");
			make_bad_inode(vi);
			make_bad_inode(VFS_I(base_ni));
			NVolSetErrors(vol);
		} else /* if (success) */ {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster we allocated but only
			 * if we succeeded in punching its vcn out of the
			 * runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Resize the attribute record to its old size and rebuild the mapping
	 * pairs array.  Note, we only can do this if the runlist has been
	 * restored to its old state which also implies that the mapped
	 * attribute extent is not switched.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path.  Run "
					"chkdsk to recover.");
			make_bad_inode(vi);
			make_bad_inode(VFS_I(base_ni));
			NVolSetErrors(vol);
		} else /* if (success) */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path.  Run chkdsk to "
						"recover.");
				make_bad_inode(vi);
				make_bad_inode(VFS_I(base_ni));
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated above
	 * and that it has not been fully zeroed and marked dirty yet.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					u8 *kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr + bh_offset(bh), 0,
							blocksize);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(page);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed.  Returning error code %i.", err);
	return err;
}
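
/*
 * The copy helpers below try the fast path first: __copy_from_user_inatomic()
 * under kmap_atomic(), which may not sleep and hence may fail on a source
 * page that is not faulted in.  If anything is left uncopied, they fall back
 * to the slow path, kmap() plus a regular __copy_from_user(), which is
 * allowed to sleep and fault the source pages in.
 */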
/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the pages
 * out to (ofs + bytes) and return the number of bytes which were copied.
 */
static inline size_t ntfs_copy_from_user(struct page **pages,
		unsigned nr_pages, unsigned ofs, const char __user *buf,
		size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *kaddr;
	size_t total = 0;
	unsigned len;
	int left;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		kaddr = kmap_atomic(*pages, KM_USER0);
		left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
		kunmap_atomic(kaddr, KM_USER0);
		if (unlikely(left)) {
			/* Do it the slow way. */
			kaddr = kmap(*pages);
			left = __copy_from_user(kaddr + ofs, buf, len);
			kunmap(*pages);
			if (unlikely(left))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		buf += len;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += len - left;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		kaddr = kmap_atomic(*pages, KM_USER0);
		memset(kaddr, 0, len);
		kunmap_atomic(kaddr, KM_USER0);
	}
	goto out;
}
static size_t __ntfs_copy_from_user_iovec(char *vaddr,
		const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
	size_t total = 0;

	while (1) {
		const char __user *buf = iov->iov_base + iov_ofs;
		unsigned len;
		size_t left;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		left = __copy_from_user_inatomic(vaddr, buf, len);
		total += len;
		bytes -= len;
		vaddr += len;
		if (unlikely(left)) {
			/*
			 * Zero the rest of the target like __copy_from_user().
			 */
			memset(vaddr, 0, bytes);
			total -= left;
			break;
		}
		if (!bytes)
			break;
		iov++;
		iov_ofs = 0;
	}
	return total;
}
static inline void ntfs_set_next_iovec(const struct iovec **iovp,
		size_t *iov_ofsp, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t iov_ofs = *iov_ofsp;

	while (bytes) {
		unsigned len;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		bytes -= len;
		iov_ofs += len;
		if (iov->iov_len == iov_ofs) {
			iov++;
			iov_ofs = 0;
		}
	}
	*iovp = iov;
	*iov_ofsp = iov_ofs;
}
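
/*
 * For example, advancing by 5000 bytes when the current iovec has 3000 bytes
 * remaining (iov->iov_len - iov_ofs == 3000) consumes the rest of that
 * segment and leaves *iovp pointing at the next iovec with *iov_ofsp == 2000.
 */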
/*
 * This has the same side-effects and return value as ntfs_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
 * single-segment behaviour.
 *
 * We call the same helper (__ntfs_copy_from_user_iovec()) both when atomic and
 * when not atomic.  This is ok because __ntfs_copy_from_user_iovec() calls
 * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
 * fact, the only difference between __copy_from_user_inatomic() and
 * __copy_from_user() is that the latter calls might_sleep().  And on many
 * architectures __copy_from_user_inatomic() is just defined to
 * __copy_from_user() so it makes no difference at all on those architectures.
 */
static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
		size_t *iov_ofs, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *kaddr;
	size_t copied, len, total = 0;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		kaddr = kmap_atomic(*pages, KM_USER0);
		copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
				*iov, *iov_ofs, len);
		kunmap_atomic(kaddr, KM_USER0);
		if (unlikely(copied != len)) {
			/* Do it the slow way. */
			kaddr = kmap(*pages);
			copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
					*iov, *iov_ofs, len);
			kunmap(*pages);
			if (unlikely(copied != len))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		ntfs_set_next_iovec(iov, iov_ofs, len);
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += copied;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		kaddr = kmap_atomic(*pages, KM_USER0);
		memset(kaddr, 0, len);
		kunmap_atomic(kaddr, KM_USER0);
	}
	goto out;
}
static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	do {
		/*
		 * Warning: Do not do the decrement at the same time as the
		 * call because flush_dcache_page() is a NULL macro on i386
		 * and hence the decrement never happens.  Also, decrement
		 * first so that the last valid index, nr_pages - 1, is the
		 * first one flushed and pages[nr_pages] is never accessed.
		 */
		--nr_pages;
		flush_dcache_page(pages[nr_pages]);
	} while (nr_pages > 0);
}
/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = 1 << vi->i_blkbits;
	end = pos + bytes;
	u = 0;
	do {
		s64 bh_pos;
		struct page *page = pages[u];
		BOOL partial;

		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
		bh = head = page_buffers(page);
		partial = FALSE;
		do {
			s64 bh_end;

			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = TRUE;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page is not, set the
		 * page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * Finally, if we do not need to update initialized_size or i_size we
	 * are finished.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * Update initialized_size/i_size as appropriate, both in the inode and
	 * the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM) {
		NVolSetErrors(ni->vol);
		make_bad_inode(VFS_I(base_ni));
		make_bad_inode(vi);
	}
	return err;
}
/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_sem held on the inode
 * (@pages[0]->mapping->host).  There are @nr_pages pages in @pages which are
 * locked but not kmap()ped.  The source data has already been copied into the
 * @page.  ntfs_prepare_pages_for_non_resident_write() has been called before
 * the data was copied (for non-resident attributes only) and it returned
 * success.
 *
 * Need to set uptodate and mark dirty all buffers within the boundary of the
 * write.  If all buffers in a page are uptodate we set the page uptodate, too.
 *
 * Setting the buffers dirty ensures that they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Finally, we need to update i_size and initialized_size as appropriate both
 * in the inode and the mft record.
 *
 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
 * page are uptodate, and updates i_size if the end of io is beyond i_size.  In
 * that case, it also marks the inode dirty.
 *
 * If things have gone as outlined in
 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes.  For resident
 * attributes we need to do the uptodate bringing here which we combine with
 * the copying into the mft record which means we save one atomic kmap.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_commit_pages_after_write(struct page **pages,
		const unsigned nr_pages, s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct page *page;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	u8 *kattr, *kaddr;
	unsigned long flags;
	u32 attr_len;
	int err;

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	page = pages[0];
	BUG_ON(!page);
	vi = page->mapping->host;
	ni = NTFS_I(vi);
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%x.",
			vi->i_ino, ni->type, page->index, nr_pages,
			(long long)pos, bytes);
	if (NInoNonResident(ni))
		return ntfs_commit_pages_after_non_resident_write(pages,
				nr_pages, pos, bytes);
	BUG_ON(nr_pages > 1);
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * sparse.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	BUG_ON(NInoNonResident(ni));
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	i_size = i_size_read(vi);
	BUG_ON(attr_len != i_size);
	BUG_ON(pos > attr_len);
	end = pos + bytes;
	BUG_ON(end > le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset));
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	kaddr = kmap_atomic(page, KM_USER0);
	/* Copy the received data from the page to the mft record. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	/* Update the attribute length if necessary. */
	if (end > attr_len) {
		attr_len = end;
		a->data.resident.value_length = cpu_to_le32(attr_len);
	}
	/*
	 * If the page is not uptodate, bring the out of bounds area(s)
	 * uptodate by copying data from the mft record to the page.
	 */
	if (!PageUptodate(page)) {
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
		/* Zero the region outside the end of the attribute value. */
		memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap_atomic(kaddr, KM_USER0);
	/* Update initialized_size/i_size if necessary. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	BUG_ON(end > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	BUG_ON(initialized_size != i_size);
	if (end > initialized_size) {
		unsigned long flags;

		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = end;
		i_size_write(vi, end);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory required to "
				"commit the write.");
		if (PageUptodate(page)) {
			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
					"dirty so the write will be retried "
					"later on by the VM.");
			/*
			 * Put the page on mapping->dirty_pages, but leave its
			 * buffers' dirty state as-is.
			 */
			__set_page_dirty_nobuffers(page);
			err = 0;
		} else
			ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
					"data has been lost.");
	} else {
		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
				"with error %i.", err);
		NVolSetErrors(ni->vol);
		make_bad_inode(VFS_I(base_ni));
		make_bad_inode(vi);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}
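
/*
 * Overview of the buffered write path below: for each chunk (one page, or
 * one cluster when the cluster size exceeds the page size and the attribute
 * is non-resident), ntfs_file_buffered_write() faults in the source user
 * page(s), grabs and locks the destination pages with
 * __ntfs_grab_cache_pages(), calls
 * ntfs_prepare_pages_for_non_resident_write() to map buffers and fill any
 * sparse holes, copies the user data in with ntfs_copy_from_user() or
 * ntfs_copy_from_user_iovec(), flushes the dcache, and finally marks buffers
 * dirty and updates the sizes via ntfs_commit_pages_after_write().
 */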
1819 * ntfs_file_buffered_write -
1821 * Locking: The vfs is holding ->i_sem on the inode.
1823 static ssize_t
ntfs_file_buffered_write(struct kiocb
*iocb
,
1824 const struct iovec
*iov
, unsigned long nr_segs
,
1825 loff_t pos
, loff_t
*ppos
, size_t count
)
1827 struct file
*file
= iocb
->ki_filp
;
1828 struct address_space
*mapping
= file
->f_mapping
;
1829 struct inode
*vi
= mapping
->host
;
1830 ntfs_inode
*ni
= NTFS_I(vi
);
1831 ntfs_volume
*vol
= ni
->vol
;
1832 struct page
*pages
[NTFS_MAX_PAGES_PER_CLUSTER
];
1833 struct page
*cached_page
= NULL
;
1834 char __user
*buf
= NULL
;
1838 unsigned long flags
;
1839 size_t bytes
, iov_ofs
;
1840 ssize_t status
, written
;
1843 struct pagevec lru_pvec
;
1845 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1846 "pos 0x%llx, count 0x%lx.",
1847 vi
->i_ino
, (unsigned)le32_to_cpu(ni
->type
),
1848 (unsigned long long)pos
, (unsigned long)count
);
1849 if (unlikely(!count
))
1851 BUG_ON(NInoMstProtected(ni
));
1853 * If the attribute is not an index root and it is encrypted or
1854 * compressed, we cannot write to it yet. Note we need to check for
1855 * AT_INDEX_ALLOCATION since this is the type of both directory and
1858 if (ni
->type
!= AT_INDEX_ALLOCATION
) {
1859 /* If file is encrypted, deny access, just like NT4. */
1860 if (NInoEncrypted(ni
)) {
1862 * Reminder for later: Encrypted files are _always_
1863 * non-resident so that the content can always be
1866 ntfs_debug("Denying write access to encrypted file.");
1869 if (NInoCompressed(ni
)) {
1870 /* Only unnamed $DATA attribute can be compressed. */
1871 BUG_ON(ni
->type
!= AT_DATA
);
1872 BUG_ON(ni
->name_len
);
1874 * Reminder for later: If resident, the data is not
1875 * actually compressed. Only on the switch to non-
1876 * resident does compression kick in. This is in
1877 * contrast to encrypted files (see above).
1879 ntfs_error(vi
->i_sb
, "Writing to compressed files is "
1880 "not implemented yet. Sorry.");
1885 * If a previous ntfs_truncate() failed, repeat it and abort if it
1888 if (unlikely(NInoTruncateFailed(ni
))) {
1889 down_write(&vi
->i_alloc_sem
);
1890 err
= ntfs_truncate(vi
);
1891 up_write(&vi
->i_alloc_sem
);
1892 if (err
|| NInoTruncateFailed(ni
)) {
1895 ntfs_error(vol
->sb
, "Cannot perform write to inode "
1896 "0x%lx, attribute type 0x%x, because "
1897 "ntfs_truncate() failed (error code "
1899 (unsigned)le32_to_cpu(ni
->type
), err
);
1903 /* The first byte after the write. */
1906 * If the write goes beyond the allocated size, extend the allocation
1907 * to cover the whole of the write, rounded up to the nearest cluster.
1909 read_lock_irqsave(&ni
->size_lock
, flags
);
1910 ll
= ni
->allocated_size
;
1911 read_unlock_irqrestore(&ni
->size_lock
, flags
);
1913 /* Extend the allocation without changing the data size. */
1914 ll
= ntfs_attr_extend_allocation(ni
, end
, -1, pos
);
1915 if (likely(ll
>= 0)) {
1917 /* If the extension was partial truncate the write. */
1919 ntfs_debug("Truncating write to inode 0x%lx, "
1920 "attribute type 0x%x, because "
1921 "the allocation was only "
1922 "partially extended.",
1923 vi
->i_ino
, (unsigned)
1924 le32_to_cpu(ni
->type
));
1930 read_lock_irqsave(&ni
->size_lock
, flags
);
1931 ll
= ni
->allocated_size
;
1932 read_unlock_irqrestore(&ni
->size_lock
, flags
);
1933 /* Perform a partial write if possible or fail. */
1935 ntfs_debug("Truncating write to inode 0x%lx, "
1936 "attribute type 0x%x, because "
1937 "extending the allocation "
1938 "failed (error code %i).",
1939 vi
->i_ino
, (unsigned)
1940 le32_to_cpu(ni
->type
), err
);
1944 ntfs_error(vol
->sb
, "Cannot perform write to "
1945 "inode 0x%lx, attribute type "
1946 "0x%x, because extending the "
1947 "allocation failed (error "
1948 "code %i).", vi
->i_ino
,
1950 le32_to_cpu(ni
->type
), err
);
1955 pagevec_init(&lru_pvec
, 0);
1958 * If the write starts beyond the initialized size, extend it up to the
1959 * beginning of the write and initialize all non-sparse space between
1960 * the old initialized size and the new one. This automatically also
1961 * increments the vfs inode->i_size to keep it above or equal to the
1964 read_lock_irqsave(&ni
->size_lock
, flags
);
1965 ll
= ni
->initialized_size
;
1966 read_unlock_irqrestore(&ni
->size_lock
, flags
);
1968 err
= ntfs_attr_extend_initialized(ni
, pos
, &cached_page
,
1971 ntfs_error(vol
->sb
, "Cannot perform write to inode "
1972 "0x%lx, attribute type 0x%x, because "
1973 "extending the initialized size "
1974 "failed (error code %i).", vi
->i_ino
,
1975 (unsigned)le32_to_cpu(ni
->type
), err
);
1981 * Determine the number of pages per cluster for non-resident
1985 if (vol
->cluster_size
> PAGE_CACHE_SIZE
&& NInoNonResident(ni
))
1986 nr_pages
= vol
->cluster_size
>> PAGE_CACHE_SHIFT
;
1987 /* Finally, perform the actual write. */
1989 if (likely(nr_segs
== 1))
1990 buf
= iov
->iov_base
;
1992 iov_ofs
= 0; /* Offset in the current iovec. */
1995 pgoff_t idx
, start_idx
;
1996 unsigned ofs
, do_pages
, u
;
1999 start_idx
= idx
= pos
>> PAGE_CACHE_SHIFT
;
2000 ofs
= pos
& ~PAGE_CACHE_MASK
;
2001 bytes
= PAGE_CACHE_SIZE
- ofs
;
2004 vcn
= pos
>> vol
->cluster_size_bits
;
2005 if (vcn
!= last_vcn
) {
2008 * Get the lcn of the vcn the write is in. If
2009 * it is a hole, need to lock down all pages in
2012 down_read(&ni
->runlist
.lock
);
2013 lcn
= ntfs_attr_vcn_to_lcn_nolock(ni
, pos
>>
2014 vol
->cluster_size_bits
, FALSE
);
2015 up_read(&ni
->runlist
.lock
);
2016 if (unlikely(lcn
< LCN_HOLE
)) {
2018 if (lcn
== LCN_ENOMEM
)
2021 ntfs_error(vol
->sb
, "Cannot "
2024 "attribute type 0x%x, "
2025 "because the attribute "
2027 vi
->i_ino
, (unsigned)
2028 le32_to_cpu(ni
->type
));
2031 if (lcn
== LCN_HOLE
) {
2032 start_idx
= (pos
& ~(s64
)
2033 vol
->cluster_size_mask
)
2034 >> PAGE_CACHE_SHIFT
;
2035 bytes
= vol
->cluster_size
- (pos
&
2036 vol
->cluster_size_mask
);
2037 do_pages
= nr_pages
;
2044 * Bring in the user page(s) that we will copy from _first_.
2045 * Otherwise there is a nasty deadlock on copying from the same
2046 * page(s) as we are writing to, without it/them being marked
2047 * up-to-date. Note, at present there is nothing to stop the
2048 * pages being swapped out between us bringing them into memory
2049 * and doing the actual copying.
2051 if (likely(nr_segs
== 1))
2052 ntfs_fault_in_pages_readable(buf
, bytes
);
2054 ntfs_fault_in_pages_readable_iovec(iov
, iov_ofs
, bytes
);
2055 /* Get and lock @do_pages starting at index @start_idx. */
2056 status
= __ntfs_grab_cache_pages(mapping
, start_idx
, do_pages
,
2057 pages
, &cached_page
, &lru_pvec
);
2058 if (unlikely(status
))
		/*
		 * For non-resident attributes, we need to fill any holes with
		 * actual clusters and ensure all buffers are mapped.  We also
		 * need to bring uptodate any buffers that are only partially
		 * being written to.
		 */
		if (NInoNonResident(ni)) {
			status = ntfs_prepare_pages_for_non_resident_write(
					pages, do_pages, pos, bytes);
			if (unlikely(status)) {
				loff_t i_size;

				do {
					unlock_page(pages[--do_pages]);
					page_cache_release(pages[do_pages]);
				} while (do_pages);
				/*
				 * The write preparation may have instantiated
				 * allocated space outside i_size.  Trim this
				 * off again.  We can ignore any errors in this
				 * case as we will just be wasting a bit of
				 * allocated space, which is not a disaster.
				 */
				i_size = i_size_read(vi);
				if (pos + bytes > i_size)
					vmtruncate(vi, i_size);
				break;
			}
		}
		u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
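		/*
		 * Example (values assumed): if a hole pulled start_idx back
		 * to the cluster start at page index 16 while pos lies in
		 * page 17, then u = 17 - 16 = 1 and the user data is copied
		 * starting at pages[1]; pages[0] is locked purely so that the
		 * whole cluster can be initialized consistently.
		 */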
		if (likely(nr_segs == 1)) {
			copied = ntfs_copy_from_user(pages + u, do_pages - u,
					ofs, buf, bytes);
			buf += copied;
		} else
			copied = ntfs_copy_from_user_iovec(pages + u,
					do_pages - u, ofs, &iov, &iov_ofs,
					bytes);
		ntfs_flush_dcache_pages(pages + u, do_pages - u);
		status = ntfs_commit_pages_after_write(pages, do_pages, pos,
				bytes);
		if (likely(!status)) {
			written += copied;
			count -= copied;
			pos += copied;
			if (unlikely(copied != bytes))
				status = -EFAULT;
		}
		do {
			unlock_page(pages[--do_pages]);
			mark_page_accessed(pages[do_pages]);
			page_cache_release(pages[do_pages]);
		} while (do_pages);
		if (unlikely(status))
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
err_out:
	*ppos = pos;
	if (cached_page)
		page_cache_release(cached_page);
	/* For now, when the user asks for O_SYNC, we actually give O_DSYNC. */
	if (likely(!status)) {
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(vi))) {
			if (!mapping->a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(vi, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}
	pagevec_lru_add(&lru_pvec);
	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
			written ? "written" : "status", (unsigned long)written,
			(long)status);
	return written ? written : status;
}
/**
 * ntfs_file_aio_write_nolock - write to a file; the caller must hold i_sem
 */
static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos;
	unsigned long seg;
	size_t count;		/* after file limit checks */
	ssize_t written, err;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (!seg)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}
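	/*
	 * Illustration of the wrap check above, assuming a 32-bit size_t:
	 * two segments of 0x7fffffff bytes each make count == 0xfffffffe,
	 * which is negative when viewed as an ssize_t, so the write is
	 * rejected with -EINVAL before any data is copied.
	 */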
	pos = *ppos;
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	/* We can write back this queue in page reclaim. */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (!count)
		goto out;
	err = remove_suid(file->f_dentry);
	if (err)
		goto out;
	inode_update_time(inode, 1);
	written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
			count);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/**
 * ntfs_file_aio_write - async write; takes i_sem around the nolock variant
 */
static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
		size_t count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
				   .iov_len = count };

	BUG_ON(iocb->ki_pos != pos);
	down(&inode->i_sem);
	ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
	up(&inode->i_sem);
	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
/**
 * ntfs_file_writev - write a vector of buffers to a file
 *
 * Basically the same as generic_file_writev() except that it ends up calling
 * ntfs_file_aio_write_nolock() instead of __generic_file_aio_write_nolock().
 */
static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct kiocb kiocb;
	ssize_t ret;

	down(&inode->i_sem);
	init_sync_kiocb(&kiocb, file);
	ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	up(&inode->i_sem);
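	/*
	 * By this point *ppos has been advanced past the written region, so
	 * *ppos - ret below recovers the start offset of the range that a
	 * synchronous (O_SYNC/IS_SYNC) write still needs to flush.
	 */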
	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
/**
 * ntfs_file_write - simple wrapper for ntfs_file_writev()
 */
static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct iovec local_iov = { .iov_base = (void __user *)buf,
				   .iov_len = count };

	return ntfs_file_writev(file, &local_iov, 1, ppos);
}
/**
 * ntfs_file_fsync - sync a file to disk
 * @filp:	file to be synced
 * @dentry:	dentry describing the file to sync
 * @datasync:	if non-zero only flush user data and not metadata
 *
 * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and msync
 * system calls.  This function is inspired by fs/buffer.c::file_fsync().
 *
 * If @datasync is false, write the mft record and all associated extent mft
 * records as well as the $DATA attribute and then sync the block device.
 *
 * If @datasync is true and the attribute is non-resident, we skip the writing
 * of the mft record and all associated extent mft records (this might still
 * happen due to the write_inode_now() call).
 *
 * Also, if @datasync is true, we do not wait on the inode to be written out
 * but we always wait on the page cache pages to be written out.
 *
 * Note: In the past @filp could be NULL so we ignore it as we don't need it
 * anyway.
 *
 * Locking: Caller must hold i_sem on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes associated
 * with this inode but since we have no simple way of getting to them we ignore
 * this problem for now.
 */
static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
		int datasync)
{
	struct inode *vi = dentry->d_inode;
	int err, ret = 0;
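
	/*
	 * Note that for resident attributes the data lives inside the mft
	 * record itself, so even a pure datasync must write the inode below;
	 * only non-resident attributes may skip the mft record here.
	 */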
	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
	BUG_ON(S_ISDIR(vi->i_mode));
	if (!datasync || !NInoNonResident(NTFS_I(vi)))
		ret = ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);
	/*
	 * NOTE: If we were to use mapping->private_list (see ext2 and
	 * fs/buffer.c) for dirty blocks then we could optimize the below to be
	 * sync_mapping_buffers(vi->i_mapping).
	 */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
				"%u.", datasync ? "data" : "", vi->i_ino,
				-ret);
	return ret;
}
#endif /* NTFS_RW */

struct file_operations ntfs_file_ops = {
	.llseek		= generic_file_llseek,	 /* Seek inside file. */
	.read		= generic_file_read,	 /* Read from file. */
	.aio_read	= generic_file_aio_read, /* Async read from file. */
	.readv		= generic_file_readv,	 /* Read from file. */
#ifdef NTFS_RW
	.write		= ntfs_file_write,	 /* Write to file. */
	.aio_write	= ntfs_file_aio_write,	 /* Async write to file. */
	.writev		= ntfs_file_writev,	 /* Write to file. */
	/*.release	= ,*/			 /* Last file is closed.  See
						    fs/ext2/file.c::
						    ext2_release_file() for
						    how to use this to discard
						    preallocated space for
						    write opened files. */
	.fsync		= ntfs_file_fsync,	 /* Sync a file to disk. */
	/*.aio_fsync	= ,*/			 /* Sync all outstanding async
						    i/o operations on a
						    kiocb. */
#endif /* NTFS_RW */
	/*.ioctl	= ,*/			 /* Perform function on the
						    mounted filesystem. */
	.mmap		= generic_file_mmap,	 /* Mmap file. */
	.open		= ntfs_file_open,	 /* Open file. */
	.sendfile	= generic_file_sendfile, /* Zero-copy data send with
						    the data source being on
						    the ntfs partition.  We do
						    not need to care about the
						    data destination. */
	/*.sendpage	= ,*/			 /* Zero-copy data send with
						    the data destination being
						    on the ntfs partition.  We
						    do not need to care about
						    the data source. */
};

struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
	.truncate	= ntfs_truncate_vfs,
	.setattr	= ntfs_setattr,
#endif /* NTFS_RW */
};

struct file_operations ntfs_empty_file_ops = {};

struct inode_operations ntfs_empty_inode_ops = {};