/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/capability.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>

#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>

#include "extent_map.h"
#include "refcounttree.h"

#include "buffer_head_io.h"

static int ocfs2_sync_inode(struct inode *inode)
{
        filemap_fdatawrite(inode->i_mapping);
        return sync_mapping_buffers(inode->i_mapping);
}

static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp;

        fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;

        mutex_init(&fp->fp_mutex);
        ocfs2_file_lock_res_init(&fp->fp_flock, fp);
        file->private_data = fp;

        return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp = file->private_data;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (fp) {
                ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
                ocfs2_lock_res_free(&fp->fp_flock);
                kfree(fp);
        }
        file->private_data = NULL;
}

static int ocfs2_file_open(struct inode *inode, struct file *file)
{
        int status;
        int mode = file->f_flags;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
                   file->f_path.dentry->d_name.len,
                   file->f_path.dentry->d_name.name);

        spin_lock(&oi->ip_lock);

        /* Check that the inode hasn't been wiped from disk by another
         * node. If it hasn't then we're safe as long as we hold the
         * spin lock until our increment of open count. */
        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
                spin_unlock(&oi->ip_lock);

                status = -ENOENT;
                goto leave;
        }

        if (mode & O_DIRECT)
                oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

        oi->ip_open_count++;
        spin_unlock(&oi->ip_lock);

        status = ocfs2_init_file_private(inode, file);
        if (status) {
                /*
                 * We want to set open count back if we're failing the
                 * open.
                 */
                spin_lock(&oi->ip_lock);
                oi->ip_open_count--;
                spin_unlock(&oi->ip_lock);
        }

leave:
        return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
                   file->f_path.dentry->d_name.len,
                   file->f_path.dentry->d_name.name);

        spin_lock(&oi->ip_lock);
        if (!--oi->ip_open_count)
                oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
        spin_unlock(&oi->ip_lock);

        ocfs2_free_file_private(inode, file);

        return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
        return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
        ocfs2_free_file_private(inode, file);
        return 0;
}

static int ocfs2_sync_file(struct file *file,
                           struct dentry *dentry,
                           int datasync)
{
        int err = 0;
        journal_t *journal;
        struct inode *inode = dentry->d_inode;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
                   dentry->d_name.len, dentry->d_name.name);

        err = ocfs2_sync_inode(dentry->d_inode);
        if (err)
                goto bail;

        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                goto bail;

        journal = osb->journal->j_journal;
        err = jbd2_journal_force_commit(journal);

bail:
        return (err < 0) ? -EIO : 0;
}

int ocfs2_should_update_atime(struct inode *inode,
                              struct vfsmount *vfsmnt)
{
        struct timespec now;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return 0;

        if ((inode->i_flags & S_NOATIME) ||
            ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        /*
         * We can be called with no vfsmnt structure - NFSD will
         * sometimes do this.
         *
         * Note that our action here is different than touch_atime() -
         * if we can't tell whether this is a noatime mount, then we
         * don't know whether to trust the value of s_atime_quantum.
         */
        if (vfsmnt == NULL)
                return 0;

        if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
            ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        if (vfsmnt->mnt_flags & MNT_RELATIME) {
                if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
                    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
                        return 1;

                return 0;
        }

        now = CURRENT_TIME;
        if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
                return 0;

        return 1;
}

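/*
 * Update the on-disk atime in its own small transaction.  With an
 * atime quantum of, say, 60 seconds, repeated reads of a file bump the
 * disk atime (and take the journal hit) at most once per minute;
 * anything inside the quantum window is filtered out by
 * ocfs2_should_update_atime() above.
 */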
int ocfs2_update_inode_atime(struct inode *inode,
                             struct buffer_head *bh)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /*
         * Don't use ocfs2_mark_inode_dirty() here as we don't always
         * have i_mutex to guard against concurrent changes to other
         * inode fields.
         */
        inode->i_atime = CURRENT_TIME;
        di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);

        ret = ocfs2_journal_dirty(handle, bh);
        if (ret < 0)
                mlog_errno(ret);

out_commit:
        ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
        return ret;
}

static int ocfs2_set_inode_size(handle_t *handle,
                                struct inode *inode,
                                struct buffer_head *fe_bh,
                                u64 new_i_size)
{
        int status;

        i_size_write(inode, new_i_size);
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
        if (status < 0)
                mlog_errno(status);

        return status;
}

int ocfs2_simple_size_update(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_set_inode_size(handle, inode, di_bh,
                                   new_i_size);
        if (ret < 0)
                mlog_errno(ret);

        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
                              struct buffer_head *fe_bh,
                              u64 offset)
{
        int status;
        u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        /*
         * If the new offset is aligned to the range of the cluster, there is
         * no space for ocfs2_zero_range_for_truncate to fill, so no need to
         * CoW either.
         */
        if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
                return 0;

        status = ocfs2_get_clusters(inode, cpos, &phys,
                                    &num_clusters, &ext_flags);
        if (status) {
                mlog_errno(status);
                goto out;
        }

        if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
                goto out;

        return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);

out:
        return status;
}

static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     struct buffer_head *fe_bh,
                                     u64 new_i_size)
{
        int status;
        handle_t *handle;
        struct ocfs2_dinode *di;
        u64 cluster_bytes;

        /*
         * We need to CoW the cluster that contains the offset if it is
         * reflinked since we will call ocfs2_zero_range_for_truncate later
         * which will write "0" from offset to the end of the cluster.
         */
        status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
        if (status) {
                mlog_errno(status);
                return status;
        }

        /* TODO: This needs to actually orphan the inode in this
         * transaction. */

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto out_commit;
        }

        /*
         * Do this before setting i_size.
         */
        cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
        status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
                                               cluster_bytes);
        if (status) {
                mlog_errno(status);
                goto out_commit;
        }

        i_size_write(inode, new_i_size);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        di = (struct ocfs2_dinode *) fe_bh->b_data;
        di->i_size = cpu_to_le64(new_i_size);
        di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
        di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

        status = ocfs2_journal_dirty(handle, fe_bh);
        if (status < 0)
                mlog_errno(status);

out_commit:
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}

static int ocfs2_truncate_file(struct inode *inode,
                               struct buffer_head *di_bh,
                               u64 new_i_size)
{
        int status = 0;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_truncate_context *tc = NULL;

        mlog_entry("(inode = %llu, new_i_size = %llu)\n",
                   (unsigned long long)OCFS2_I(inode)->ip_blkno,
                   (unsigned long long)new_i_size);

        /* We trust di_bh because it comes from ocfs2_inode_lock(), which
         * already validated it */
        fe = (struct ocfs2_dinode *) di_bh->b_data;

        mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
                        "Inode %llu, inode i_size = %lld != di "
                        "i_size = %llu, i_flags = 0x%x\n",
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        i_size_read(inode),
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        le32_to_cpu(fe->i_flags));

        if (new_i_size > le64_to_cpu(fe->i_size)) {
                mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
                     (unsigned long long)le64_to_cpu(fe->i_size),
                     (unsigned long long)new_i_size);
                status = -EINVAL;
                goto bail;
        }

        mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
             (unsigned long long)le64_to_cpu(fe->i_blkno),
             (unsigned long long)le64_to_cpu(fe->i_size),
             (unsigned long long)new_i_size);

        /* lets handle the simple truncate cases before doing any more
         * cluster locking. */
        if (new_i_size == le64_to_cpu(fe->i_size))
                goto bail;

        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        /*
         * The inode lock forced other nodes to sync and drop their
         * pages, which (correctly) happens even if we have a truncate
         * without allocation change - ocfs2 cluster sizes can be much
         * greater than page size, so we have to truncate them
         * anyway.
         */
        unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
        truncate_inode_pages(inode->i_mapping, new_i_size);

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
                                               i_size_read(inode), 1);
                if (status)
                        mlog_errno(status);

                goto bail_unlock_sem;
        }

        /* alright, we're going to need to do a full blown alloc size
         * change. Orphan the inode so that recovery can complete the
         * truncate if necessary. This does the task of marking
         * i_size. */
        status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        /* TODO: orphan dir cleanup here. */
bail_unlock_sem:
        up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
        return status;
}

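/*
 * Truncate, in short: drop the page cache beyond the new size, zero the
 * tail of the last remaining cluster inside a small transaction
 * (ocfs2_orphan_for_truncate), then give back the now-unused clusters
 * via ocfs2_prepare_truncate()/ocfs2_commit_truncate().
 */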
/*
 * extend file allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
                         struct inode *inode,
                         u32 *logical_offset,
                         u32 clusters_to_add,
                         int mark_unwritten,
                         struct buffer_head *fe_bh,
                         handle_t *handle,
                         struct ocfs2_alloc_context *data_ac,
                         struct ocfs2_alloc_context *meta_ac,
                         enum ocfs2_alloc_restarted *reason_ret)
{
        int ret;
        struct ocfs2_extent_tree et;

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
        ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
                                          clusters_to_add, mark_unwritten,
                                          data_ac, meta_ac, reason_ret);

        return ret;
}

static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
                                     u32 clusters_to_add, int mark_unwritten)
{
        int status = 0;
        int restart_func = 0;
        int credits, did_quota = 0;
        u32 prev_clusters;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        handle_t *handle = NULL;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        enum ocfs2_alloc_restarted why;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_extent_tree et;

        mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);

        /*
         * This function only exists for file systems which don't
         * support holes.
         */
        BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

        status = ocfs2_read_inode_block(inode, &bh);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
        BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

        mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
             "clusters_to_add = %u\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
             clusters_to_add);
        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
        status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
                                       &data_ac, &meta_ac);
        if (status) {
                mlog_errno(status);
                goto leave;
        }

        credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
                                            clusters_to_add);
        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
                mlog_errno(status);
                goto leave;
        }

restarted_transaction:
        if (vfs_dq_alloc_space_nodirty(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add))) {
                status = -EDQUOT;
                goto leave;
        }
        did_quota = 1;

        /* reserve a write to the file entry early on - that we if we
         * run out of credits in the allocation path, we can still
         * update i_size. */
        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        prev_clusters = OCFS2_I(inode)->ip_clusters;

        status = ocfs2_add_inode_data(osb, inode, &logical_start,
                                      clusters_to_add, mark_unwritten,
                                      bh, handle, data_ac, meta_ac, &why);
        if ((status < 0) && (status != -EAGAIN)) {
                if (status != -ENOSPC)
                        mlog_errno(status);
                goto leave;
        }

        status = ocfs2_journal_dirty(handle, bh);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        spin_lock(&OCFS2_I(inode)->ip_lock);
        clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
        spin_unlock(&OCFS2_I(inode)->ip_lock);
        /* Release unused quota reservation */
        vfs_dq_free_space(inode,
                          ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        did_quota = 0;

        if (why != RESTART_NONE && clusters_to_add) {
                if (why == RESTART_META) {
                        mlog(0, "restarting function.\n");
                        restart_func = 1;
                } else {
                        BUG_ON(why != RESTART_TRANS);

                        mlog(0, "restarting transaction.\n");
                        /* TODO: This can be more intelligent. */
                        credits = ocfs2_calc_extend_credits(osb->sb,
                                                            &fe->id2.i_list,
                                                            clusters_to_add);
                        status = ocfs2_extend_trans(handle, credits);
                        if (status < 0) {
                                /* handle still has to be committed at
                                 * this point. */
                                mlog_errno(status);
                                goto leave;
                        }
                        goto restarted_transaction;
                }
        }

        mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
             le32_to_cpu(fe->i_clusters),
             (unsigned long long)le64_to_cpu(fe->i_size));
        mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
             OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));

leave:
        if (status < 0 && did_quota)
                vfs_dq_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (handle) {
                ocfs2_commit_trans(osb, handle);
                handle = NULL;
        }
        if (data_ac) {
                ocfs2_free_alloc_context(data_ac);
                data_ac = NULL;
        }
        if (meta_ac) {
                ocfs2_free_alloc_context(meta_ac);
                meta_ac = NULL;
        }
        if ((!status) && restart_func) {
                restart_func = 0;
                goto restart_all;
        }
        brelse(bh);

        return status;
}

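/*
 * The restart handling above mirrors what ocfs2_add_clusters_in_btree()
 * reports back: RESTART_TRANS means the handle simply ran low on journal
 * credits, so only the transaction is extended and we re-enter at
 * restarted_transaction; RESTART_META means the metadata reservation was
 * exhausted, so the whole function restarts from restart_all and takes
 * fresh allocation contexts.
 */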
/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode,
                                 u64 size)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        unsigned long index;
        unsigned int offset;
        handle_t *handle = NULL;
        int ret;

        offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
        /* ugh.  in prepare/commit_write, if from==to==start of block, we
        ** skip the prepare.  make sure we never send an offset for the start
        ** of a block
        */
        if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
                offset++;
        }
        index = size >> PAGE_CACHE_SHIFT;

        page = grab_cache_page(mapping, index);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_unlock;
        }

        if (ocfs2_should_order_data(inode)) {
                handle = ocfs2_start_walk_page_trans(inode, page, offset,
                                                     offset);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        handle = NULL;
                        goto out_unlock;
                }
        }

        /* must not update i_size! */
        ret = block_commit_write(page, offset, offset);
        if (ret < 0)
                mlog_errno(ret);
        else
                ret = 0;

        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out_unlock:
        unlock_page(page);
        page_cache_release(page);
out:
        return ret;
}

static int ocfs2_zero_extend(struct inode *inode,
                             u64 zero_to_size)
{
        int ret = 0;
        u64 start_off;
        struct super_block *sb = inode->i_sb;

        start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
        while (start_off < zero_to_size) {
                ret = ocfs2_write_zero_page(inode, start_off);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                start_off += sb->s_blocksize;

                /*
                 * Very large extends have the potential to lock up
                 * the cpu for extended periods of time.
                 */
                cond_resched();
        }

out:
        return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
{
        int ret;
        u32 clusters_to_add;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
        if (clusters_to_add < oi->ip_clusters)
                clusters_to_add = 0;
        else
                clusters_to_add -= oi->ip_clusters;

        if (clusters_to_add) {
                ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
                                                clusters_to_add, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * Call this even if we don't add any clusters to the tree. We
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
        ret = ocfs2_zero_extend(inode, zero_to);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

static int ocfs2_extend_file(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret = 0;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        /* setattr sometimes calls us like this. */
        if (new_i_size == 0)
                goto out;

        if (i_size_read(inode) == new_i_size)
                goto out;
        BUG_ON(new_i_size < i_size_read(inode));

        /*
         * Fall through for converting inline data, even if the fs
         * supports sparse files.
         *
         * The check for inline data here is legal - nobody can add
         * the feature since we have i_mutex. We must check it again
         * after acquiring ip_alloc_sem though, as paths like mmap
         * might have raced us to converting the inode to extents.
         */
        if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
            && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                goto out_update_size;

        /*
         * The alloc sem blocks people in read/write from reading our
         * allocation until we're done changing it. We depend on
         * i_mutex to block other extend/truncate calls while we're
         * here.
         */
        down_write(&oi->ip_alloc_sem);

        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                /*
                 * We can optimize small extends by keeping the inodes
                 * inline data.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
                        up_write(&oi->ip_alloc_sem);
                        goto out_update_size;
                }

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        up_write(&oi->ip_alloc_sem);
                        mlog_errno(ret);
                        goto out;
                }
        }

        if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);

        up_write(&oi->ip_alloc_sem);

        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

out_update_size:
        ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
        int status = 0, size_change;
        int qtype;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct buffer_head *bh = NULL;
        handle_t *handle = NULL;
        struct dquot *transfer_from[MAXQUOTAS] = { };
        struct dquot *transfer_to[MAXQUOTAS] = { };

        mlog_entry("(0x%p, '%.*s')\n", dentry,
                   dentry->d_name.len, dentry->d_name.name);

        /* ensuring we don't even attempt to truncate a symlink */
        if (S_ISLNK(inode->i_mode))
                attr->ia_valid &= ~ATTR_SIZE;

        if (attr->ia_valid & ATTR_MODE)
                mlog(0, "mode change: %d\n", attr->ia_mode);
        if (attr->ia_valid & ATTR_UID)
                mlog(0, "uid change: %d\n", attr->ia_uid);
        if (attr->ia_valid & ATTR_GID)
                mlog(0, "gid change: %d\n", attr->ia_gid);
        if (attr->ia_valid & ATTR_SIZE)
                mlog(0, "size change...\n");
        if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
                mlog(0, "time change...\n");

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
                           | ATTR_GID | ATTR_UID | ATTR_MODE)
        if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
                mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
                return 0;
        }

        status = inode_change_ok(inode, attr);
        if (status)
                return status;

        size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
        if (size_change) {
                status = ocfs2_rw_lock(inode, 1);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = ocfs2_inode_lock(inode, &bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
                goto bail_unlock_rw;
        }

        if (size_change && attr->ia_size != i_size_read(inode)) {
                if (attr->ia_size > sb->s_maxbytes) {
                        status = -EFBIG;
                        goto bail_unlock;
                }

                if (i_size_read(inode) > attr->ia_size) {
                        if (ocfs2_should_order_data(inode)) {
                                status = ocfs2_begin_ordered_truncate(inode,
                                                                      attr->ia_size);
                                if (status)
                                        goto bail_unlock;
                        }
                        status = ocfs2_truncate_file(inode, bh, attr->ia_size);
                } else
                        status = ocfs2_extend_file(inode, bh, attr->ia_size);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
                        status = -ENOSPC;
                        goto bail_unlock;
                }
        }

        if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
            (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
                /*
                 * Gather pointers to quota structures so that allocation /
                 * freeing of quota structures happens here and not inside
                 * vfs_dq_transfer() where we have problems with lock ordering
                 */
                if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
                        transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
                                                      USRQUOTA);
                        transfer_from[USRQUOTA] = dqget(sb, inode->i_uid,
                                                        USRQUOTA);
                        if (!transfer_to[USRQUOTA] || !transfer_from[USRQUOTA]) {
                                status = -ESRCH;
                                goto bail_unlock;
                        }
                }
                if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
                        transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
                                                      GRPQUOTA);
                        transfer_from[GRPQUOTA] = dqget(sb, inode->i_gid,
                                                        GRPQUOTA);
                        if (!transfer_to[GRPQUOTA] || !transfer_from[GRPQUOTA]) {
                                status = -ESRCH;
                                goto bail_unlock;
                        }
                }
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
                                           2 * ocfs2_quota_trans_credits(sb));
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock;
                }
                status = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
                if (status < 0)
                        goto bail_commit;
        } else {
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock;
                }
        }

        /*
         * This will intentionally not wind up calling vmtruncate(),
         * since all the work for a size change has been done above.
         * Otherwise, we could get into problems with truncate as
         * ip_alloc_sem is used there to protect against i_size
         * changes.
         */
        status = inode_setattr(inode, attr);
        if (status < 0) {
                mlog_errno(status);
                goto bail_commit;
        }

        status = ocfs2_mark_inode_dirty(handle, inode, bh);
        if (status < 0)
                mlog_errno(status);

bail_commit:
        ocfs2_commit_trans(osb, handle);
bail_unlock:
        ocfs2_inode_unlock(inode, 1);
bail_unlock_rw:
        if (size_change)
                ocfs2_rw_unlock(inode, 1);
bail:
        brelse(bh);

        /* Release quota pointers in case we acquired them */
        for (qtype = 0; qtype < MAXQUOTAS; qtype++) {
                dqput(transfer_to[qtype]);
                dqput(transfer_from[qtype]);
        }

        if (!status && attr->ia_valid & ATTR_MODE) {
                status = ocfs2_acl_chmod(inode);
                if (status < 0)
                        mlog_errno(status);
        }

        return status;
}

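/*
 * Note the ordering ocfs2_setattr() relies on: the rw cluster lock is
 * taken before the inode (meta) cluster lock, and the journal handle is
 * only started once both are held.  The write path below uses the same
 * relative order.
 */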
int ocfs2_getattr(struct vfsmount *mnt,
                  struct dentry *dentry,
                  struct kstat *stat)
{
        int err;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = dentry->d_inode->i_sb;
        struct ocfs2_super *osb = sb->s_fs_info;

        err = ocfs2_inode_revalidate(dentry);
        if (err) {
                if (err != -ENOENT)
                        mlog_errno(err);
                goto bail;
        }

        generic_fillattr(inode, stat);

        /* We set the blksize from the cluster size for performance */
        stat->blksize = osb->s_clustersize;

bail:
        return err;
}

int ocfs2_permission(struct inode *inode, int mask)
{
        int ret;

        ret = ocfs2_inode_lock(inode, NULL, 0);
        if (ret) {
                if (ret != -ENOENT)
                        mlog_errno(ret);
                goto out;
        }

        ret = generic_permission(inode, mask, ocfs2_check_acl);

        ocfs2_inode_unlock(inode, 0);
out:
        return ret;
}

static int __ocfs2_write_remove_suid(struct inode *inode,
                                     struct buffer_head *bh)
{
        int ret;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di;

        mlog_entry("(Inode %llu, mode 0%o)\n",
                   (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_trans;
        }

        inode->i_mode &= ~S_ISUID;
        if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
                inode->i_mode &= ~S_ISGID;

        di = (struct ocfs2_dinode *) bh->b_data;
        di->i_mode = cpu_to_le16(inode->i_mode);

        ret = ocfs2_journal_dirty(handle, bh);
        if (ret < 0)
                mlog_errno(ret);

out_trans:
        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

/*
 * Will look for holes and unwritten extents in the range starting at
 * pos for count bytes (inclusive).
 */
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
                                       size_t count)
{
        int ret = 0;
        unsigned int extent_flags;
        u32 cpos, clusters, extent_len, phys_cpos;
        struct super_block *sb = inode->i_sb;

        cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
                                         &extent_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
                        ret = 1;
                        break;
                }

                if (extent_len > clusters)
                        extent_len = clusters;

                clusters -= extent_len;
                cpos += extent_len;
        }
out:
        return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
        int ret;
        struct buffer_head *bh = NULL;

        ret = ocfs2_read_inode_block(inode, &bh);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = __ocfs2_write_remove_suid(inode, bh);
out:
        brelse(bh);
        return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
                                            u64 start, u64 len)
{
        int ret;
        u32 cpos, phys_cpos, clusters, alloc_size;
        u64 end = start + len;
        struct buffer_head *di_bh = NULL;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                ret = ocfs2_read_inode_block(inode, &di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * Nothing to do if the requested reservation range
                 * fits within the inode.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, end))
                        goto out;

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * We consider both start and len to be inclusive.
         */
        cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
        clusters -= cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
                                         &alloc_size, NULL);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * Hole or existing extent len can be arbitrary, so
                 * cap it to our own allocation request.
                 */
                if (alloc_size > clusters)
                        alloc_size = clusters;

                if (phys_cpos) {
                        /*
                         * We already have an allocation at this
                         * region so we can safely skip it.
                         */
                        goto next;
                }

                ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
                if (ret) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }

next:
                cpos += alloc_size;
                clusters -= alloc_size;
        }

        ret = 0;
out:
        brelse(di_bh);
        return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
                                         u64 byte_len)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        loff_t start, end;
        struct address_space *mapping = inode->i_mapping;

        start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
        end = byte_start + byte_len;
        end = end & ~(osb->s_clustersize - 1);

        if (start < end) {
                unmap_mapping_range(mapping, start, end - start, 0);
                truncate_inode_pages_range(mapping, start, end - 1);
        }
}

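/*
 * Example of the alignment above, assuming a 64K cluster size: punching
 * 190000 bytes starting at offset 10000 rounds "start" up to 65536 and
 * "end" down to 196608, so only the pages of fully-removed clusters are
 * dropped; the partial clusters at either edge keep their pages for
 * ocfs2_zero_partial_clusters() to zero in place.
 */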
static int ocfs2_zero_partial_clusters(struct inode *inode,
                                       u64 start, u64 len)
{
        int ret = 0;
        u64 tmpend, end = start + len;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        unsigned int csize = osb->s_clustersize;
        handle_t *handle;

        /*
         * The "start" and "end" values are NOT necessarily part of
         * the range whose allocation is being deleted. Rather, this
         * is what the user passed in with the request. We must zero
         * partial clusters here. There's no need to worry about
         * physical allocation - the zeroing code knows to skip holes.
         */
        mlog(0, "byte start: %llu, end: %llu\n",
             (unsigned long long)start, (unsigned long long)end);

        /*
         * If both edges are on a cluster boundary then there's no
         * zeroing required as the region is part of the allocation to
         * be truncated.
         */
        if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        /*
         * We want to get the byte offset of the end of the 1st cluster.
         */
        tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
        if (tmpend > end)
                tmpend = end;

        mlog(0, "1st range: start: %llu, tmpend: %llu\n",
             (unsigned long long)start, (unsigned long long)tmpend);

        ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
        if (ret)
                mlog_errno(ret);

        if (tmpend < end) {
                /*
                 * This may make start and end equal, but the zeroing
                 * code will skip any work in that case so there's no
                 * need to catch it up here.
                 */
                start = end & ~(osb->s_clustersize - 1);

                mlog(0, "2nd range: start: %llu, end: %llu\n",
                     (unsigned long long)start, (unsigned long long)end);

                ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
                if (ret)
                        mlog_errno(ret);
        }

        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_remove_inode_range(struct inode *inode,
                                    struct buffer_head *di_bh, u64 byte_start,
                                    u64 byte_len)
{
        int ret = 0;
        u32 trunc_start, trunc_len, cpos, phys_cpos, alloc_size;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_cached_dealloc_ctxt dealloc;
        struct address_space *mapping = inode->i_mapping;
        struct ocfs2_extent_tree et;

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
        ocfs2_init_dealloc_ctxt(&dealloc);

        if (byte_len == 0)
                return 0;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
                                            byte_start + byte_len, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * There's no need to get fancy with the page cache
                 * truncate of an inline-data inode. We're talking
                 * about less than a page here, which will be cached
                 * in the dinode buffer anyway.
                 */
                unmap_mapping_range(mapping, 0, 0, 0);
                truncate_inode_pages(mapping, 0);
                goto out;
        }

        trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
        trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits;
        if (trunc_len >= trunc_start)
                trunc_len -= trunc_start;
        else
                trunc_len = 0;

        mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             (unsigned long long)byte_start,
             (unsigned long long)byte_len, trunc_start, trunc_len);

        ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        cpos = trunc_start;
        while (trunc_len) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
                                         &alloc_size, NULL);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                if (alloc_size > trunc_len)
                        alloc_size = trunc_len;

                /* Only do work for non-holes */
                if (phys_cpos != 0) {
                        ret = ocfs2_remove_btree_range(inode, &et, cpos,
                                                       phys_cpos, alloc_size,
                                                       &dealloc);
                        if (ret) {
                                mlog_errno(ret);
                                goto out;
                        }
                }

                cpos += alloc_size;
                trunc_len -= alloc_size;
        }

        ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
        ocfs2_schedule_truncate_log_flush(osb, 1);
        ocfs2_run_deallocs(osb, &dealloc);

        return ret;
}

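/*
 * In short, removing a range is done in three steps above: zero the
 * partial clusters at either end of the byte range, free every whole
 * cluster in between from the extent btree, then drop the page cache
 * for the fully-removed clusters only.
 */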
/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                                     loff_t f_pos, unsigned int cmd,
                                     struct ocfs2_space_resv *sr,
                                     int change_size)
{
        int ret;
        s64 llen;
        loff_t size;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        handle_t *handle;
        unsigned long long max_off = inode->i_sb->s_maxbytes;

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return -EROFS;

        mutex_lock(&inode->i_mutex);

        /*
         * This prevents concurrent writes on other nodes
         */
        ret = ocfs2_rw_lock(inode, 1);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret) {
                mlog_errno(ret);
                goto out_rw_unlock;
        }

        if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
                ret = -EPERM;
                goto out_inode_unlock;
        }

        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
        case 1: /*SEEK_CUR*/
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
                sr->l_start += i_size_read(inode);
                break;
        default:
                ret = -EINVAL;
                goto out_inode_unlock;
        }
        sr->l_whence = 0;

        llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

        if (sr->l_start < 0
            || sr->l_start > max_off
            || (sr->l_start + llen) < 0
            || (sr->l_start + llen) > max_off) {
                ret = -EINVAL;
                goto out_inode_unlock;
        }
        size = sr->l_start + sr->l_len;

        if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
                if (sr->l_len <= 0) {
                        ret = -EINVAL;
                        goto out_inode_unlock;
                }
        }

        if (file && should_remove_suid(file->f_path.dentry)) {
                ret = __ocfs2_write_remove_suid(inode, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out_inode_unlock;
                }
        }

        down_write(&OCFS2_I(inode)->ip_alloc_sem);
        switch (cmd) {
        case OCFS2_IOC_RESVSP:
        case OCFS2_IOC_RESVSP64:
                /*
                 * This takes unsigned offsets, but the signed ones we
                 * pass have been checked against overflow above.
                 */
                ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
                                                       sr->l_len);
                break;
        case OCFS2_IOC_UNRESVSP:
        case OCFS2_IOC_UNRESVSP64:
                ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
                                               sr->l_len);
                break;
        default:
                ret = -EINVAL;
        }
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        if (ret) {
                mlog_errno(ret);
                goto out_inode_unlock;
        }

        /*
         * We update c/mtime for these changes
         */
        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out_inode_unlock;
        }

        if (change_size && i_size_read(inode) < size)
                i_size_write(inode, size);

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
        if (ret < 0)
                mlog_errno(ret);

        ocfs2_commit_trans(osb, handle);

out_inode_unlock:
        brelse(di_bh);
        ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
        ocfs2_rw_unlock(inode, 1);

out:
        mutex_unlock(&inode->i_mutex);
        return ret;
}

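/*
 * OCFS2_IOC_RESVSP/RESVSP64 reserve space by allocating unwritten
 * extents over the requested range; OCFS2_IOC_UNRESVSP/UNRESVSP64 punch
 * the range back out.  Neither moves any data, and i_size only grows
 * when the caller asked for it via change_size.
 */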
int ocfs2_change_file_space(struct file *file, unsigned int cmd,
                            struct ocfs2_space_resv *sr)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
            !ocfs2_writes_unwritten_extents(osb))
                return -ENOTTY;
        else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
                 !ocfs2_sparse_alloc(osb))
                return -ENOTTY;

        if (!S_ISREG(inode->i_mode))
                return -EINVAL;

        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;

        return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
}

static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
                            loff_t len)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_space_resv sr;
        int change_size = 1;

        if (!ocfs2_writes_unwritten_extents(osb))
                return -EOPNOTSUPP;

        if (S_ISDIR(inode->i_mode))
                return -ENODEV;

        if (mode & FALLOC_FL_KEEP_SIZE)
                change_size = 0;

        sr.l_whence = 0;
        sr.l_start = (s64)offset;
        sr.l_len = (s64)len;

        return __ocfs2_change_file_space(NULL, inode, offset,
                                         OCFS2_IOC_RESVSP64, &sr, change_size);
}

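/*
 * ->fallocate() is thus just a preallocation request: it is translated
 * into an OCFS2_IOC_RESVSP64 reservation, and i_size is only updated
 * when FALLOC_FL_KEEP_SIZE is not set.
 */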
int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
                                   size_t count)
{
        int ret = 0;
        unsigned int extent_flags;
        u32 cpos, clusters, extent_len, phys_cpos;
        struct super_block *sb = inode->i_sb;

        if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
            !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
                return 0;

        cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
                                         &extent_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
                        ret = 1;
                        break;
                }

                if (extent_len > clusters)
                        extent_len = clusters;

                clusters -= extent_len;
                cpos += extent_len;
        }
out:
        return ret;
}

static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
                                            loff_t pos, size_t count,
                                            int *meta_level)
{
        int ret;
        struct buffer_head *di_bh = NULL;
        u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        u32 clusters =
                ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;

        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        *meta_level = 1;

        ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
        if (ret)
                mlog_errno(ret);
out:
        brelse(di_bh);
        return ret;
}

static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
                                         loff_t *ppos,
                                         size_t count,
                                         int appending,
                                         int *direct_io)
{
        int ret = 0, meta_level = 0;
        struct inode *inode = dentry->d_inode;
        loff_t saved_pos, end;

        /*
         * We start with a read level meta lock and only jump to an ex
         * if we need to make modifications here.
         */
        for(;;) {
                ret = ocfs2_inode_lock(inode, NULL, meta_level);
                if (ret < 0) {
                        meta_level = -1;
                        mlog_errno(ret);
                        goto out;
                }

                /* Clear suid / sgid if necessary. We do this here
                 * instead of later in the write path because
                 * remove_suid() calls ->setattr without any hint that
                 * we may have already done our cluster locking. Since
                 * ocfs2_setattr() *must* take cluster locks to
                 * proceed, this will lead us to recursively lock the
                 * inode. There's also the dinode i_size state which
                 * can be lost via setattr during extending writes (we
                 * set inode->i_size at the end of a write.) */
                if (should_remove_suid(dentry)) {
                        if (meta_level == 0) {
                                ocfs2_inode_unlock(inode, meta_level);
                                meta_level = 1;
                                continue;
                        }

                        ret = ocfs2_write_remove_suid(inode);
                        if (ret < 0) {
                                mlog_errno(ret);
                                goto out_unlock;
                        }
                }

                /* work on a copy of ppos until we're sure that we won't have
                 * to recalculate it due to relocking. */
                if (appending) {
                        saved_pos = i_size_read(inode);
                        mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
                } else {
                        saved_pos = *ppos;
                }

                end = saved_pos + count;

                ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
                if (ret == 1) {
                        ocfs2_inode_unlock(inode, meta_level);
                        meta_level = -1;

                        ret = ocfs2_prepare_inode_for_refcount(inode,
                                                               saved_pos,
                                                               count,
                                                               &meta_level);
                }

                if (ret < 0) {
                        mlog_errno(ret);
                        goto out_unlock;
                }

                /*
                 * Skip the O_DIRECT checks if we don't need
                 * them.
                 */
                if (!direct_io || !(*direct_io))
                        break;

                /*
                 * There's no sane way to do direct writes to an inode
                 * with inline data.
                 */
                if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                        *direct_io = 0;
                        break;
                }

                /*
                 * Allowing concurrent direct writes means
                 * i_size changes wouldn't be synchronized, so
                 * one node could wind up truncating another
                 * nodes writes.
                 */
                if (end > i_size_read(inode)) {
                        *direct_io = 0;
                        break;
                }

                /*
                 * We don't fill holes during direct io, so
                 * check for them here. If any are found, the
                 * caller will have to retake some cluster
                 * locks and initiate the io as buffered.
                 */
                ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
                if (ret == 1) {
                        *direct_io = 0;
                        ret = 0;
                } else if (ret < 0)
                        mlog_errno(ret);
                break;
        }

        if (appending)
                *ppos = saved_pos;

out_unlock:
        if (meta_level >= 0)
                ocfs2_inode_unlock(inode, meta_level);

out:
        return ret;
}

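/*
 * The *direct_io flag coming out of this function is what the write path
 * keys off: if any of the checks above cleared it (inline data, a
 * size-extending write, or holes in the range), the caller drops its
 * locks and retries the write as buffered I/O.
 */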
static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                                    const struct iovec *iov,
                                    unsigned long nr_segs,
                                    loff_t pos)
{
        int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
        int can_do_direct;
        ssize_t written = 0;
        size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        loff_t old_size, *ppos = &iocb->ki_pos;
        u32 old_clusters;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog_entry("(0x%p, %u, '%.*s')\n", file,
                   (unsigned int)nr_segs,
                   file->f_path.dentry->d_name.len,
                   file->f_path.dentry->d_name.name);

        if (iocb->ki_left == 0)
                return 0;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        appending = file->f_flags & O_APPEND ? 1 : 0;
        direct_io = file->f_flags & O_DIRECT ? 1 : 0;

        mutex_lock(&inode->i_mutex);

relock:
        /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
        if (direct_io) {
                down_read(&inode->i_alloc_sem);
                have_alloc_sem = 1;
        }

        /* concurrent O_DIRECT writes are allowed */
        rw_level = !direct_io;
        ret = ocfs2_rw_lock(inode, rw_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_sems;
        }

        can_do_direct = direct_io;
        ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
                                            iocb->ki_left, appending,
                                            &can_do_direct);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * We can't complete the direct I/O as requested, fall back to
         * buffered I/O.
         */
        if (direct_io && !can_do_direct) {
                ocfs2_rw_unlock(inode, rw_level);
                up_read(&inode->i_alloc_sem);

                have_alloc_sem = 0;
                rw_level = -1;

                direct_io = 0;
                goto relock;
        }

        /*
         * To later detect whether a journal commit for sync writes is
         * necessary, we sample i_size, and cluster count here.
         */
        old_size = i_size_read(inode);
        old_clusters = OCFS2_I(inode)->ip_clusters;

        /* communicate with ocfs2_dio_end_io */
        ocfs2_iocb_set_rw_locked(iocb, rw_level);

        if (direct_io) {
                ret = generic_segment_checks(iov, &nr_segs, &ocount,
                                             VERIFY_READ);
                if (ret)
                        goto out_dio;

                ret = generic_write_checks(file, ppos, &count,
                                           S_ISBLK(inode->i_mode));
                if (ret)
                        goto out_dio;

                written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
                                                    ppos, count, ocount);
                if (written < 0) {
                        /*
                         * direct write may have instantiated a few
                         * blocks outside i_size. Trim these off again.
                         * Don't need i_size_read because we hold i_mutex.
                         */
                        if (*ppos + count > inode->i_size)
                                vmtruncate(inode, inode->i_size);
                        ret = written;
                        goto out_dio;
                }
        } else {
                written = generic_file_aio_write_nolock(iocb, iov, nr_segs,
                                                        *ppos);
        }

out_dio:
        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

        if ((file->f_flags & O_SYNC && !direct_io) || IS_SYNC(inode)) {
                /*
                 * The generic write paths have handled getting data
                 * to disk, but since we don't make use of the dirty
                 * inode list, a manual journal commit is necessary
                 * here.
                 */
                if (old_size != i_size_read(inode) ||
                    old_clusters != OCFS2_I(inode)->ip_clusters) {
                        ret = jbd2_journal_force_commit(osb->journal->j_journal);
                        if (ret < 0)
                                written = ret;
                }
        }

        /*
         * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
         * function pointer which is called when o_direct io completes so that
         * it can unlock our rw lock.  (it's the clustered equivalent of
         * i_alloc_sem; protects truncate from racing with pending ios).
         * Unfortunately there are error cases which call end_io and others
         * that don't.  so we don't have to unlock the rw_lock if either an
         * async dio is going to do it in the future or an end_io after an
         * error has already done it.
         */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
                rw_level = -1;
                have_alloc_sem = 0;
        }

out:
        if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);

out_sems:
        if (have_alloc_sem)
                up_read(&inode->i_alloc_sem);

        mutex_unlock(&inode->i_mutex);

        return written ? written : ret;
}

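/*
 * Two things worth noting in the write path above: a direct write that
 * cannot be serviced as such is silently restarted as a buffered write
 * via the relock: label, and O_SYNC/IS_SYNC writes force a journal
 * commit by hand because ocfs2 does not rely on the VFS dirty inode
 * list for ordering.
 */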
static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
                                struct file *out,
                                struct splice_desc *sd)
{
        int ret;

        ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, &sd->pos,
                                            sd->total_len, 0, NULL);
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }

        return splice_from_pipe_feed(pipe, sd, pipe_to_file);
}

static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
                                       struct file *out,
                                       loff_t *ppos,
                                       size_t len,
                                       unsigned int flags)
{
        int ret;
        struct address_space *mapping = out->f_mapping;
        struct inode *inode = mapping->host;
        struct splice_desc sd = {
                .total_len = len,
                .flags = flags,
                .pos = *ppos,
                .u.file = out,
        };

        mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
                   (unsigned int)len,
                   out->f_path.dentry->d_name.len,
                   out->f_path.dentry->d_name.name);

        if (pipe->inode)
                mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);

        splice_from_pipe_begin(&sd);
        do {
                ret = splice_from_pipe_next(pipe, &sd);
                if (ret <= 0)
                        break;

                mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
                ret = ocfs2_rw_lock(inode, 1);
                if (ret < 0)
                        mlog_errno(ret);
                else {
                        ret = ocfs2_splice_to_file(pipe, out, &sd);
                        ocfs2_rw_unlock(inode, 1);
                }
                mutex_unlock(&inode->i_mutex);
        } while (ret > 0);
        splice_from_pipe_end(pipe, &sd);

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (sd.num_spliced)
                ret = sd.num_spliced;

        if (ret > 0) {
                unsigned long nr_pages;

                *ppos += ret;
                nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

                /*
                 * If file or inode is SYNC and we actually wrote some data,
                 * sync it.
                 */
                if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
                        int err;

                        mutex_lock(&inode->i_mutex);
                        err = ocfs2_rw_lock(inode, 1);
                        if (err < 0) {
                                mlog_errno(err);
                        } else {
                                err = generic_osync_inode(inode, mapping,
                                                  OSYNC_METADATA|OSYNC_DATA);
                                ocfs2_rw_unlock(inode, 1);
                        }
                        mutex_unlock(&inode->i_mutex);

                        if (err)
                                ret = err;
                }
                balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
        }

        return ret;
}

static ssize_t ocfs2_file_splice_read(struct file *in,
                                      loff_t *ppos,
                                      struct pipe_inode_info *pipe,
                                      size_t len,
                                      unsigned int flags)
{
        int ret = 0, lock_level = 0;
        struct inode *inode = in->f_path.dentry->d_inode;

        mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
                   (unsigned int)len,
                   in->f_path.dentry->d_name.len,
                   in->f_path.dentry->d_name.name);

        /*
         * See the comment in ocfs2_file_aio_read()
         */
        ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
        }
        ocfs2_inode_unlock(inode, lock_level);

        ret = generic_file_splice_read(in, ppos, pipe, len, flags);

bail:
        return ret;
}

static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
                                   const struct iovec *iov,
                                   unsigned long nr_segs,
                                   loff_t pos)
{
        int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
        struct file *filp = iocb->ki_filp;
        struct inode *inode = filp->f_path.dentry->d_inode;

        mlog_entry("(0x%p, %u, '%.*s')\n", filp,
                   (unsigned int)nr_segs,
                   filp->f_path.dentry->d_name.len,
                   filp->f_path.dentry->d_name.name);

        /*
         * buffered reads protect themselves in ->readpage().  O_DIRECT reads
         * need locks to protect pending reads from racing with truncate.
         */
        if (filp->f_flags & O_DIRECT) {
                down_read(&inode->i_alloc_sem);
                have_alloc_sem = 1;

                ret = ocfs2_rw_lock(inode, 0);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto bail;
                }
                rw_level = 0;
                /* communicate with ocfs2_dio_end_io */
                ocfs2_iocb_set_rw_locked(iocb, rw_level);
        }

        /*
         * We're fine letting folks race truncates and extending
         * writes with read across the cluster, just like they can
         * locally. Hence no rw_lock during read.
         *
         * Take and drop the meta data lock to update inode fields
         * like i_size. This allows the checks down below
         * generic_file_aio_read() a chance of actually working.
         */
        ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
        }
        ocfs2_inode_unlock(inode, lock_level);

        ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
        if (ret == -EINVAL)
                mlog(0, "generic_file_aio_read returned -EINVAL\n");

        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));

        /* see ocfs2_file_aio_write */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
                rw_level = -1;
                have_alloc_sem = 0;
        }

bail:
        if (have_alloc_sem)
                up_read(&inode->i_alloc_sem);
        if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);

        return ret;
}

const struct inode_operations ocfs2_file_iops = {
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
        .permission     = ocfs2_permission,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ocfs2_listxattr,
        .removexattr    = generic_removexattr,
        .fallocate      = ocfs2_fallocate,
        .fiemap         = ocfs2_fiemap,
};

const struct inode_operations ocfs2_special_file_iops = {
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
        .permission     = ocfs2_permission,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
        .aio_read       = ocfs2_file_aio_read,
        .aio_write      = ocfs2_file_aio_write,
        .unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
#endif
        .lock           = ocfs2_lock,
        .flock          = ocfs2_flock,
        .splice_read    = ocfs2_file_splice_read,
        .splice_write   = ocfs2_file_splice_write,
};

const struct file_operations ocfs2_dops = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .readdir        = ocfs2_readdir,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_dir_release,
        .open           = ocfs2_dir_open,
        .unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
#endif
        .lock           = ocfs2_lock,
        .flock          = ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * it.
 */
const struct file_operations ocfs2_fops_no_plocks = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
        .aio_read       = ocfs2_file_aio_read,
        .aio_write      = ocfs2_file_aio_write,
        .unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
#endif
        .flock          = ocfs2_flock,
        .splice_read    = ocfs2_file_splice_read,
        .splice_write   = ocfs2_file_splice_write,
};

const struct file_operations ocfs2_dops_no_plocks = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .readdir        = ocfs2_readdir,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_dir_release,
        .open           = ocfs2_dir_open,
        .unlocked_ioctl = ocfs2_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
#endif
        .flock          = ocfs2_flock,
};