ocfs2: Support creation of unwritten extents
fs/ocfs2/file.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>

#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"

#include "buffer_head_io.h"
static int ocfs2_sync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	return sync_mapping_buffers(inode->i_mapping);
}
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
		   file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

		status = -ENOENT;
		goto leave;
	}

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);
	status = 0;
leave:
	mlog_exit(status);
	return status;
}
static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
		   file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
	spin_unlock(&oi->ip_lock);

	mlog_exit(0);

	return 0;
}
static int ocfs2_sync_file(struct file *file,
			   struct dentry *dentry,
			   int datasync)
{
	int err = 0;
	journal_t *journal;
	struct inode *inode = dentry->d_inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
		   dentry->d_name.len, dentry->d_name.name);

	err = ocfs2_sync_inode(dentry->d_inode);
	if (err)
		goto bail;

	journal = osb->journal->j_journal;
	err = journal_force_commit(journal);

bail:
	mlog_exit(err);

	return (err < 0) ? -EIO : 0;
}
int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different than touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if (vfsmnt == NULL)
		return 0;

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

		return 0;
	}

	now = CURRENT_TIME;
	if (now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum)
		return 0;
	else
		return 1;
}
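/*
 * For illustration, a summary of the decision above: under MNT_RELATIME
 * an atime update is only requested once the file has changed since it
 * was last read (atime at or behind mtime/ctime). Everything else falls
 * through to the s_atime_quantum throttle, which batches atime
 * transactions to at most one per quantum of seconds.
 */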
int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;

	mlog_entry_void();

	/* ocfs2_start_trans() returns an ERR_PTR on failure, never NULL */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	inode->i_atime = CURRENT_TIME;
	ret = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	mlog_exit(ret);
	return ret;
}
static int ocfs2_set_inode_size(handle_t *handle,
				struct inode *inode,
				struct buffer_head *fe_bh,
				u64 new_i_size)
{
	int status;

	mlog_entry_void();
	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	mlog_exit(status);
	return status;
}
static int ocfs2_simple_size_update(struct inode *inode,
				    struct buffer_head *di_bh,
				    u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;

	mlog_entry_void();

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access(handle, inode, fe_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	/*
	 * Do this before setting i_size.
	 */
	status = ocfs2_zero_tail_for_truncate(inode, handle, new_i_size);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

	status = ocfs2_journal_dirty(handle, fe_bh);
	if (status < 0)
		mlog_errno(status);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(status);
	return status;
}
static int ocfs2_truncate_file(struct inode *inode,
			       struct buffer_head *di_bh,
			       u64 new_i_size)
{
	int status = 0;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_truncate_context *tc = NULL;

	mlog_entry("(inode = %llu, new_i_size = %llu)\n",
		   (unsigned long long)OCFS2_I(inode)->ip_blkno,
		   (unsigned long long)new_i_size);

	fe = (struct ocfs2_dinode *) di_bh->b_data;
	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
		status = -EIO;
		goto bail;
	}

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
		     (unsigned long long)le64_to_cpu(fe->i_size),
		     (unsigned long long)new_i_size);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
	     (unsigned long long)le64_to_cpu(fe->i_blkno),
	     (unsigned long long)le64_to_cpu(fe->i_size),
	     (unsigned long long)new_i_size);

	/* let's handle the simple truncate cases before doing any more
	 * cluster locking. */
	if (new_i_size == le64_to_cpu(fe->i_size))
		goto bail;

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	/* This forces other nodes to sync and drop their pages. Do
	 * this even if we have a truncate without allocation change -
	 * ocfs2 cluster sizes can be much greater than page size, so
	 * we have to truncate them anyway. */
	status = ocfs2_data_lock(inode, 1);
	if (status < 0) {
		up_write(&OCFS2_I(inode)->ip_alloc_sem);

		mlog_errno(status);
		goto bail;
	}

	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. This does the task of marking
	 * i_size. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_data;
	}

	status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_data;
	}

	status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_data;
	}

	/* TODO: orphan dir cleanup here. */
bail_unlock_data:
	ocfs2_data_unlock(inode, 1);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
	mlog_exit(status);
	return status;
}
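/*
 * A sketch of the ordering above: flush other nodes and unmap local
 * pages under the data lock, record the new i_size on disk via
 * ocfs2_orphan_for_truncate(), and only then release clusters with
 * ocfs2_prepare_truncate()/ocfs2_commit_truncate(). If we crash midway,
 * recovery can finish the truncate from the already-updated on-disk
 * i_size.
 */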
/*
 * extend allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
			       struct inode *inode,
			       u32 *logical_offset,
			       u32 clusters_to_add,
			       int mark_unwritten,
			       struct buffer_head *fe_bh,
			       handle_t *handle,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       enum ocfs2_alloc_restarted *reason_ret)
{
	int status = 0;
	int free_extents;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
	enum ocfs2_alloc_restarted reason = RESTART_NONE;
	u32 bit_off, num_bits;
	u64 block;
	u8 flags = 0;

	BUG_ON(!clusters_to_add);

	if (mark_unwritten)
		flags = OCFS2_EXT_UNWRITTEN;

	free_extents = ocfs2_num_free_extents(osb, inode, fe);
	if (free_extents < 0) {
		status = free_extents;
		mlog_errno(status);
		goto leave;
	}

	/* there are two cases which could cause us to EAGAIN in the
	 * we-need-more-metadata case:
	 * 1) we haven't reserved *any*
	 * 2) we are so fragmented, we've needed to add metadata too
	 *    many times. */
	if (!free_extents && !meta_ac) {
		mlog(0, "we haven't reserved any metadata!\n");
		status = -EAGAIN;
		reason = RESTART_META;
		goto leave;
	} else if ((!free_extents)
		   && (ocfs2_alloc_context_bits_left(meta_ac)
		       < ocfs2_extend_meta_needed(fe))) {
		mlog(0, "filesystem is really fragmented...\n");
		status = -EAGAIN;
		reason = RESTART_META;
		goto leave;
	}

	status = ocfs2_claim_clusters(osb, handle, data_ac, 1,
				      &bit_off, &num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}

	BUG_ON(num_bits > clusters_to_add);

	/* reserve our write early -- insert_extent may update the inode */
	status = ocfs2_journal_access(handle, inode, fe_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
	     num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
	status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
				     *logical_offset, block, num_bits,
				     flags, meta_ac);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	status = ocfs2_journal_dirty(handle, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	clusters_to_add -= num_bits;
	*logical_offset += num_bits;

	if (clusters_to_add) {
		mlog(0, "need to alloc once more, clusters = %u, wanted = "
		     "%u\n", fe->i_clusters, clusters_to_add);
		status = -EAGAIN;
		reason = RESTART_TRANS;
	}

leave:
	mlog_exit(status);
	if (reason_ret)
		*reason_ret = reason;
	return status;
}
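/*
 * A sketch of the restart protocol above, as driven by a caller like
 * __ocfs2_extend_allocation() below (error handling elided):
 *
 *	status = ocfs2_do_extend_allocation(osb, inode, &logical, want,
 *					    unwritten, bh, handle,
 *					    data_ac, meta_ac, &why);
 *	if (status == -EAGAIN && why == RESTART_TRANS)
 *		extend the handle's credits and call again;
 *	else if (status == -EAGAIN && why == RESTART_META)
 *		commit, re-reserve metadata, and restart from the top;
 *
 * *logical_offset is advanced past whatever was inserted, so each
 * retry picks up exactly where the previous pass stopped.
 */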
/*
 * For a given allocation, determine which allocators will need to be
 * accessed, and lock them, reserving the appropriate number of bits.
 *
 * Sparse file systems call this from ocfs2_write_begin_nolock()
 * and ocfs2_allocate_unwritten_extents().
 *
 * File systems which don't support holes call this from
 * ocfs2_extend_allocation().
 */
int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
			  u32 clusters_to_add, u32 extents_to_split,
			  struct ocfs2_alloc_context **data_ac,
			  struct ocfs2_alloc_context **meta_ac)
{
	int ret, num_free_extents;
	unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	*meta_ac = NULL;
	*data_ac = NULL;

	mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
	     "clusters_to_add = %u, extents_to_split = %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
	     le32_to_cpu(di->i_clusters), clusters_to_add, extents_to_split);

	num_free_extents = ocfs2_num_free_extents(osb, inode, di);
	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Sparse allocation file systems need to be more conservative
	 * with reserving room for expansion - the actual allocation
	 * happens while we've got a journal handle open so re-taking
	 * a cluster lock (because we ran out of room for another
	 * extent) will violate ordering rules.
	 *
	 * Most of the time we'll only be seeing this 1 cluster at a time
	 * anyway.
	 *
	 * Always lock for any unwritten extents - we might want to
	 * add blocks during a split.
	 */
	if (!num_free_extents ||
	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
		ret = ocfs2_reserve_new_metadata(osb, di, meta_ac);
		if (ret < 0) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
	if (ret < 0) {
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto out;
	}

out:
	if (ret) {
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}

		/*
		 * We cannot have an error and a non null *data_ac.
		 */
	}

	return ret;
}
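/*
 * Typical calling pattern (a sketch; __ocfs2_extend_allocation() below
 * is the canonical user): reserve bits before opening a journal
 * handle, and free the contexts after commit.
 *
 *	ret = ocfs2_lock_allocators(inode, di, clusters, 0,
 *				    &data_ac, &meta_ac);
 *	handle = ocfs2_start_trans(osb, credits);
 *	... allocate under the handle ...
 *	ocfs2_commit_trans(osb, handle);
 *	if (data_ac)
 *		ocfs2_free_alloc_context(data_ac);
 *	if (meta_ac)
 *		ocfs2_free_alloc_context(meta_ac);
 */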
static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				     u32 clusters_to_add, int mark_unwritten)
{
	int status = 0;
	int restart_func = 0;
	int credits;
	u32 prev_clusters;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);

	/*
	 * Unwritten extents only make sense on file systems which
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
				  OCFS2_BH_CACHED, inode);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	fe = (struct ocfs2_dinode *) bh->b_data;
	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
		status = -EIO;
		goto leave;
	}

restart_all:
	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	status = ocfs2_lock_allocators(inode, fe, clusters_to_add, 0, &data_ac,
				       &meta_ac);
	if (status) {
		mlog_errno(status);
		goto leave;
	}

	credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto leave;
	}

restarted_transaction:
	/* reserve a write to the file entry early on - that way if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access(handle, inode, bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_do_extend_allocation(osb,
					    inode,
					    &logical_start,
					    clusters_to_add,
					    mark_unwritten,
					    bh,
					    handle,
					    data_ac,
					    meta_ac,
					    &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}

	status = ocfs2_journal_dirty(handle, bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			mlog(0, "restarting function.\n");
			restart_func = 1;
		} else {
			BUG_ON(why != RESTART_TRANS);

			mlog(0, "restarting transaction.\n");
			/* TODO: This can be more intelligent. */
			credits = ocfs2_calc_extend_credits(osb->sb,
							    fe,
							    clusters_to_add);
			status = ocfs2_extend_trans(handle, credits);
			if (status < 0) {
				/* handle still has to be committed at
				 * this point. */
				status = -ENOMEM;
				mlog_errno(status);
				goto leave;
			}
			goto restarted_transaction;
		}
	}

	mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
	     le32_to_cpu(fe->i_clusters),
	     (unsigned long long)le64_to_cpu(fe->i_size));
	mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
	     OCFS2_I(inode)->ip_clusters, i_size_read(inode));

leave:
	if (handle) {
		ocfs2_commit_trans(osb, handle);
		handle = NULL;
	}
	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}
	if ((!status) && restart_func) {
		restart_func = 0;
		goto restart_all;
	}
	if (bh) {
		brelse(bh);
		bh = NULL;
	}

	mlog_exit(status);
	return status;
}
static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				   u32 clusters_to_add, int mark_unwritten)
{
	int ret;

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * here.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	ret = __ocfs2_extend_allocation(inode, logical_start, clusters_to_add,
					mark_unwritten);
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	return ret;
}
/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->prepare_write() and
 * ->commit_write(). */
static int ocfs2_write_zero_page(struct inode *inode,
				 u64 size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index;
	unsigned int offset;
	handle_t *handle = NULL;
	int ret;

	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
	/* ugh.  in prepare/commit_write, if from==to==start of block, we
	** skip the prepare.  make sure we never send an offset for the start
	** of a block
	*/
	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
		offset++;
	}
	index = size >> PAGE_CACHE_SHIFT;

	page = grab_cache_page(mapping, index);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}

	if (ocfs2_should_order_data(inode)) {
		handle = ocfs2_start_walk_page_trans(inode, page, offset,
						     offset);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			handle = NULL;
			goto out_unlock;
		}
	}

	/* must not update i_size! */
	ret = block_commit_write(page, offset, offset);
	if (ret < 0)
		mlog_errno(ret);
	else
		ret = 0;

	if (handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out_unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
static int ocfs2_zero_extend(struct inode *inode,
			     u64 zero_to_size)
{
	int ret = 0;
	u64 start_off;
	struct super_block *sb = inode->i_sb;

	start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	while (start_off < zero_to_size) {
		ret = ocfs2_write_zero_page(inode, start_off);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		start_off += sb->s_blocksize;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
	}

out:
	return ret;
}
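/*
 * Cost note for the loop above: zeroing proceeds one file system block
 * per ocfs2_write_zero_page() call, so, for example, extending by 1MB
 * on a 4K block file system issues 256 prepare/commit cycles - hence
 * the cond_resched() each time around.
 */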
/*
 * A tail_to_skip value > 0 indicates that we're being called from
 * ocfs2_file_aio_write(). This has the following implications:
 *
 * - we don't want to update i_size
 * - di_bh will be NULL, which is fine because it's only used in the
 *   case where we want to update i_size.
 * - ocfs2_zero_extend() will then only be filling the hole created
 *   between i_size and the start of the write.
 */
static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size,
			     size_t tail_to_skip)
{
	int ret = 0;
	u32 clusters_to_add = 0;

	BUG_ON(!tail_to_skip && !di_bh);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)
		goto out;

	if (i_size_read(inode) == new_i_size)
		goto out;
	BUG_ON(new_i_size < i_size_read(inode));

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
		BUG_ON(tail_to_skip != 0);
		goto out_update_size;
	}

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
			  OCFS2_I(inode)->ip_clusters;

	/*
	 * protect the pages that ocfs2_zero_extend is going to be
	 * pulling into the page cache.. we do this before the
	 * metadata extend so that we don't get into the situation
	 * where we've extended the metadata but can't get the data
	 * lock to zero.
	 */
	ret = ocfs2_data_lock(inode, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (clusters_to_add) {
		ret = ocfs2_extend_allocation(inode,
					      OCFS2_I(inode)->ip_clusters,
					      clusters_to_add, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}
	}

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}

out_update_size:
	if (!tail_to_skip) {
		/* We're being called from ocfs2_setattr() which wants
		 * us to update i_size */
		ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
		if (ret < 0)
			mlog_errno(ret);
	}

out_unlock:
	if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ocfs2_data_unlock(inode, 1);

out:
	return ret;
}
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
	int status = 0, size_change;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;

	mlog_entry("(0x%p, '%.*s')\n", dentry,
		   dentry->d_name.len, dentry->d_name.name);

	if (attr->ia_valid & ATTR_MODE)
		mlog(0, "mode change: %d\n", attr->ia_mode);
	if (attr->ia_valid & ATTR_UID)
		mlog(0, "uid change: %d\n", attr->ia_uid);
	if (attr->ia_valid & ATTR_GID)
		mlog(0, "gid change: %d\n", attr->ia_gid);
	if (attr->ia_valid & ATTR_SIZE)
		mlog(0, "size change...\n");
	if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
		mlog(0, "time change...\n");

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
		mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
		return 0;
	}

	status = inode_change_ok(inode, attr);
	if (status)
		return status;

	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
	if (size_change) {
		status = ocfs2_rw_lock(inode, 1);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = ocfs2_meta_lock(inode, &bh, 1);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail_unlock_rw;
	}

	if (size_change && attr->ia_size != i_size_read(inode)) {
		if (i_size_read(inode) > attr->ia_size)
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		else
			status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			status = -ENOSPC;
			goto bail_unlock;
		}
	}

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto bail_unlock;
	}

	/*
	 * This will intentionally not wind up calling vmtruncate(),
	 * since all the work for a size change has been done above.
	 * Otherwise, we could get into problems with truncate as
	 * ip_alloc_sem is used there to protect against i_size
	 * changes.
	 */
	status = inode_setattr(inode, attr);
	if (status < 0) {
		mlog_errno(status);
		goto bail_commit;
	}

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock:
	ocfs2_meta_unlock(inode, 1);
bail_unlock_rw:
	if (size_change)
		ocfs2_rw_unlock(inode, 1);
bail:
	if (bh)
		brelse(bh);

	mlog_exit(status);
	return status;
}
int ocfs2_getattr(struct vfsmount *mnt,
		  struct dentry *dentry,
		  struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = dentry->d_inode->i_sb;
	struct ocfs2_super *osb = sb->s_fs_info;
	int err;

	mlog_entry_void();

	err = ocfs2_inode_revalidate(dentry);
	if (err) {
		if (err != -ENOENT)
			mlog_errno(err);
		goto bail;
	}

	generic_fillattr(inode, stat);

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;

bail:
	mlog_exit(err);

	return err;
}
int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	int ret;

	mlog_entry_void();

	ret = ocfs2_meta_lock(inode, NULL, 0);
	if (ret) {
		if (ret != -ENOENT)
			mlog_errno(ret);
		goto out;
	}

	ret = generic_permission(inode, mask, NULL);

	ocfs2_meta_unlock(inode, 0);
out:
	mlog_exit(ret);
	return ret;
}
static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	mlog_entry("(Inode %llu, mode 0%o)\n",
		   (unsigned long long)oi->ip_blkno, inode->i_mode);

	/* ocfs2_start_trans() returns an ERR_PTR on failure, never NULL */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_block(osb, oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	ret = ocfs2_journal_access(handle, inode, bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_bh;
	}

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);

	ret = ocfs2_journal_dirty(handle, bh);
	if (ret < 0)
		mlog_errno(ret);
out_bh:
	brelse(bh);
out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(ret);
	return ret;
}
/*
 * Will look for holes and unwritten extents in the range starting at
 * pos for count bytes (inclusive).
 */
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
				       size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}
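/*
 * Worked example for the conversion above, assuming a 64K cluster size
 * (s_clustersize_bits == 16): pos = 100K, count = 200K gives cpos = 1
 * and clusters = ocfs2_clusters_for_bytes(sb, 300K) - 1 = 5 - 1 = 4,
 * so clusters 1 through 4 are scanned for holes or unwritten extents.
 */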
/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;
	u32 cpos, phys_cpos, clusters, alloc_size;

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
	clusters -= cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */
			goto next;
		}

		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}

next:
		cpos += alloc_size;
		clusters -= alloc_size;
	}

	ret = 0;
out:
	return ret;
}
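/*
 * Example: start = 0, len = 1M on a 64K cluster file system walks
 * clusters 0 through 15. Ranges that already have an allocation are
 * skipped; only the holes get new extents, and those are inserted with
 * OCFS2_EXT_UNWRITTEN set so they read back as zeros until written.
 */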
static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
					 loff_t *ppos,
					 size_t count,
					 int appending,
					 int *direct_io)
{
	int ret = 0, meta_level = appending;
	struct inode *inode = dentry->d_inode;
	u32 clusters;
	loff_t newsize, saved_pos;

	/*
	 * We sample i_size under a read level meta lock to see if our write
	 * is extending the file, if it is we back off and get a write level
	 * meta lock.
	 */
	for(;;) {
		ret = ocfs2_meta_lock(inode, NULL, meta_level);
		if (ret < 0) {
			meta_level = -1;
			mlog_errno(ret);
			goto out;
		}

		/* Clear suid / sgid if necessary. We do this here
		 * instead of later in the write path because
		 * remove_suid() calls ->setattr without any hint that
		 * we may have already done our cluster locking. Since
		 * ocfs2_setattr() *must* take cluster locks to
		 * proceed, this will lead us to recursively lock the
		 * inode. There's also the dinode i_size state which
		 * can be lost via setattr during extending writes (we
		 * set inode->i_size at the end of a write.) */
		if (should_remove_suid(dentry)) {
			if (meta_level == 0) {
				ocfs2_meta_unlock(inode, meta_level);
				meta_level = 1;
				continue;
			}

			ret = ocfs2_write_remove_suid(inode);
			if (ret < 0) {
				mlog_errno(ret);
				goto out_unlock;
			}
		}

		/* work on a copy of ppos until we're sure that we won't have
		 * to recalculate it due to relocking. */
		if (appending) {
			saved_pos = i_size_read(inode);
			mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
		} else {
			saved_pos = *ppos;
		}

		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
			loff_t end = saved_pos + count;

			/*
			 * Skip the O_DIRECT checks if we don't need
			 * them.
			 */
			if (!direct_io || !(*direct_io))
				break;

			/*
			 * Allowing concurrent direct writes means
			 * i_size changes wouldn't be synchronized, so
			 * one node could wind up truncating another
			 * node's writes.
			 */
			if (end > i_size_read(inode)) {
				*direct_io = 0;
				break;
			}

			/*
			 * We don't fill holes during direct io, so
			 * check for them here. If any are found, the
			 * caller will have to retake some cluster
			 * locks and initiate the io as buffered.
			 */
			ret = ocfs2_check_range_for_holes(inode, saved_pos,
							  count);
			if (ret == 1) {
				*direct_io = 0;
				ret = 0;
			} else if (ret < 0)
				mlog_errno(ret);
			break;
		}

		/*
		 * The rest of this loop is concerned with legacy file
		 * systems which don't support sparse files.
		 */

		newsize = count + saved_pos;

		mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
		     (long long) saved_pos, (long long) newsize,
		     (long long) i_size_read(inode));

		/* No need for a higher level metadata lock if we're
		 * never going past i_size. */
		if (newsize <= i_size_read(inode))
			break;

		if (meta_level == 0) {
			ocfs2_meta_unlock(inode, meta_level);
			meta_level = 1;
			continue;
		}

		spin_lock(&OCFS2_I(inode)->ip_lock);
		clusters = ocfs2_clusters_for_bytes(inode->i_sb, newsize) -
			OCFS2_I(inode)->ip_clusters;
		spin_unlock(&OCFS2_I(inode)->ip_lock);

		mlog(0, "Writing at EOF, may need more allocation: "
		     "i_size = %lld, newsize = %lld, need %u clusters\n",
		     (long long) i_size_read(inode), (long long) newsize,
		     clusters);

		/* We only want to continue the rest of this loop if
		 * our extend will actually require more
		 * allocation. */
		if (!clusters)
			break;

		ret = ocfs2_extend_file(inode, NULL, newsize, count);
		if (ret < 0) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out_unlock;
		}
		break;
	}

	if (appending)
		*ppos = saved_pos;

out_unlock:
	ocfs2_meta_unlock(inode, meta_level);

out:
	return ret;
}
static inline void
ocfs2_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t base = *basep;

	do {
		int copy = min(bytes, iov->iov_len - base);

		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {
			iov++;
			base = 0;
		}
	} while (bytes);
	*iovp = iov;
	*basep = base;
}
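/*
 * e.g. with iov = { {buf0, 100}, {buf1, 50} }, *basep == 0 and
 * bytes == 120: the first segment absorbs 100 bytes, the walk advances
 * to the second, and we return with *iovp == &iov[1], *basep == 20.
 */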
static struct page * ocfs2_get_write_source(char **ret_src_buf,
					    const struct iovec *cur_iov,
					    size_t iov_offset)
{
	int ret;
	char *buf = cur_iov->iov_base + iov_offset;
	struct page *src_page = NULL;
	unsigned long off;

	off = (unsigned long)(buf) & ~PAGE_CACHE_MASK;

	if (!segment_eq(get_fs(), KERNEL_DS)) {
		/*
		 * Pull in the user page. We want to do this outside
		 * of the meta data locks in order to preserve locking
		 * order in case of page fault.
		 */
		ret = get_user_pages(current, current->mm,
				     (unsigned long)buf & PAGE_CACHE_MASK, 1,
				     0, 0, &src_page, NULL);
		if (ret == 1)
			*ret_src_buf = kmap(src_page) + off;
		else
			src_page = ERR_PTR(-EFAULT);
	} else {
		*ret_src_buf = buf;
	}

	return src_page;
}
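/*
 * Pairs with ocfs2_put_write_source() below. For a userspace buffer we
 * return the pinned, kmap()ed source page; for KERNEL_DS callers no
 * page is taken and NULL comes back, which the put side accepts.
 */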
static void ocfs2_put_write_source(struct page *page)
{
	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
}
static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
					 const struct iovec *iov,
					 unsigned long nr_segs,
					 size_t count,
					 ssize_t o_direct_written)
{
	int ret = 0;
	ssize_t copied, total = 0;
	size_t iov_offset = 0, bytes;
	loff_t pos;
	const struct iovec *cur_iov = iov;
	struct page *user_page, *page;
	char *buf, *dst;
	void *fsdata;

	/*
	 * handle partial DIO write. Adjust cur_iov if needed.
	 */
	ocfs2_set_next_iovec(&cur_iov, &iov_offset, o_direct_written);

	do {
		pos = *ppos;

		user_page = ocfs2_get_write_source(&buf, cur_iov, iov_offset);
		if (IS_ERR(user_page)) {
			ret = PTR_ERR(user_page);
			goto out;
		}

		/* Stay within our page boundaries */
		bytes = min((PAGE_CACHE_SIZE - ((unsigned long)pos & ~PAGE_CACHE_MASK)),
			    (PAGE_CACHE_SIZE - ((unsigned long)buf & ~PAGE_CACHE_MASK)));
		/* Stay within the vector boundary */
		bytes = min_t(size_t, bytes, cur_iov->iov_len - iov_offset);
		/* Stay within count */
		bytes = min(bytes, count);

		page = NULL;
		ret = ocfs2_write_begin(file, file->f_mapping, pos, bytes, 0,
					&page, &fsdata);
		if (ret) {
			mlog_errno(ret);
			/* don't leak the kmap/reference on the source page */
			ocfs2_put_write_source(user_page);
			goto out;
		}

		dst = kmap_atomic(page, KM_USER0);
		memcpy(dst + (pos & (PAGE_CACHE_SIZE - 1)), buf, bytes);
		kunmap_atomic(dst, KM_USER0);
		flush_dcache_page(page);
		ocfs2_put_write_source(user_page);

		copied = ocfs2_write_end(file, file->f_mapping, pos, bytes,
					 bytes, page, fsdata);
		if (copied < 0) {
			mlog_errno(copied);
			ret = copied;
			goto out;
		}

		total += copied;
		*ppos = pos + copied;
		count -= copied;

		ocfs2_set_next_iovec(&cur_iov, &iov_offset, copied);
	} while(count);

out:
	return total ? total : ret;
}
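/*
 * Note on the bounds above: each pass through the loop copies at most
 * one page worth of data and never crosses a page boundary in either
 * the source buffer or the destination offset, so every
 * ocfs2_write_begin()/ocfs2_write_end() pair sees a write contained
 * within a single page.
 */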
static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs,
				    loff_t pos)
{
	int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
	int can_do_direct, sync = 0;
	ssize_t written = 0;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	loff_t *ppos = &iocb->ki_pos;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode;

	mlog_entry("(0x%p, %u, '%.*s')\n", file,
		   (unsigned int)nr_segs,
		   file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	if (iocb->ki_left == 0)
		return 0;

	ret = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	count = ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	appending = file->f_flags & O_APPEND ? 1 : 0;
	direct_io = file->f_flags & O_DIRECT ? 1 : 0;

	mutex_lock(&inode->i_mutex);

relock:
	/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
	if (direct_io) {
		down_read(&inode->i_alloc_sem);
		have_alloc_sem = 1;
	}

	/* concurrent O_DIRECT writes are allowed */
	rw_level = !direct_io;
	ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_sems;
	}

	can_do_direct = direct_io;
	ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
					    iocb->ki_left, appending,
					    &can_do_direct);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We can't complete the direct I/O as requested, fall back to
	 * buffered I/O.
	 */
	if (direct_io && !can_do_direct) {
		ocfs2_rw_unlock(inode, rw_level);
		up_read(&inode->i_alloc_sem);

		have_alloc_sem = 0;
		rw_level = -1;

		direct_io = 0;
		sync = 1;
		goto relock;
	}

	if (!sync && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
		sync = 1;

	/*
	 * XXX: Is it ok to execute these checks a second time?
	 */
	ret = generic_write_checks(file, ppos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out;

	/*
	 * Set pos so that sync_page_range_nolock() below understands
	 * where to start from. We might've moved it around via the
	 * calls above. The range we want to actually sync starts from
	 * *ppos here.
	 */
	pos = *ppos;

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	if (direct_io) {
		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
						    ppos, count, ocount);
		if (written < 0) {
			ret = written;
			goto out_dio;
		}
	} else {
		written = ocfs2_file_buffered_write(file, ppos, iov, nr_segs,
						    count, written);
		if (written < 0) {
			ret = written;
			if (ret != -EFAULT && ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}
	}

out_dio:
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

	/*
	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
	 * function pointer which is called when o_direct io completes so that
	 * it can unlock our rw lock. (it's the clustered equivalent of
	 * i_alloc_sem; protects truncate from racing with pending ios).
	 * Unfortunately there are error cases which call end_io and others
	 * that don't. so we don't have to unlock the rw_lock if either an
	 * async dio is going to do it in the future or an end_io after an
	 * error has already done it.
	 */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

out:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_sems:
	if (have_alloc_sem)
		up_read(&inode->i_alloc_sem);

	if (written > 0 && sync) {
		ssize_t err;

		err = sync_page_range_nolock(inode, file->f_mapping, pos, count);
		if (err < 0)
			written = err;
	}

	mutex_unlock(&inode->i_mutex);

	mlog_exit(ret);
	return written ? written : ret;
}
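/*
 * Lock ordering in the write path above, for reference:
 *
 *	i_mutex -> i_alloc_sem (O_DIRECT only) -> rw_lock -> meta lock
 *
 * with the meta lock taken and dropped inside
 * ocfs2_prepare_inode_for_write(). When a direct write can't proceed
 * (holes in the range, or an i_size extension), we drop i_alloc_sem
 * and the rw_lock and jump back to relock: to retake them at the
 * buffered level.
 */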
static int ocfs2_splice_write_actor(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf,
				    struct splice_desc *sd)
{
	int ret, count;
	ssize_t copied = 0;
	struct file *file = sd->u.file;
	unsigned int offset;
	struct page *page = NULL;
	void *fsdata;
	char *src, *dst;

	ret = buf->ops->confirm(pipe, buf);
	if (ret)
		goto out;

	offset = sd->pos & ~PAGE_CACHE_MASK;
	count = sd->len;
	if (count + offset > PAGE_CACHE_SIZE)
		count = PAGE_CACHE_SIZE - offset;

	ret = ocfs2_write_begin(file, file->f_mapping, sd->pos, count, 0,
				&page, &fsdata);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	src = buf->ops->map(pipe, buf, 1);
	dst = kmap_atomic(page, KM_USER1);
	memcpy(dst + offset, src + buf->offset, count);
	/* kunmap_atomic() takes the mapped address, not the page */
	kunmap_atomic(dst, KM_USER1);
	buf->ops->unmap(pipe, buf, src);

	copied = ocfs2_write_end(file, file->f_mapping, sd->pos, count, count,
				 page, fsdata);
	if (copied < 0) {
		mlog_errno(copied);
		ret = copied;
		goto out;
	}
out:

	return copied ? copied : ret;
}
static ssize_t __ocfs2_file_splice_write(struct pipe_inode_info *pipe,
					 struct file *out,
					 loff_t *ppos,
					 size_t len,
					 unsigned int flags)
{
	int ret, err;
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	ret = __splice_from_pipe(pipe, &sd, ocfs2_splice_write_actor);
	if (ret > 0) {
		*ppos += ret;

		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			if (err)
				ret = err;
		}
	}

	return ret;
}
static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
				       struct file *out,
				       loff_t *ppos,
				       size_t len,
				       unsigned int flags)
{
	int ret;
	struct inode *inode = out->f_path.dentry->d_inode;

	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
		   (unsigned int)len,
		   out->f_path.dentry->d_name.len,
		   out->f_path.dentry->d_name.name);

	inode_double_lock(inode, pipe->inode);

	ret = ocfs2_rw_lock(inode, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0,
					    NULL);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}

	/* ok, we're done with i_size and alloc work */
	ret = __ocfs2_file_splice_write(pipe, out, ppos, len, flags);

out_unlock:
	ocfs2_rw_unlock(inode, 1);
out:
	inode_double_unlock(inode, pipe->inode);

	mlog_exit(ret);
	return ret;
}
static ssize_t ocfs2_file_splice_read(struct file *in,
				      loff_t *ppos,
				      struct pipe_inode_info *pipe,
				      size_t len,
				      unsigned int flags)
{
	int ret = 0;
	struct inode *inode = in->f_path.dentry->d_inode;

	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
		   (unsigned int)len,
		   in->f_path.dentry->d_name.len,
		   in->f_path.dentry->d_name.name);

	/*
	 * See the comment in ocfs2_file_aio_read()
	 */
	ret = ocfs2_meta_lock(inode, NULL, 0);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_meta_unlock(inode, 0);

	ret = generic_file_splice_read(in, ppos, pipe, len, flags);

bail:
	mlog_exit(ret);
	return ret;
}
static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
				   const struct iovec *iov,
				   unsigned long nr_segs,
				   loff_t pos)
{
	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;

	mlog_entry("(0x%p, %u, '%.*s')\n", filp,
		   (unsigned int)nr_segs,
		   filp->f_path.dentry->d_name.len,
		   filp->f_path.dentry->d_name.name);

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	/*
	 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (filp->f_flags & O_DIRECT) {
		down_read(&inode->i_alloc_sem);
		have_alloc_sem = 1;

		ret = ocfs2_rw_lock(inode, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This allows the checks down below
	 * generic_file_aio_read() a chance of actually working.
	 */
	ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_meta_unlock(inode, lock_level);

	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
	if (ret == -EINVAL)
		mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));

	/* see ocfs2_file_aio_write */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

bail:
	if (have_alloc_sem)
		up_read(&inode->i_alloc_sem);
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);
	mlog_exit(ret);

	return ret;
}
const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
};

const struct file_operations ocfs2_fops = {
	.read		= do_sync_read,
	.write		= do_sync_write,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.aio_read	= ocfs2_file_aio_read,
	.aio_write	= ocfs2_file_aio_write,
	.ioctl		= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.splice_read	= ocfs2_file_splice_read,
	.splice_write	= ocfs2_file_splice_write,
};

const struct file_operations ocfs2_dops = {
	.read		= generic_read_dir,
	.readdir	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.ioctl		= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
};