1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
4 * file.c
6 * File open, close, extend, truncate
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/capability.h>
27 #include <linux/fs.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/uio.h>
33 #include <linux/sched.h>
34 #include <linux/splice.h>
35 #include <linux/mount.h>
36 #include <linux/writeback.h>
37 #include <linux/falloc.h>
38 #include <linux/quotaops.h>
40 #define MLOG_MASK_PREFIX ML_INODE
41 #include <cluster/masklog.h>
43 #include "ocfs2.h"
45 #include "alloc.h"
46 #include "aops.h"
47 #include "dir.h"
48 #include "dlmglue.h"
49 #include "extent_map.h"
50 #include "file.h"
51 #include "sysfile.h"
52 #include "inode.h"
53 #include "ioctl.h"
54 #include "journal.h"
55 #include "locks.h"
56 #include "mmap.h"
57 #include "suballoc.h"
58 #include "super.h"
59 #include "xattr.h"
60 #include "acl.h"
61 #include "quota.h"
62 #include "refcounttree.h"
64 #include "buffer_head_io.h"
66 static int ocfs2_sync_inode(struct inode *inode)
68 filemap_fdatawrite(inode->i_mapping);
69 return sync_mapping_buffers(inode->i_mapping);
72 static int ocfs2_init_file_private(struct inode *inode, struct file *file)
74 struct ocfs2_file_private *fp;
76 fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
77 if (!fp)
78 return -ENOMEM;
80 fp->fp_file = file;
81 mutex_init(&fp->fp_mutex);
82 ocfs2_file_lock_res_init(&fp->fp_flock, fp);
83 file->private_data = fp;
85 return 0;
88 static void ocfs2_free_file_private(struct inode *inode, struct file *file)
90 struct ocfs2_file_private *fp = file->private_data;
91 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
93 if (fp) {
94 ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
95 ocfs2_lock_res_free(&fp->fp_flock);
96 kfree(fp);
97 file->private_data = NULL;
101 static int ocfs2_file_open(struct inode *inode, struct file *file)
103 int status;
104 int mode = file->f_flags;
105 struct ocfs2_inode_info *oi = OCFS2_I(inode);
107 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
108 file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
110 if (file->f_mode & FMODE_WRITE)
111 dquot_initialize(inode);
113 spin_lock(&oi->ip_lock);
115 /* Check that the inode hasn't been wiped from disk by another
116 * node. If it hasn't then we're safe as long as we hold the
117 * spin lock until our increment of open count. */
118 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
119 spin_unlock(&oi->ip_lock);
121 status = -ENOENT;
122 goto leave;
125 if (mode & O_DIRECT)
126 oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
128 oi->ip_open_count++;
129 spin_unlock(&oi->ip_lock);
131 status = ocfs2_init_file_private(inode, file);
132 if (status) {
134 * We want to set open count back if we're failing the
135 * open.
137 spin_lock(&oi->ip_lock);
138 oi->ip_open_count--;
139 spin_unlock(&oi->ip_lock);
142 leave:
143 mlog_exit(status);
144 return status;
147 static int ocfs2_file_release(struct inode *inode, struct file *file)
149 struct ocfs2_inode_info *oi = OCFS2_I(inode);
151 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
152 file->f_path.dentry->d_name.len,
153 file->f_path.dentry->d_name.name);
155 spin_lock(&oi->ip_lock);
156 if (!--oi->ip_open_count)
157 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
158 spin_unlock(&oi->ip_lock);
160 ocfs2_free_file_private(inode, file);
162 mlog_exit(0);
164 return 0;
167 static int ocfs2_dir_open(struct inode *inode, struct file *file)
169 return ocfs2_init_file_private(inode, file);
172 static int ocfs2_dir_release(struct inode *inode, struct file *file)
174 ocfs2_free_file_private(inode, file);
175 return 0;
178 static int ocfs2_sync_file(struct file *file,
179 struct dentry *dentry,
180 int datasync)
182 int err = 0;
183 journal_t *journal;
184 struct inode *inode = dentry->d_inode;
185 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
187 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
188 dentry->d_name.len, dentry->d_name.name);
190 err = ocfs2_sync_inode(dentry->d_inode);
191 if (err)
192 goto bail;
194 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
195 goto bail;
197 journal = osb->journal->j_journal;
198 err = jbd2_journal_force_commit(journal);
200 bail:
201 mlog_exit(err);
203 return (err < 0) ? -EIO : 0;
206 int ocfs2_should_update_atime(struct inode *inode,
207 struct vfsmount *vfsmnt)
209 struct timespec now;
210 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
212 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
213 return 0;
215 if ((inode->i_flags & S_NOATIME) ||
216 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
217 return 0;
220 * We can be called with no vfsmnt structure - NFSD will
221 * sometimes do this.
223 * Note that our action here is different than touch_atime() -
224 * if we can't tell whether this is a noatime mount, then we
225 * don't know whether to trust the value of s_atime_quantum.
227 if (vfsmnt == NULL)
228 return 0;
230 if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
231 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
232 return 0;
234 if (vfsmnt->mnt_flags & MNT_RELATIME) {
235 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
236 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
237 return 1;
239 return 0;
242 now = CURRENT_TIME;
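/*
 * Illustration (assuming the mount's atime_quantum is left at its
 * default of 60 seconds): if the on-disk atime was written less than
 * 60 seconds ago, the check below returns 0 and the update is
 * skipped, so atime reaches the journal at most once per quantum.
 */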
243 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
244 return 0;
245 else
246 return 1;
249 int ocfs2_update_inode_atime(struct inode *inode,
250 struct buffer_head *bh)
252 int ret;
253 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
254 handle_t *handle;
255 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
257 mlog_entry_void();
259 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
260 if (IS_ERR(handle)) {
261 ret = PTR_ERR(handle);
262 mlog_errno(ret);
263 goto out;
266 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
267 OCFS2_JOURNAL_ACCESS_WRITE);
268 if (ret) {
269 mlog_errno(ret);
270 goto out_commit;
274 * Don't use ocfs2_mark_inode_dirty() here as we don't always
275 * have i_mutex to guard against concurrent changes to other
276 * inode fields.
278 inode->i_atime = CURRENT_TIME;
279 di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
280 di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
281 ocfs2_journal_dirty(handle, bh);
283 out_commit:
284 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
285 out:
286 mlog_exit(ret);
287 return ret;
290 static int ocfs2_set_inode_size(handle_t *handle,
291 struct inode *inode,
292 struct buffer_head *fe_bh,
293 u64 new_i_size)
295 int status;
297 mlog_entry_void();
298 i_size_write(inode, new_i_size);
299 inode->i_blocks = ocfs2_inode_sector_count(inode);
300 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
302 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
303 if (status < 0) {
304 mlog_errno(status);
305 goto bail;
308 bail:
309 mlog_exit(status);
310 return status;
313 int ocfs2_simple_size_update(struct inode *inode,
314 struct buffer_head *di_bh,
315 u64 new_i_size)
317 int ret;
318 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
319 handle_t *handle = NULL;
321 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
322 if (IS_ERR(handle)) {
323 ret = PTR_ERR(handle);
324 mlog_errno(ret);
325 goto out;
328 ret = ocfs2_set_inode_size(handle, inode, di_bh,
329 new_i_size);
330 if (ret < 0)
331 mlog_errno(ret);
333 ocfs2_commit_trans(osb, handle);
334 out:
335 return ret;
338 static int ocfs2_cow_file_pos(struct inode *inode,
339 struct buffer_head *fe_bh,
340 u64 offset)
342 int status;
343 u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
344 unsigned int num_clusters = 0;
345 unsigned int ext_flags = 0;
348 * If the new offset is aligned to the range of the cluster, there is
349 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
350 * CoW either.
352 if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
353 return 0;
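/*
 * Worked example, assuming a 64KB cluster size (s_clustersize_bits
 * == 16): offset 0x20000 is cluster aligned, so we returned above;
 * offset 0x18000 has low bits set, so cpos = 0x18000 >> 16 = 1 and
 * cluster 1 is looked up below (and CoWed if it is refcounted).
 */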
355 status = ocfs2_get_clusters(inode, cpos, &phys,
356 &num_clusters, &ext_flags);
357 if (status) {
358 mlog_errno(status);
359 goto out;
362 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
363 goto out;
365 return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
367 out:
368 return status;
371 static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
372 struct inode *inode,
373 struct buffer_head *fe_bh,
374 u64 new_i_size)
376 int status;
377 handle_t *handle;
378 struct ocfs2_dinode *di;
379 u64 cluster_bytes;
381 mlog_entry_void();
384 * We need to CoW the cluster that contains the offset if it is reflinked
385 * since we will call ocfs2_zero_range_for_truncate later which will
386 * write "0" from offset to the end of the cluster.
388 status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
389 if (status) {
390 mlog_errno(status);
391 return status;
394 /* TODO: This needs to actually orphan the inode in this
395 * transaction. */
397 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
398 if (IS_ERR(handle)) {
399 status = PTR_ERR(handle);
400 mlog_errno(status);
401 goto out;
404 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
405 OCFS2_JOURNAL_ACCESS_WRITE);
406 if (status < 0) {
407 mlog_errno(status);
408 goto out_commit;
412 * Do this before setting i_size.
414 cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
415 status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
416 cluster_bytes);
417 if (status) {
418 mlog_errno(status);
419 goto out_commit;
422 i_size_write(inode, new_i_size);
423 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
425 di = (struct ocfs2_dinode *) fe_bh->b_data;
426 di->i_size = cpu_to_le64(new_i_size);
427 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
428 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
430 ocfs2_journal_dirty(handle, fe_bh);
432 out_commit:
433 ocfs2_commit_trans(osb, handle);
434 out:
436 mlog_exit(status);
437 return status;
440 static int ocfs2_truncate_file(struct inode *inode,
441 struct buffer_head *di_bh,
442 u64 new_i_size)
444 int status = 0;
445 struct ocfs2_dinode *fe = NULL;
446 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
448 mlog_entry("(inode = %llu, new_i_size = %llu)\n",
449 (unsigned long long)OCFS2_I(inode)->ip_blkno,
450 (unsigned long long)new_i_size);
452 /* We trust di_bh because it comes from ocfs2_inode_lock(), which
453 * already validated it */
454 fe = (struct ocfs2_dinode *) di_bh->b_data;
456 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
457 "Inode %llu, inode i_size = %lld != di "
458 "i_size = %llu, i_flags = 0x%x\n",
459 (unsigned long long)OCFS2_I(inode)->ip_blkno,
460 i_size_read(inode),
461 (unsigned long long)le64_to_cpu(fe->i_size),
462 le32_to_cpu(fe->i_flags));
464 if (new_i_size > le64_to_cpu(fe->i_size)) {
465 mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
466 (unsigned long long)le64_to_cpu(fe->i_size),
467 (unsigned long long)new_i_size);
468 status = -EINVAL;
469 mlog_errno(status);
470 goto bail;
473 mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
474 (unsigned long long)le64_to_cpu(fe->i_blkno),
475 (unsigned long long)le64_to_cpu(fe->i_size),
476 (unsigned long long)new_i_size);
478 /* let's handle the simple truncate cases before doing any more
479 * cluster locking. */
480 if (new_i_size == le64_to_cpu(fe->i_size))
481 goto bail;
483 down_write(&OCFS2_I(inode)->ip_alloc_sem);
485 ocfs2_resv_discard(&osb->osb_la_resmap,
486 &OCFS2_I(inode)->ip_la_data_resv);
489 * The inode lock forced other nodes to sync and drop their
490 * pages, which (correctly) happens even if we have a truncate
491 * without allocation change - ocfs2 cluster sizes can be much
492 * greater than page size, so we have to truncate them
493 * anyway.
495 unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
496 truncate_inode_pages(inode->i_mapping, new_i_size);
498 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
499 status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
500 i_size_read(inode), 1);
501 if (status)
502 mlog_errno(status);
504 goto bail_unlock_sem;
507 /* alright, we're going to need to do a full blown alloc size
508 * change. Orphan the inode so that recovery can complete the
509 * truncate if necessary. This does the task of marking
510 * i_size. */
511 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
512 if (status < 0) {
513 mlog_errno(status);
514 goto bail_unlock_sem;
517 status = ocfs2_commit_truncate(osb, inode, di_bh);
518 if (status < 0) {
519 mlog_errno(status);
520 goto bail_unlock_sem;
523 /* TODO: orphan dir cleanup here. */
524 bail_unlock_sem:
525 up_write(&OCFS2_I(inode)->ip_alloc_sem);
527 bail:
528 if (!status && OCFS2_I(inode)->ip_clusters == 0)
529 status = ocfs2_try_remove_refcount_tree(inode, di_bh);
531 mlog_exit(status);
532 return status;
536 * extend file allocation only here.
537 * we'll update all the disk stuff, and oip->alloc_size
539 * expect stuff to be locked, a transaction started and enough data /
540 * metadata reservations in the contexts.
542 * Will return -EAGAIN, and a reason if a restart is needed.
543 * If passed in, *reason will always be set, even in error.
545 int ocfs2_add_inode_data(struct ocfs2_super *osb,
546 struct inode *inode,
547 u32 *logical_offset,
548 u32 clusters_to_add,
549 int mark_unwritten,
550 struct buffer_head *fe_bh,
551 handle_t *handle,
552 struct ocfs2_alloc_context *data_ac,
553 struct ocfs2_alloc_context *meta_ac,
554 enum ocfs2_alloc_restarted *reason_ret)
556 int ret;
557 struct ocfs2_extent_tree et;
559 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
560 ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
561 clusters_to_add, mark_unwritten,
562 data_ac, meta_ac, reason_ret);
564 return ret;
567 static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
568 u32 clusters_to_add, int mark_unwritten)
570 int status = 0;
571 int restart_func = 0;
572 int credits;
573 u32 prev_clusters;
574 struct buffer_head *bh = NULL;
575 struct ocfs2_dinode *fe = NULL;
576 handle_t *handle = NULL;
577 struct ocfs2_alloc_context *data_ac = NULL;
578 struct ocfs2_alloc_context *meta_ac = NULL;
579 enum ocfs2_alloc_restarted why;
580 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
581 struct ocfs2_extent_tree et;
582 int did_quota = 0;
584 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
587 * This function only exists for file systems which don't
588 * support holes.
590 BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
592 status = ocfs2_read_inode_block(inode, &bh);
593 if (status < 0) {
594 mlog_errno(status);
595 goto leave;
597 fe = (struct ocfs2_dinode *) bh->b_data;
599 restart_all:
600 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
602 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
603 "clusters_to_add = %u\n",
604 (unsigned long long)OCFS2_I(inode)->ip_blkno,
605 (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
606 clusters_to_add);
607 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
608 status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
609 &data_ac, &meta_ac);
610 if (status) {
611 mlog_errno(status);
612 goto leave;
615 credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
616 clusters_to_add);
617 handle = ocfs2_start_trans(osb, credits);
618 if (IS_ERR(handle)) {
619 status = PTR_ERR(handle);
620 handle = NULL;
621 mlog_errno(status);
622 goto leave;
625 restarted_transaction:
626 status = dquot_alloc_space_nodirty(inode,
627 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
628 if (status)
629 goto leave;
630 did_quota = 1;
632 /* reserve a write to the file entry early on - that way if we
633 * run out of credits in the allocation path, we can still
634 * update i_size. */
635 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
636 OCFS2_JOURNAL_ACCESS_WRITE);
637 if (status < 0) {
638 mlog_errno(status);
639 goto leave;
642 prev_clusters = OCFS2_I(inode)->ip_clusters;
644 status = ocfs2_add_inode_data(osb,
645 inode,
646 &logical_start,
647 clusters_to_add,
648 mark_unwritten,
650 handle,
651 data_ac,
652 meta_ac,
653 &why);
654 if ((status < 0) && (status != -EAGAIN)) {
655 if (status != -ENOSPC)
656 mlog_errno(status);
657 goto leave;
660 ocfs2_journal_dirty(handle, bh);
662 spin_lock(&OCFS2_I(inode)->ip_lock);
663 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
664 spin_unlock(&OCFS2_I(inode)->ip_lock);
665 /* Release unused quota reservation */
666 dquot_free_space(inode,
667 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
668 did_quota = 0;
670 if (why != RESTART_NONE && clusters_to_add) {
671 if (why == RESTART_META) {
672 mlog(0, "restarting function.\n");
673 restart_func = 1;
674 status = 0;
675 } else {
676 BUG_ON(why != RESTART_TRANS);
678 mlog(0, "restarting transaction.\n");
679 /* TODO: This can be more intelligent. */
680 credits = ocfs2_calc_extend_credits(osb->sb,
681 &fe->id2.i_list,
682 clusters_to_add);
683 status = ocfs2_extend_trans(handle, credits);
684 if (status < 0) {
685 /* handle still has to be committed at
686 * this point. */
687 status = -ENOMEM;
688 mlog_errno(status);
689 goto leave;
691 goto restarted_transaction;
695 mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
696 le32_to_cpu(fe->i_clusters),
697 (unsigned long long)le64_to_cpu(fe->i_size));
698 mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
699 OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));
701 leave:
702 if (status < 0 && did_quota)
703 dquot_free_space(inode,
704 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
705 if (handle) {
706 ocfs2_commit_trans(osb, handle);
707 handle = NULL;
709 if (data_ac) {
710 ocfs2_free_alloc_context(data_ac);
711 data_ac = NULL;
713 if (meta_ac) {
714 ocfs2_free_alloc_context(meta_ac);
715 meta_ac = NULL;
717 if ((!status) && restart_func) {
718 restart_func = 0;
719 goto restart_all;
721 brelse(bh);
722 bh = NULL;
724 mlog_exit(status);
725 return status;
728 /* Some parts of this taken from generic_cont_expand, which turned out
729 * to be too fragile to do exactly what we need without us having to
730 * worry about recursive locking in ->write_begin() and ->write_end(). */
731 static int ocfs2_write_zero_page(struct inode *inode,
732 u64 size)
734 struct address_space *mapping = inode->i_mapping;
735 struct page *page;
736 unsigned long index;
737 unsigned int offset;
738 handle_t *handle = NULL;
739 int ret;
741 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
742 /* ugh. in prepare/commit_write, if from==to==start of block, we
743 ** skip the prepare. make sure we never send an offset for the start
744 ** of a block
746 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
747 offset++;
749 index = size >> PAGE_CACHE_SHIFT;
751 page = grab_cache_page(mapping, index);
752 if (!page) {
753 ret = -ENOMEM;
754 mlog_errno(ret);
755 goto out;
758 ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
759 if (ret < 0) {
760 mlog_errno(ret);
761 goto out_unlock;
764 if (ocfs2_should_order_data(inode)) {
765 handle = ocfs2_start_walk_page_trans(inode, page, offset,
766 offset);
767 if (IS_ERR(handle)) {
768 ret = PTR_ERR(handle);
769 handle = NULL;
770 goto out_unlock;
774 /* must not update i_size! */
775 ret = block_commit_write(page, offset, offset);
776 if (ret < 0)
777 mlog_errno(ret);
778 else
779 ret = 0;
781 if (handle)
782 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
783 out_unlock:
784 unlock_page(page);
785 page_cache_release(page);
786 out:
787 return ret;
790 static int ocfs2_zero_extend(struct inode *inode,
791 u64 zero_to_size)
793 int ret = 0;
794 u64 start_off;
795 struct super_block *sb = inode->i_sb;
797 start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
798 while (start_off < zero_to_size) {
799 ret = ocfs2_write_zero_page(inode, start_off);
800 if (ret < 0) {
801 mlog_errno(ret);
802 goto out;
805 start_off += sb->s_blocksize;
808 * Very large extends have the potential to lock up
809 * the cpu for extended periods of time.
811 cond_resched();
814 out:
815 return ret;
818 int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
820 int ret;
821 u32 clusters_to_add;
822 struct ocfs2_inode_info *oi = OCFS2_I(inode);
824 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
825 if (clusters_to_add < oi->ip_clusters)
826 clusters_to_add = 0;
827 else
828 clusters_to_add -= oi->ip_clusters;
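/*
 * For example, with 4KB clusters, extending an inode that currently
 * has ip_clusters == 3 to new_i_size == 20KB needs
 * ocfs2_clusters_for_bytes() == 5 clusters in total, so
 * clusters_to_add ends up as 2 here.
 */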
830 if (clusters_to_add) {
831 ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
832 clusters_to_add, 0);
833 if (ret) {
834 mlog_errno(ret);
835 goto out;
840 * Call this even if we don't add any clusters to the tree. We
841 * still need to zero the area between the old i_size and the
842 * new i_size.
844 ret = ocfs2_zero_extend(inode, zero_to);
845 if (ret < 0)
846 mlog_errno(ret);
848 out:
849 return ret;
852 static int ocfs2_extend_file(struct inode *inode,
853 struct buffer_head *di_bh,
854 u64 new_i_size)
856 int ret = 0;
857 struct ocfs2_inode_info *oi = OCFS2_I(inode);
859 BUG_ON(!di_bh);
861 /* setattr sometimes calls us like this. */
862 if (new_i_size == 0)
863 goto out;
865 if (i_size_read(inode) == new_i_size)
866 goto out;
867 BUG_ON(new_i_size < i_size_read(inode));
870 * Fall through for converting inline data, even if the fs
871 * supports sparse files.
873 * The check for inline data here is legal - nobody can add
874 * the feature since we have i_mutex. We must check it again
875 * after acquiring ip_alloc_sem though, as paths like mmap
876 * might have raced us to converting the inode to extents.
878 if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
879 && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
880 goto out_update_size;
883 * The alloc sem blocks people in read/write from reading our
884 * allocation until we're done changing it. We depend on
885 * i_mutex to block other extend/truncate calls while we're
886 * here.
888 down_write(&oi->ip_alloc_sem);
890 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
892 * We can optimize small extends by keeping the inode's
893 * inline data.
895 if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
896 up_write(&oi->ip_alloc_sem);
897 goto out_update_size;
900 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
901 if (ret) {
902 up_write(&oi->ip_alloc_sem);
904 mlog_errno(ret);
905 goto out;
909 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
910 ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
912 up_write(&oi->ip_alloc_sem);
914 if (ret < 0) {
915 mlog_errno(ret);
916 goto out;
919 out_update_size:
920 ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
921 if (ret < 0)
922 mlog_errno(ret);
924 out:
925 return ret;
928 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
930 int status = 0, size_change;
931 struct inode *inode = dentry->d_inode;
932 struct super_block *sb = inode->i_sb;
933 struct ocfs2_super *osb = OCFS2_SB(sb);
934 struct buffer_head *bh = NULL;
935 handle_t *handle = NULL;
936 struct dquot *transfer_to[MAXQUOTAS] = { };
937 int qtype;
939 mlog_entry("(0x%p, '%.*s')\n", dentry,
940 dentry->d_name.len, dentry->d_name.name);
942 /* ensuring we don't even attempt to truncate a symlink */
943 if (S_ISLNK(inode->i_mode))
944 attr->ia_valid &= ~ATTR_SIZE;
946 if (attr->ia_valid & ATTR_MODE)
947 mlog(0, "mode change: %d\n", attr->ia_mode);
948 if (attr->ia_valid & ATTR_UID)
949 mlog(0, "uid change: %d\n", attr->ia_uid);
950 if (attr->ia_valid & ATTR_GID)
951 mlog(0, "gid change: %d\n", attr->ia_gid);
952 if (attr->ia_valid & ATTR_SIZE)
953 mlog(0, "size change...\n");
954 if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
955 mlog(0, "time change...\n");
957 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
958 | ATTR_GID | ATTR_UID | ATTR_MODE)
959 if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
960 mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
961 return 0;
964 status = inode_change_ok(inode, attr);
965 if (status)
966 return status;
968 if (is_quota_modification(inode, attr))
969 dquot_initialize(inode);
970 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
971 if (size_change) {
972 status = ocfs2_rw_lock(inode, 1);
973 if (status < 0) {
974 mlog_errno(status);
975 goto bail;
979 status = ocfs2_inode_lock(inode, &bh, 1);
980 if (status < 0) {
981 if (status != -ENOENT)
982 mlog_errno(status);
983 goto bail_unlock_rw;
986 if (size_change && attr->ia_size != i_size_read(inode)) {
987 status = inode_newsize_ok(inode, attr->ia_size);
988 if (status)
989 goto bail_unlock;
991 if (i_size_read(inode) > attr->ia_size) {
992 if (ocfs2_should_order_data(inode)) {
993 status = ocfs2_begin_ordered_truncate(inode,
994 attr->ia_size);
995 if (status)
996 goto bail_unlock;
998 status = ocfs2_truncate_file(inode, bh, attr->ia_size);
999 } else
1000 status = ocfs2_extend_file(inode, bh, attr->ia_size);
1001 if (status < 0) {
1002 if (status != -ENOSPC)
1003 mlog_errno(status);
1004 status = -ENOSPC;
1005 goto bail_unlock;
1009 if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
1010 (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
1012 * Gather pointers to quota structures so that allocation /
1013 * freeing of quota structures happens here and not inside
1014 * dquot_transfer() where we have problems with lock ordering
1016 if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
1017 && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1018 OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1019 transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
1020 USRQUOTA);
1021 if (!transfer_to[USRQUOTA]) {
1022 status = -ESRCH;
1023 goto bail_unlock;
1026 if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
1027 && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1028 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1029 transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
1030 GRPQUOTA);
1031 if (!transfer_to[GRPQUOTA]) {
1032 status = -ESRCH;
1033 goto bail_unlock;
1036 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1037 2 * ocfs2_quota_trans_credits(sb));
1038 if (IS_ERR(handle)) {
1039 status = PTR_ERR(handle);
1040 mlog_errno(status);
1041 goto bail_unlock;
1043 status = __dquot_transfer(inode, transfer_to);
1044 if (status < 0)
1045 goto bail_commit;
1046 } else {
1047 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1048 if (IS_ERR(handle)) {
1049 status = PTR_ERR(handle);
1050 mlog_errno(status);
1051 goto bail_unlock;
1056 * This will intentionally not wind up calling vmtruncate(),
1057 * since all the work for a size change has been done above.
1058 * Otherwise, we could get into problems with truncate as
1059 * ip_alloc_sem is used there to protect against i_size
1060 * changes.
1062 status = inode_setattr(inode, attr);
1063 if (status < 0) {
1064 mlog_errno(status);
1065 goto bail_commit;
1068 status = ocfs2_mark_inode_dirty(handle, inode, bh);
1069 if (status < 0)
1070 mlog_errno(status);
1072 bail_commit:
1073 ocfs2_commit_trans(osb, handle);
1074 bail_unlock:
1075 ocfs2_inode_unlock(inode, 1);
1076 bail_unlock_rw:
1077 if (size_change)
1078 ocfs2_rw_unlock(inode, 1);
1079 bail:
1080 brelse(bh);
1082 /* Release quota pointers in case we acquired them */
1083 for (qtype = 0; qtype < MAXQUOTAS; qtype++)
1084 dqput(transfer_to[qtype]);
1086 if (!status && attr->ia_valid & ATTR_MODE) {
1087 status = ocfs2_acl_chmod(inode);
1088 if (status < 0)
1089 mlog_errno(status);
1092 mlog_exit(status);
1093 return status;
1096 int ocfs2_getattr(struct vfsmount *mnt,
1097 struct dentry *dentry,
1098 struct kstat *stat)
1100 struct inode *inode = dentry->d_inode;
1101 struct super_block *sb = dentry->d_inode->i_sb;
1102 struct ocfs2_super *osb = sb->s_fs_info;
1103 int err;
1105 mlog_entry_void();
1107 err = ocfs2_inode_revalidate(dentry);
1108 if (err) {
1109 if (err != -ENOENT)
1110 mlog_errno(err);
1111 goto bail;
1114 generic_fillattr(inode, stat);
1116 /* We set the blksize from the cluster size for performance */
1117 stat->blksize = osb->s_clustersize;
1119 bail:
1120 mlog_exit(err);
1122 return err;
1125 int ocfs2_permission(struct inode *inode, int mask)
1127 int ret;
1129 mlog_entry_void();
1131 ret = ocfs2_inode_lock(inode, NULL, 0);
1132 if (ret) {
1133 if (ret != -ENOENT)
1134 mlog_errno(ret);
1135 goto out;
1138 ret = generic_permission(inode, mask, ocfs2_check_acl);
1140 ocfs2_inode_unlock(inode, 0);
1141 out:
1142 mlog_exit(ret);
1143 return ret;
1146 static int __ocfs2_write_remove_suid(struct inode *inode,
1147 struct buffer_head *bh)
1149 int ret;
1150 handle_t *handle;
1151 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1152 struct ocfs2_dinode *di;
1154 mlog_entry("(Inode %llu, mode 0%o)\n",
1155 (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);
1157 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1158 if (IS_ERR(handle)) {
1159 ret = PTR_ERR(handle);
1160 mlog_errno(ret);
1161 goto out;
1164 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1165 OCFS2_JOURNAL_ACCESS_WRITE);
1166 if (ret < 0) {
1167 mlog_errno(ret);
1168 goto out_trans;
1171 inode->i_mode &= ~S_ISUID;
1172 if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1173 inode->i_mode &= ~S_ISGID;
1175 di = (struct ocfs2_dinode *) bh->b_data;
1176 di->i_mode = cpu_to_le16(inode->i_mode);
1178 ocfs2_journal_dirty(handle, bh);
1180 out_trans:
1181 ocfs2_commit_trans(osb, handle);
1182 out:
1183 mlog_exit(ret);
1184 return ret;
1188 * Will look for holes and unwritten extents in the range starting at
1189 * pos for count bytes (inclusive).
1191 static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1192 size_t count)
1194 int ret = 0;
1195 unsigned int extent_flags;
1196 u32 cpos, clusters, extent_len, phys_cpos;
1197 struct super_block *sb = inode->i_sb;
1199 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1200 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1202 while (clusters) {
1203 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1204 &extent_flags);
1205 if (ret < 0) {
1206 mlog_errno(ret);
1207 goto out;
1210 if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1211 ret = 1;
1212 break;
1215 if (extent_len > clusters)
1216 extent_len = clusters;
1218 clusters -= extent_len;
1219 cpos += extent_len;
1221 out:
1222 return ret;
1225 static int ocfs2_write_remove_suid(struct inode *inode)
1227 int ret;
1228 struct buffer_head *bh = NULL;
1230 ret = ocfs2_read_inode_block(inode, &bh);
1231 if (ret < 0) {
1232 mlog_errno(ret);
1233 goto out;
1236 ret = __ocfs2_write_remove_suid(inode, bh);
1237 out:
1238 brelse(bh);
1239 return ret;
1243 * Allocate enough extents to cover the region starting at byte offset
1244 * start for len bytes. Existing extents are skipped, any extents
1245 * added are marked as "unwritten".
1247 static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1248 u64 start, u64 len)
1250 int ret;
1251 u32 cpos, phys_cpos, clusters, alloc_size;
1252 u64 end = start + len;
1253 struct buffer_head *di_bh = NULL;
1255 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1256 ret = ocfs2_read_inode_block(inode, &di_bh);
1257 if (ret) {
1258 mlog_errno(ret);
1259 goto out;
1263 * Nothing to do if the requested reservation range
1264 * fits within the inode.
1266 if (ocfs2_size_fits_inline_data(di_bh, end))
1267 goto out;
1269 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1270 if (ret) {
1271 mlog_errno(ret);
1272 goto out;
1277 * We consider both start and len to be inclusive.
1279 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1280 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1281 clusters -= cpos;
1283 while (clusters) {
1284 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1285 &alloc_size, NULL);
1286 if (ret) {
1287 mlog_errno(ret);
1288 goto out;
1292 * Hole or existing extent len can be arbitrary, so
1293 * cap it to our own allocation request.
1295 if (alloc_size > clusters)
1296 alloc_size = clusters;
1298 if (phys_cpos) {
1300 * We already have an allocation at this
1301 * region so we can safely skip it.
1303 goto next;
1306 ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1307 if (ret) {
1308 if (ret != -ENOSPC)
1309 mlog_errno(ret);
1310 goto out;
1313 next:
1314 cpos += alloc_size;
1315 clusters -= alloc_size;
1318 ret = 0;
1319 out:
1321 brelse(di_bh);
1322 return ret;
1326 * Truncate a byte range, avoiding pages within partial clusters. This
1327 * preserves those pages for the zeroing code to write to.
1329 static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1330 u64 byte_len)
1332 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1333 loff_t start, end;
1334 struct address_space *mapping = inode->i_mapping;
1336 start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1337 end = byte_start + byte_len;
1338 end = end & ~(osb->s_clustersize - 1);
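/*
 * start is byte_start rounded up to a cluster boundary and end is
 * byte_start + byte_len rounded down, so only pages belonging to
 * whole clusters inside the range are dropped. E.g. with 64KB
 * clusters, byte_start = 0x1800 and byte_len = 0x30000 give
 * start = 0x10000 and end = 0x30000; the partial clusters at each
 * edge are left for the zeroing code to write to.
 */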
1340 if (start < end) {
1341 unmap_mapping_range(mapping, start, end - start, 0);
1342 truncate_inode_pages_range(mapping, start, end - 1);
1346 static int ocfs2_zero_partial_clusters(struct inode *inode,
1347 u64 start, u64 len)
1349 int ret = 0;
1350 u64 tmpend, end = start + len;
1351 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1352 unsigned int csize = osb->s_clustersize;
1353 handle_t *handle;
1356 * The "start" and "end" values are NOT necessarily part of
1357 * the range whose allocation is being deleted. Rather, this
1358 * is what the user passed in with the request. We must zero
1359 * partial clusters here. There's no need to worry about
1360 * physical allocation - the zeroing code knows to skip holes.
1362 mlog(0, "byte start: %llu, end: %llu\n",
1363 (unsigned long long)start, (unsigned long long)end);
1366 * If both edges are on a cluster boundary then there's no
1367 * zeroing required as the region is part of the allocation to
1368 * be truncated.
1370 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1371 goto out;
1373 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1374 if (IS_ERR(handle)) {
1375 ret = PTR_ERR(handle);
1376 mlog_errno(ret);
1377 goto out;
1381 * We want to get the byte offset of the end of the 1st cluster.
1383 tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1384 if (tmpend > end)
1385 tmpend = end;
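/*
 * tmpend is the end of the cluster containing "start", capped at
 * "end". E.g. with a 64KB cluster size, start = 0x18000 gives
 * tmpend = 0x10000 + 0x10000 = 0x20000, so the first pass below
 * zeroes 0x18000..0x20000 and a second pass handles the partial
 * cluster at "end", if any.
 */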
1387 mlog(0, "1st range: start: %llu, tmpend: %llu\n",
1388 (unsigned long long)start, (unsigned long long)tmpend);
1390 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1391 if (ret)
1392 mlog_errno(ret);
1394 if (tmpend < end) {
1396 * This may make start and end equal, but the zeroing
1397 * code will skip any work in that case so there's no
1398 * need to catch it up here.
1400 start = end & ~(osb->s_clustersize - 1);
1402 mlog(0, "2nd range: start: %llu, end: %llu\n",
1403 (unsigned long long)start, (unsigned long long)end);
1405 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1406 if (ret)
1407 mlog_errno(ret);
1410 ocfs2_commit_trans(osb, handle);
1411 out:
1412 return ret;
1415 static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1417 int i;
1418 struct ocfs2_extent_rec *rec = NULL;
1420 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1422 rec = &el->l_recs[i];
1424 if (le32_to_cpu(rec->e_cpos) < pos)
1425 break;
1428 return i;
1432 * Helper to calculate the punching pos and length in one run, we handle the
1433 * following three cases in order:
1435 * - remove the entire record
1436 * - remove a partial record
1437 * - no record needs to be removed (hole-punching completed)
1439 static void ocfs2_calc_trunc_pos(struct inode *inode,
1440 struct ocfs2_extent_list *el,
1441 struct ocfs2_extent_rec *rec,
1442 u32 trunc_start, u32 *trunc_cpos,
1443 u32 *trunc_len, u32 *trunc_end,
1444 u64 *blkno, int *done)
1446 int ret = 0;
1447 u32 coff, range;
1449 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1451 if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1452 *trunc_cpos = le32_to_cpu(rec->e_cpos);
1454 * Skip holes if any.
1456 if (range < *trunc_end)
1457 *trunc_end = range;
1458 *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1459 *blkno = le64_to_cpu(rec->e_blkno);
1460 *trunc_end = le32_to_cpu(rec->e_cpos);
1461 } else if (range > trunc_start) {
1462 *trunc_cpos = trunc_start;
1463 *trunc_len = *trunc_end - trunc_start;
1464 coff = trunc_start - le32_to_cpu(rec->e_cpos);
1465 *blkno = le64_to_cpu(rec->e_blkno) +
1466 ocfs2_clusters_to_blocks(inode->i_sb, coff);
1467 *trunc_end = trunc_start;
1468 } else {
1470 * There are two possibilities:
1472 * - the last record has been removed
1473 * - trunc_start was within a hole
1475 * Either case means the hole punching has completed.
1477 ret = 1;
1480 *done = ret;
1483 static int ocfs2_remove_inode_range(struct inode *inode,
1484 struct buffer_head *di_bh, u64 byte_start,
1485 u64 byte_len)
1487 int ret = 0, flags = 0, done = 0, i;
1488 u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1489 u32 cluster_in_el;
1490 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1491 struct ocfs2_cached_dealloc_ctxt dealloc;
1492 struct address_space *mapping = inode->i_mapping;
1493 struct ocfs2_extent_tree et;
1494 struct ocfs2_path *path = NULL;
1495 struct ocfs2_extent_list *el = NULL;
1496 struct ocfs2_extent_rec *rec = NULL;
1497 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1498 u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1500 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1501 ocfs2_init_dealloc_ctxt(&dealloc);
1503 if (byte_len == 0)
1504 return 0;
1506 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1507 ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1508 byte_start + byte_len, 0);
1509 if (ret) {
1510 mlog_errno(ret);
1511 goto out;
1514 * There's no need to get fancy with the page cache
1515 * truncate of an inline-data inode. We're talking
1516 * about less than a page here, which will be cached
1517 * in the dinode buffer anyway.
1519 unmap_mapping_range(mapping, 0, 0, 0);
1520 truncate_inode_pages(mapping, 0);
1521 goto out;
1525 * For reflinks, we may need to CoW two clusters which might be
1526 * partially zeroed later, if the hole's start and end offsets fall
1527 * within a cluster (i.e. are not exactly aligned to the cluster size).
1530 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
1532 ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1533 if (ret) {
1534 mlog_errno(ret);
1535 goto out;
1538 ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1539 if (ret) {
1540 mlog_errno(ret);
1541 goto out;
1545 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1546 trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1547 cluster_in_el = trunc_end;
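/*
 * trunc_start is rounded up and trunc_end rounded down to cluster
 * granularity, so only whole clusters are punched out here; the
 * partial edges are handled by ocfs2_zero_partial_clusters() below.
 * E.g. with 4KB clusters, byte_start = 6KB and byte_len = 10KB give
 * trunc_start = 2 and trunc_end = 4, i.e. clusters 2 and 3 are
 * removed from the btree.
 */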
1549 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n",
1550 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1551 (unsigned long long)byte_start,
1552 (unsigned long long)byte_len, trunc_start, trunc_end);
1554 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1555 if (ret) {
1556 mlog_errno(ret);
1557 goto out;
1560 path = ocfs2_new_path_from_et(&et);
1561 if (!path) {
1562 ret = -ENOMEM;
1563 mlog_errno(ret);
1564 goto out;
1567 while (trunc_end > trunc_start) {
1569 ret = ocfs2_find_path(INODE_CACHE(inode), path,
1570 cluster_in_el);
1571 if (ret) {
1572 mlog_errno(ret);
1573 goto out;
1576 el = path_leaf_el(path);
1578 i = ocfs2_find_rec(el, trunc_end);
1580 * Need to go to previous extent block.
1582 if (i < 0) {
1583 if (path->p_tree_depth == 0)
1584 break;
1586 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1587 path,
1588 &cluster_in_el);
1589 if (ret) {
1590 mlog_errno(ret);
1591 goto out;
1595 * We've reached the leftmost extent block,
1596 * it's safe to leave.
1598 if (cluster_in_el == 0)
1599 break;
1602 * The 'pos' searched for previous extent block is
1603 * always one cluster less than actual trunc_end.
1605 trunc_end = cluster_in_el + 1;
1607 ocfs2_reinit_path(path, 1);
1609 continue;
1611 } else
1612 rec = &el->l_recs[i];
1614 ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1615 &trunc_len, &trunc_end, &blkno, &done);
1616 if (done)
1617 break;
1619 flags = rec->e_flags;
1620 phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1622 ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1623 phys_cpos, trunc_len, flags,
1624 &dealloc, refcount_loc);
1625 if (ret < 0) {
1626 mlog_errno(ret);
1627 goto out;
1630 cluster_in_el = trunc_end;
1632 ocfs2_reinit_path(path, 1);
1635 ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1637 out:
1638 ocfs2_schedule_truncate_log_flush(osb, 1);
1639 ocfs2_run_deallocs(osb, &dealloc);
1641 return ret;
1645 * Parts of this function taken from xfs_change_file_space()
1647 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1648 loff_t f_pos, unsigned int cmd,
1649 struct ocfs2_space_resv *sr,
1650 int change_size)
1652 int ret;
1653 s64 llen;
1654 loff_t size;
1655 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1656 struct buffer_head *di_bh = NULL;
1657 handle_t *handle;
1658 unsigned long long max_off = inode->i_sb->s_maxbytes;
1660 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1661 return -EROFS;
1663 mutex_lock(&inode->i_mutex);
1666 * This prevents concurrent writes on other nodes
1668 ret = ocfs2_rw_lock(inode, 1);
1669 if (ret) {
1670 mlog_errno(ret);
1671 goto out;
1674 ret = ocfs2_inode_lock(inode, &di_bh, 1);
1675 if (ret) {
1676 mlog_errno(ret);
1677 goto out_rw_unlock;
1680 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1681 ret = -EPERM;
1682 goto out_inode_unlock;
1685 switch (sr->l_whence) {
1686 case 0: /*SEEK_SET*/
1687 break;
1688 case 1: /*SEEK_CUR*/
1689 sr->l_start += f_pos;
1690 break;
1691 case 2: /*SEEK_END*/
1692 sr->l_start += i_size_read(inode);
1693 break;
1694 default:
1695 ret = -EINVAL;
1696 goto out_inode_unlock;
1698 sr->l_whence = 0;
1700 llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
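/*
 * llen is the offset of the last byte of the requested range relative
 * to l_start (l_len - 1 for a positive length), so the check below
 * rejects ranges whose final byte would land past s_maxbytes.
 * E.g. l_start = 100, l_len = 50 checks byte 149 against max_off.
 */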
1702 if (sr->l_start < 0
1703 || sr->l_start > max_off
1704 || (sr->l_start + llen) < 0
1705 || (sr->l_start + llen) > max_off) {
1706 ret = -EINVAL;
1707 goto out_inode_unlock;
1709 size = sr->l_start + sr->l_len;
1711 if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
1712 if (sr->l_len <= 0) {
1713 ret = -EINVAL;
1714 goto out_inode_unlock;
1718 if (file && should_remove_suid(file->f_path.dentry)) {
1719 ret = __ocfs2_write_remove_suid(inode, di_bh);
1720 if (ret) {
1721 mlog_errno(ret);
1722 goto out_inode_unlock;
1726 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1727 switch (cmd) {
1728 case OCFS2_IOC_RESVSP:
1729 case OCFS2_IOC_RESVSP64:
1731 * This takes unsigned offsets, but the signed ones we
1732 * pass have been checked against overflow above.
1734 ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1735 sr->l_len);
1736 break;
1737 case OCFS2_IOC_UNRESVSP:
1738 case OCFS2_IOC_UNRESVSP64:
1739 ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1740 sr->l_len);
1741 break;
1742 default:
1743 ret = -EINVAL;
1745 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1746 if (ret) {
1747 mlog_errno(ret);
1748 goto out_inode_unlock;
1752 * We update c/mtime for these changes
1754 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1755 if (IS_ERR(handle)) {
1756 ret = PTR_ERR(handle);
1757 mlog_errno(ret);
1758 goto out_inode_unlock;
1761 if (change_size && i_size_read(inode) < size)
1762 i_size_write(inode, size);
1764 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1765 ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1766 if (ret < 0)
1767 mlog_errno(ret);
1769 ocfs2_commit_trans(osb, handle);
1771 out_inode_unlock:
1772 brelse(di_bh);
1773 ocfs2_inode_unlock(inode, 1);
1774 out_rw_unlock:
1775 ocfs2_rw_unlock(inode, 1);
1777 out:
1778 mutex_unlock(&inode->i_mutex);
1779 return ret;
1782 int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1783 struct ocfs2_space_resv *sr)
1785 struct inode *inode = file->f_path.dentry->d_inode;
1786 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1788 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1789 !ocfs2_writes_unwritten_extents(osb))
1790 return -ENOTTY;
1791 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1792 !ocfs2_sparse_alloc(osb))
1793 return -ENOTTY;
1795 if (!S_ISREG(inode->i_mode))
1796 return -EINVAL;
1798 if (!(file->f_mode & FMODE_WRITE))
1799 return -EBADF;
1801 return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1804 static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
1805 loff_t len)
1807 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1808 struct ocfs2_space_resv sr;
1809 int change_size = 1;
1811 if (!ocfs2_writes_unwritten_extents(osb))
1812 return -EOPNOTSUPP;
1814 if (S_ISDIR(inode->i_mode))
1815 return -ENODEV;
1817 if (mode & FALLOC_FL_KEEP_SIZE)
1818 change_size = 0;
1820 sr.l_whence = 0;
1821 sr.l_start = (s64)offset;
1822 sr.l_len = (s64)len;
1824 return __ocfs2_change_file_space(NULL, inode, offset,
1825 OCFS2_IOC_RESVSP64, &sr, change_size);
1828 int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
1829 size_t count)
1831 int ret = 0;
1832 unsigned int extent_flags;
1833 u32 cpos, clusters, extent_len, phys_cpos;
1834 struct super_block *sb = inode->i_sb;
1836 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
1837 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
1838 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1839 return 0;
1841 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1842 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1844 while (clusters) {
1845 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1846 &extent_flags);
1847 if (ret < 0) {
1848 mlog_errno(ret);
1849 goto out;
1852 if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
1853 ret = 1;
1854 break;
1857 if (extent_len > clusters)
1858 extent_len = clusters;
1860 clusters -= extent_len;
1861 cpos += extent_len;
1863 out:
1864 return ret;
1867 static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
1868 loff_t pos, size_t count,
1869 int *meta_level)
1871 int ret;
1872 struct buffer_head *di_bh = NULL;
1873 u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1874 u32 clusters =
1875 ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
1877 ret = ocfs2_inode_lock(inode, &di_bh, 1);
1878 if (ret) {
1879 mlog_errno(ret);
1880 goto out;
1883 *meta_level = 1;
1885 ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
1886 if (ret)
1887 mlog_errno(ret);
1888 out:
1889 brelse(di_bh);
1890 return ret;
1893 static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1894 loff_t *ppos,
1895 size_t count,
1896 int appending,
1897 int *direct_io,
1898 int *has_refcount)
1900 int ret = 0, meta_level = 0;
1901 struct inode *inode = dentry->d_inode;
1902 loff_t saved_pos, end;
1905 * We start with a read level meta lock and only jump to an ex
1906 * if we need to make modifications here.
1908 for(;;) {
1909 ret = ocfs2_inode_lock(inode, NULL, meta_level);
1910 if (ret < 0) {
1911 meta_level = -1;
1912 mlog_errno(ret);
1913 goto out;
1916 /* Clear suid / sgid if necessary. We do this here
1917 * instead of later in the write path because
1918 * remove_suid() calls ->setattr without any hint that
1919 * we may have already done our cluster locking. Since
1920 * ocfs2_setattr() *must* take cluster locks to
1921 * proceed, this will lead us to recursively lock the
1922 * inode. There's also the dinode i_size state which
1923 * can be lost via setattr during extending writes (we
1924 * set inode->i_size at the end of a write. */
1925 if (should_remove_suid(dentry)) {
1926 if (meta_level == 0) {
1927 ocfs2_inode_unlock(inode, meta_level);
1928 meta_level = 1;
1929 continue;
1932 ret = ocfs2_write_remove_suid(inode);
1933 if (ret < 0) {
1934 mlog_errno(ret);
1935 goto out_unlock;
1939 /* work on a copy of ppos until we're sure that we won't have
1940 * to recalculate it due to relocking. */
1941 if (appending) {
1942 saved_pos = i_size_read(inode);
1943 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
1944 } else {
1945 saved_pos = *ppos;
1948 end = saved_pos + count;
1950 ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
1951 if (ret == 1) {
1952 ocfs2_inode_unlock(inode, meta_level);
1953 meta_level = -1;
1955 ret = ocfs2_prepare_inode_for_refcount(inode,
1956 saved_pos,
1957 count,
1958 &meta_level);
1959 if (has_refcount)
1960 *has_refcount = 1;
1961 if (direct_io)
1962 *direct_io = 0;
1965 if (ret < 0) {
1966 mlog_errno(ret);
1967 goto out_unlock;
1971 * Skip the O_DIRECT checks if we don't need
1972 * them.
1974 if (!direct_io || !(*direct_io))
1975 break;
1978 * There's no sane way to do direct writes to an inode
1979 * with inline data.
1981 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1982 *direct_io = 0;
1983 break;
1987 * Allowing concurrent direct writes means
1988 * i_size changes wouldn't be synchronized, so
1989 * one node could wind up truncating another
1990 * node's writes.
1992 if (end > i_size_read(inode)) {
1993 *direct_io = 0;
1994 break;
1998 * We don't fill holes during direct io, so
1999 * check for them here. If any are found, the
2000 * caller will have to retake some cluster
2001 * locks and initiate the io as buffered.
2003 ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
2004 if (ret == 1) {
2005 *direct_io = 0;
2006 ret = 0;
2007 } else if (ret < 0)
2008 mlog_errno(ret);
2009 break;
2012 if (appending)
2013 *ppos = saved_pos;
2015 out_unlock:
2016 if (meta_level >= 0)
2017 ocfs2_inode_unlock(inode, meta_level);
2019 out:
2020 return ret;
2023 static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
2024 const struct iovec *iov,
2025 unsigned long nr_segs,
2026 loff_t pos)
2028 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
2029 int can_do_direct, has_refcount = 0;
2030 ssize_t written = 0;
2031 size_t ocount; /* original count */
2032 size_t count; /* after file limit checks */
2033 loff_t old_size, *ppos = &iocb->ki_pos;
2034 u32 old_clusters;
2035 struct file *file = iocb->ki_filp;
2036 struct inode *inode = file->f_path.dentry->d_inode;
2037 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2039 mlog_entry("(0x%p, %u, '%.*s')\n", file,
2040 (unsigned int)nr_segs,
2041 file->f_path.dentry->d_name.len,
2042 file->f_path.dentry->d_name.name);
2044 if (iocb->ki_left == 0)
2045 return 0;
2047 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2049 appending = file->f_flags & O_APPEND ? 1 : 0;
2050 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
2052 mutex_lock(&inode->i_mutex);
2054 relock:
2055 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
2056 if (direct_io) {
2057 down_read(&inode->i_alloc_sem);
2058 have_alloc_sem = 1;
2061 /* concurrent O_DIRECT writes are allowed */
2062 rw_level = !direct_io;
2063 ret = ocfs2_rw_lock(inode, rw_level);
2064 if (ret < 0) {
2065 mlog_errno(ret);
2066 goto out_sems;
2069 can_do_direct = direct_io;
2070 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
2071 iocb->ki_left, appending,
2072 &can_do_direct, &has_refcount);
2073 if (ret < 0) {
2074 mlog_errno(ret);
2075 goto out;
2079 * We can't complete the direct I/O as requested, fall back to
2080 * buffered I/O.
2082 if (direct_io && !can_do_direct) {
2083 ocfs2_rw_unlock(inode, rw_level);
2084 up_read(&inode->i_alloc_sem);
2086 have_alloc_sem = 0;
2087 rw_level = -1;
2089 direct_io = 0;
2090 goto relock;
2094 * To later detect whether a journal commit for sync writes is
2095 * necessary, we sample i_size and cluster count here.
2097 old_size = i_size_read(inode);
2098 old_clusters = OCFS2_I(inode)->ip_clusters;
2100 /* communicate with ocfs2_dio_end_io */
2101 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2103 ret = generic_segment_checks(iov, &nr_segs, &ocount,
2104 VERIFY_READ);
2105 if (ret)
2106 goto out_dio;
2108 count = ocount;
2109 ret = generic_write_checks(file, ppos, &count,
2110 S_ISBLK(inode->i_mode));
2111 if (ret)
2112 goto out_dio;
2114 if (direct_io) {
2115 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
2116 ppos, count, ocount);
2117 if (written < 0) {
2119 * direct write may have instantiated a few
2120 * blocks outside i_size. Trim these off again.
2121 * Don't need i_size_read because we hold i_mutex.
2123 if (*ppos + count > inode->i_size)
2124 vmtruncate(inode, inode->i_size);
2125 ret = written;
2126 goto out_dio;
2128 } else {
2129 current->backing_dev_info = file->f_mapping->backing_dev_info;
2130 written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
2131 ppos, count, 0);
2132 current->backing_dev_info = NULL;
2135 out_dio:
2136 /* buffered aio wouldn't have proper lock coverage today */
2137 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2139 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2140 ((file->f_flags & O_DIRECT) && has_refcount)) {
2141 ret = filemap_fdatawrite_range(file->f_mapping, pos,
2142 pos + count - 1);
2143 if (ret < 0)
2144 written = ret;
2146 if (!ret && ((old_size != i_size_read(inode)) ||
2147 (old_clusters != OCFS2_I(inode)->ip_clusters) ||
2148 has_refcount)) {
2149 ret = jbd2_journal_force_commit(osb->journal->j_journal);
2150 if (ret < 0)
2151 written = ret;
2154 if (!ret)
2155 ret = filemap_fdatawait_range(file->f_mapping, pos,
2156 pos + count - 1);
2160 * Deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2161 * function pointer which is called when O_DIRECT io completes so that
2162 * it can unlock our rw lock. (It's the clustered equivalent of
2163 * i_alloc_sem; it protects truncate from racing with pending ios.)
2164 * Unfortunately there are error cases which call end_io and others
2165 * that don't, so we don't have to unlock the rw_lock if either an
2166 * async dio is going to do it in the future or an end_io after an
2167 * error has already done it.
2169 if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2170 rw_level = -1;
2171 have_alloc_sem = 0;
2174 out:
2175 if (rw_level != -1)
2176 ocfs2_rw_unlock(inode, rw_level);
2178 out_sems:
2179 if (have_alloc_sem)
2180 up_read(&inode->i_alloc_sem);
2182 mutex_unlock(&inode->i_mutex);
2184 if (written)
2185 ret = written;
2186 mlog_exit(ret);
2187 return ret;
2190 static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
2191 struct file *out,
2192 struct splice_desc *sd)
2194 int ret;
2196 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, &sd->pos,
2197 sd->total_len, 0, NULL, NULL);
2198 if (ret < 0) {
2199 mlog_errno(ret);
2200 return ret;
2203 return splice_from_pipe_feed(pipe, sd, pipe_to_file);
2206 static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2207 struct file *out,
2208 loff_t *ppos,
2209 size_t len,
2210 unsigned int flags)
2212 int ret;
2213 struct address_space *mapping = out->f_mapping;
2214 struct inode *inode = mapping->host;
2215 struct splice_desc sd = {
2216 .total_len = len,
2217 .flags = flags,
2218 .pos = *ppos,
2219 .u.file = out,
2222 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
2223 (unsigned int)len,
2224 out->f_path.dentry->d_name.len,
2225 out->f_path.dentry->d_name.name);
2227 if (pipe->inode)
2228 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
2230 splice_from_pipe_begin(&sd);
2231 do {
2232 ret = splice_from_pipe_next(pipe, &sd);
2233 if (ret <= 0)
2234 break;
2236 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
2237 ret = ocfs2_rw_lock(inode, 1);
2238 if (ret < 0)
2239 mlog_errno(ret);
2240 else {
2241 ret = ocfs2_splice_to_file(pipe, out, &sd);
2242 ocfs2_rw_unlock(inode, 1);
2244 mutex_unlock(&inode->i_mutex);
2245 } while (ret > 0);
2246 splice_from_pipe_end(pipe, &sd);
2248 if (pipe->inode)
2249 mutex_unlock(&pipe->inode->i_mutex);
2251 if (sd.num_spliced)
2252 ret = sd.num_spliced;
2254 if (ret > 0) {
2255 unsigned long nr_pages;
2256 int err;
2258 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2260 err = generic_write_sync(out, *ppos, ret);
2261 if (err)
2262 ret = err;
2263 else
2264 *ppos += ret;
2266 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
2269 mlog_exit(ret);
2270 return ret;
2273 static ssize_t ocfs2_file_splice_read(struct file *in,
2274 loff_t *ppos,
2275 struct pipe_inode_info *pipe,
2276 size_t len,
2277 unsigned int flags)
2279 int ret = 0, lock_level = 0;
2280 struct inode *inode = in->f_path.dentry->d_inode;
2282 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
2283 (unsigned int)len,
2284 in->f_path.dentry->d_name.len,
2285 in->f_path.dentry->d_name.name);
2288 * See the comment in ocfs2_file_aio_read()
2290 ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
2291 if (ret < 0) {
2292 mlog_errno(ret);
2293 goto bail;
2295 ocfs2_inode_unlock(inode, lock_level);
2297 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2299 bail:
2300 mlog_exit(ret);
2301 return ret;
2304 static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2305 const struct iovec *iov,
2306 unsigned long nr_segs,
2307 loff_t pos)
2309 int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
2310 struct file *filp = iocb->ki_filp;
2311 struct inode *inode = filp->f_path.dentry->d_inode;
2313 mlog_entry("(0x%p, %u, '%.*s')\n", filp,
2314 (unsigned int)nr_segs,
2315 filp->f_path.dentry->d_name.len,
2316 filp->f_path.dentry->d_name.name);
2318 if (!inode) {
2319 ret = -EINVAL;
2320 mlog_errno(ret);
2321 goto bail;
2325 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2326 * need locks to protect pending reads from racing with truncate.
2328 if (filp->f_flags & O_DIRECT) {
2329 down_read(&inode->i_alloc_sem);
2330 have_alloc_sem = 1;
2332 ret = ocfs2_rw_lock(inode, 0);
2333 if (ret < 0) {
2334 mlog_errno(ret);
2335 goto bail;
2337 rw_level = 0;
2338 /* communicate with ocfs2_dio_end_io */
2339 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2343 * We're fine letting folks race truncates and extending
2344 * writes with read across the cluster, just like they can
2345 * locally. Hence no rw_lock during read.
2347 * Take and drop the meta data lock to update inode fields
2348 * like i_size. This allows the checks down below
2349 * generic_file_aio_read() a chance of actually working.
2351 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2352 if (ret < 0) {
2353 mlog_errno(ret);
2354 goto bail;
2356 ocfs2_inode_unlock(inode, lock_level);
2358 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
2359 if (ret == -EINVAL)
2360 mlog(0, "generic_file_aio_read returned -EINVAL\n");
2362 /* buffered aio wouldn't have proper lock coverage today */
2363 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2365 /* see ocfs2_file_aio_write */
2366 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2367 rw_level = -1;
2368 have_alloc_sem = 0;
2371 bail:
2372 if (have_alloc_sem)
2373 up_read(&inode->i_alloc_sem);
2374 if (rw_level != -1)
2375 ocfs2_rw_unlock(inode, rw_level);
2376 mlog_exit(ret);
2378 return ret;
2381 const struct inode_operations ocfs2_file_iops = {
2382 .setattr = ocfs2_setattr,
2383 .getattr = ocfs2_getattr,
2384 .permission = ocfs2_permission,
2385 .setxattr = generic_setxattr,
2386 .getxattr = generic_getxattr,
2387 .listxattr = ocfs2_listxattr,
2388 .removexattr = generic_removexattr,
2389 .fallocate = ocfs2_fallocate,
2390 .fiemap = ocfs2_fiemap,
2393 const struct inode_operations ocfs2_special_file_iops = {
2394 .setattr = ocfs2_setattr,
2395 .getattr = ocfs2_getattr,
2396 .permission = ocfs2_permission,
2400 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2401 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2403 const struct file_operations ocfs2_fops = {
2404 .llseek = generic_file_llseek,
2405 .read = do_sync_read,
2406 .write = do_sync_write,
2407 .mmap = ocfs2_mmap,
2408 .fsync = ocfs2_sync_file,
2409 .release = ocfs2_file_release,
2410 .open = ocfs2_file_open,
2411 .aio_read = ocfs2_file_aio_read,
2412 .aio_write = ocfs2_file_aio_write,
2413 .unlocked_ioctl = ocfs2_ioctl,
2414 #ifdef CONFIG_COMPAT
2415 .compat_ioctl = ocfs2_compat_ioctl,
2416 #endif
2417 .lock = ocfs2_lock,
2418 .flock = ocfs2_flock,
2419 .splice_read = ocfs2_file_splice_read,
2420 .splice_write = ocfs2_file_splice_write,
2423 const struct file_operations ocfs2_dops = {
2424 .llseek = generic_file_llseek,
2425 .read = generic_read_dir,
2426 .readdir = ocfs2_readdir,
2427 .fsync = ocfs2_sync_file,
2428 .release = ocfs2_dir_release,
2429 .open = ocfs2_dir_open,
2430 .unlocked_ioctl = ocfs2_ioctl,
2431 #ifdef CONFIG_COMPAT
2432 .compat_ioctl = ocfs2_compat_ioctl,
2433 #endif
2434 .lock = ocfs2_lock,
2435 .flock = ocfs2_flock,
2439 * POSIX-lockless variants of our file_operations.
2441 * These will be used if the underlying cluster stack does not support
2442 * posix file locking, if the user passes the "localflocks" mount
2443 * option, or if we have a local-only fs.
2445 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2446 * so we still want it in the case of no stack support for
2447 * plocks. Internally, it will do the right thing when asked to ignore
2448 * the cluster.
2450 const struct file_operations ocfs2_fops_no_plocks = {
2451 .llseek = generic_file_llseek,
2452 .read = do_sync_read,
2453 .write = do_sync_write,
2454 .mmap = ocfs2_mmap,
2455 .fsync = ocfs2_sync_file,
2456 .release = ocfs2_file_release,
2457 .open = ocfs2_file_open,
2458 .aio_read = ocfs2_file_aio_read,
2459 .aio_write = ocfs2_file_aio_write,
2460 .unlocked_ioctl = ocfs2_ioctl,
2461 #ifdef CONFIG_COMPAT
2462 .compat_ioctl = ocfs2_compat_ioctl,
2463 #endif
2464 .flock = ocfs2_flock,
2465 .splice_read = ocfs2_file_splice_read,
2466 .splice_write = ocfs2_file_splice_write,
2469 const struct file_operations ocfs2_dops_no_plocks = {
2470 .llseek = generic_file_llseek,
2471 .read = generic_read_dir,
2472 .readdir = ocfs2_readdir,
2473 .fsync = ocfs2_sync_file,
2474 .release = ocfs2_dir_release,
2475 .open = ocfs2_dir_open,
2476 .unlocked_ioctl = ocfs2_ioctl,
2477 #ifdef CONFIG_COMPAT
2478 .compat_ioctl = ocfs2_compat_ioctl,
2479 #endif
2480 .flock = ocfs2_flock,