fs/gfs2/file.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_holder i_gh;
        loff_t error;

        if (origin == 2) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (!error) {
                        error = generic_file_llseek_unlocked(file, offset, origin);
                        gfs2_glock_dq_uninit(&i_gh);
                }
        } else
                error = generic_file_llseek_unlocked(file, offset, origin);

        return error;
}
/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
        struct inode *dir = file->f_mapping->host;
        struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_holder d_gh;
        u64 offset = file->f_pos;
        int error;

        gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
        error = gfs2_glock_nq(&d_gh);
        if (error) {
                gfs2_holder_uninit(&d_gh);
                return error;
        }

        error = gfs2_dir_read(dir, &offset, dirent, filldir);

        gfs2_glock_dq_uninit(&d_gh);

        file->f_pos = offset;

        return error;
}
/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
        u32 res = 0;
        while (val) {
                if (val & 1)
                        res |= *table;
                table++;
                val >>= 1;
        }
        return res;
}
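
/*
 * The two lookup tables below are indexed by bit position. fsflags_to_gfs2
 * maps FS_IOC_SETFLAGS bit numbers to GFS2 on-disk flags (for example,
 * FS_SYNC_FL is 0x8, i.e. bit 3, so entry [3] is GFS2_DIF_SYNC), while
 * gfs2_to_fsflags maps GFS2 flag bit numbers back to the generic FS_*_FL
 * values reported by FS_IOC_GETFLAGS.
 */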
static const u32 fsflags_to_gfs2[32] = {
        [3] = GFS2_DIF_SYNC,
        [4] = GFS2_DIF_IMMUTABLE,
        [5] = GFS2_DIF_APPENDONLY,
        [7] = GFS2_DIF_NOATIME,
        [12] = GFS2_DIF_EXHASH,
        [14] = GFS2_DIF_INHERIT_JDATA,
};

static const u32 gfs2_to_fsflags[32] = {
        [gfs2fl_Sync] = FS_SYNC_FL,
        [gfs2fl_Immutable] = FS_IMMUTABLE_FL,
        [gfs2fl_AppendOnly] = FS_APPEND_FL,
        [gfs2fl_NoAtime] = FS_NOATIME_FL,
        [gfs2fl_ExHash] = FS_INDEX_FL,
        [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
        u32 fsflags;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (error)
                return error;

        fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
        if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
                fsflags |= FS_JOURNAL_DATA_FL;
        if (put_user(fsflags, ptr))
                error = -EFAULT;

        gfs2_glock_dq(&gh);
        gfs2_holder_uninit(&gh);
        return error;
}
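
/*
 * gfs2_set_inode_flags - propagate GFS2's on-disk flags into the generic
 * VFS inode flags (S_IMMUTABLE, S_APPEND, S_NOATIME, S_SYNC) so that the
 * rest of the kernel honours them.
 */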
void gfs2_set_inode_flags(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        unsigned int flags = inode->i_flags;

        flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
        if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
                flags |= S_IMMUTABLE;
        if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
                flags |= S_APPEND;
        if (ip->i_diskflags & GFS2_DIF_NOATIME)
                flags |= S_NOATIME;
        if (ip->i_diskflags & GFS2_DIF_SYNC)
                flags |= S_SYNC;
        inode->i_flags = flags;
}
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|                    \
                             GFS2_DIF_IMMUTABLE|                \
                             GFS2_DIF_APPENDONLY|               \
                             GFS2_DIF_NOATIME|                  \
                             GFS2_DIF_SYNC|                     \
                             GFS2_DIF_SYSTEM|                   \
                             GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *bh;
        struct gfs2_holder gh;
        int error;
        u32 new_flags, flags;

        error = mnt_want_write(filp->f_path.mnt);
        if (error)
                return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                goto out_drop_write;

        error = -EACCES;
        if (!inode_owner_or_capable(inode))
                goto out;

        error = 0;
        flags = ip->i_diskflags;
        new_flags = (flags & ~mask) | (reqflags & mask);
        if ((new_flags ^ flags) == 0)
                goto out;

        error = -EINVAL;
        if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
                goto out;

        error = -EPERM;
        if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
                goto out;
        if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
                goto out;
        if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
            !capable(CAP_LINUX_IMMUTABLE))
                goto out;
        if (!IS_IMMUTABLE(inode)) {
                error = gfs2_permission(inode, MAY_WRITE, 0);
                if (error)
                        goto out;
        }
        if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
                if (flags & GFS2_DIF_JDATA)
                        gfs2_log_flush(sdp, ip->i_gl);
                error = filemap_fdatawrite(inode->i_mapping);
                if (error)
                        goto out;
                error = filemap_fdatawait(inode->i_mapping);
                if (error)
                        goto out;
        }
        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                goto out;
        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
                goto out_trans_end;
        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        ip->i_diskflags = new_flags;
        gfs2_dinode_out(ip, bh->b_data);
        brelse(bh);
        gfs2_set_inode_flags(inode);
        gfs2_set_aops(inode);
out_trans_end:
        gfs2_trans_end(sdp);
out:
        gfs2_glock_dq_uninit(&gh);
out_drop_write:
        mnt_drop_write(filp->f_path.mnt);
        return error;
}
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        u32 fsflags, gfsflags;

        if (get_user(fsflags, ptr))
                return -EFAULT;

        gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
        if (!S_ISDIR(inode->i_mode)) {
                if (gfsflags & GFS2_DIF_INHERIT_JDATA)
                        gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
                return do_gfs2_set_flags(filp, gfsflags, ~0);
        }
        return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}
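
/*
 * gfs2_ioctl - handle the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls (the
 * interface used by lsattr(1) and chattr(1)); any other command is
 * answered with -ENOTTY.
 */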
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case FS_IOC_GETFLAGS:
                return gfs2_get_flags(filp, (u32 __user *)arg);
        case FS_IOC_SETFLAGS:
                return gfs2_set_flags(filp, (u32 __user *)arg);
        }
        return -ENOTTY;
}
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */
static int gfs2_allocate_page_backing(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head bh;
        unsigned long size = PAGE_CACHE_SIZE;
        u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

        do {
                bh.b_state = 0;
                bh.b_size = size;
                gfs2_block_map(inode, lblock, &bh, 1);
                if (!buffer_mapped(&bh))
                        return -EIO;
                size -= bh.b_size;
                lblock += (bh.b_size >> inode->i_blkbits);
        } while (size > 0);
        return 0;
}
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @page: The page which is about to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned long last_index;
        u64 pos = page->index << PAGE_CACHE_SHIFT;
        unsigned int data_blocks, ind_blocks, rblocks;
        struct gfs2_holder gh;
        struct gfs2_alloc *al;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out;

        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);

        if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
                goto out_unlock;
        ret = -ENOMEM;
        al = gfs2_alloc_get(ip);
        if (al == NULL)
                goto out_unlock;

        ret = gfs2_quota_lock_check(ip);
        if (ret)
                goto out_alloc_put;
        gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
        al->al_requested = data_blocks + ind_blocks;
        ret = gfs2_inplace_reserve(ip);
        if (ret)
                goto out_quota_unlock;

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks) {
                rblocks += RES_STATFS + RES_QUOTA;
                rblocks += gfs2_rg_blocks(al);
        }
        ret = gfs2_trans_begin(sdp, rblocks, 0);
        if (ret)
                goto out_trans_fail;

        lock_page(page);
        ret = -EINVAL;
        last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
        if (page->index > last_index)
                goto out_unlock_page;
        ret = 0;
        if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
                goto out_unlock_page;
        if (gfs2_is_stuffed(ip)) {
                ret = gfs2_unstuff_dinode(ip, page);
                if (ret)
                        goto out_unlock_page;
        }
        ret = gfs2_allocate_page_backing(page);

out_unlock_page:
        unlock_page(page);
        gfs2_trans_end(sdp);
out_trans_fail:
        gfs2_inplace_release(ip);
out_quota_unlock:
        gfs2_quota_unlock(ip);
out_alloc_put:
        gfs2_alloc_put(ip);
out_unlock:
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (ret == -ENOMEM)
                ret = VM_FAULT_OOM;
        else if (ret)
                ret = VM_FAULT_SIGBUS;
        return ret;
}
static const struct vm_operations_struct gfs2_vm_ops = {
        .fault = filemap_fault,
        .page_mkwrite = gfs2_page_mkwrite,
};
/**
 * gfs2_mmap - set up a file-backed memory mapping
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

        if (!(file->f_flags & O_NOATIME) &&
            !IS_NOATIME(&ip->i_inode)) {
                struct gfs2_holder i_gh;
                int error;

                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
                error = gfs2_glock_nq(&i_gh);
                if (error == 0) {
                        file_accessed(file);
                        gfs2_glock_dq(&i_gh);
                }
                gfs2_holder_uninit(&i_gh);
                if (error)
                        return error;
        }
        vma->vm_ops = &gfs2_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;

        return 0;
}
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder i_gh;
        struct gfs2_file *fp;
        int error;

        fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;

        mutex_init(&fp->f_fl_mutex);

        gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
        file->private_data = fp;

        if (S_ISREG(ip->i_inode.i_mode)) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
                        goto fail;

                if (!(file->f_flags & O_LARGEFILE) &&
                    i_size_read(inode) > MAX_NON_LFS) {
                        error = -EOVERFLOW;
                        goto fail_gunlock;
                }

                gfs2_glock_dq_uninit(&i_gh);
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        file->private_data = NULL;
        kfree(fp);
        return error;
}
/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_file *fp;

        fp = file->private_data;
        file->private_data = NULL;

        if (gfs2_assert_warn(sdp, fp))
                return -EIO;

        kfree(fp);

        return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry (we ignore this)
 * @datasync: set if we can ignore timestamp changes
 *
 * The VFS will flush data for us. We only need to worry
 * about metadata here.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
        struct gfs2_inode *ip = GFS2_I(inode);
        int ret;

        if (datasync)
                sync_state &= ~I_DIRTY_SYNC;

        if (sync_state) {
                ret = sync_inode_metadata(inode, 1);
                if (ret)
                        return ret;
                gfs2_ail_flush(ip->i_gl);
        }

        return 0;
}
/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;

        if (file->f_flags & O_APPEND) {
                struct dentry *dentry = file->f_dentry;
                struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
                struct gfs2_holder gh;
                int ret;

                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
                if (ret)
                        return ret;
                gfs2_glock_dq_uninit(&gh);
        }

        return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
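
/*
 * empty_write_end - finish off a run of freshly prepared, zero-filled blocks
 *
 * The buffers covering [from, to) are zeroed and then either committed
 * through the normal (journalled) write path, or, for pages at or beyond
 * the one containing i_size when FALLOC_FL_KEEP_SIZE is set, marked dirty
 * and written out synchronously buffer by buffer.
 */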
static int empty_write_end(struct page *page, unsigned from,
                           unsigned to, int mode)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct buffer_head *bh;
        unsigned offset, blksize = 1 << inode->i_blkbits;
        pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

        zero_user(page, from, to - from);
        mark_page_accessed(page);

        if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
                if (!gfs2_is_writeback(ip))
                        gfs2_page_add_databufs(ip, page, from, to);

                block_commit_write(page, from, to);
                return 0;
        }

        offset = 0;
        bh = page_buffers(page);
        while (offset < to) {
                if (offset >= from) {
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
                        clear_buffer_new(bh);
                        write_dirty_buffer(bh, WRITE);
                }
                offset += blksize;
                bh = bh->b_this_page;
        }

        offset = 0;
        bh = page_buffers(page);
        while (offset < to) {
                if (offset >= from) {
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                return -EIO;
                }
                offset += blksize;
                bh = bh->b_this_page;
        }
        return 0;
}
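
/*
 * needs_empty_write - check whether a block still needs to be allocated
 *
 * Returns 1 if the block is not yet mapped to disk (so it must be allocated
 * and zeroed), 0 if it is already mapped, or a negative errno.
 */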
static int needs_empty_write(sector_t block, struct inode *inode)
{
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        bh_map.b_size = 1 << inode->i_blkbits;
        error = gfs2_block_map(inode, block, &bh_map, 0);
        if (unlikely(error))
                return error;
        return !buffer_mapped(&bh_map);
}
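
/*
 * write_empty_blocks - allocate and zero the unmapped blocks backing a range
 *
 * Walks the filesystem blocks behind [from, to) within the page, batching
 * consecutive unmapped blocks into a single __block_write_begin() plus
 * empty_write_end() pass; blocks which are already mapped are skipped.
 */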
static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
                              int mode)
{
        struct inode *inode = page->mapping->host;
        unsigned start, end, next, blksize;
        sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        int ret;

        blksize = 1 << inode->i_blkbits;
        next = end = 0;
        while (next < from) {
                next += blksize;
                block++;
        }
        start = next;
        do {
                next += blksize;
                ret = needs_empty_write(block, inode);
                if (unlikely(ret < 0))
                        return ret;
                if (ret == 0) {
                        if (end) {
                                ret = __block_write_begin(page, start, end - start,
                                                          gfs2_block_map);
                                if (unlikely(ret))
                                        return ret;
                                ret = empty_write_end(page, start, end, mode);
                                if (unlikely(ret))
                                        return ret;
                                end = 0;
                        }
                        start = next;
                }
                else
                        end = next;
                block++;
        } while (next < to);

        if (end) {
                ret = __block_write_begin(page, start, end - start, gfs2_block_map);
                if (unlikely(ret))
                        return ret;
                ret = empty_write_end(page, start, end, mode);
                if (unlikely(ret))
                        return ret;
        }

        return 0;
}
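
/*
 * fallocate_chunk - preallocate one contiguous chunk of a fallocate request
 *
 * Walks the range one page at a time, allocating and zeroing whatever
 * blocks are still unmapped, and (unless FALLOC_FL_KEEP_SIZE was given)
 * extends i_size as it goes. Called with a transaction already open.
 */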
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
                           int mode)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct buffer_head *dibh;
        int error;
        u64 start = offset >> PAGE_CACHE_SHIFT;
        unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
        u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
        pgoff_t curr;
        struct page *page;
        unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
        unsigned int from, to;

        if (!end_offset)
                end_offset = PAGE_CACHE_SIZE;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(error))
                goto out;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);

        if (gfs2_is_stuffed(ip)) {
                error = gfs2_unstuff_dinode(ip, NULL);
                if (unlikely(error))
                        goto out;
        }

        curr = start;
        offset = start << PAGE_CACHE_SHIFT;
        from = start_offset;
        to = PAGE_CACHE_SIZE;
        while (curr <= end) {
                page = grab_cache_page_write_begin(inode->i_mapping, curr,
                                                   AOP_FLAG_NOFS);
                if (unlikely(!page)) {
                        error = -ENOMEM;
                        goto out;
                }

                if (curr == end)
                        to = end_offset;
                error = write_empty_blocks(page, from, to, mode);
                if (!error && offset + to > inode->i_size &&
                    !(mode & FALLOC_FL_KEEP_SIZE)) {
                        i_size_write(inode, offset + to);
                }
                unlock_page(page);
                page_cache_release(page);
                if (error)
                        goto out;
                curr++;
                offset += PAGE_CACHE_SIZE;
                from = 0;
        }

        gfs2_dinode_out(ip, dibh->b_data);
        mark_inode_dirty(inode);

        brelse(dibh);

out:
        return error;
}
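
/*
 * calc_max_reserv - work out how much of a fallocate request the currently
 * reserved resource group can satisfy in one go
 *
 * Roughly the reverse of gfs2_write_calc_reserv(): based on the free blocks
 * in the rgrp it derives the largest usable split into data and indirect
 * blocks, and adjusts *len to match (never exceeding @max).
 */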
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
                            unsigned int *data_blocks, unsigned int *ind_blocks)
{
        const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
        unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

        for (tmp = max_data; tmp > sdp->sd_diptrs;) {
                tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
                max_data -= tmp;
        }
        /* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
           so it might end up with fewer data blocks */
        if (max_data <= *data_blocks)
                return;
        *data_blocks = max_data;
        *ind_blocks = max_blocks - max_data;
        *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
        if (*len > max) {
                *len = max;
                gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
        }
}
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
                           loff_t len)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        loff_t bytes, max_bytes;
        struct gfs2_alloc *al;
        int error;
        loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
        loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
        next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

        /* We only support the FALLOC_FL_KEEP_SIZE mode */
        if (mode & ~FALLOC_FL_KEEP_SIZE)
                return -EOPNOTSUPP;

        offset &= bsize_mask;

        len = next - offset;
        bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
        if (!bytes)
                bytes = UINT_MAX;
        bytes &= bsize_mask;
        if (bytes == 0)
                bytes = sdp->sd_sb.sb_bsize;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
        error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;

        if (!gfs2_write_alloc_required(ip, offset, len))
                goto out_unlock;

        while (len > 0) {
                if (len < bytes)
                        bytes = len;
                al = gfs2_alloc_get(ip);
                if (!al) {
                        error = -ENOMEM;
                        goto out_unlock;
                }

                error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_alloc_put;

retry:
                gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error) {
                        if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
                                bytes >>= 1;
                                bytes &= bsize_mask;
                                if (bytes == 0)
                                        bytes = sdp->sd_sb.sb_bsize;
                                goto retry;
                        }
                        goto out_qunlock;
                }
                max_bytes = bytes;
                calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
                al->al_requested = data_blocks + ind_blocks;

                rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
                          RES_RG_HDR + gfs2_rg_blocks(al);
                if (gfs2_is_jdata(ip))
                        rblocks += data_blocks ? data_blocks : 1;

                error = gfs2_trans_begin(sdp, rblocks,
                                         PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
                if (error)
                        goto out_trans_fail;

                error = fallocate_chunk(inode, offset, max_bytes, mode);
                gfs2_trans_end(sdp);

                if (error)
                        goto out_trans_fail;

                len -= max_bytes;
                offset += max_bytes;
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        goto out_unlock;

out_trans_fail:
        gfs2_inplace_release(ip);
out_qunlock:
        gfs2_quota_unlock(ip);
out_alloc_put:
        gfs2_alloc_put(ip);
out_unlock:
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
        return -EINVAL;
}
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
        if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
                return -ENOLCK;

        if (cmd == F_CANCELLK) {
                /* Hack: */
                cmd = F_SETLK;
                fl->fl_type = F_UNLCK;
        }
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;
        if (IS_GETLK(cmd))
                return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
        else if (fl->fl_type == F_UNLCK)
                return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
        else
                return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
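
/*
 * do_flock - acquire or convert a cluster-wide flock
 *
 * Each flock is backed by its own glock: F_WRLCK maps to an exclusive
 * glock, anything else to a shared one. Non-blocking requests use
 * LM_FLAG_TRY and report failure as -EAGAIN.
 */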
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;
        struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
        struct gfs2_glock *gl;
        unsigned int state;
        int flags;
        int error = 0;

        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

        mutex_lock(&fp->f_fl_mutex);

        gl = fl_gh->gh_gl;
        if (gl) {
                if (fl_gh->gh_state == state)
                        goto out;
                flock_lock_file_wait(file,
                                     &(struct file_lock){.fl_type = F_UNLCK});
                gfs2_glock_dq_wait(fl_gh);
                gfs2_holder_reinit(state, flags, fl_gh);
        } else {
                error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
                                       &gfs2_flock_glops, CREATE, &gl);
                if (error)
                        goto out;
                gfs2_holder_init(gl, state, flags, fl_gh);
                gfs2_glock_put(gl);
        }
        error = gfs2_glock_nq(fl_gh);
        if (error) {
                gfs2_holder_uninit(fl_gh);
                if (error == GLR_TRYFAILED)
                        error = -EAGAIN;
        } else {
                error = flock_lock_file_wait(file, fl);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
        }

out:
        mutex_unlock(&fp->f_fl_mutex);
        return error;
}
static void do_unflock(struct file *file, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;

        mutex_lock(&fp->f_fl_mutex);
        flock_lock_file_wait(file, fl);
        if (fl_gh->gh_gl) {
                gfs2_glock_dq_wait(fl_gh);
                gfs2_holder_uninit(fl_gh);
        }
        mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
        if (fl->fl_type & LOCK_MAND)
                return -EOPNOTSUPP;

        if (fl->fl_type == F_UNLCK) {
                do_unflock(file, fl);
                return 0;
        } else {
                return do_flock(file, cmd, fl);
        }
}
const struct file_operations gfs2_file_fops = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = gfs2_file_aio_write,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
        .release        = gfs2_close,
        .fsync          = gfs2_fsync,
        .lock           = gfs2_lock,
        .flock          = gfs2_flock,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
        .setlease       = gfs2_setlease,
        .fallocate      = gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops = {
        .readdir        = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_close,
        .fsync          = gfs2_fsync,
        .lock           = gfs2_lock,
        .flock          = gfs2_flock,
        .llseek         = default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
const struct file_operations gfs2_file_fops_nolock = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = gfs2_file_aio_write,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
        .release        = gfs2_close,
        .fsync          = gfs2_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
        .setlease       = generic_setlease,
        .fallocate      = gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops_nolock = {
        .readdir        = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_close,
        .fsync          = gfs2_fsync,
        .llseek         = default_llseek,
};