/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset to seek to
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */
static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	if (origin == SEEK_END) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek_unlocked(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else {
		error = generic_file_llseek_unlocked(file, offset, origin);
	}

	return error;
}
/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */
static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}
/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;

	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
};
static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
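/*
 * A rough worked example of how fsflags_cvt() uses these tables, with
 * the bit values as defined in linux/gfs2_ondisk.h and linux/fs.h:
 *
 *	fsflags = fsflags_cvt(gfs2_to_fsflags, GFS2_DIF_NOATIME);
 *
 * walks the value bit by bit, finds bit gfs2fl_NoAtime (bit 7) set,
 * and returns gfs2_to_fsflags[7], i.e. FS_NOATIME_FL.  The
 * fsflags_to_gfs2 table is simply the reverse mapping, indexed by the
 * FS_*_FL bit numbers.
 */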
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	}
	return -ENOTTY;
}
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */
static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @page: The page which is about to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	struct gfs2_alloc *al;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
		goto out_unlock;
	ret = -ENOMEM;
	al = gfs2_alloc_get(ip);
	if (al == NULL)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_alloc_put;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	al->al_requested = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(al);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
	if (page->index > last_index)
		goto out_unlock_page;
	ret = 0;
	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
		goto out_unlock_page;
	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_unstuff_dinode(ip, page);
		if (ret)
			goto out_unlock_page;
	}
	ret = gfs2_allocate_page_backing(page);

out_unlock_page:
	unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == -ENOMEM)
		ret = VM_FAULT_OOM;
	else if (ret)
		ret = VM_FAULT_SIGBUS;
	return ret;
}
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};
/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		if (error == 0) {
			file_accessed(file);
			gfs2_glock_dq(&i_gh);
		}
		gfs2_holder_uninit(&i_gh);
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
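/*
 * A minimal userspace sketch of the path that ends up in
 * gfs2_page_mkwrite() above (fd, len and error handling illustrative
 * only): the first store to a clean MAP_SHARED page faults, and the
 * ->page_mkwrite handler allocates backing blocks before the write is
 * allowed to proceed.
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	<-- triggers ->fault, then ->page_mkwrite
 */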
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */
static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}
/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */
static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_file *fp;

	fp = file->private_data;
	file->private_data = NULL;

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * The VFS will flush data for us. We only need to worry
 * about metadata here.
 *
 * Returns: errno
 */
static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
		gfs2_ail_flush(ip->i_gl);
	}

	mutex_unlock(&inode->i_mutex);
	return ret;
}
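/*
 * For reference, the @datasync distinction above corresponds to the
 * two userspace calls (sketch only):
 *
 *	fsync(fd);	datasync == 0, timestamp-only changes count
 *	fdatasync(fd);	datasync != 0, I_DIRTY_SYNC (timestamps) ignored
 *
 * In both cases the data itself has already been written back by
 * filemap_write_and_wait_range() before the metadata is considered.
 */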
/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 * Returns: The number of bytes written, or errno
 */
static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;

	if (file->f_flags & O_APPEND) {
		struct dentry *dentry = file->f_dentry;
		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
		struct gfs2_holder gh;
		int ret;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
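/*
 * The lock/unlock above matters for a pattern like the following
 * (illustrative userspace sketch, path made up): two cluster nodes
 * appending to the same file.  Taking and dropping the shared glock
 * forces this node to revalidate i_size before
 * generic_file_aio_write() positions the write, so the append lands
 * after data written by the other node.
 *
 *	int fd = open("/gfs2/shared.log", O_WRONLY | O_APPEND);
 *	write(fd, buf, len);
 */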
static int empty_write_end(struct page *page, unsigned from,
			   unsigned to, int mode)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	unsigned offset, blksize = 1 << inode->i_blkbits;
	pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

	zero_user(page, from, to-from);
	mark_page_accessed(page);

	if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
		if (!gfs2_is_writeback(ip))
			gfs2_page_add_databufs(ip, page, from, to);

		block_commit_write(page, from, to);
		return 0;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			clear_buffer_new(bh);
			write_dirty_buffer(bh, WRITE);
		}
		offset += blksize;
		bh = bh->b_this_page;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				return -EIO;
		}
		offset += blksize;
		bh = bh->b_this_page;
	}
	return 0;
}
static int needs_empty_write(sector_t block, struct inode *inode)
{
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, block, &bh_map, 0);
	if (unlikely(error))
		return error;
	return !buffer_mapped(&bh_map);
}
static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
			      int mode)
{
	struct inode *inode = page->mapping->host;
	unsigned start, end, next, blksize;
	sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	int ret;

	blksize = 1 << inode->i_blkbits;
	next = end = 0;
	while (next < from) {
		next += blksize;
		block++;
	}
	start = next;
	do {
		next += blksize;
		ret = needs_empty_write(block, inode);
		if (unlikely(ret < 0))
			return ret;
		if (ret == 0) {
			if (end) {
				ret = __block_write_begin(page, start, end - start,
							  gfs2_block_map);
				if (unlikely(ret))
					return ret;
				ret = empty_write_end(page, start, end, mode);
				if (unlikely(ret))
					return ret;
				end = 0;
			}
			start = next;
		}
		else
			end = next;
		block++;
	} while (next < to);

	if (end) {
		ret = __block_write_begin(page, start, end - start, gfs2_block_map);
		if (unlikely(ret))
			return ret;
		ret = empty_write_end(page, start, end, mode);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	u64 start = offset >> PAGE_CACHE_SHIFT;
	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t curr;
	struct page *page;
	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
	unsigned int from, to;

	if (!end_offset)
		end_offset = PAGE_CACHE_SIZE;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		goto out;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	curr = start;
	offset = start << PAGE_CACHE_SHIFT;
	from = start_offset;
	to = PAGE_CACHE_SIZE;
	while (curr <= end) {
		page = grab_cache_page_write_begin(inode->i_mapping, curr,
						   AOP_FLAG_NOFS);
		if (unlikely(!page)) {
			error = -ENOMEM;
			goto out;
		}

		if (curr == end)
			to = end_offset;
		error = write_empty_blocks(page, from, to, mode);
		if (!error && offset + to > inode->i_size &&
		    !(mode & FALLOC_FL_KEEP_SIZE)) {
			i_size_write(inode, offset + to);
		}
		unlock_page(page);
		page_cache_release(page);
		if (error)
			goto out;
		curr++;
		offset += PAGE_CACHE_SIZE;
		from = 0;
	}

	gfs2_dinode_out(ip, dibh->b_data);
	mark_inode_dirty(inode);

	brelse(dibh);

out:
	return error;
}
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
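/*
 * Hypothetical worked example of the calculation above (made-up
 * numbers, not real superblock values): with max_blocks = 1000,
 * sd_max_height = 4, sd_diptrs = 100 and sd_inptrs = 500, max_data
 * starts at 1000 - 3 * (4 - 1) = 991.  The loop runs once:
 * DIV_ROUND_UP(991, 500) = 2 blocks are set aside for indirect
 * pointers, max_data becomes 989, and tmp (2) no longer exceeds
 * sd_diptrs.  The caller is then offered 989 data blocks and
 * 1000 - 989 = 11 indirect blocks, with *len capped accordingly.
 */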
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	struct gfs2_alloc *al;
	int error;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	if (!gfs2_write_alloc_required(ip, offset, len))
		goto out_unlock;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
		al->al_requested = data_blocks + ind_blocks;

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(al);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
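/*
 * A minimal userspace sketch of the one non-default mode this handler
 * accepts (fd/offset/len illustrative only): preallocate blocks
 * without changing the visible file size.
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);
 *
 * Calling it with mode 0 also works and extends i_size; any other
 * mode bits are rejected with -EOPNOTSUPP above.
 */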
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */
static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */
static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
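/*
 * For reference, the userspace side of this is plain POSIX locking
 * (sketch only, no error handling); GFS2 simply forwards it to the
 * DLM so the lock is honoured cluster-wide:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		(whole file)
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 */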
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */
static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= gfs2_setlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};
#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops_nolock = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};