1 #include "ceph_debug.h"
3 #include <linux/sched.h>
4 #include <linux/slab.h>
5 #include <linux/file.h>
6 #include <linux/namei.h>
7 #include <linux/writeback.h>
10 #include "mds_client.h"
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
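
/*
 * For illustration: the buffered vs. sync/direct decision is made per
 * call in ceph_aio_read()/ceph_aio_write() below.  Roughly, the sync
 * path is taken when the FILE_CACHE/FILE_BUFFER cap is not held, the
 * file is open O_DIRECT, or the mount is MS_SYNCHRONOUS; otherwise the
 * generic buffered helpers handle the I/O.
 */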

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_client *client = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
	req->r_args.open.preferred = cpu_to_le32(-1);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
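
/*
 * For illustration: the fmode reference taken at open time (via
 * __ceph_get_fmode() or the MDS open reply) is what ceph_release()
 * later drops with ceph_put_fmode(); the error paths above drop it
 * early so the count stays balanced for files that never reach
 * ceph_release().
 */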

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&inode->i_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have any caps.  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&inode->i_lock);
	if (__ceph_is_any_real_caps(ci)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&inode->i_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
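
/*
 * For illustration: ceph_open() thus has three local fast paths -- the
 * snapdir, an inode with already-issued caps, and a snapshot inode
 * whose snap caps cover the wanted set -- and only falls back to a
 * synchronous MDS open request when none of them applies.
 */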

/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct file *file = nd->intent.open.file;
	struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
	struct ceph_mds_request *req;
	int err;
	int flags = nd->intent.open.flags - 1;  /* silly vfs! */

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (!err)
		err = ceph_init_file(req->r_dentry->d_inode, file,
				     req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", dentry);
	return dentry;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up(&ci->i_cap_wq);
	return 0;
}

/*
 * build a vector of user pages
 */
static struct page **get_direct_page_vector(const char __user *data,
					    int num_pages,
					    loff_t off, size_t len)
{
	struct page **pages;
	int rc;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	down_read(&current->mm->mmap_sem);
	rc = get_user_pages(current, current->mm, (unsigned long)data,
			    num_pages, 0, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		goto fail;
	return pages;

fail:
	kfree(pages);
	return ERR_PTR(rc);
}

static void put_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(pages);
}

void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
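
/*
 * For illustration: put_page_vector() drops the page references taken
 * by get_user_pages() in get_direct_page_vector(), while
 * ceph_release_page_vector() frees pages we allocated ourselves; the
 * O_DIRECT and buffered paths below must each use the matching one.
 */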

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}

/*
 * copy user data into a page vector
 */
static int copy_user_to_page_vector(struct page **pages,
				    const char __user *data,
				    loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}

/*
 * copy user data from a page vector into a user pointer
 */
static int copy_page_vector_to_user(struct page **pages, char __user *data,
				    loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, left, PAGE_CACHE_SIZE-po);
		bad = copy_to_user(data, page_address(pages[i]) + po, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		if (po) {
			po += l - bad;
			if (po == PAGE_CACHE_SIZE)
				po = 0;
		}
		i++;
	}
	return len;
}

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
static void zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
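
/*
 * Illustrative example (assuming 4 KB pages):
 *
 *	zero_page_vector_range(1000, 10000, pages);
 *
 * zeroes bytes 1000..4095 of pages[0], all of pages[1], and bytes
 * 0..2807 of pages[2].
 */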

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
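/*
 * Illustrative example (assuming Ceph's default layout of 4 MB
 * objects): a 1 MB read at offset 3.5 MB spans two objects, so the
 * first OSD read returns at most 512 KB (this_len < left, i.e.
 * hit_stripe) and the loop below issues a second read for the rest.
 */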
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof)
{
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;

more:
	this_len = left;
	ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left);
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	if (ret == -ENOENT)
		ret = 0;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages =
			((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			zero_page_vector_range(page_off + read,
					       pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* was original extent fully inside i_size? */
		if (pos + left <= inode->i_size) {
			dout("zero tail\n");
			zero_page_vector_range(page_off + read, len - read,
					       pages);
			read = len;
			goto out;
		}

		/* check i_size */
		*checkeof = 1;
	}

out:
	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
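/*
 * For illustration: calc_pages_for(off, len) below returns the number
 * of pages spanned by the byte range; e.g. with 4 KB pages, off=100
 * and len=8192 cover bytes 100..8291, i.e. 3 pages.
 */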
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages = calc_pages_for(off, len);
	int ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		pages = get_direct_page_vector(data, num_pages, off, len);

		/*
		 * flush any page cache pages in this range.  this
		 * will make concurrent normal and O_DIRECT io slow,
		 * but it will at least behave sensibly when they are
		 * in sequence.
		 */
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		put_page_vector(pages, num_pages);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}
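
/*
 * For illustration: this callback pairs with ceph_sync_write() below,
 * which queues the request on ci->i_unsafe_writes (holding a
 * CEPH_CAP_FILE_WR ref) after receiving only an ACK; the ref and list
 * entry are dropped here once the OSD reports the write is ONDISK.
 */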

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	long long unsigned pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;

	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;

	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	len = left;
	req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2);
	if (IS_ERR(req))
		return PTR_ERR(req);

	num_pages = calc_pages_for(pos, len);

	if (file->f_flags & O_DIRECT) {
		pages = get_direct_page_vector(data, num_pages, pos, len);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
		ret = ceph_osdc_wait_request(&client->osdc, req);
	}

	if (file->f_flags & O_DIRECT)
		put_page_vector(pages, num_pages);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void *base = iov->iov_base;
	ssize_t ret;
	int got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
			    &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & CEPH_CAP_FILE_CACHE) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, reading more\n");
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
	loff_t endoff = pos + iov->iov_len;
	int got = 0;
	int ret, err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    &got, endoff);
	if (ret < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & CEPH_CAP_FILE_BUFFER) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
				      &iocb->ki_pos);
	} else {
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}
	}
	if (ret >= 0) {
		spin_lock(&inode->i_lock);
		__ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
	}

out:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	switch (origin) {
	case SEEK_END:
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	}

	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
};