#include "ceph_debug.h"

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
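
/*
 * Illustrative only (no new mechanism): an fstat(2) on an open file
 * becomes a getattr on that ino with an empty path, while looking up
 * "foo" in a cached directory becomes an op on (dir ino, "foo"); the
 * MDS resolves the base-ino-plus-path pair on its side.
 */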

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		dentry->d_op = &ceph_dentry_ops;
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		dentry->d_op = &ceph_snapdir_dentry_ops;
	else
		dentry->d_op = &ceph_snap_dentry_ops;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race: free our now-unneeded allocation */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}
	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_fsdata = di;
	dentry->d_time = jiffies;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
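
/*
 * A sketch of the round trip (ceph_make_fpos() lives in super.h; the
 * shift-and-or form shown here is an assumption for illustration):
 *
 *	loff_t pos = ceph_make_fpos(0x2a, 7);  == ((loff_t)0x2a << 32) | 7
 *	fpos_frag(pos) == 0x2a
 *	fpos_off(pos)  == 7
 */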

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */

static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&dcache_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || (last &&
				 filp->f_pos < ceph_dentry(last)->offset)) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next,
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->at_end = 1;
			goto out_unlock;
		}
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	atomic_inc(&dentry->d_count);
	spin_unlock(&dcache_lock);
	spin_unlock(&inode->i_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      dentry->d_inode->i_ino,
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
		last = NULL;
	}

	spin_lock(&inode->i_lock);
	spin_lock(&dcache_lock);

	last = dentry;

	if (err < 0)
		goto out_unlock;

	p = p->prev;
	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
	if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
		goto more;
	dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
	err = -EAGAIN;

out_unlock:
	spin_unlock(&dcache_lock);

	if (last) {
		spin_unlock(&inode->i_lock);
		dput(last);
		spin_lock(&inode->i_lock);
	}

	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
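
/*
 * Example of why we resume by name rather than by index: if a chunk
 * ends at "foo" and entries are created or unlinked on the MDS before
 * the next request, asking for "entries after 'foo'" still lands at
 * the same lexicographical point, whereas a numeric index would
 * silently skip or repeat entries.
 */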

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = &client->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = client->mount_args->max_readdir;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->at_end)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    inode->i_ino, inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    filp->f_dentry->d_parent->d_inode->i_ino,
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_opt(client, NOASYNCREADDIR) &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN) {
			spin_unlock(&inode->i_lock);
			return err;
		}
	}
	spin_unlock(&inode->i_lock);
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = igrab(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->frag = frag;	/* remember which frag this result covers */
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			fi->next_offset = 2;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);

		ftype = le32_to_cpu(in->mode) >> 12;
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    le64_to_cpu(in->ino),
			    ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->at_end = 1;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		ci->i_ceph_flags |= CEPH_I_COMPLETE;
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}
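
/*
 * Offset bookkeeping above, by way of example:
 *	f_pos 0  -> "."   (synthesized locally)
 *	f_pos 1  -> ".."  (synthesized locally)
 *	f_pos 2+ -> entries of the current frag, at ceph_make_fpos(frag, off)
 * which is why reset_readdir() below restores next_offset to 2.
 */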

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;	/* don't leave a dangling pointer behind */
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->at_end = 0;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	struct ceph_client *client = ceph_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode;

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
	    strcmp(dentry->d_name.name,
		   client->mount_args->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}

	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&dir->i_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    client->mount_args->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			di->offset = ci->i_max_offset++;
			spin_unlock(&dir->i_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&dir->i_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_PTR(PTR_ERR(req));
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err)
		d_drop(dentry);
	else if (!req->r_reply_info.head->is_dentry)
		d_instantiate(dentry, igrab(old_dentry->d_inode));
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */
		d_move(old_dentry, new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&dir->i_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&dir->i_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *dir = dentry->d_parent->d_inode;

	dout("d_revalidate %p '%.*s' inode %p\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		goto out_touch;
	}
	if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
		goto out_touch;

	if (dentry_lease_is_valid(dentry) ||
	    dir_lease_is_valid(dir, dentry))
		goto out_touch;

	dout("d_revalidate %p invalid\n", dentry);
	d_drop(dentry);
	return 0;
out_touch:
	ceph_dentry_lru_touch(dentry);
	return 1;
}

/*
 * When a dentry is released, clear the dir I_COMPLETE if it was part
 * of the current dir gen.
 */
static void ceph_dentry_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *parent_inode = dentry->d_parent->d_inode;

	if (parent_inode) {
		struct ceph_inode_info *ci = ceph_inode(parent_inode);

		spin_lock(&parent_inode->i_lock);
		if (ci->i_shared_gen == di->lease_shared_gen) {
			dout(" clearing %p complete (d_release)\n",
			     parent_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
			ci->i_release_count++;
		}
		spin_unlock(&parent_inode->i_lock);
	}
	if (di) {
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;

	if (!ceph_test_opt(ceph_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(1024, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			sprintf(cf->dir_info,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, struct dentry *dentry,
			  int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = &ceph_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = &ceph_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = &ceph_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_dentry_release,
};

struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_dentry_release,
};

struct dentry_operations ceph_snap_dentry_ops = {
};