/*
 * fs/ceph/dir.c
 */

#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

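/*
 * Illustrative note (not from the original source): an operation such
 * as "unlink dir/name" is expressed to the MDS as the ino of the parent
 * directory plus the single path component "name", while a stat-style
 * operation carries only the ino of its target and no path.
 */
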
const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
        struct ceph_dentry_info *di;

        if (dentry->d_fsdata)
                return 0;

        if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
                dentry->d_op = &ceph_dentry_ops;
        else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
                dentry->d_op = &ceph_snapdir_dentry_ops;
        else
                dentry->d_op = &ceph_snap_dentry_ops;

        di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */

        spin_lock(&dentry->d_lock);
        if (dentry->d_fsdata) {
                /* lost a race */
                kmem_cache_free(ceph_dentry_cachep, di);
                goto out_unlock;
        }
        di->dentry = dentry;
        di->lease_session = NULL;
        dentry->d_fsdata = di;
        dentry->d_time = jiffies;
        ceph_dentry_lru_add(dentry);
out_unlock:
        spin_unlock(&dentry->d_lock);
        return 0;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
        return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
        return p & 0xffffffff;
}

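/*
 * Worked example (illustration only, not from the original source):
 * with the encoding above, a position in frag 0x2 at entry offset 5
 * is stored as
 *
 *         f_pos = ((loff_t)0x2 << 32) | 5 = 0x200000005
 *
 * so that fpos_frag() recovers 0x2 and fpos_off() recovers 5.
 * ceph_make_fpos(), used below and defined elsewhere, performs the
 * corresponding packing.
 */
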
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
                            void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct dentry *parent = filp->f_dentry;
        struct inode *dir = parent->d_inode;
        struct list_head *p;
        struct dentry *dentry, *last;
        struct ceph_dentry_info *di;
        int err = 0;

        /* claim ref on last dentry we returned */
        last = fi->dentry;
        fi->dentry = NULL;

        dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
             last);

        spin_lock(&dcache_lock);

        /* start at beginning? */
        if (filp->f_pos == 2 || (last &&
                                 filp->f_pos < ceph_dentry(last)->offset)) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
                dout(" initial p %p/%p\n", p->prev, p->next);
        } else {
                p = last->d_u.d_child.prev;
        }

more:
        dentry = list_entry(p, struct dentry, d_u.d_child);
        di = ceph_dentry(dentry);
        while (1) {
                dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
                     d_unhashed(dentry) ? "!hashed" : "hashed",
                     parent->d_subdirs.prev, parent->d_subdirs.next);
                if (p == &parent->d_subdirs) {
                        fi->at_end = 1;
                        goto out_unlock;
                }
                if (!d_unhashed(dentry) && dentry->d_inode &&
                    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
                    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
                    filp->f_pos <= di->offset)
                        break;
                dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, di->offset,
                     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
                     !dentry->d_inode ? " null" : "");
                p = p->prev;
                dentry = list_entry(p, struct dentry, d_u.d_child);
                di = ceph_dentry(dentry);
        }

        atomic_inc(&dentry->d_count);
        spin_unlock(&dcache_lock);

        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
        filp->f_pos = di->offset;
        err = filldir(dirent, dentry->d_name.name,
                      dentry->d_name.len, di->offset,
                      dentry->d_inode->i_ino,
                      dentry->d_inode->i_mode >> 12);

        if (last) {
                if (err < 0) {
                        /* remember our position */
                        fi->dentry = last;
                        fi->next_offset = di->offset;
                } else {
                        dput(last);
                }
        }
        last = dentry;

        if (err < 0)
                goto out;

        filp->f_pos++;

        /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
        if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
                dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
                err = -EAGAIN;
                goto out;
        }

        spin_lock(&dcache_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;

out_unlock:
        spin_unlock(&dcache_lock);
out:
        if (last)
                dput(last);
        return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
                            int len)
{
        kfree(fi->last_name);
        fi->last_name = kmalloc(len+1, GFP_NOFS);
        if (!fi->last_name)
                return -ENOMEM;
        memcpy(fi->last_name, name, len);
        fi->last_name[len] = 0;
        dout("note_last_dentry '%s'\n", fi->last_name);
        return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned frag = fpos_frag(filp->f_pos);
        int off = fpos_off(filp->f_pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
        const int max_entries = fsc->mount_options->max_readdir;
        const int max_bytes = fsc->mount_options->max_readdir_bytes;

        dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
        if (fi->at_end)
                return 0;

        /* always start with . and .. */
        if (filp->f_pos == 0) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                fi->dir_release_count = ci->i_release_count;

                dout("readdir off 0 -> '.'\n");
                if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
                            inode->i_ino, inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 1;
                off = 1;
        }
        if (filp->f_pos == 1) {
                dout("readdir off 1 -> '..'\n");
                if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
                            filp->f_dentry->d_parent->d_inode->i_ino,
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 2;
                off = 2;
        }

        /* can we use the dcache? */
        spin_lock(&inode->i_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                spin_unlock(&inode->i_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
                spin_unlock(&inode->i_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
                                       fi->dentry->d_name.len);
                if (err)
                        return err;
                dput(fi->dentry);
                fi->dentry = NULL;
        }

        /* proceed with a normal readdir */

more:
        /* do we have the correct frag content buffered? */
        if (fi->frag != frag || fi->last_readdir == NULL) {
                struct ceph_mds_request *req;
                int op = ceph_snap(inode) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

                /* discard old result, if any */
                if (fi->last_readdir) {
                        ceph_mdsc_put_request(fi->last_readdir);
                        fi->last_readdir = NULL;
                }

                /* requery frag tree, as the frag topology may have changed */
                frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

                dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
                     ceph_vinop(inode), frag, fi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->r_inode = igrab(inode);
                req->r_dentry = dget(filp->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
                req->r_direct_is_hash = true;
                req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
                req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
                req->r_num_caps = max_entries + 1;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                dout("readdir got and parsed readdir result=%d"
                     " on frag %x, end=%d, complete=%d\n", err, frag,
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete);

                if (!req->r_did_prepopulate) {
                        dout("readdir !did_prepopulate");
                        fi->dir_release_count--;    /* preclude I_COMPLETE */
                }

                /* note next offset and last dentry name */
                fi->offset = fi->next_offset;
                fi->last_readdir = req;

                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
                        fi->last_name = NULL;
                        fi->next_offset = 2;
                } else {
                        rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
                        if (err)
                                return err;
                        fi->next_offset += rinfo->dir_nr;
                }
        }

        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
             rinfo->dir_nr, off, fi->offset);
        while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
                u64 pos = ceph_make_fpos(frag, off);
                struct ceph_mds_reply_inode *in =
                        rinfo->dir_in[off - fi->offset].in;
                dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
                     off, off - fi->offset, rinfo->dir_nr, pos,
                     rinfo->dir_dname_len[off - fi->offset],
                     rinfo->dir_dname[off - fi->offset], in);
                BUG_ON(!in);
                ftype = le32_to_cpu(in->mode) >> 12;
                if (filldir(dirent,
                            rinfo->dir_dname[off - fi->offset],
                            rinfo->dir_dname_len[off - fi->offset],
                            pos,
                            le64_to_cpu(in->ino),
                            ftype) < 0) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                off++;
                filp->f_pos = pos + 1;
        }

        if (fi->last_name) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
                goto more;
        }

        /* more frags? */
        if (!ceph_frag_is_rightmost(frag)) {
                frag = ceph_frag_next(frag);
                off = 0;
                filp->f_pos = ceph_make_fpos(frag, off);
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
        fi->at_end = 1;

        /*
         * if dir_release_count still matches the dir, no dentries
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
        spin_lock(&inode->i_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                ci->i_ceph_flags |= CEPH_I_COMPLETE;
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&inode->i_lock);

        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
        if (fi->last_readdir) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
        }
        kfree(fi->last_name);
        fi->last_name = NULL;
        fi->next_offset = 2;  /* compensate for . and .. */
        if (fi->dentry) {
                dput(fi->dentry);
                fi->dentry = NULL;
        }
        fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        loff_t old_offset = offset;
        loff_t retval;

        mutex_lock(&inode->i_mutex);
        switch (origin) {
        case SEEK_END:
                offset += inode->i_size + 2;   /* FIXME */
                break;
        case SEEK_CUR:
                offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                        fi->at_end = 0;
                }
                retval = offset;

                /*
                 * discard buffered readdir content on seekdir(0), or
                 * seek to new frag, or seek prior to current chunk.
                 */
                if (offset == 0 ||
                    fpos_frag(offset) != fpos_frag(old_offset) ||
                    fpos_off(offset) < fi->offset) {
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi);
                }

                /* bump dir_release_count if we did a forward seek */
                if (offset > old_offset)
                        fi->dir_release_count--;
        }
        mutex_unlock(&inode->i_mutex);
        return retval;
}

/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = dentry->d_parent->d_inode;

        /* .snap dir? */
        if (err == -ENOENT &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
                BUG_ON(!d_unhashed(dentry));
                d_add(dentry, inode);
                err = 0;
        }

        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
                        dout("ENOENT and no trace, dentry %p inode %p\n",
                             dentry, dentry->d_inode);
                        if (dentry->d_inode) {
                                d_drop(dentry);
                                err = -ENOENT;
                        } else {
                                d_add(dentry, NULL);
                        }
                }
        }
        if (err)
                dentry = ERR_PTR(err);
        else if (dentry != req->r_dentry)
                dentry = dget(req->r_dentry);   /* we got spliced */
        else
                dentry = NULL;
        return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
        return ceph_ino(inode) == CEPH_INO_ROOT &&
                strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  struct nameidata *nd)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op;
        int err;

        dout("lookup %p dentry %p '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return ERR_PTR(err);

        /* open (but not create!) intent? */
        if (nd &&
            (nd->flags & LOOKUP_OPEN) &&
            (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
            !(nd->intent.open.flags & O_CREAT)) {
                int mode = nd->intent.open.create_mode & ~current->fs->umask;
                return ceph_lookup_open(dir, dentry, nd, mode, 1);
        }

        /* can we conclude ENOENT locally? */
        if (dentry->d_inode == NULL) {
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);

                spin_lock(&dir->i_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
                    !is_root_ceph_dentry(dir, dentry) &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
                        spin_unlock(&dir->i_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
                spin_unlock(&dir->i_lock);
        }

        op = ceph_snap(dir) == CEPH_SNAPDIR ?
                CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
        req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        /* we only need inode linkage */
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_locked_dir = dir;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
        dout("lookup result=%p\n", dentry);
        return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
        struct dentry *result = ceph_lookup(dir, dentry, NULL);

        if (result && !IS_ERR(result)) {
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
                 * had in our cache (and thus got spliced).  Link our
                 * dentry to that inode, but don't hash it, just in
                 * case the VFS wants to dereference it.
                 */
                BUG_ON(!result->d_inode);
                d_instantiate(dentry, result->d_inode);
                return 0;
        }
        return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      int mode, dev_t rdev)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
             dir, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
                       struct nameidata *nd)
{
        dout("create in dir %p dentry %p name '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        if (nd) {
                BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
                dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
                /* hrm, what should i do here if we get aliased? */
                if (IS_ERR(dentry))
                        return PTR_ERR(dentry);
                return 0;
        }

        /* fall back to mknod */
        return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                        const char *dest)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_path2 = kstrdup(dest, GFP_NOFS);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
                dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
                     dentry->d_name.len, dentry->d_name.name, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                goto out;
        }
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (err < 0)
                d_drop(dentry);
        return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("link in dir %p old_dentry %p dentry %p\n", dir,
             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err)
                d_drop(dentry);
        else if (!req->r_reply_info.head->is_dentry)
                d_instantiate(dentry, igrab(old_dentry->d_inode));
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

        spin_lock(&inode->i_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
        spin_unlock(&inode->i_lock);
        return drop;
}

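/*
 * Illustration (not part of the original file): if the inode still has
 * other links (i_nlink > 1), only the LINK caps above are released; if
 * this unlink will take i_nlink to 0, every cap bit the client does not
 * currently want is released as well, keeping only PIN, since the inode
 * is about to vanish from the namespace.
 */
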
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
                dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
                     dentry->d_name.name, dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
                op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
        ceph_mdsc_put_request(req);
out:
        return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(old_dir) != ceph_snap(new_dir))
                return -EXDEV;
        if (ceph_snap(old_dir) != CEPH_NOSNAP ||
            ceph_snap(new_dir) != CEPH_NOSNAP)
                return -EROFS;
        dout("rename dir %p dentry %p to dir %p dentry %p\n",
             old_dir, old_dentry, new_dir, new_dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_dentry = dget(new_dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_locked_dir = new_dir;
        req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        if (new_dentry->d_inode)
                req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
                 * Normally d_move() is done by fill_trace (called by
                 * do_request, above).  If there is no trace, we need
                 * to do it here.
                 */

                /* d_move screws up d_subdirs order */
                ceph_i_clear(new_dir, CEPH_I_COMPLETE);

                d_move(old_dentry, new_dentry);

                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        dentry->d_time = jiffies;
        ceph_dentry(dentry)->lease_shared_gen = 0;
        spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
        struct ceph_dentry_info *di;
        struct ceph_mds_session *s;
        int valid = 0;
        u32 gen;
        unsigned long ttl;
        struct ceph_mds_session *session = NULL;
        struct inode *dir = NULL;
        u32 seq = 0;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        if (di && di->lease_session) {
                s = di->lease_session;
                spin_lock(&s->s_cap_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
                spin_unlock(&s->s_cap_lock);

                if (di->lease_gen == gen &&
                    time_before(jiffies, dentry->d_time) &&
                    time_before(jiffies, ttl)) {
                        valid = 1;
                        if (di->lease_renew_after &&
                            time_after(jiffies, di->lease_renew_after)) {
                                /* we should renew */
                                dir = dentry->d_parent->d_inode;
                                session = ceph_get_mds_session(s);
                                seq = di->lease_seq;
                                di->lease_renew_after = 0;
                                di->lease_renew_from = jiffies;
                        }
                }
        }
        spin_unlock(&dentry->d_lock);

        if (session) {
                ceph_mdsc_lease_send_msg(session, dir, dentry,
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
        dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
        return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;

        spin_lock(&dir->i_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&dir->i_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
        return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
        struct inode *dir = dentry->d_parent->d_inode;

        dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
             dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
             ceph_dentry(dentry)->offset);

        /* always trust cached snapped dentries, snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
                dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
                goto out_touch;
        }
        if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
                goto out_touch;

        if (dentry_lease_is_valid(dentry) ||
            dir_lease_is_valid(dir, dentry))
                goto out_touch;

        dout("d_revalidate %p invalid\n", dentry);
        d_drop(dentry);
        return 0;
out_touch:
        ceph_dentry_lru_touch(dentry);
        return 1;
}

/*
 * When a dentry is released, clear the dir I_COMPLETE if it was part
 * of the current dir gen or if this is in the snapshot namespace.
 */
static void ceph_dentry_release(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        struct inode *parent_inode = NULL;
        u64 snapid = CEPH_NOSNAP;

        if (!IS_ROOT(dentry)) {
                parent_inode = dentry->d_parent->d_inode;
                if (parent_inode)
                        snapid = ceph_snap(parent_inode);
        }
        dout("dentry_release %p parent %p\n", dentry, parent_inode);
        if (parent_inode && snapid != CEPH_SNAPDIR) {
                struct ceph_inode_info *ci = ceph_inode(parent_inode);

                spin_lock(&parent_inode->i_lock);
                if (ci->i_shared_gen == di->lease_shared_gen ||
                    snapid <= CEPH_MAXSNAP) {
                        dout(" clearing %p complete (d_release)\n",
                             parent_inode);
                        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
                        ci->i_release_count++;
                }
                spin_unlock(&parent_inode->i_lock);
        }
        if (di) {
                ceph_dentry_lru_del(dentry);
                if (di->lease_session)
                        ceph_put_mds_session(di->lease_session);
                kmem_cache_free(ceph_dentry_cachep, di);
                dentry->d_fsdata = NULL;
        }
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
                                     struct nameidata *nd)
{
        /*
         * Eventually, we'll want to revalidate snapped metadata
         * too... probably...
         */
        return 1;
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                             loff_t *ppos)
{
        struct ceph_file_info *cf = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;

        if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
                return -EISDIR;

        if (!cf->dir_info) {
                cf->dir_info = kmalloc(1024, GFP_NOFS);
                if (!cf->dir_info)
                        return -ENOMEM;
                cf->dir_info_len =
                        sprintf(cf->dir_info,
                                "entries: %20lld\n"
                                " files: %20lld\n"
                                " subdirs: %20lld\n"
                                "rentries: %20lld\n"
                                " rfiles: %20lld\n"
                                " rsubdirs: %20lld\n"
                                "rbytes: %20lld\n"
                                "rctime: %10ld.%09ld\n",
                                ci->i_files + ci->i_subdirs,
                                ci->i_files,
                                ci->i_subdirs,
                                ci->i_rfiles + ci->i_rsubdirs,
                                ci->i_rfiles,
                                ci->i_rsubdirs,
                                ci->i_rbytes,
                                (long)ci->i_rctime.tv_sec,
                                (long)ci->i_rctime.tv_nsec);
        }

        if (*ppos >= cf->dir_info_len)
                return 0;
        size = min_t(unsigned, size, cf->dir_info_len-*ppos);
        left = copy_to_user(buf, cf->dir_info + *ppos, size);
        if (left == size)
                return -EFAULT;
        *ppos += (size - left);
        return size - left;
}

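/*
 * Example (illustration, not from the original source): on a mount made
 * with '-o dirstat', running "cat <some directory>" returns the text
 * built above, i.e. one "label: value" line each for entries, files,
 * subdirs, rentries, rfiles, rsubdirs, rbytes, and rctime of that
 * directory.
 */
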
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, int datasync)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_dirops;
        struct ceph_mds_request *req;
        u64 last_tid;
        int ret = 0;

        dout("dir_fsync %p\n", inode);
        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        req = list_entry(head->prev,
                         struct ceph_mds_request, r_unsafe_dir_item);
        last_tid = req->r_tid;

        do {
                ceph_mdsc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);
                dout("dir_fsync %p wait on tid %llu (until %llu)\n",
                     inode, req->r_tid, last_tid);
                if (req->r_timeout) {
                        ret = wait_for_completion_timeout(
                                &req->r_safe_completion, req->r_timeout);
                        if (ret > 0)
                                ret = 0;
                        else if (ret == 0)
                                ret = -EIO;  /* timed out */
                } else {
                        wait_for_completion(&req->r_safe_completion);
                }
                spin_lock(&ci->i_unsafe_lock);
                ceph_mdsc_put_request(req);

                if (ret || list_empty(head))
                        break;
                req = list_entry(head->next,
                                 struct ceph_mds_request, r_unsafe_dir_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
        return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_add_tail(&di->lru, &mdsc->dentry_lru);
                mdsc->num_dentry++;
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
             dn->d_name.len, dn->d_name.name, di->offset);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_move_tail(&di->lru, &mdsc->dentry_lru);
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

void ceph_dentry_lru_del(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_del_init(&di->lru);
                mdsc->num_dentry--;
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

const struct file_operations ceph_dir_fops = {
        .read = ceph_read_dir,
        .readdir = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
        .unlocked_ioctl = ceph_ioctl,
        .fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
        .getattr = ceph_getattr,
        .setattr = ceph_setattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .mknod = ceph_mknod,
        .symlink = ceph_symlink,
        .mkdir = ceph_mkdir,
        .link = ceph_link,
        .unlink = ceph_unlink,
        .rmdir = ceph_unlink,
        .rename = ceph_rename,
        .create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
        .d_revalidate = ceph_snapdir_d_revalidate,
        .d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
        .d_release = ceph_dentry_release,
};