/*
 * fs/overlayfs/readdir.c
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include "overlayfs.h"

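/*
 * One entry in the merged directory cache.  Each entry sits on two
 * structures: the rbtree (node) used to detect duplicates between layers,
 * and the list (l_node) in the order entries are emitted.  is_whiteout
 * marks upper layer whiteouts, is_cursor marks the per-open-file cursor
 * that is threaded through the same list.
 */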
struct ovl_cache_entry {
        unsigned int len;
        unsigned int type;
        u64 ino;
        struct list_head l_node;
        struct rb_node node;
        bool is_whiteout;
        bool is_cursor;
        char name[];
};

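/*
 * Cached merged listing of a directory, shared by all files open on the
 * same dentry.  Refcounted; invalidated when the dentry version
 * (ovl_dentry_version_get()) no longer matches.
 */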
struct ovl_dir_cache {
        long refcount;
        u64 version;
        struct list_head entries;
};

struct ovl_readdir_data {
        struct dir_context ctx;
        bool is_merge;
        struct rb_root root;
        struct list_head *list;
        struct list_head middle;
        int count;
        int err;
};

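/*
 * Per open file state: the real underlying directory file, an upper
 * directory file opened lazily for fsync after copy up, the shared cache
 * and an embedded cursor entry marking the position in the cache list.
 */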
struct ovl_dir_file {
        bool is_real;
        bool is_upper;
        struct ovl_dir_cache *cache;
        struct ovl_cache_entry cursor;
        struct file *realfile;
        struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
        return container_of(n, struct ovl_cache_entry, node);
}

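/* Find an entry by name in the rbtree index of the cache being built */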
static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
                                                    const char *name, int len)
{
        struct rb_node *node = root->rb_node;
        int cmp;

        while (node) {
                struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

                cmp = strncmp(name, p->name, len);
                if (cmp > 0)
                        node = p->node.rb_right;
                else if (cmp < 0 || len < p->len)
                        node = p->node.rb_left;
                else
                        return p;
        }

        return NULL;
}

static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
                                                   u64 ino, unsigned int d_type)
{
        struct ovl_cache_entry *p;
        size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

        p = kmalloc(size, GFP_KERNEL);
        if (p) {
                memcpy(p->name, name, len);
                p->name[len] = '\0';
                p->len = len;
                p->type = d_type;
                p->ino = ino;
                p->is_whiteout = false;
                p->is_cursor = false;
        }

        return p;
}

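/*
 * Add an upper layer entry: insert it into the rbtree keyed by name and
 * append it to the emission list.  Duplicate names are silently ignored.
 */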
static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                                  const char *name, int len, u64 ino,
                                  unsigned int d_type)
{
        struct rb_node **newp = &rdd->root.rb_node;
        struct rb_node *parent = NULL;
        struct ovl_cache_entry *p;

        while (*newp) {
                int cmp;
                struct ovl_cache_entry *tmp;

                parent = *newp;
                tmp = ovl_cache_entry_from_node(*newp);
                cmp = strncmp(name, tmp->name, len);
                if (cmp > 0)
                        newp = &tmp->node.rb_right;
                else if (cmp < 0 || len < tmp->len)
                        newp = &tmp->node.rb_left;
                else
                        return 0;
        }

        p = ovl_cache_entry_new(name, len, ino, d_type);
        if (p == NULL)
                return -ENOMEM;

        list_add_tail(&p->l_node, rdd->list);
        rb_link_node(&p->node, parent, newp);
        rb_insert_color(&p->node, &rdd->root);

        return 0;
}

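/*
 * Handle one lower layer entry: a name already seen in the upper layer is
 * moved to the "middle" sublist, an unseen name is added there, so that
 * lower entries end up in front of upper-only ones (see
 * ovl_dir_read_merged()).
 */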
static int ovl_fill_lower(struct ovl_readdir_data *rdd,
                          const char *name, int namelen,
                          loff_t offset, u64 ino, unsigned int d_type)
{
        struct ovl_cache_entry *p;

        p = ovl_cache_entry_find(&rdd->root, name, namelen);
        if (p) {
                list_move_tail(&p->l_node, &rdd->middle);
        } else {
                p = ovl_cache_entry_new(name, namelen, ino, d_type);
                if (p == NULL)
                        rdd->err = -ENOMEM;
                else
                        list_add_tail(&p->l_node, &rdd->middle);
        }

        return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
        struct ovl_cache_entry *p;
        struct ovl_cache_entry *n;

        list_for_each_entry_safe(p, n, list, l_node)
                kfree(p);

        INIT_LIST_HEAD(list);
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
        struct ovl_dir_cache *cache = od->cache;

        list_del_init(&od->cursor.l_node);
        WARN_ON(cache->refcount <= 0);
        cache->refcount--;
        if (!cache->refcount) {
                if (ovl_dir_cache(dentry) == cache)
                        ovl_set_dir_cache(dentry, NULL);

                ovl_cache_free(&cache->entries);
                kfree(cache);
        }
}

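/*
 * dir_context actor used for both passes: the upper pass fills the rbtree
 * and the list, the merge pass feeds lower entries through
 * ovl_fill_lower().
 */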
static int ovl_fill_merge(struct dir_context *ctx, const char *name,
                          int namelen, loff_t offset, u64 ino,
                          unsigned int d_type)
{
        struct ovl_readdir_data *rdd =
                container_of(ctx, struct ovl_readdir_data, ctx);

        rdd->count++;
        if (!rdd->is_merge)
                return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
        else
                return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
}

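/* Read all entries of one underlying directory into rdd */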
static inline int ovl_dir_read(struct path *realpath,
                               struct ovl_readdir_data *rdd)
{
        struct file *realfile;
        int err;

        realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);

        rdd->ctx.pos = 0;
        do {
                rdd->count = 0;
                rdd->err = 0;
                err = iterate_dir(realfile, &rdd->ctx);
                if (err >= 0)
                        err = rdd->err;
        } while (!err && rdd->count);
        fput(realfile);

        return err;
}

static void ovl_dir_reset(struct file *file)
{
        struct ovl_dir_file *od = file->private_data;
        struct ovl_dir_cache *cache = od->cache;
        struct dentry *dentry = file->f_path.dentry;
        enum ovl_path_type type = ovl_path_type(dentry);

        if (cache && ovl_dentry_version_get(dentry) != cache->version) {
                ovl_cache_put(od, dentry);
                od->cache = NULL;
        }
        WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
        if (od->is_real && type == OVL_PATH_MERGE)
                od->is_real = false;
}

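/*
 * Mark whiteouts among the upper layer entries.  Only DT_CHR entries can
 * be whiteouts; the lookup is done with CAP_DAC_OVERRIDE raised.
 */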
static int ovl_dir_mark_whiteouts(struct dentry *dir,
                                  struct ovl_readdir_data *rdd)
{
        struct ovl_cache_entry *p;
        struct dentry *dentry;
        const struct cred *old_cred;
        struct cred *override_cred;

        override_cred = prepare_creds();
        if (!override_cred) {
                ovl_cache_free(rdd->list);
                return -ENOMEM;
        }

        /*
         * CAP_DAC_OVERRIDE for lookup
         */
        cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
        old_cred = override_creds(override_cred);

        mutex_lock(&dir->d_inode->i_mutex);
        list_for_each_entry(p, rdd->list, l_node) {
                if (p->is_cursor)
                        continue;

                if (p->type != DT_CHR)
                        continue;

                dentry = lookup_one_len(p->name, dir, p->len);
                if (IS_ERR(dentry))
                        continue;

                p->is_whiteout = ovl_is_whiteout(dentry);
                dput(dentry);
        }
        mutex_unlock(&dir->d_inode->i_mutex);

        revert_creds(old_cred);
        put_cred(override_cred);

        return 0;
}

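/*
 * Build the merged entry list for a directory: read the upper layer first
 * (marking whiteouts), then merge in the lower layer entries.
 */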
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
{
        int err;
        struct path lowerpath;
        struct path upperpath;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .list = list,
                .root = RB_ROOT,
                .is_merge = false,
        };

        ovl_path_lower(dentry, &lowerpath);
        ovl_path_upper(dentry, &upperpath);

        if (upperpath.dentry) {
                err = ovl_dir_read(&upperpath, &rdd);
                if (err)
                        goto out;

                if (lowerpath.dentry) {
                        err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
                        if (err)
                                goto out;
                }
        }
        if (lowerpath.dentry) {
                /*
                 * Insert lowerpath entries before upperpath ones, this allows
                 * offsets to be reasonably constant
                 */
                list_add(&rdd.middle, rdd.list);
                rdd.is_merge = true;
                err = ovl_dir_read(&lowerpath, &rdd);
                list_del(&rdd.middle);
        }
out:
        return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
        struct ovl_cache_entry *p;
        loff_t off = 0;

        list_for_each_entry(p, &od->cache->entries, l_node) {
                if (p->is_cursor)
                        continue;
                if (off >= pos)
                        break;
                off++;
        }
        list_move_tail(&od->cursor.l_node, &p->l_node);
}

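/*
 * Return the cached merged listing for dentry, taking a reference.  A
 * stale cache (version mismatch) is replaced by re-reading the directory.
 */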
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
        int res;
        struct ovl_dir_cache *cache;

        cache = ovl_dir_cache(dentry);
        if (cache && ovl_dentry_version_get(dentry) == cache->version) {
                cache->refcount++;
                return cache;
        }
        ovl_set_dir_cache(dentry, NULL);

        cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
        if (!cache)
                return ERR_PTR(-ENOMEM);

        cache->refcount = 1;
        INIT_LIST_HEAD(&cache->entries);

        res = ovl_dir_read_merged(dentry, &cache->entries);
        if (res) {
                ovl_cache_free(&cache->entries);
                kfree(cache);
                return ERR_PTR(res);
        }

        cache->version = ovl_dentry_version_get(dentry);
        ovl_set_dir_cache(dentry, cache);

        return cache;
}

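/*
 * readdir: non-merged directories are passed straight through to the real
 * file, merged ones are served from the cache with the embedded cursor
 * tracking the position between calls.
 */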
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;

        if (!ctx->pos)
                ovl_dir_reset(file);

        if (od->is_real)
                return iterate_dir(od->realfile, ctx);

        if (!od->cache) {
                struct ovl_dir_cache *cache;

                cache = ovl_cache_get(dentry);
                if (IS_ERR(cache))
                        return PTR_ERR(cache);

                od->cache = cache;
                ovl_seek_cursor(od, ctx->pos);
        }

        while (od->cursor.l_node.next != &od->cache->entries) {
                struct ovl_cache_entry *p;

                p = list_entry(od->cursor.l_node.next,
                               struct ovl_cache_entry, l_node);
                /* Skip cursors */
                if (!p->is_cursor) {
                        if (!p->is_whiteout) {
                                if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                                        break;
                        }
                        ctx->pos++;
                }
                list_move(&od->cursor.l_node, &p->l_node);
        }
        return 0;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
        loff_t res;
        struct ovl_dir_file *od = file->private_data;

        mutex_lock(&file_inode(file)->i_mutex);
        if (!file->f_pos)
                ovl_dir_reset(file);

        if (od->is_real) {
                res = vfs_llseek(od->realfile, offset, origin);
                file->f_pos = od->realfile->f_pos;
        } else {
                res = -EINVAL;

                switch (origin) {
                case SEEK_CUR:
                        offset += file->f_pos;
                        break;
                case SEEK_SET:
                        break;
                default:
                        goto out_unlock;
                }
                if (offset < 0)
                        goto out_unlock;

                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        if (od->cache)
                                ovl_seek_cursor(od, offset);
                }
                res = offset;
        }
out_unlock:
        mutex_unlock(&file_inode(file)->i_mutex);

        return res;
}

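/*
 * If the directory was only on the lower layer when opened but has been
 * copied up since, fsync must go to the new upper directory, which is
 * opened on demand and cached in od->upperfile.
 */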
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
        struct file *realfile = od->realfile;

        /*
         * Need to check if we started out being a lower dir, but got copied up
         */
        if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
                struct inode *inode = file_inode(file);

                realfile = lockless_dereference(od->upperfile);
                if (!realfile) {
                        struct path upperpath;

                        ovl_path_upper(dentry, &upperpath);
                        realfile = ovl_path_open(&upperpath, O_RDONLY);
                        smp_mb__before_spinlock();
                        mutex_lock(&inode->i_mutex);
                        if (!od->upperfile) {
                                if (IS_ERR(realfile)) {
                                        mutex_unlock(&inode->i_mutex);
                                        return PTR_ERR(realfile);
                                }
                                od->upperfile = realfile;
                        } else {
                                /* somebody has beaten us to it */
                                if (!IS_ERR(realfile))
                                        fput(realfile);
                                realfile = od->upperfile;
                        }
                        mutex_unlock(&inode->i_mutex);
                }
        }

        return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
        struct ovl_dir_file *od = file->private_data;

        if (od->cache) {
                mutex_lock(&inode->i_mutex);
                ovl_cache_put(od, file->f_path.dentry);
                mutex_unlock(&inode->i_mutex);
        }
        fput(od->realfile);
        if (od->upperfile)
                fput(od->upperfile);
        kfree(od);

        return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
        struct path realpath;
        struct file *realfile;
        struct ovl_dir_file *od;
        enum ovl_path_type type;

        od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        type = ovl_path_real(file->f_path.dentry, &realpath);
        realfile = ovl_path_open(&realpath, file->f_flags);
        if (IS_ERR(realfile)) {
                kfree(od);
                return PTR_ERR(realfile);
        }
        INIT_LIST_HEAD(&od->cursor.l_node);
        od->realfile = realfile;
        od->is_real = (type != OVL_PATH_MERGE);
        od->is_upper = (type != OVL_PATH_LOWER);
        od->cursor.is_cursor = true;
        file->private_data = od;

        return 0;
}

const struct file_operations ovl_dir_operations = {
        .read           = generic_read_dir,
        .open           = ovl_dir_open,
        .iterate        = ovl_iterate,
        .llseek         = ovl_dir_llseek,
        .fsync          = ovl_dir_fsync,
        .release        = ovl_dir_release,
};

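/*
 * A merged directory counts as empty if its listing contains nothing but
 * whiteouts and the "." and ".." entries.
 */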
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
        int err;
        struct ovl_cache_entry *p;

        err = ovl_dir_read_merged(dentry, list);
        if (err)
                return err;

        err = 0;

        list_for_each_entry(p, list, l_node) {
                if (p->is_whiteout)
                        continue;

                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
                        if (p->len == 2 && p->name[1] == '.')
                                continue;
                }
                err = -ENOTEMPTY;
                break;
        }

        return err;
}

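/* Remove the whiteout entries recorded in the list from the upper directory */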
void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
        struct ovl_cache_entry *p;

        mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_CHILD);
        list_for_each_entry(p, list, l_node) {
                struct dentry *dentry;

                if (!p->is_whiteout)
                        continue;

                dentry = lookup_one_len(p->name, upper, p->len);
                if (IS_ERR(dentry)) {
                        pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
                               upper->d_name.name, p->len, p->name,
                               (int) PTR_ERR(dentry));
                        continue;
                }
                ovl_cleanup(upper->d_inode, dentry);
                dput(dentry);
        }
        mutex_unlock(&upper->d_inode->i_mutex);
}