/*
 *      fs/libfs.c
 *      Library for filesystems writers.
 */
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
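/*
 * Note: simple_getattr() reports st_blocks in 512-byte units; each resident
 * page-cache page therefore counts as PAGE_CACHE_SIZE / 512 blocks, which is
 * what the shift by (PAGE_CACHE_SHIFT - 9) computes.
 */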
int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
                   struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        generic_fillattr(inode, stat);
        stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
        return 0;
}
int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        buf->f_type = dentry->d_sb->s_magic;
        buf->f_bsize = PAGE_CACHE_SIZE;
        buf->f_namelen = NAME_MAX;
        return 0;
}
/*
 * Retaining negative dentries for an in-memory filesystem just wastes
 * memory and lookup time: arrange for them to be deleted immediately.
 */
static int simple_delete_dentry(struct dentry *dentry)
{
        return 1;
}
/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative.  Set d_op to delete negative dentries.
 */
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
        static struct dentry_operations simple_dentry_operations = {
                .d_delete = simple_delete_dentry,
        };

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);
        dentry->d_op = &simple_dentry_operations;
        d_add(dentry, NULL);
        return NULL;
}
int simple_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
        return 0;
}
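/*
 * The dcache_dir_*() helpers below implement directory reading purely on top
 * of the dcache.  dcache_dir_open() allocates a private "cursor" dentry
 * (named ".", never hashed) for each open directory file; dcache_dir_lseek()
 * and dcache_readdir() move that cursor through the parent's d_subdirs list
 * so the current position stays meaningful while entries come and go.
 */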
int dcache_dir_open(struct inode *inode, struct file *file)
{
        static struct qstr cursor_name = {.len = 1, .name = "."};

        file->private_data = d_alloc(file->f_path.dentry, &cursor_name);

        return file->private_data ? 0 : -ENOMEM;
}
int dcache_dir_close(struct inode *inode, struct file *file)
{
        dput(file->private_data);
        return 0;
}
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
{
        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (origin) {
                case 1:
                        offset += file->f_pos;
                        /* fallthrough */
                case 0:
                        if (offset >= 0)
                                break;
                        /* fallthrough */
                default:
                        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
                        return -EINVAL;
        }
        if (offset != file->f_pos) {
                file->f_pos = offset;
                if (file->f_pos >= 2) {
                        struct list_head *p;
                        struct dentry *cursor = file->private_data;
                        loff_t n = file->f_pos - 2;

                        spin_lock(&dcache_lock);
                        list_del(&cursor->d_u.d_child);
                        p = file->f_path.dentry->d_subdirs.next;
                        while (n && p != &file->f_path.dentry->d_subdirs) {
                                struct dentry *next;
                                next = list_entry(p, struct dentry, d_u.d_child);
                                if (!d_unhashed(next) && next->d_inode)
                                        n--;
                                p = p->next;
                        }
                        list_add_tail(&cursor->d_u.d_child, p);
                        spin_unlock(&dcache_lock);
                }
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return offset;
}
/* Relationship between i_mode and the DT_xxx types */
static inline unsigned char dt_type(struct inode *inode)
{
        return (inode->i_mode >> 12) & 15;
}
/*
 * Directory is locked and all positive dentries in it are safe, since
 * for ramfs-type trees they can't go away without unlink() or rmdir(),
 * both impossible due to the lock on directory.
 */

int dcache_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct dentry *dentry = filp->f_path.dentry;
        struct dentry *cursor = filp->private_data;
        struct list_head *p, *q = &cursor->d_u.d_child;
        ino_t ino;
        int i = filp->f_pos;

        switch (i) {
                case 0:
                        ino = dentry->d_inode->i_ino;
                        if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
                                break;
                        filp->f_pos++;
                        i++;
                        /* fallthrough */
                case 1:
                        ino = parent_ino(dentry);
                        if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
                                break;
                        filp->f_pos++;
                        i++;
                        /* fallthrough */
                default:
                        spin_lock(&dcache_lock);
                        if (filp->f_pos == 2)
                                list_move(q, &dentry->d_subdirs);

                        for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
                                struct dentry *next;
                                next = list_entry(p, struct dentry, d_u.d_child);
                                if (d_unhashed(next) || !next->d_inode)
                                        continue;

                                spin_unlock(&dcache_lock);
                                if (filldir(dirent, next->d_name.name,
                                            next->d_name.len, filp->f_pos,
                                            next->d_inode->i_ino,
                                            dt_type(next->d_inode)) < 0)
                                        return 0;
                                spin_lock(&dcache_lock);
                                /* next is still alive */
                                list_move(q, p);
                                p = q;
                                filp->f_pos++;
                        }
                        spin_unlock(&dcache_lock);
        }
        return 0;
}
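/*
 * Note that dcache_readdir() drops dcache_lock around each filldir() call,
 * since the callback may sleep while copying to userspace; the entries
 * themselves cannot disappear because the caller holds the directory's
 * i_mutex (see the comment above the function).  After each entry is
 * emitted, the cursor is moved just past it under the lock, so f_pos and the
 * list position stay consistent across the unlocked window.
 */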
ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
        return -EISDIR;
}
const struct file_operations simple_dir_operations = {
        .open           = dcache_dir_open,
        .release        = dcache_dir_close,
        .llseek         = dcache_dir_lseek,
        .read           = generic_read_dir,
        .readdir        = dcache_readdir,
        .fsync          = simple_sync_file,
};

const struct inode_operations simple_dir_inode_operations = {
        .lookup         = simple_lookup,
};

static const struct super_operations simple_super_operations = {
        .statfs         = simple_statfs,
};
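/*
 * An in-memory filesystem can use these tables as-is for its directory
 * inodes, as simple_fill_super() does further down.  A minimal sketch
 * (hypothetical helper, not part of this file):
 *
 *      static void hypothetical_init_dir_inode(struct inode *inode)
 *      {
 *              inode->i_op = &simple_dir_inode_operations;
 *              inode->i_fop = &simple_dir_operations;
 *      }
 */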
/*
 * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
 * will never be mountable)
 */
int get_sb_pseudo(struct file_system_type *fs_type, char *name,
        const struct super_operations *ops, unsigned long magic,
        struct vfsmount *mnt)
{
        struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
        struct dentry *dentry;
        struct inode *root;
        struct qstr d_name = {.name = name, .len = strlen(name)};

        if (IS_ERR(s))
                return PTR_ERR(s);

        s->s_flags = MS_NOUSER;
        s->s_maxbytes = ~0ULL;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = magic;
        s->s_op = ops ? ops : &simple_super_operations;
        s->s_time_gran = 1;
        root = new_inode(s);
        if (!root)
                goto Enomem;
        /*
         * since this is the first inode, make it number 1. New inodes created
         * after this must take care not to collide with it (by passing
         * max_reserved of 1 to iunique).
         */
        root->i_ino = 1;
        root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
        root->i_uid = root->i_gid = 0;
        root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
        dentry = d_alloc(NULL, &d_name);
        if (!dentry) {
                iput(root);
                goto Enomem;
        }
        dentry->d_sb = s;
        dentry->d_parent = dentry;
        d_instantiate(dentry, root);
        s->s_root = dentry;
        s->s_flags |= MS_ACTIVE;
        return simple_set_mnt(mnt, s);

Enomem:
        up_write(&s->s_umount);
        deactivate_super(s);
        return -ENOMEM;
}
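/*
 * A pseudo filesystem's get_sb() method is typically a one-liner built on
 * this helper.  A minimal sketch, assuming a caller-defined FOOFS_MAGIC and
 * foofs_ops (both hypothetical):
 *
 *      static int foofs_get_sb(struct file_system_type *fs_type, int flags,
 *                              const char *dev_name, void *data,
 *                              struct vfsmount *mnt)
 *      {
 *              return get_sb_pseudo(fs_type, "foo:", &foofs_ops,
 *                                   FOOFS_MAGIC, mnt);
 *      }
 */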
int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = old_dentry->d_inode;

        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inc_nlink(inode);
        atomic_inc(&inode->i_count);
        dget(dentry);
        d_instantiate(dentry, inode);
        return 0;
}
static inline int simple_positive(struct dentry *dentry)
{
        return dentry->d_inode && !d_unhashed(dentry);
}

int simple_empty(struct dentry *dentry)
{
        struct dentry *child;
        int ret = 0;

        spin_lock(&dcache_lock);
        list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child)
                if (simple_positive(child))
                        goto out;
        ret = 1;
out:
        spin_unlock(&dcache_lock);
        return ret;
}
int simple_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}
int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
        if (!simple_empty(dentry))
                return -ENOTEMPTY;

        drop_nlink(dentry->d_inode);
        simple_unlink(dir, dentry);
        drop_nlink(dir);
        return 0;
}
int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct inode *new_dir, struct dentry *new_dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);

        if (!simple_empty(new_dentry))
                return -ENOTEMPTY;

        if (new_dentry->d_inode) {
                simple_unlink(new_dir, new_dentry);
                if (they_are_dirs)
                        drop_nlink(old_dir);
        } else if (they_are_dirs) {
                drop_nlink(old_dir);
                inc_nlink(new_dir);
        }

        old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
                new_dir->i_mtime = inode->i_ctime = CURRENT_TIME;

        return 0;
}
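/*
 * The nlink juggling above accounts for the ".." entry: a directory's ".."
 * contributes one link to its parent, so moving a directory decrements
 * old_dir's link count and increments new_dir's.  When the rename replaces
 * an existing directory at the destination, new_dir's count is left alone,
 * since the victim's ".." link is simply replaced by the moved directory's.
 */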
int simple_readpage(struct file *file, struct page *page)
{
        clear_highpage(page);
        flush_dcache_page(page);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}
int simple_prepare_write(struct file *file, struct page *page,
                        unsigned from, unsigned to)
{
        if (!PageUptodate(page)) {
                if (to - from != PAGE_CACHE_SIZE) {
                        void *kaddr = kmap_atomic(page, KM_USER0);
                        memset(kaddr, 0, from);
                        memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
                        flush_dcache_page(page);
                        kunmap_atomic(kaddr, KM_USER0);
                }
        }
        return 0;
}
int simple_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        struct page *page;
        pgoff_t index;
        unsigned from;

        index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);

        page = __grab_cache_page(mapping, index);
        if (!page)
                return -ENOMEM;

        *pagep = page;

        return simple_prepare_write(file, page, from, from+len);
}
static int simple_commit_write(struct file *file, struct page *page,
                               unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

        if (!PageUptodate(page))
                SetPageUptodate(page);
        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold the i_mutex.
         */
        if (pos > inode->i_size)
                i_size_write(inode, pos);
        set_page_dirty(page);
        return 0;
}
int simple_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);

        /* zero the stale part of the page if we did a short copy */
        if (copied < len) {
                void *kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + from + copied, 0, len - copied);
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
        }

        simple_commit_write(file, page, from, from+copied);

        unlock_page(page);
        page_cache_release(page);

        return copied;
}
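/*
 * Together these implement the ->write_begin/->write_end protocol for a
 * memory-backed file: write_begin pins and locks the target page and zeroes
 * whatever a partial write to a not-uptodate page would leave stale; the
 * generic write code then copies the user data in; write_end zeroes any tail
 * left by a short copy, marks the page uptodate and dirty, updates i_size,
 * and unlocks and releases the page.
 */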
/*
 * the inodes created here are not hashed. If you use iunique to generate
 * unique inode values later for this filesystem, then you must take care
 * to pass it an appropriate max_reserved value to avoid collisions.
 */
int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
{
        struct inode *inode;
        struct dentry *root;
        struct dentry *dentry;
        int i;

        s->s_blocksize = PAGE_CACHE_SIZE;
        s->s_blocksize_bits = PAGE_CACHE_SHIFT;
        s->s_magic = magic;
        s->s_op = &simple_super_operations;
        s->s_time_gran = 1;

        inode = new_inode(s);
        if (!inode)
                return -ENOMEM;
        /*
         * because the root inode is 1, the files array must not contain an
         * entry at index 1
         */
        inode->i_ino = 1;
        inode->i_mode = S_IFDIR | 0755;
        inode->i_uid = inode->i_gid = 0;
        inode->i_blocks = 0;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
        inode->i_nlink = 2;
        root = d_alloc_root(inode);
        if (!root) {
                iput(inode);
                return -ENOMEM;
        }
        for (i = 0; !files->name || files->name[0]; i++, files++) {
                if (!files->name)
                        continue;

                /* warn if it tries to conflict with the root inode */
                if (unlikely(i == 1))
                        printk(KERN_WARNING "%s: %s passed in a files array"
                                " with an index of 1!\n", __func__,
                                s->s_type->name);

                dentry = d_alloc_name(root, files->name);
                if (!dentry)
                        goto out;
                inode = new_inode(s);
                if (!inode)
                        goto out;
                inode->i_mode = S_IFREG | files->mode;
                inode->i_uid = inode->i_gid = 0;
                inode->i_blocks = 0;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_fop = files->ops;
                inode->i_ino = i;
                d_add(dentry, inode);
        }
        s->s_root = root;
        return 0;
out:
        d_genocide(root);
        dput(root);
        return -ENOMEM;
}
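/*
 * A minimal, hypothetical use of simple_fill_super(): the table is indexed
 * by inode number, entry 0 is ignored, entry 1 must stay empty (the root
 * inode owns number 1), and a zero-length name terminates the array.  The
 * names, ops tables and FOOFS_MAGIC below are assumptions for illustration:
 *
 *      static struct tree_descr foofs_files[] = {
 *              [2] = { "status",  &foofs_status_ops,  S_IRUGO },
 *              [3] = { "control", &foofs_control_ops, S_IWUSR },
 *              { "" },
 *      };
 *
 *      static int foofs_fill_super(struct super_block *sb, void *data,
 *                                  int silent)
 *      {
 *              return simple_fill_super(sb, FOOFS_MAGIC, foofs_files);
 *      }
 */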
static DEFINE_SPINLOCK(pin_fs_lock);

int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
{
        struct vfsmount *mnt = NULL;
        spin_lock(&pin_fs_lock);
        if (unlikely(!*mount)) {
                spin_unlock(&pin_fs_lock);
                mnt = vfs_kern_mount(type, 0, type->name, NULL);
                if (IS_ERR(mnt))
                        return PTR_ERR(mnt);
                spin_lock(&pin_fs_lock);
                if (!*mount)
                        *mount = mnt;
        }
        mntget(*mount);
        ++*count;
        spin_unlock(&pin_fs_lock);
        mntput(mnt);
        return 0;
}
void simple_release_fs(struct vfsmount **mount, int *count)
{
        struct vfsmount *mnt;
        spin_lock(&pin_fs_lock);
        mnt = *mount;
        if (!--*count)
                *mount = NULL;
        spin_unlock(&pin_fs_lock);
        mntput(mnt);
}
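/*
 * simple_pin_fs()/simple_release_fs() keep an internal filesystem mounted
 * for as long as someone is using it.  The first pin mounts it lazily with
 * vfs_kern_mount(); every pin takes a reference on the vfsmount and bumps
 * *count, every release drops one and clears *mount when the count hits
 * zero.  If two callers race to do the initial mount, the loser's redundant
 * mount is dropped by the final mntput(), which is a no-op in the common
 * already-mounted path where mnt stays NULL.
 */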
ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
                                const void *from, size_t available)
{
        loff_t pos = *ppos;
        if (pos < 0)
                return -EINVAL;
        if (pos >= available)
                return 0;
        if (count > available - pos)
                count = available - pos;
        if (copy_to_user(to, from + pos, count))
                return -EFAULT;
        *ppos = pos + count;
        return count;
}
/*
 * Transaction based IO.
 * The file expects a single write which triggers the transaction, and then
 * possibly a read which collects the result - which is stored in a
 * file-local buffer.
 */
char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
        struct simple_transaction_argresp *ar;
        static DEFINE_SPINLOCK(simple_transaction_lock);

        if (size > SIMPLE_TRANSACTION_LIMIT - 1)
                return ERR_PTR(-EFBIG);

        ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
        if (!ar)
                return ERR_PTR(-ENOMEM);

        spin_lock(&simple_transaction_lock);

        /* only one write allowed per open */
        if (file->private_data) {
                spin_unlock(&simple_transaction_lock);
                free_page((unsigned long)ar);
                return ERR_PTR(-EBUSY);
        }

        file->private_data = ar;

        spin_unlock(&simple_transaction_lock);

        if (copy_from_user(ar->data, buf, size))
                return ERR_PTR(-EFAULT);

        return ar->data;
}
ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
        struct simple_transaction_argresp *ar = file->private_data;

        if (!ar)
                return 0;
        return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}
int simple_transaction_release(struct inode *inode, struct file *file)
{
        free_page((unsigned long)file->private_data);
        return 0;
}
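/*
 * A filesystem using this pattern calls simple_transaction_get() from its
 * ->write method to stash the request in the page-sized argresp buffer, runs
 * the transaction, and records the reply length in ar->size; it then wires
 * simple_transaction_read() and simple_transaction_release() up as its
 * ->read and ->release methods so the reply can be read back and the buffer
 * freed on close.
 */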
/* Simple attribute files */

struct simple_attr {
        u64 (*get)(void *);
        void (*set)(void *, u64);
        char get_buf[24];       /* enough to store a u64 and "\n\0" */
        char set_buf[24];
        void *data;
        const char *fmt;        /* format for read operation */
        struct mutex mutex;     /* protects access to these buffers */
};
/* simple_attr_open is called by an actual attribute open file operation
 * to set the attribute specific access operations. */
int simple_attr_open(struct inode *inode, struct file *file,
                     u64 (*get)(void *), void (*set)(void *, u64),
                     const char *fmt)
{
        struct simple_attr *attr;

        attr = kmalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        attr->get = get;
        attr->set = set;
        attr->data = inode->i_private;
        attr->fmt = fmt;
        mutex_init(&attr->mutex);

        file->private_data = attr;

        return nonseekable_open(inode, file);
}
int simple_attr_close(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}
/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
                         size_t len, loff_t *ppos)
{
        struct simple_attr *attr;
        size_t size;
        ssize_t ret;

        attr = file->private_data;

        if (!attr->get)
                return -EACCES;

        mutex_lock(&attr->mutex);
        if (*ppos)              /* continued read */
                size = strlen(attr->get_buf);
        else                    /* first read */
                size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
                                 attr->fmt,
                                 (unsigned long long)attr->get(attr->data));

        ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
        mutex_unlock(&attr->mutex);
        return ret;
}
/* interpret the buffer as a number to call the set function with */
ssize_t simple_attr_write(struct file *file, const char __user *buf,
                          size_t len, loff_t *ppos)
{
        struct simple_attr *attr;
        u64 val;
        size_t size;
        ssize_t ret;

        attr = file->private_data;

        if (!attr->set)
                return -EACCES;

        mutex_lock(&attr->mutex);
        ret = -EFAULT;
        size = min(sizeof(attr->set_buf) - 1, len);
        if (copy_from_user(attr->set_buf, buf, size))
                goto out;

        ret = len; /* claim we got the whole input */
        attr->set_buf[size] = '\0';
        val = simple_strtol(attr->set_buf, NULL, 0);
        attr->set(attr->data, val);
out:
        mutex_unlock(&attr->mutex);
        return ret;
}
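/*
 * These four helpers are normally not called directly: at the time of this
 * code, the DEFINE_SIMPLE_ATTRIBUTE() macro in <linux/fs.h> generates an
 * open routine that hands the caller's get/set callbacks and format string
 * to simple_attr_open(), and packages it together with simple_attr_read(),
 * simple_attr_write() and simple_attr_close() into a file_operations that
 * can be passed to e.g. debugfs_create_file().
 */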
EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
EXPORT_SYMBOL(dcache_readdir);
EXPORT_SYMBOL(generic_read_dir);
EXPORT_SYMBOL(get_sb_pseudo);
EXPORT_SYMBOL(simple_write_begin);
EXPORT_SYMBOL(simple_write_end);
EXPORT_SYMBOL(simple_dir_inode_operations);
EXPORT_SYMBOL(simple_dir_operations);
EXPORT_SYMBOL(simple_empty);
EXPORT_SYMBOL(d_alloc_name);
EXPORT_SYMBOL(simple_fill_super);
EXPORT_SYMBOL(simple_getattr);
EXPORT_SYMBOL(simple_link);
EXPORT_SYMBOL(simple_lookup);
EXPORT_SYMBOL(simple_pin_fs);
EXPORT_SYMBOL(simple_prepare_write);
EXPORT_SYMBOL(simple_readpage);
EXPORT_SYMBOL(simple_release_fs);
EXPORT_SYMBOL(simple_rename);
EXPORT_SYMBOL(simple_rmdir);
EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
EXPORT_SYMBOL(simple_read_from_buffer);
EXPORT_SYMBOL(simple_transaction_get);
EXPORT_SYMBOL(simple_transaction_read);
EXPORT_SYMBOL(simple_transaction_release);
EXPORT_SYMBOL_GPL(simple_attr_open);
EXPORT_SYMBOL_GPL(simple_attr_close);
EXPORT_SYMBOL_GPL(simple_attr_read);
EXPORT_SYMBOL_GPL(simple_attr_write);