/*
 *	fs/libfs.c
 *	Library for filesystem writers.
 */

#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <linux/exportfs.h>

#include <asm/uaccess.h>

int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
		   struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
	return 0;
}

int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	buf->f_type = dentry->d_sb->s_magic;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	return 0;
}

/*
 * Retaining negative dentries for an in-memory filesystem just wastes
 * memory and lookup time: arrange for them to be deleted immediately.
 */
static int simple_delete_dentry(struct dentry *dentry)
{
	return 1;
}

/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative.  Set d_op to delete negative dentries.
 */
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	static const struct dentry_operations simple_dentry_operations = {
		.d_delete = simple_delete_dentry,
	};

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	dentry->d_op = &simple_dentry_operations;
	d_add(dentry, NULL);
	return NULL;
}

int simple_sync_file(struct file * file, struct dentry *dentry, int datasync)
{
	return 0;
}

int dcache_dir_open(struct inode *inode, struct file *file)
{
	static struct qstr cursor_name = {.len = 1, .name = "."};

	file->private_data = d_alloc(file->f_path.dentry, &cursor_name);

	return file->private_data ? 0 : -ENOMEM;
}

int dcache_dir_close(struct inode *inode, struct file *file)
{
	dput(file->private_data);
	return 0;
}

loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
{
	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (origin) {
		case 1:
			offset += file->f_pos;
		case 0:
			if (offset >= 0)
				break;
		default:
			mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
			return -EINVAL;
	}
	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (file->f_pos >= 2) {
			struct list_head *p;
			struct dentry *cursor = file->private_data;
			loff_t n = file->f_pos - 2;

			spin_lock(&dcache_lock);
			list_del(&cursor->d_u.d_child);
			p = file->f_path.dentry->d_subdirs.next;
			while (n && p != &file->f_path.dentry->d_subdirs) {
				struct dentry *next;
				next = list_entry(p, struct dentry, d_u.d_child);
				if (!d_unhashed(next) && next->d_inode)
					n--;
				p = p->next;
			}
			list_add_tail(&cursor->d_u.d_child, p);
			spin_unlock(&dcache_lock);
		}
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return offset;
}

/* Relationship between i_mode and the DT_xxx types */
static inline unsigned char dt_type(struct inode *inode)
{
	return (inode->i_mode >> 12) & 15;
}

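/*
 * For example, S_IFDIR is 0040000 (0x4000), so (S_IFDIR >> 12) & 15 is 4,
 * which is DT_DIR; S_IFREG is 0100000 (0x8000), giving 8, which is DT_REG.
 */
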
/*
 * Directory is locked and all positive dentries in it are safe, since
 * for ramfs-type trees they can't go away without unlink() or rmdir(),
 * both impossible due to the lock on directory.
 */

int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct dentry *cursor = filp->private_data;
	struct list_head *p, *q = &cursor->d_u.d_child;
	ino_t ino;
	int i = filp->f_pos;

	switch (i) {
		case 0:
			ino = dentry->d_inode->i_ino;
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				break;
			filp->f_pos++;
			i++;
			/* fallthrough */
		case 1:
			ino = parent_ino(dentry);
			if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
				break;
			filp->f_pos++;
			i++;
			/* fallthrough */
		default:
			spin_lock(&dcache_lock);
			if (filp->f_pos == 2)
				list_move(q, &dentry->d_subdirs);

			for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
				struct dentry *next;
				next = list_entry(p, struct dentry, d_u.d_child);
				if (d_unhashed(next) || !next->d_inode)
					continue;

				spin_unlock(&dcache_lock);
				if (filldir(dirent, next->d_name.name,
					    next->d_name.len, filp->f_pos,
					    next->d_inode->i_ino,
					    dt_type(next->d_inode)) < 0)
					return 0;
				spin_lock(&dcache_lock);
				/* next is still alive */
				list_move(q, p);
				p = q;
				filp->f_pos++;
			}
			spin_unlock(&dcache_lock);
	}
	return 0;
}

ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
	return -EISDIR;
}

const struct file_operations simple_dir_operations = {
	.open		= dcache_dir_open,
	.release	= dcache_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= simple_sync_file,
};

const struct inode_operations simple_dir_inode_operations = {
	.lookup		= simple_lookup,
};

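/*
 * Illustrative sketch (not part of this file): a ramfs-style filesystem can
 * reuse these two tables unchanged when it sets up a directory inode; only
 * the surrounding code is hypothetical.
 *
 *	inode->i_mode = S_IFDIR | 0755;
 *	inode->i_op = &simple_dir_inode_operations;
 *	inode->i_fop = &simple_dir_operations;
 *	inc_nlink(inode);	(a directory starts with i_nlink == 2 for ".")
 */
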
static const struct super_operations simple_super_operations = {
	.statfs		= simple_statfs,
};

/*
 * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
 * will never be mountable)
 */
int get_sb_pseudo(struct file_system_type *fs_type, char *name,
	const struct super_operations *ops, unsigned long magic,
	struct vfsmount *mnt)
{
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
	struct dentry *dentry;
	struct inode *root;
	struct qstr d_name = {.name = name, .len = strlen(name)};

	if (IS_ERR(s))
		return PTR_ERR(s);

	s->s_flags = MS_NOUSER;
	s->s_maxbytes = ~0ULL;
	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = magic;
	s->s_op = ops ? ops : &simple_super_operations;
	s->s_time_gran = 1;
	root = new_inode(s);
	if (!root)
		goto Enomem;
	/*
	 * since this is the first inode, make it number 1. New inodes created
	 * after this must take care not to collide with it (by passing
	 * max_reserved of 1 to iunique).
	 */
	root->i_ino = 1;
	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
	root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
	dentry = d_alloc(NULL, &d_name);
	if (!dentry) {
		iput(root);
		goto Enomem;
	}
	dentry->d_sb = s;
	dentry->d_parent = dentry;
	d_instantiate(dentry, root);
	s->s_root = dentry;
	s->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, s);
	return 0;

Enomem:
	up_write(&s->s_umount);
	deactivate_super(s);
	return -ENOMEM;
}

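/*
 * Illustrative sketch (not part of this file): a pseudo-filesystem's get_sb
 * callback can delegate entirely to get_sb_pseudo(), the way sockfs and
 * pipefs do. example_sops and EXAMPLE_MAGIC are hypothetical names.
 *
 *	static int example_get_sb(struct file_system_type *fs_type, int flags,
 *				  const char *dev_name, void *data,
 *				  struct vfsmount *mnt)
 *	{
 *		return get_sb_pseudo(fs_type, "example:", &example_sops,
 *				     EXAMPLE_MAGIC, mnt);
 *	}
 */
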
int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);
	dget(dentry);
	d_instantiate(dentry, inode);
	return 0;
}

static inline int simple_positive(struct dentry *dentry)
{
	return dentry->d_inode && !d_unhashed(dentry);
}

int simple_empty(struct dentry *dentry)
{
	struct dentry *child;
	int ret = 0;

	spin_lock(&dcache_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child)
		if (simple_positive(child))
			goto out;
	ret = 1;
out:
	spin_unlock(&dcache_lock);
	return ret;
}

int simple_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	simple_unlink(dir, dentry);
	drop_nlink(dir);
	return 0;
}

int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
		struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		simple_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
		new_dir->i_mtime = inode->i_ctime = CURRENT_TIME;

	return 0;
}

int simple_readpage(struct file *file, struct page *page)
{
	clear_highpage(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

int simple_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	if (!PageUptodate(page)) {
		if (to - from != PAGE_CACHE_SIZE)
			zero_user_segments(page,
				0, from,
				to, PAGE_CACHE_SIZE);
	}
	return 0;
}

int simple_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct page *page;
	pgoff_t index;
	unsigned from;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;

	return simple_prepare_write(file, page, from, from+len);
}

static int simple_commit_write(struct file *file, struct page *page,
			       unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	if (!PageUptodate(page))
		SetPageUptodate(page);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	set_page_dirty(page);
	return 0;
}

int simple_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len) {
		void *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + from + copied, 0, len - copied);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}

	simple_commit_write(file, page, from, from+copied);

	unlock_page(page);
	page_cache_release(page);

	return copied;
}

/*
 * the inodes created here are not hashed. If you use iunique to generate
 * unique inode values later for this filesystem, then you must take care
 * to pass it an appropriate max_reserved value to avoid collisions.
 */
int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
{
	struct inode *inode;
	struct dentry *root;
	struct dentry *dentry;
	int i;

	s->s_blocksize = PAGE_CACHE_SIZE;
	s->s_blocksize_bits = PAGE_CACHE_SHIFT;
	s->s_magic = magic;
	s->s_op = &simple_super_operations;
	s->s_time_gran = 1;

	inode = new_inode(s);
	if (!inode)
		return -ENOMEM;
	/*
	 * because the root inode is 1, the files array must not contain an
	 * entry at index 1
	 */
	inode->i_ino = 1;
	inode->i_mode = S_IFDIR | 0755;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_nlink = 2;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	for (i = 0; !files->name || files->name[0]; i++, files++) {
		if (!files->name)
			continue;

		/* warn if it tries to conflict with the root inode */
		if (unlikely(i == 1))
			printk(KERN_WARNING "%s: %s passed in a files array"
				" with an index of 1!\n", __func__,
				s->s_type->name);

		dentry = d_alloc_name(root, files->name);
		if (!dentry)
			goto out;
		inode = new_inode(s);
		if (!inode)
			goto out;
		inode->i_mode = S_IFREG | files->mode;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_fop = files->ops;
		inode->i_ino = i;
		d_add(dentry, inode);
	}
	s->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

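/*
 * Illustrative sketch (not part of this file): a small in-memory filesystem
 * can describe its files with a tree_descr array and call simple_fill_super()
 * from its fill_super callback. Slot 1 must stay unused because the root
 * inode is number 1, and an entry with an empty name ends the array.
 * example_status_fops and EXAMPLE_MAGIC are hypothetical names.
 *
 *	static struct tree_descr example_files[] = {
 *		[2] = { "status", &example_status_fops, S_IRUGO },
 *		{ "" }
 *	};
 *
 *	static int example_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		return simple_fill_super(sb, EXAMPLE_MAGIC, example_files);
 *	}
 */
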
static DEFINE_SPINLOCK(pin_fs_lock);

int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt = NULL;
	spin_lock(&pin_fs_lock);
	if (unlikely(!*mount)) {
		spin_unlock(&pin_fs_lock);
		mnt = vfs_kern_mount(type, 0, type->name, NULL);
		if (IS_ERR(mnt))
			return PTR_ERR(mnt);
		spin_lock(&pin_fs_lock);
		if (!*mount)
			*mount = mnt;
	}
	mntget(*mount);
	++*count;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
	return 0;
}

void simple_release_fs(struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt;
	spin_lock(&pin_fs_lock);
	mnt = *mount;
	if (!--*count)
		*mount = NULL;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
}

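/*
 * Illustrative sketch (not part of this file): callers such as debugfs keep
 * one internal mount alive across many users by pairing these two helpers.
 * example_fs_type, example_mnt and example_mnt_count are hypothetical names.
 *
 *	static struct vfsmount *example_mnt;
 *	static int example_mnt_count;
 *
 *	err = simple_pin_fs(&example_fs_type, &example_mnt, &example_mnt_count);
 *	...
 *	simple_release_fs(&example_mnt, &example_mnt_count);
 */
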
/**
 * simple_read_from_buffer - copy data from the buffer to user space
 * @to: the user space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The simple_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the user space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or a negative value is returned on error.
 */
ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;
	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	if (copy_to_user(to, from + pos, count))
		return -EFAULT;
	*ppos = pos + count;
	return count;
}

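/*
 * Illustrative sketch (not part of this file): a minimal ->read
 * implementation for a fixed in-kernel string. example_read is a
 * hypothetical name.
 *
 *	static ssize_t example_read(struct file *file, char __user *buf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		static const char msg[] = "ok\n";
 *
 *		return simple_read_from_buffer(buf, count, ppos, msg,
 *					       sizeof(msg) - 1);
 *	}
 */
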
/**
 * memory_read_from_buffer - copy data from the buffer
 * @to: the kernel space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The memory_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the kernel space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or a negative value is returned on error.
 */
ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	memcpy(to, from + pos, count);
	*ppos = pos + count;

	return count;
}

/*
 * Transaction based IO.
 * The file expects a single write which triggers the transaction, and then
 * possibly a read which collects the result - which is stored in a
 * file-local buffer.
 */

void simple_transaction_set(struct file *file, size_t n)
{
	struct simple_transaction_argresp *ar = file->private_data;

	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);

	/*
	 * The barrier ensures that ar->size will really remain zero until
	 * ar->data is ready for reading.
	 */
	smp_mb();
	ar->size = n;
}

char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
	struct simple_transaction_argresp *ar;
	static DEFINE_SPINLOCK(simple_transaction_lock);

	if (size > SIMPLE_TRANSACTION_LIMIT - 1)
		return ERR_PTR(-EFBIG);

	ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
	if (!ar)
		return ERR_PTR(-ENOMEM);

	spin_lock(&simple_transaction_lock);

	/* only one write allowed per open */
	if (file->private_data) {
		spin_unlock(&simple_transaction_lock);
		free_page((unsigned long)ar);
		return ERR_PTR(-EBUSY);
	}

	file->private_data = ar;

	spin_unlock(&simple_transaction_lock);

	if (copy_from_user(ar->data, buf, size))
		return ERR_PTR(-EFAULT);

	return ar->data;
}

ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
	struct simple_transaction_argresp *ar = file->private_data;

	if (!ar)
		return 0;
	return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}

int simple_transaction_release(struct inode *inode, struct file *file)
{
	free_page((unsigned long)file->private_data);
	return 0;
}

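/*
 * Illustrative sketch (not part of this file): a control file in the style of
 * nfsd's nfsctl builds on these helpers. The write handler parses the command
 * and publishes the reply length, while read and release are used directly.
 * example_write, example_trans_fops and example_handle_command (which fills
 * the transaction page in place and returns the reply length) are
 * hypothetical names.
 *
 *	static ssize_t example_write(struct file *file, const char __user *buf,
 *				     size_t size, loff_t *pos)
 *	{
 *		char *data = simple_transaction_get(file, buf, size);
 *		ssize_t rv;
 *
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *		rv = example_handle_command(data, size);
 *		if (rv < 0)
 *			return rv;
 *		simple_transaction_set(file, rv);
 *		return size;
 *	}
 *
 *	static const struct file_operations example_trans_fops = {
 *		.write   = example_write,
 *		.read    = simple_transaction_read,
 *		.release = simple_transaction_release,
 *	};
 */
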
/* Simple attribute files */

struct simple_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

/* simple_attr_open is called by an actual attribute open file operation
 * to set the attribute specific access operations. */
int simple_attr_open(struct inode *inode, struct file *file,
		     int (*get)(void *, u64 *), int (*set)(void *, u64),
		     const char *fmt)
{
	struct simple_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);

	file->private_data = attr;

	return nonseekable_open(inode, file);
}

int simple_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
			 size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;

	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

/* interpret the buffer as a number to call the set function with */
ssize_t simple_attr_write(struct file *file, const char __user *buf,
			  size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

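/*
 * Illustrative sketch (not part of this file): the usual way to consume these
 * helpers is the DEFINE_SIMPLE_ATTRIBUTE() macro, which generates an open
 * routine that calls simple_attr_open() with the given get/set callbacks and
 * format string. example_get, example_set and example_fops are hypothetical
 * names.
 *
 *	static int example_get(void *data, u64 *val)
 *	{
 *		*val = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	static int example_set(void *data, u64 val)
 *	{
 *		*(u64 *)data = val;
 *		return 0;
 *	}
 *
 *	DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");
 */
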
/**
 * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
 * @sb: filesystem to do the file handle conversion on
 * @fid: file handle to convert
 * @fh_len: length of the file handle in bytes
 * @fh_type: type of file handle
 * @get_inode: filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the object specified in the file handle.
 */
struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_dentry);

/**
 * generic_fh_to_parent - generic helper for the fh_to_parent export operation
 * @sb: filesystem to do the file handle conversion on
 * @fid: file handle to convert
 * @fh_len: length of the file handle in bytes
 * @fh_type: type of file handle
 * @get_inode: filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the _parent_ object specified in the file handle if it
 * is specified in the file handle, or NULL otherwise.
 */
struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len <= 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.parent_ino,
				  (fh_len > 3 ? fid->i32.parent_gen : 0));
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_parent);

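/*
 * Illustrative sketch (not part of this file): a filesystem can export itself
 * over NFS by plugging these two helpers into its export_operations, supplying
 * only the inode lookup callback. example_get_inode, example_fh_to_dentry,
 * example_fh_to_parent and example_export_ops are hypothetical names.
 *
 *	static struct dentry *example_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
 *					    example_get_inode);
 *	}
 *
 *	static struct dentry *example_fh_to_parent(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return generic_fh_to_parent(sb, fid, fh_len, fh_type,
 *					    example_get_inode);
 *	}
 *
 *	static const struct export_operations example_export_ops = {
 *		.fh_to_dentry = example_fh_to_dentry,
 *		.fh_to_parent = example_fh_to_parent,
 *	};
 */
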
EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
EXPORT_SYMBOL(dcache_readdir);
EXPORT_SYMBOL(generic_read_dir);
EXPORT_SYMBOL(get_sb_pseudo);
EXPORT_SYMBOL(simple_write_begin);
EXPORT_SYMBOL(simple_write_end);
EXPORT_SYMBOL(simple_dir_inode_operations);
EXPORT_SYMBOL(simple_dir_operations);
EXPORT_SYMBOL(simple_empty);
EXPORT_SYMBOL(d_alloc_name);
EXPORT_SYMBOL(simple_fill_super);
EXPORT_SYMBOL(simple_getattr);
EXPORT_SYMBOL(simple_link);
EXPORT_SYMBOL(simple_lookup);
EXPORT_SYMBOL(simple_pin_fs);
EXPORT_UNUSED_SYMBOL(simple_prepare_write);
EXPORT_SYMBOL(simple_readpage);
EXPORT_SYMBOL(simple_release_fs);
EXPORT_SYMBOL(simple_rename);
EXPORT_SYMBOL(simple_rmdir);
EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
EXPORT_SYMBOL(simple_read_from_buffer);
EXPORT_SYMBOL(memory_read_from_buffer);
EXPORT_SYMBOL(simple_transaction_set);
EXPORT_SYMBOL(simple_transaction_get);
EXPORT_SYMBOL(simple_transaction_read);
EXPORT_SYMBOL(simple_transaction_release);
EXPORT_SYMBOL_GPL(simple_attr_open);
EXPORT_SYMBOL_GPL(simple_attr_release);
EXPORT_SYMBOL_GPL(simple_attr_read);
EXPORT_SYMBOL_GPL(simple_attr_write);