fs/hugetlbfs/inode.c
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * William Irwin, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

static struct backing_dev_info hugetlbfs_backing_dev_info = {
	.name		= "hugetlbfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

int sysctl_hugetlb_shm_group;
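
/*
 * Mount options understood by hugetlbfs_parse_options() below:
 * size=, nr_inodes=, mode=, uid=, gid= and pagesize=.
 */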
enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_err,	NULL},
};
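
/* Drop the page references held by a pagevec and reset it for reuse. */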
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	hugetlb_prefault_arch_hook(vma->vm_mm);

	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	start_addr = mm->free_area_cache;

	if (len <= mm->cached_hole_size)
		start_addr = TASK_UNMAPPED_BASE;

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}

		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}
#endif

static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			char __user *buf, unsigned long count,
			unsigned long size)
{
	char *kaddr;
	unsigned long left, copied = 0;
	int i, chunksize;

	if (size > count)
		size = count;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		kaddr = kmap(&page[i]);
		left = __copy_to_user(buf, kaddr + offset, chunksize);
		kunmap(&page[i]);
		if (left) {
			copied += (chunksize - left);
			break;
		}
		offset = 0;
		size -= chunksize;
		buf += chunksize;
		copied += chunksize;
		i++;
	}
	return copied ? copied : -EFAULT;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
			      size_t len, loff_t *ppos)
{
	struct hstate *h = hstate_file(filp);
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = *ppos >> huge_page_shift(h);
	unsigned long offset = *ppos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	mutex_lock(&inode->i_mutex);

	/* validate length */
	if (len == 0)
		goto out;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> huge_page_shift(h);
	for (;;) {
		struct page *page;
		unsigned long nr, ret;
		int ra;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

		/* Find the page */
		page = find_get_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			ret = len < nr ? len : nr;
			if (clear_user(buf, ret))
				ra = -EFAULT;
			else
				ra = 0;
		} else {
			/*
			 * We have the page, copy it to user space buffer.
			 */
			ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
			ret = ra;
		}
		if (ra < 0) {
			if (retval == 0)
				retval = ra;
			if (page)
				page_cache_release(page);
			goto out;
		}

		offset += ret;
		retval += ret;
		len -= ret;
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);

		if (page)
			page_cache_release(page);

		/* short read or no more work */
		if ((ret != nr) || (len == 0))
			break;
	}
out:
	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
	mutex_unlock(&inode->i_mutex);
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}
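
/*
 * Truncation helpers: truncate_huge_page() drops a single page from the
 * page cache; truncate_hugepages() walks the whole range past lstart and
 * finally returns the unused reservation via hugetlb_unreserve_pages().
 */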
static void truncate_huge_page(struct page *page)
{
	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	truncate_hugepages(inode, 0);
	end_writeback(inode);
}
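
/*
 * Unmap, from every VMA in the prio tree, the part of the mapping that
 * lies at or beyond the new end of file at pgoff.
 */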
static inline void
hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the prio_tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < pgoff)
			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		__unmap_hugepage_range(vma,
				vma->vm_start + v_offset, vma->vm_end, NULL);
	}
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	mutex_lock(&mapping->i_mmap_mutex);
	if (!prio_tree_empty(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	mutex_unlock(&mapping->i_mmap_mutex);
	truncate_hugepages(inode, offset);
	return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
					gid_t gid, int mode, dev_t dev)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = uid;
		inode->i_gid = gid;
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		INIT_LIST_HEAD(&inode->i_mapping->private_list);
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(); later, when we
		 * call mpol_free_shared_policy() it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;
	gid_t gid;

	if (dir->i_mode & S_ISGID) {
		gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		gid = current_fsgid();
	}
	inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(), gid, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;
	gid_t gid;

	if (dir->i_mode & S_ISGID)
		gid = dir->i_gid;
	else
		gid = current_fsgid();

	inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(),
					gid, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc)
		return rc;
	migrate_page_copy(newpage, page);

	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(dentry->d_inode);

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->max_blocks >= 0) {
			buf->f_blocks = sbinfo->max_blocks;
			buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;
		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
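
/*
 * Per-inode data comes from a dedicated slab cache; each allocation is
 * also charged against the per-superblock inode limit (free_inodes).
 */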
static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read			= hugetlbfs_read,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long size = 0;
	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = option;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = option;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			size = memparse(args[0].from, &rest);
			setsize = SIZE_STD;
			if (*rest == '%')
				setsize = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				printk(KERN_ERR
				"hugetlbfs: Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		default:
			printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
				 p);
			return -EINVAL;
			break;
		}
	}

	/* Do size after hstate is set up */
	if (setsize > NO_SIZE) {
		struct hstate *h = pconfig->hstate;
		if (setsize == SIZE_PERCENT) {
			size <<= huge_page_shift(h);
			size *= h->max_huge_pages;
			do_div(size, 100);
		}
		pconfig->nr_blocks = (size >> huge_page_shift(h));
	}

	return 0;

bad_val:
	printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
	       args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.nr_blocks = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = config.nr_blocks;
	sbinfo->free_blocks = config.nr_blocks;
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	inode = hugetlbfs_get_inode(sb, config.uid, config.gid,
					S_IFDIR | config.mode, 0);
	if (!inode)
		goto out_free;

	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		goto out_free;
	}
	sb->s_root = root;
	return 0;
out_free:
	kfree(sbinfo);
	return -ENOMEM;
}

int hugetlb_get_quota(struct address_space *mapping, long delta)
{
	int ret = 0;
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);

	if (sbinfo->free_blocks > -1) {
		spin_lock(&sbinfo->stat_lock);
		if (sbinfo->free_blocks - delta >= 0)
			sbinfo->free_blocks -= delta;
		else
			ret = -ENOMEM;
		spin_unlock(&sbinfo->stat_lock);
	}

	return ret;
}

void hugetlb_put_quota(struct address_space *mapping, long delta)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);

	if (sbinfo->free_blocks > -1) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += delta;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount;

static int can_do_hugetlb_shm(void)
{
	return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
}
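
/*
 * Create an unlinked file on the internal hugetlbfs mount (used e.g. by the
 * SHM_HUGETLB path); the requested size is reserved up front through
 * hugetlb_reserve_pages() so the mapping is backed by reserved huge pages.
 */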
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag,
				struct user_struct **user, int creat_flags)
{
	int error = -ENOMEM;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr quick_string;

	*user = NULL;
	if (!hugetlbfs_vfsmount)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	root = hugetlbfs_vfsmount->mnt_root;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc(root, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	path.mnt = mntget(hugetlbfs_vfsmount);
	error = -ENOSPC;
	inode = hugetlbfs_get_inode(root->d_sb, current_fsuid(),
				current_fsgid(), S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;

	error = -ENOMEM;
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (!file)
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return ERR_PTR(error);
}

static int __init init_hugetlbfs_fs(void)
{
	int error;
	struct vfsmount *vfsmount;

	error = bdi_init(&hugetlbfs_backing_dev_info);
	if (error)
		return error;

	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	vfsmount = kern_mount(&hugetlbfs_fs_type);

	if (!IS_ERR(vfsmount)) {
		hugetlbfs_vfsmount = vfsmount;
		return 0;
	}

	error = PTR_ERR(vfsmount);

out:
	if (error)
		kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
	bdi_destroy(&hugetlbfs_backing_dev_info);
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	kern_unmount(hugetlbfs_vfsmount);
	unregister_filesystem(&hugetlbfs_fs_type);
	bdi_destroy(&hugetlbfs_backing_dev_info);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");