/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
        kuid_t   uid;
        kgid_t   gid;
        umode_t mode;
        long    nr_blocks;
        long    nr_inodes;
        struct hstate *hstate;
};

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static struct backing_dev_info hugetlbfs_backing_dev_info = {
        .name           = "hugetlbfs",
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

int sysctl_hugetlb_shm_group;

enum {
        Opt_size, Opt_nr_inodes,
        Opt_mode, Opt_uid, Opt_gid,
        Opt_pagesize,
        Opt_err,
};

static const match_table_t tokens = {
        {Opt_size,      "size=%s"},
        {Opt_nr_inodes, "nr_inodes=%s"},
        {Opt_mode,      "mode=%o"},
        {Opt_uid,       "uid=%u"},
        {Opt_gid,       "gid=%u"},
        {Opt_pagesize,  "pagesize=%s"},
        {Opt_err,       NULL},
};
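
/*
 * Illustrative mount invocation matched by the token table above (values
 * and the mount point are arbitrary, not part of this source):
 *
 *	mount -t hugetlbfs -o uid=1000,gid=1000,mode=1777,pagesize=2M,size=1G \
 *		none /mnt/huge
 *
 * "mode" is parsed as octal, "uid"/"gid" as integers, and "pagesize"/"size"
 * through memparse(), so K/M/G suffixes work; "size" may also be given as a
 * percentage of the huge page pool, e.g. "size=50%".
 */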

static void huge_pagevec_release(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); ++i)
                put_page(pvec->pages[i]);

        pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range. If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;

        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);

        mutex_lock(&inode->i_mutex);
        file_accessed(file);

        ret = -ENOMEM;
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        hugetlb_prefault_arch_hook(vma->vm_mm);

        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                inode->i_size = len;
out:
        mutex_unlock(&inode->i_mutex);

        return ret;
}
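
/*
 * Illustrative user-space counterpart of the mmap path above (a sketch;
 * the path is arbitrary and error handling is omitted):
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0644);
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The file offset must be huge-page aligned, the reservation for the whole
 * range is taken up front, and a writable mapping grows i_size to cover it.
 */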

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
#endif
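
/*
 * Worked example for the alignment computation above, assuming 4 KiB base
 * pages and a 2 MiB hstate: huge_page_mask(h) == ~0x1fffffUL, so
 * info.align_mask == PAGE_MASK & 0x1fffff == 0x1ff000. vm_unmapped_area()
 * then only returns addresses with bits 12..20 clear, i.e. addresses
 * aligned to the 2 MiB huge page size.
 */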

static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
                        char __user *buf, unsigned long count,
                        unsigned long size)
{
        char *kaddr;
        unsigned long left, copied = 0;
        int i, chunksize;

        if (size > count)
                size = count;

        /* Find which 4k chunk and the offset within that chunk */
        i = offset >> PAGE_CACHE_SHIFT;
        offset = offset & ~PAGE_CACHE_MASK;

        while (size) {
                chunksize = PAGE_CACHE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
                        chunksize = size;
                kaddr = kmap(&page[i]);
                left = __copy_to_user(buf, kaddr + offset, chunksize);
                kunmap(&page[i]);
                if (left) {
                        copied += (chunksize - left);
                        break;
                }
                offset = 0;
                size -= chunksize;
                buf += chunksize;
                copied += chunksize;
                i++;
        }
        return copied ? copied : -EFAULT;
}
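
/*
 * Worked example (2 MiB huge page, 4 KiB base pages): for offset 0x5234,
 * i == 0x5234 >> 12 == 5 and the in-chunk offset becomes 0x234, so the
 * copy starts 0x234 bytes into the sixth base page of the compound page
 * and then proceeds one PAGE_CACHE_SIZE chunk at a time.
 */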

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
                              size_t len, loff_t *ppos)
{
        struct hstate *h = hstate_file(filp);
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = *ppos >> huge_page_shift(h);
        unsigned long offset = *ppos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        /* validate length */
        if (len == 0)
                goto out;

        for (;;) {
                struct page *page;
                unsigned long nr, ret;
                int ra;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        goto out;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                goto out;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        ret = len < nr ? len : nr;
                        if (clear_user(buf, ret))
                                ra = -EFAULT;
                        else
                                ra = 0;
                } else {
                        unlock_page(page);

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
                        ret = ra;
                        page_cache_release(page);
                }
                if (ra < 0) {
                        if (retval == 0)
                                retval = ra;
                        goto out;
                }

                offset += ret;
                retval += ret;
                len -= ret;
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);

                /* short read or no more work */
                if ((ret != nr) || (len == 0))
                        break;
        }
out:
        *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}

static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
        cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}

static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        struct pagevec pvec;
        pgoff_t next;
        int i, freed = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (1) {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }

                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        if (page->index > next)
                                next = page->index;
                        ++next;
                        truncate_huge_page(page);
                        unlock_page(page);
                        freed++;
                }
                huge_pagevec_release(&pvec);
        }
        BUG_ON(!lstart && mapping->nrpages);
        hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
        truncate_hugepages(inode, 0);
        clear_inode(inode);
}

static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
        struct vm_area_struct *vma;

        vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
                unsigned long v_offset;

                /*
                 * Can the expression below overflow on 32-bit arches?
                 * No, because the interval tree returns us only those vmas
                 * which overlap the truncated area starting at pgoff,
                 * and no vma on a 32-bit arch can span beyond the 4GB.
                 */
                if (vma->vm_pgoff < pgoff)
                        v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
                else
                        v_offset = 0;

                unmap_hugepage_range(vma, vma->vm_start + v_offset,
                                     vma->vm_end, NULL);
        }
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        mutex_lock(&mapping->i_mmap_mutex);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap))
                hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
        mutex_unlock(&mapping->i_mmap_mutex);
        truncate_hugepages(inode, offset);
        return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;

        BUG_ON(!inode);

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                error = -EINVAL;
                if (attr->ia_size & ~huge_page_mask(h))
                        return -EINVAL;
                error = hugetlb_vmtruncate(inode, attr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}
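
/*
 * Note the alignment check above: with 2 MiB huge pages, for example,
 * truncate()/ftruncate() to 4096 bytes fails with -EINVAL, while a
 * truncate to 4 MiB (two huge pages) succeeds and unmaps and frees any
 * pages beyond the new size.
 */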

static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_config *config)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | config->mode;
                inode->i_uid = config->uid;
                inode->i_gid = config->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                info = HUGETLBFS_I(inode);
                mpol_shared_policy_init(&info->policy, NULL);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_mutex.
 */
struct lock_class_key hugetlbfs_i_mmap_mutex_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
                                &hugetlbfs_i_mmap_mutex_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                INIT_LIST_HEAD(&inode->i_mapping->private_list);
                info = HUGETLBFS_I(inode);
                /*
                 * The policy is initialized here even if we are creating a
                 * private inode because initialization simply creates an
                 * empty rb tree and calls spin_lock_init(); later, when we
                 * call mpol_free_shared_policy() it will just return because
                 * the rb tree will still be empty.
                 */
                mpol_shared_policy_init(&info->policy, NULL);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
                        struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (inode) {
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry);   /* Extra count - pin the dentry in core */
                error = 0;
        }
        return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
        return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
                        struct dentry *dentry, const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;

        return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
        struct page *head = compound_head(page);

        SetPageDirty(head);
        return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
        migrate_page_copy(newpage, page);

        return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(dentry->d_inode);

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}
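
/*
 * Because f_bsize is the huge page size, block counts here are in
 * huge-page units: for example, a mount with "size=1G" on a 2 MiB hstate
 * reports f_blocks == 512, and df(1) scales that by f_bsize when printing.
 */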

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }
        return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
        call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin    = hugetlbfs_write_begin,
        .write_end      = hugetlbfs_write_end,
        .set_page_dirty = hugetlbfs_set_page_dirty,
        .migratepage    = hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read                   = hugetlbfs_read,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create         = hugetlbfs_create,
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .symlink        = hugetlbfs_symlink,
        .mkdir          = hugetlbfs_mkdir,
        .rmdir          = simple_rmdir,
        .mknod          = hugetlbfs_mknod,
        .rename         = simple_rename,
        .setattr        = hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr        = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode    = hugetlbfs_alloc_inode,
        .destroy_inode  = hugetlbfs_destroy_inode,
        .evict_inode    = hugetlbfs_evict_inode,
        .statfs         = hugetlbfs_statfs,
        .put_super      = hugetlbfs_put_super,
        .show_options   = generic_show_options,
};

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
        char *p, *rest;
        substring_t args[MAX_OPT_ARGS];
        int option;
        unsigned long long size = 0;
        enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->uid = make_kuid(current_user_ns(), option);
                        if (!uid_valid(pconfig->uid))
                                goto bad_val;
                        break;

                case Opt_gid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->gid = make_kgid(current_user_ns(), option);
                        if (!gid_valid(pconfig->gid))
                                goto bad_val;
                        break;

                case Opt_mode:
                        if (match_octal(&args[0], &option))
                                goto bad_val;
                        pconfig->mode = option & 01777U;
                        break;

                case Opt_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        size = memparse(args[0].from, &rest);
                        setsize = SIZE_STD;
                        if (*rest == '%')
                                setsize = SIZE_PERCENT;
                        break;
                }

                case Opt_nr_inodes:
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        pconfig->nr_inodes = memparse(args[0].from, &rest);
                        break;

                case Opt_pagesize: {
                        unsigned long ps;
                        ps = memparse(args[0].from, &rest);
                        pconfig->hstate = size_to_hstate(ps);
                        if (!pconfig->hstate) {
                                printk(KERN_ERR
                                        "hugetlbfs: Unsupported page size %lu MB\n",
                                        ps >> 20);
                                return -EINVAL;
                        }
                        break;
                }

                default:
                        printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
                                p);
                        return -EINVAL;
                        break;
                }
        }

        /* Do size after hstate is set up */
        if (setsize > NO_SIZE) {
                struct hstate *h = pconfig->hstate;
                if (setsize == SIZE_PERCENT) {
                        size <<= huge_page_shift(h);
                        size *= h->max_huge_pages;
                        do_div(size, 100);
                }
                pconfig->nr_blocks = (size >> huge_page_shift(h));
        }

        return 0;

bad_val:
        printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
               args[0].from, p);
        return -EINVAL;
}
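
/*
 * Worked example for the SIZE_PERCENT path above: "size=50%" with a 2 MiB
 * hstate and max_huge_pages == 512 gives size = (50 << 21) * 512 / 100
 * == 512 MiB, so nr_blocks == 512 MiB >> 21 == 256 huge pages, i.e. half
 * the pool.
 */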

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
        int ret;
        struct hugetlbfs_config config;
        struct hugetlbfs_sb_info *sbinfo;

        save_mount_options(sb, data);

        config.nr_blocks = -1; /* No limit on size by default */
        config.nr_inodes = -1; /* No limit on number of inodes by default */
        config.uid = current_fsuid();
        config.gid = current_fsgid();
        config.mode = 0755;
        config.hstate = &default_hstate;
        ret = hugetlbfs_parse_options(data, &config);
        if (ret)
                return ret;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        sbinfo->hstate = config.hstate;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->max_inodes = config.nr_inodes;
        sbinfo->free_inodes = config.nr_inodes;
        sbinfo->spool = NULL;
        if (config.nr_blocks != -1) {
                sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(config.hstate);
        sb->s_blocksize_bits = huge_page_shift(config.hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
        if (!sb->s_root)
                goto out_free;
        return 0;
out_free:
        if (sbinfo->spool)
                kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
        .name           = "hugetlbfs",
        .mount          = hugetlbfs_mount,
        .kill_sb        = kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
        kgid_t shm_group;
        shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
        return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
        struct hstate *h = hstate_sizelog(page_size_log);

        if (!h)
                return -1;
        return h - hstates;
}

static struct dentry_operations anon_ops = {
        .d_dname = simple_dname
};

/*
 * Note that size should be aligned to the proper hugepage size on the
 * caller's side, otherwise hugetlb_reserve_pages reserves one fewer huge
 * page than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, struct user_struct **user,
                                int creat_flags, int page_size_log)
{
        struct file *file = ERR_PTR(-ENOMEM);
        struct inode *inode;
        struct path path;
        struct super_block *sb;
        struct qstr quick_string;
        int hstate_idx;

        hstate_idx = get_hstate_idx(page_size_log);
        if (hstate_idx < 0)
                return ERR_PTR(-ENODEV);

        *user = NULL;
        if (!hugetlbfs_vfsmount[hstate_idx])
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                *user = current_user();
                if (user_shm_lock(size, *user)) {
                        task_lock(current);
                        printk_once(KERN_WARNING
                                "%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
                                current->comm, current->pid);
                        task_unlock(current);
                } else {
                        *user = NULL;
                        return ERR_PTR(-EPERM);
                }
        }

        sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
        quick_string.name = name;
        quick_string.len = strlen(quick_string.name);
        quick_string.hash = 0;
        path.dentry = d_alloc_pseudo(sb, &quick_string);
        if (!path.dentry)
                goto out_shm_unlock;

        d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
        file = ERR_PTR(-ENOSPC);
        inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;

        file = ERR_PTR(-ENOMEM);
        if (hugetlb_reserve_pages(inode, 0,
                        size >> huge_page_shift(hstate_inode(inode)), NULL,
                        acctflag))
                goto out_inode;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);

        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                        &hugetlbfs_file_operations);
        if (IS_ERR(file))
                goto out_dentry; /* inode is already attached */

        return file;

out_inode:
        iput(inode);
out_dentry:
        path_put(&path);
out_shm_unlock:
        if (*user) {
                user_shm_unlock(size, *user);
                *user = NULL;
        }
        return file;
}
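
/*
 * Illustrative caller (a sketch, not part of this file): the SysV IPC path
 * reaches hugetlb_file_setup() via
 *
 *	shmget(key, size, SHM_HUGETLB | IPC_CREAT | 0600);
 *
 * with creat_flags == HUGETLB_SHMFS_INODE, which is why the mlock-ulimit
 * fallback above only applies to that case.
 */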

static int __init init_hugetlbfs_fs(void)
{
        struct hstate *h;
        int error;
        int i;

        error = bdi_init(&hugetlbfs_backing_dev_info);
        if (error)
                return error;

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, 0, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out2;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out;

        i = 0;
        for_each_hstate(h) {
                char buf[50];
                unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

                snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
                hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
                                                        buf);

                if (IS_ERR(hugetlbfs_vfsmount[i])) {
                        pr_err("hugetlb: Cannot mount internal hugetlbfs for "
                                "page size %uK", ps_kb);
                        error = PTR_ERR(hugetlbfs_vfsmount[i]);
                        hugetlbfs_vfsmount[i] = NULL;
                }
                i++;
        }
        /* Non default hstates are optional */
        if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
                return 0;

 out:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
        bdi_destroy(&hugetlbfs_backing_dev_info);
        return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
        struct hstate *h;
        int i;

        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(hugetlbfs_inode_cachep);
        i = 0;
        for_each_hstate(h)
                kern_unmount(hugetlbfs_vfsmount[i++]);
        unregister_filesystem(&hugetlbfs_fs_type);
        bdi_destroy(&hugetlbfs_backing_dev_info);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");