hugetlb: add allocate function for hugepage migration
[linux-2.6.git] / mm / hugetlb.c
1 /*
2 * Generic hugetlb support.
3 * (C) William Irwin, April 2004
4 */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/io.h>
29 #include <linux/hugetlb.h>
30 #include <linux/node.h>
31 #include "internal.h"
33 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
37 static int max_hstate;
38 unsigned int default_hstate_idx;
39 struct hstate hstates[HUGE_MAX_HSTATE];
41 __initdata LIST_HEAD(huge_boot_pages);
43 /* for command line parsing */
44 static struct hstate * __initdata parsed_hstate;
45 static unsigned long __initdata default_hstate_max_huge_pages;
46 static unsigned long __initdata default_hstate_size;
48 #define for_each_hstate(h) \
49 for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
52 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
54 static DEFINE_SPINLOCK(hugetlb_lock);
57 * Region tracking -- allows tracking of reservations and instantiated pages
58 * across the pages in a mapping.
60 * The region data structures are protected by a combination of the mmap_sem
61 * and the hugetlb_instantiation_mutex. To access or modify a region the caller
62 * must either hold the mmap_sem for write, or the mmap_sem for read and
63 * the hugetlb_instantiation mutex:
65 * down_write(&mm->mmap_sem);
66 * or
67 * down_read(&mm->mmap_sem);
68 * mutex_lock(&hugetlb_instantiation_mutex);
70 struct file_region {
71 struct list_head link;
72 long from;
73 long to;
76 static long region_add(struct list_head *head, long f, long t)
78 struct file_region *rg, *nrg, *trg;
80 /* Locate the region we are either in or before. */
81 list_for_each_entry(rg, head, link)
82 if (f <= rg->to)
83 break;
85 /* Round our left edge to the current segment if it encloses us. */
86 if (f > rg->from)
87 f = rg->from;
89 /* Check for and consume any regions we now overlap with. */
90 nrg = rg;
91 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
92 if (&rg->link == head)
93 break;
94 if (rg->from > t)
95 break;
97 /* If this area reaches higher, then extend our area to
98 * include it completely. If this is not the first area
99 * which we intend to reuse, free it. */
100 if (rg->to > t)
101 t = rg->to;
102 if (rg != nrg) {
103 list_del(&rg->link);
104 kfree(rg);
107 nrg->from = f;
108 nrg->to = t;
109 return 0;
112 static long region_chg(struct list_head *head, long f, long t)
114 struct file_region *rg, *nrg;
115 long chg = 0;
117 /* Locate the region we are before or in. */
118 list_for_each_entry(rg, head, link)
119 if (f <= rg->to)
120 break;
122 /* If we are below the current region then a new region is required.
123 * Subtle, allocate a new region at the position but make it zero
124 * size such that we can guarantee to record the reservation. */
125 if (&rg->link == head || t < rg->from) {
126 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
127 if (!nrg)
128 return -ENOMEM;
129 nrg->from = f;
130 nrg->to = f;
131 INIT_LIST_HEAD(&nrg->link);
132 list_add(&nrg->link, rg->link.prev);
134 return t - f;
137 /* Round our left edge to the current segment if it encloses us. */
138 if (f > rg->from)
139 f = rg->from;
140 chg = t - f;
142 /* Check for and consume any regions we now overlap with. */
143 list_for_each_entry(rg, rg->link.prev, link) {
144 if (&rg->link == head)
145 break;
146 if (rg->from > t)
147 return chg;
149 /* We overlap with this area; if it extends further than
150 * us then we must extend ourselves. Account for its
151 * existing reservation. */
152 if (rg->to > t) {
153 chg += rg->to - t;
154 t = rg->to;
156 chg -= rg->to - rg->from;
158 return chg;
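/*
 * Illustrative sketch (not part of this file): region_chg() and
 * region_add() are used as a two-phase pair.  region_chg() prepares a
 * reservation change and returns the number of pages to charge;
 * region_add() later commits the same range once the page has been
 * instantiated.  Assuming a caller holding a resv_map 'resv' and a
 * huge page index 'idx':
 *
 *	chg = region_chg(&resv->regions, idx, idx + 1);
 *	if (chg < 0)
 *		return chg;		(only -ENOMEM is possible here)
 *	... allocate and instantiate the page ...
 *	region_add(&resv->regions, idx, idx + 1);
 */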
161 static long region_truncate(struct list_head *head, long end)
163 struct file_region *rg, *trg;
164 long chg = 0;
166 /* Locate the region we are either in or before. */
167 list_for_each_entry(rg, head, link)
168 if (end <= rg->to)
169 break;
170 if (&rg->link == head)
171 return 0;
173 /* If we are in the middle of a region then adjust it. */
174 if (end > rg->from) {
175 chg = rg->to - end;
176 rg->to = end;
177 rg = list_entry(rg->link.next, typeof(*rg), link);
180 /* Drop any remaining regions. */
181 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
182 if (&rg->link == head)
183 break;
184 chg += rg->to - rg->from;
185 list_del(&rg->link);
186 kfree(rg);
188 return chg;
191 static long region_count(struct list_head *head, long f, long t)
193 struct file_region *rg;
194 long chg = 0;
196 /* Locate each segment we overlap with, and count that overlap. */
197 list_for_each_entry(rg, head, link) {
198 long seg_from;
199 long seg_to;
201 if (rg->to <= f)
202 continue;
203 if (rg->from >= t)
204 break;
206 seg_from = max(rg->from, f);
207 seg_to = min(rg->to, t);
209 chg += seg_to - seg_from;
212 return chg;
216 * Convert the address within this vma to the page offset within
217 * the mapping, in pagecache page units; huge pages here.
219 static pgoff_t vma_hugecache_offset(struct hstate *h,
220 struct vm_area_struct *vma, unsigned long address)
222 return ((address - vma->vm_start) >> huge_page_shift(h)) +
223 (vma->vm_pgoff >> huge_page_order(h));
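/*
 * Worked example (illustrative, assuming a 2MB hstate, i.e.
 * huge_page_shift(h) == 21): for a VMA with vm_start == 0x40000000
 * and vm_pgoff == 0, an address of 0x40400000 yields
 * (0x40400000 - 0x40000000) >> 21 == 2, i.e. the third huge page
 * in the mapping.
 */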
226 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
227 unsigned long address)
229 return vma_hugecache_offset(hstate_vma(vma), vma, address);
233 * Return the size of the pages allocated when backing a VMA. In the majority
234 * of cases this will be the same size as used by the page table entries.
236 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
238 struct hstate *hstate;
240 if (!is_vm_hugetlb_page(vma))
241 return PAGE_SIZE;
243 hstate = hstate_vma(vma);
245 return 1UL << (hstate->order + PAGE_SHIFT);
247 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
250 * Return the page size being used by the MMU to back a VMA. In the majority
251 * of cases, the page size used by the kernel matches the MMU size. On
252 * architectures where it differs, an architecture-specific version of this
253 * function is required.
255 #ifndef vma_mmu_pagesize
256 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
258 return vma_kernel_pagesize(vma);
260 #endif
263 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
264 * bits of the reservation map pointer, which are always clear due to
265 * alignment.
267 #define HPAGE_RESV_OWNER (1UL << 0)
268 #define HPAGE_RESV_UNMAPPED (1UL << 1)
269 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
272 * These helpers are used to track how many pages are reserved for
273 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
274 * is guaranteed to have their future faults succeed.
276 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
277 * the reserve counters are updated with the hugetlb_lock held. It is safe
278 * to reset the VMA at fork() time as it is not in use yet and there is no
279 * chance of the global counters getting corrupted as a result of the values.
281 * The private mapping reservation is represented in a subtly different
282 * manner to a shared mapping. A shared mapping has a region map associated
283 * with the underlying file, this region map represents the backing file
284 * pages which have ever had a reservation assigned, and this persists even
285 * after the page is instantiated. A private mapping has a region map
286 * associated with the original mmap which is attached to all VMAs which
287 * reference it, this region map represents those offsets which have consumed
288 * reservation, i.e. where pages have been instantiated.
290 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
292 return (unsigned long)vma->vm_private_data;
295 static void set_vma_private_data(struct vm_area_struct *vma,
296 unsigned long value)
298 vma->vm_private_data = (void *)value;
301 struct resv_map {
302 struct kref refs;
303 struct list_head regions;
306 static struct resv_map *resv_map_alloc(void)
308 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
309 if (!resv_map)
310 return NULL;
312 kref_init(&resv_map->refs);
313 INIT_LIST_HEAD(&resv_map->regions);
315 return resv_map;
318 static void resv_map_release(struct kref *ref)
320 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
322 /* Clear out any active regions before we release the map. */
323 region_truncate(&resv_map->regions, 0);
324 kfree(resv_map);
327 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
329 VM_BUG_ON(!is_vm_hugetlb_page(vma));
330 if (!(vma->vm_flags & VM_MAYSHARE))
331 return (struct resv_map *)(get_vma_private_data(vma) &
332 ~HPAGE_RESV_MASK);
333 return NULL;
336 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
338 VM_BUG_ON(!is_vm_hugetlb_page(vma));
339 VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
341 set_vma_private_data(vma, (get_vma_private_data(vma) &
342 HPAGE_RESV_MASK) | (unsigned long)map);
345 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
347 VM_BUG_ON(!is_vm_hugetlb_page(vma));
348 VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
350 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
353 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
355 VM_BUG_ON(!is_vm_hugetlb_page(vma));
357 return (get_vma_private_data(vma) & flag) != 0;
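/*
 * Illustrative sketch: for a MAP_PRIVATE mapping, the owning process
 * typically ends up with both a map pointer and the owner flag packed
 * into vm_private_data, e.g. (assuming a freshly allocated 'resv_map'):
 *
 *	set_vma_resv_map(vma, resv_map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *
 * This works because kmalloc()ed pointers are aligned well beyond two
 * bytes, leaving the bottom two bits free for the HPAGE_RESV_* flags.
 */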
360 /* Decrement the reserved pages in the hugepage pool by one */
361 static void decrement_hugepage_resv_vma(struct hstate *h,
362 struct vm_area_struct *vma)
364 if (vma->vm_flags & VM_NORESERVE)
365 return;
367 if (vma->vm_flags & VM_MAYSHARE) {
368 /* Shared mappings always use reserves */
369 h->resv_huge_pages--;
370 } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
372 * Only the process that called mmap() has reserves for
373 * private mappings.
375 h->resv_huge_pages--;
379 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
380 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
382 VM_BUG_ON(!is_vm_hugetlb_page(vma));
383 if (!(vma->vm_flags & VM_MAYSHARE))
384 vma->vm_private_data = (void *)0;
387 /* Returns true if the VMA has associated reserve pages */
388 static int vma_has_reserves(struct vm_area_struct *vma)
390 if (vma->vm_flags & VM_MAYSHARE)
391 return 1;
392 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
393 return 1;
394 return 0;
397 static void clear_gigantic_page(struct page *page,
398 unsigned long addr, unsigned long sz)
400 int i;
401 struct page *p = page;
403 might_sleep();
404 for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
405 cond_resched();
406 clear_user_highpage(p, addr + i * PAGE_SIZE);
409 static void clear_huge_page(struct page *page,
410 unsigned long addr, unsigned long sz)
412 int i;
414 if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
415 clear_gigantic_page(page, addr, sz);
416 return;
419 might_sleep();
420 for (i = 0; i < sz/PAGE_SIZE; i++) {
421 cond_resched();
422 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
426 static void copy_gigantic_page(struct page *dst, struct page *src,
427 unsigned long addr, struct vm_area_struct *vma)
429 int i;
430 struct hstate *h = hstate_vma(vma);
431 struct page *dst_base = dst;
432 struct page *src_base = src;
433 might_sleep();
434 for (i = 0; i < pages_per_huge_page(h); ) {
435 cond_resched();
436 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
438 i++;
439 dst = mem_map_next(dst, dst_base, i);
440 src = mem_map_next(src, src_base, i);
443 static void copy_huge_page(struct page *dst, struct page *src,
444 unsigned long addr, struct vm_area_struct *vma)
446 int i;
447 struct hstate *h = hstate_vma(vma);
449 if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
450 copy_gigantic_page(dst, src, addr, vma);
451 return;
454 might_sleep();
455 for (i = 0; i < pages_per_huge_page(h); i++) {
456 cond_resched();
457 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
461 static void enqueue_huge_page(struct hstate *h, struct page *page)
463 int nid = page_to_nid(page);
464 list_add(&page->lru, &h->hugepage_freelists[nid]);
465 h->free_huge_pages++;
466 h->free_huge_pages_node[nid]++;
469 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
471 struct page *page;
473 if (list_empty(&h->hugepage_freelists[nid]))
474 return NULL;
475 page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
476 list_del(&page->lru);
477 h->free_huge_pages--;
478 h->free_huge_pages_node[nid]--;
479 return page;
482 static struct page *dequeue_huge_page_vma(struct hstate *h,
483 struct vm_area_struct *vma,
484 unsigned long address, int avoid_reserve)
486 struct page *page = NULL;
487 struct mempolicy *mpol;
488 nodemask_t *nodemask;
489 struct zonelist *zonelist;
490 struct zone *zone;
491 struct zoneref *z;
493 get_mems_allowed();
494 zonelist = huge_zonelist(vma, address,
495 htlb_alloc_mask, &mpol, &nodemask);
497 * A child process with MAP_PRIVATE mappings created by its parent
498 * has no page reserves. This check ensures that reservations are
499 * not "stolen". The child may still get SIGKILLed
501 if (!vma_has_reserves(vma) &&
502 h->free_huge_pages - h->resv_huge_pages == 0)
503 goto err;
505 /* If reserves cannot be used, ensure enough pages are in the pool */
506 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
507 goto err;
509 for_each_zone_zonelist_nodemask(zone, z, zonelist,
510 MAX_NR_ZONES - 1, nodemask) {
511 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
512 page = dequeue_huge_page_node(h, zone_to_nid(zone));
513 if (page) {
514 if (!avoid_reserve)
515 decrement_hugepage_resv_vma(h, vma);
516 break;
520 err:
521 mpol_cond_put(mpol);
522 put_mems_allowed();
523 return page;
526 static void update_and_free_page(struct hstate *h, struct page *page)
528 int i;
530 VM_BUG_ON(h->order >= MAX_ORDER);
532 h->nr_huge_pages--;
533 h->nr_huge_pages_node[page_to_nid(page)]--;
534 for (i = 0; i < pages_per_huge_page(h); i++) {
535 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
536 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
537 1 << PG_private | 1<< PG_writeback);
539 set_compound_page_dtor(page, NULL);
540 set_page_refcounted(page);
541 arch_release_hugepage(page);
542 __free_pages(page, huge_page_order(h));
545 struct hstate *size_to_hstate(unsigned long size)
547 struct hstate *h;
549 for_each_hstate(h) {
550 if (huge_page_size(h) == size)
551 return h;
553 return NULL;
556 static void free_huge_page(struct page *page)
559 * Can't pass hstate in here because it is called from the
560 * compound page destructor.
562 struct hstate *h = page_hstate(page);
563 int nid = page_to_nid(page);
564 struct address_space *mapping;
566 mapping = (struct address_space *) page_private(page);
567 set_page_private(page, 0);
568 page->mapping = NULL;
569 BUG_ON(page_count(page));
570 BUG_ON(page_mapcount(page));
571 INIT_LIST_HEAD(&page->lru);
573 spin_lock(&hugetlb_lock);
574 if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
575 update_and_free_page(h, page);
576 h->surplus_huge_pages--;
577 h->surplus_huge_pages_node[nid]--;
578 } else {
579 enqueue_huge_page(h, page);
581 spin_unlock(&hugetlb_lock);
582 if (mapping)
583 hugetlb_put_quota(mapping, 1);
586 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
588 set_compound_page_dtor(page, free_huge_page);
589 spin_lock(&hugetlb_lock);
590 h->nr_huge_pages++;
591 h->nr_huge_pages_node[nid]++;
592 spin_unlock(&hugetlb_lock);
593 put_page(page); /* free it into the hugepage allocator */
596 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
598 int i;
599 int nr_pages = 1 << order;
600 struct page *p = page + 1;
602 /* we rely on prep_new_huge_page to set the destructor */
603 set_compound_order(page, order);
604 __SetPageHead(page);
605 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
606 __SetPageTail(p);
607 p->first_page = page;
611 int PageHuge(struct page *page)
613 compound_page_dtor *dtor;
615 if (!PageCompound(page))
616 return 0;
618 page = compound_head(page);
619 dtor = get_compound_page_dtor(page);
621 return dtor == free_huge_page;
624 EXPORT_SYMBOL_GPL(PageHuge);
626 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
628 struct page *page;
630 if (h->order >= MAX_ORDER)
631 return NULL;
633 page = alloc_pages_exact_node(nid,
634 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
635 __GFP_REPEAT|__GFP_NOWARN,
636 huge_page_order(h));
637 if (page) {
638 if (arch_prepare_hugepage(page)) {
639 __free_pages(page, huge_page_order(h));
640 return NULL;
642 prep_new_huge_page(h, page, nid);
645 return page;
649 * common helper functions for hstate_next_node_to_{alloc|free}.
650 * We may have allocated or freed a huge page based on a different
651 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
652 * be outside of *nodes_allowed. Ensure that we use an allowed
653 * node for alloc or free.
655 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
657 nid = next_node(nid, *nodes_allowed);
658 if (nid == MAX_NUMNODES)
659 nid = first_node(*nodes_allowed);
660 VM_BUG_ON(nid >= MAX_NUMNODES);
662 return nid;
665 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
667 if (!node_isset(nid, *nodes_allowed))
668 nid = next_node_allowed(nid, nodes_allowed);
669 return nid;
673 * returns the previously saved node ["this node"] from which to
674 * allocate a persistent huge page for the pool and advance the
675 * next node from which to allocate, handling wrap at end of node
676 * mask.
678 static int hstate_next_node_to_alloc(struct hstate *h,
679 nodemask_t *nodes_allowed)
681 int nid;
683 VM_BUG_ON(!nodes_allowed);
685 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
686 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
688 return nid;
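/*
 * Worked example (illustrative): with *nodes_allowed == {0,2,3} and
 * h->next_nid_to_alloc == 2, successive calls return 2, 3, 0, 2, ...,
 * wrapping via first_node() when next_node() runs past MAX_NUMNODES.
 */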
691 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
693 struct page *page;
694 int start_nid;
695 int next_nid;
696 int ret = 0;
698 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
699 next_nid = start_nid;
701 do {
702 page = alloc_fresh_huge_page_node(h, next_nid);
703 if (page) {
704 ret = 1;
705 break;
707 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
708 } while (next_nid != start_nid);
710 if (ret)
711 count_vm_event(HTLB_BUDDY_PGALLOC);
712 else
713 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
715 return ret;
719 * helper for free_pool_huge_page() - return the previously saved
720 * node ["this node"] from which to free a huge page. Advance the
721 * next node id whether or not we find a free huge page to free so
722 * that the next attempt to free addresses the next node.
724 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
726 int nid;
728 VM_BUG_ON(!nodes_allowed);
730 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
731 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
733 return nid;
737 * Free huge page from pool from next node to free.
738 * Attempt to keep persistent huge pages more or less
739 * balanced over allowed nodes.
740 * Called with hugetlb_lock locked.
742 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
743 bool acct_surplus)
745 int start_nid;
746 int next_nid;
747 int ret = 0;
749 start_nid = hstate_next_node_to_free(h, nodes_allowed);
750 next_nid = start_nid;
752 do {
754 * If we're returning unused surplus pages, only examine
755 * nodes with surplus pages.
757 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
758 !list_empty(&h->hugepage_freelists[next_nid])) {
759 struct page *page =
760 list_entry(h->hugepage_freelists[next_nid].next,
761 struct page, lru);
762 list_del(&page->lru);
763 h->free_huge_pages--;
764 h->free_huge_pages_node[next_nid]--;
765 if (acct_surplus) {
766 h->surplus_huge_pages--;
767 h->surplus_huge_pages_node[next_nid]--;
769 update_and_free_page(h, page);
770 ret = 1;
771 break;
773 next_nid = hstate_next_node_to_free(h, nodes_allowed);
774 } while (next_nid != start_nid);
776 return ret;
779 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
781 struct page *page;
782 unsigned int r_nid;
784 if (h->order >= MAX_ORDER)
785 return NULL;
788 * Assume we will successfully allocate the surplus page to
789 * prevent racing processes from causing the surplus to exceed
790 * overcommit
792 * This however introduces a different race, where a process B
793 * tries to grow the static hugepage pool while alloc_pages() is
794 * called by process A. B will only examine the per-node
795 * counters in determining if surplus huge pages can be
796 * converted to normal huge pages in adjust_pool_surplus(). A
797 * won't be able to increment the per-node counter, until the
798 * lock is dropped by B, but B doesn't drop hugetlb_lock until
799 * no more huge pages can be converted from surplus to normal
800 * state (and doesn't try to convert again). Thus, we have a
801 * case where a surplus huge page exists, the pool is grown, and
802 * the surplus huge page still exists after, even though it
803 * should just have been converted to a normal huge page. This
804 * does not leak memory, though, as the hugepage will be freed
805 * once it is out of use. It also does not allow the counters to
806 * go out of whack in adjust_pool_surplus() as we don't modify
807 * the node values until we've gotten the hugepage and only the
808 * per-node value is checked there.
810 spin_lock(&hugetlb_lock);
811 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
812 spin_unlock(&hugetlb_lock);
813 return NULL;
814 } else {
815 h->nr_huge_pages++;
816 h->surplus_huge_pages++;
818 spin_unlock(&hugetlb_lock);
820 if (nid == NUMA_NO_NODE)
821 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
822 __GFP_REPEAT|__GFP_NOWARN,
823 huge_page_order(h));
824 else
825 page = alloc_pages_exact_node(nid,
826 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
827 __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
829 if (page && arch_prepare_hugepage(page)) {
830 __free_pages(page, huge_page_order(h));
831 return NULL;
834 spin_lock(&hugetlb_lock);
835 if (page) {
837 * This page is now managed by the hugetlb allocator and has
838 * no users -- drop the buddy allocator's reference.
840 put_page_testzero(page);
841 VM_BUG_ON(page_count(page));
842 r_nid = page_to_nid(page);
843 set_compound_page_dtor(page, free_huge_page);
845 * We incremented the global counters already
847 h->nr_huge_pages_node[r_nid]++;
848 h->surplus_huge_pages_node[r_nid]++;
849 __count_vm_event(HTLB_BUDDY_PGALLOC);
850 } else {
851 h->nr_huge_pages--;
852 h->surplus_huge_pages--;
853 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
855 spin_unlock(&hugetlb_lock);
857 return page;
861 * This allocation function is useful in the context where vma is irrelevant.
862 * E.g. soft-offlining uses this function because it only cares about the
863 * physical address of the error page.
865 struct page *alloc_huge_page_node(struct hstate *h, int nid)
867 struct page *page;
869 spin_lock(&hugetlb_lock);
870 page = dequeue_huge_page_node(h, nid);
871 spin_unlock(&hugetlb_lock);
873 if (!page)
874 page = alloc_buddy_huge_page(h, nid);
876 return page;
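/*
 * Illustrative sketch (hypothetical caller): a migration path that
 * needs a fresh huge page on the same node as a poisoned page could do
 *
 *	struct hstate *h = page_hstate(compound_head(old_page));
 *	new_page = alloc_huge_page_node(h, page_to_nid(old_page));
 *
 * falling back to a surplus page from the buddy allocator when the
 * free list for that node is empty, as implemented above.
 */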
880 * Increase the hugetlb pool such that it can accommodate a reservation
881 * of size 'delta'.
883 static int gather_surplus_pages(struct hstate *h, int delta)
885 struct list_head surplus_list;
886 struct page *page, *tmp;
887 int ret, i;
888 int needed, allocated;
890 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
891 if (needed <= 0) {
892 h->resv_huge_pages += delta;
893 return 0;
896 allocated = 0;
897 INIT_LIST_HEAD(&surplus_list);
899 ret = -ENOMEM;
900 retry:
901 spin_unlock(&hugetlb_lock);
902 for (i = 0; i < needed; i++) {
903 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
904 if (!page) {
906 * We were not able to allocate enough pages to
907 * satisfy the entire reservation so we free what
908 * we've allocated so far.
910 spin_lock(&hugetlb_lock);
911 needed = 0;
912 goto free;
915 list_add(&page->lru, &surplus_list);
917 allocated += needed;
920 * After retaking hugetlb_lock, we need to recalculate 'needed'
921 * because either resv_huge_pages or free_huge_pages may have changed.
923 spin_lock(&hugetlb_lock);
924 needed = (h->resv_huge_pages + delta) -
925 (h->free_huge_pages + allocated);
926 if (needed > 0)
927 goto retry;
930 * The surplus_list now contains _at_least_ the number of extra pages
931 * needed to accommodate the reservation. Add the appropriate number
932 * of pages to the hugetlb pool and free the extras back to the buddy
933 * allocator. Commit the entire reservation here to prevent another
934 * process from stealing the pages as they are added to the pool but
935 * before they are reserved.
937 needed += allocated;
938 h->resv_huge_pages += delta;
939 ret = 0;
940 free:
941 /* Free the needed pages to the hugetlb pool */
942 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
943 if ((--needed) < 0)
944 break;
945 list_del(&page->lru);
946 enqueue_huge_page(h, page);
949 /* Free unnecessary surplus pages to the buddy allocator */
950 if (!list_empty(&surplus_list)) {
951 spin_unlock(&hugetlb_lock);
952 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
953 list_del(&page->lru);
955 * The page has a reference count of zero already, so
956 * call free_huge_page directly instead of using
957 * put_page. This must be done with hugetlb_lock
958 * unlocked which is safe because free_huge_page takes
959 * hugetlb_lock before deciding how to free the page.
961 free_huge_page(page);
963 spin_lock(&hugetlb_lock);
966 return ret;
970 * When releasing a hugetlb pool reservation, any surplus pages that were
971 * allocated to satisfy the reservation must be explicitly freed if they were
972 * never used.
973 * Called with hugetlb_lock held.
975 static void return_unused_surplus_pages(struct hstate *h,
976 unsigned long unused_resv_pages)
978 unsigned long nr_pages;
980 /* Uncommit the reservation */
981 h->resv_huge_pages -= unused_resv_pages;
983 /* Cannot return gigantic pages currently */
984 if (h->order >= MAX_ORDER)
985 return;
987 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
990 * We want to release as many surplus pages as possible, spread
991 * evenly across all nodes with memory. Iterate across these nodes
992 * until we can no longer free unreserved surplus pages. This occurs
993 * when the nodes with surplus pages have no free pages.
994 * free_pool_huge_page() will balance the freed pages across the
995 * on-line nodes with memory and will handle the hstate accounting.
997 while (nr_pages--) {
998 if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
999 break;
1004 * Determine if the huge page at addr within the vma has an associated
1005 * reservation. Where it does not we will need to logically increase
1006 * reservation and actually increase quota before an allocation can occur.
1007 * Where any new reservation would be required the reservation change is
1008 * prepared, but not committed. Once the page has been quota'd, allocated
1009 * and instantiated, the change should be committed via vma_commit_reservation.
1010 * No action is required on failure.
1012 static long vma_needs_reservation(struct hstate *h,
1013 struct vm_area_struct *vma, unsigned long addr)
1015 struct address_space *mapping = vma->vm_file->f_mapping;
1016 struct inode *inode = mapping->host;
1018 if (vma->vm_flags & VM_MAYSHARE) {
1019 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1020 return region_chg(&inode->i_mapping->private_list,
1021 idx, idx + 1);
1023 } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1024 return 1;
1026 } else {
1027 long err;
1028 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1029 struct resv_map *reservations = vma_resv_map(vma);
1031 err = region_chg(&reservations->regions, idx, idx + 1);
1032 if (err < 0)
1033 return err;
1034 return 0;
1037 static void vma_commit_reservation(struct hstate *h,
1038 struct vm_area_struct *vma, unsigned long addr)
1040 struct address_space *mapping = vma->vm_file->f_mapping;
1041 struct inode *inode = mapping->host;
1043 if (vma->vm_flags & VM_MAYSHARE) {
1044 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1045 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1047 } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1048 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1049 struct resv_map *reservations = vma_resv_map(vma);
1051 /* Mark this page used in the map. */
1052 region_add(&reservations->regions, idx, idx + 1);
1056 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1057 unsigned long addr, int avoid_reserve)
1059 struct hstate *h = hstate_vma(vma);
1060 struct page *page;
1061 struct address_space *mapping = vma->vm_file->f_mapping;
1062 struct inode *inode = mapping->host;
1063 long chg;
1066 * Processes that did not create the mapping will have no reserves and
1067 * will not have accounted against quota. Check that the quota can be
1068 * made before satisfying the allocation
1069 * MAP_NORESERVE mappings may also need pages and quota allocated
1070 * if no reserve mapping overlaps.
1072 chg = vma_needs_reservation(h, vma, addr);
1073 if (chg < 0)
1074 return ERR_PTR(chg);
1075 if (chg)
1076 if (hugetlb_get_quota(inode->i_mapping, chg))
1077 return ERR_PTR(-ENOSPC);
1079 spin_lock(&hugetlb_lock);
1080 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1081 spin_unlock(&hugetlb_lock);
1083 if (!page) {
1084 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1085 if (!page) {
1086 hugetlb_put_quota(inode->i_mapping, chg);
1087 return ERR_PTR(-VM_FAULT_SIGBUS);
1091 set_page_refcounted(page);
1092 set_page_private(page, (unsigned long) mapping);
1094 vma_commit_reservation(h, vma, addr);
1096 return page;
1099 int __weak alloc_bootmem_huge_page(struct hstate *h)
1101 struct huge_bootmem_page *m;
1102 int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1104 while (nr_nodes) {
1105 void *addr;
1107 addr = __alloc_bootmem_node_nopanic(
1108 NODE_DATA(hstate_next_node_to_alloc(h,
1109 &node_states[N_HIGH_MEMORY])),
1110 huge_page_size(h), huge_page_size(h), 0);
1112 if (addr) {
1114 * Use the beginning of the huge page to store the
1115 * huge_bootmem_page struct (until gather_bootmem
1116 * puts them into the mem_map).
1118 m = addr;
1119 goto found;
1121 nr_nodes--;
1123 return 0;
1125 found:
1126 BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1127 /* Put them into a private list first because mem_map is not up yet */
1128 list_add(&m->list, &huge_boot_pages);
1129 m->hstate = h;
1130 return 1;
1133 static void prep_compound_huge_page(struct page *page, int order)
1135 if (unlikely(order > (MAX_ORDER - 1)))
1136 prep_compound_gigantic_page(page, order);
1137 else
1138 prep_compound_page(page, order);
1141 /* Put bootmem huge pages into the standard lists after mem_map is up */
1142 static void __init gather_bootmem_prealloc(void)
1144 struct huge_bootmem_page *m;
1146 list_for_each_entry(m, &huge_boot_pages, list) {
1147 struct page *page = virt_to_page(m);
1148 struct hstate *h = m->hstate;
1149 __ClearPageReserved(page);
1150 WARN_ON(page_count(page) != 1);
1151 prep_compound_huge_page(page, h->order);
1152 prep_new_huge_page(h, page, page_to_nid(page));
1156 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1158 unsigned long i;
1160 for (i = 0; i < h->max_huge_pages; ++i) {
1161 if (h->order >= MAX_ORDER) {
1162 if (!alloc_bootmem_huge_page(h))
1163 break;
1164 } else if (!alloc_fresh_huge_page(h,
1165 &node_states[N_HIGH_MEMORY]))
1166 break;
1168 h->max_huge_pages = i;
1171 static void __init hugetlb_init_hstates(void)
1173 struct hstate *h;
1175 for_each_hstate(h) {
1176 /* oversize hugepages were init'ed in early boot */
1177 if (h->order < MAX_ORDER)
1178 hugetlb_hstate_alloc_pages(h);
1182 static char * __init memfmt(char *buf, unsigned long n)
1184 if (n >= (1UL << 30))
1185 sprintf(buf, "%lu GB", n >> 30);
1186 else if (n >= (1UL << 20))
1187 sprintf(buf, "%lu MB", n >> 20);
1188 else
1189 sprintf(buf, "%lu KB", n >> 10);
1190 return buf;
1193 static void __init report_hugepages(void)
1195 struct hstate *h;
1197 for_each_hstate(h) {
1198 char buf[32];
1199 printk(KERN_INFO "HugeTLB registered %s page size, "
1200 "pre-allocated %ld pages\n",
1201 memfmt(buf, huge_page_size(h)),
1202 h->free_huge_pages);
1206 #ifdef CONFIG_HIGHMEM
1207 static void try_to_free_low(struct hstate *h, unsigned long count,
1208 nodemask_t *nodes_allowed)
1210 int i;
1212 if (h->order >= MAX_ORDER)
1213 return;
1215 for_each_node_mask(i, *nodes_allowed) {
1216 struct page *page, *next;
1217 struct list_head *freel = &h->hugepage_freelists[i];
1218 list_for_each_entry_safe(page, next, freel, lru) {
1219 if (count >= h->nr_huge_pages)
1220 return;
1221 if (PageHighMem(page))
1222 continue;
1223 list_del(&page->lru);
1224 update_and_free_page(h, page);
1225 h->free_huge_pages--;
1226 h->free_huge_pages_node[page_to_nid(page)]--;
1230 #else
1231 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1232 nodemask_t *nodes_allowed)
1235 #endif
1238 * Increment or decrement surplus_huge_pages. Keep node-specific counters
1239 * balanced by operating on them in a round-robin fashion.
1240 * Returns 1 if an adjustment was made.
1242 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1243 int delta)
1245 int start_nid, next_nid;
1246 int ret = 0;
1248 VM_BUG_ON(delta != -1 && delta != 1);
1250 if (delta < 0)
1251 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1252 else
1253 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1254 next_nid = start_nid;
1256 do {
1257 int nid = next_nid;
1258 if (delta < 0) {
1260 * To shrink on this node, there must be a surplus page
1262 if (!h->surplus_huge_pages_node[nid]) {
1263 next_nid = hstate_next_node_to_alloc(h,
1264 nodes_allowed);
1265 continue;
1268 if (delta > 0) {
1270 * Surplus cannot exceed the total number of pages
1272 if (h->surplus_huge_pages_node[nid] >=
1273 h->nr_huge_pages_node[nid]) {
1274 next_nid = hstate_next_node_to_free(h,
1275 nodes_allowed);
1276 continue;
1280 h->surplus_huge_pages += delta;
1281 h->surplus_huge_pages_node[nid] += delta;
1282 ret = 1;
1283 break;
1284 } while (next_nid != start_nid);
1286 return ret;
1289 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1290 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1291 nodemask_t *nodes_allowed)
1293 unsigned long min_count, ret;
1295 if (h->order >= MAX_ORDER)
1296 return h->max_huge_pages;
1299 * Increase the pool size
1300 * First take pages out of surplus state. Then make up the
1301 * remaining difference by allocating fresh huge pages.
1303 * We might race with alloc_buddy_huge_page() here and be unable
1304 * to convert a surplus huge page to a normal huge page. That is
1305 * not critical, though, it just means the overall size of the
1306 * pool might be one hugepage larger than it needs to be, but
1307 * within all the constraints specified by the sysctls.
1309 spin_lock(&hugetlb_lock);
1310 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1311 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1312 break;
1315 while (count > persistent_huge_pages(h)) {
1317 * If this allocation races such that we no longer need the
1318 * page, free_huge_page will handle it by freeing the page
1319 * and reducing the surplus.
1321 spin_unlock(&hugetlb_lock);
1322 ret = alloc_fresh_huge_page(h, nodes_allowed);
1323 spin_lock(&hugetlb_lock);
1324 if (!ret)
1325 goto out;
1327 /* Bail for signals. Probably ctrl-c from user */
1328 if (signal_pending(current))
1329 goto out;
1333 * Decrease the pool size
1334 * First return free pages to the buddy allocator (being careful
1335 * to keep enough around to satisfy reservations). Then place
1336 * pages into surplus state as needed so the pool will shrink
1337 * to the desired size as pages become free.
1339 * By placing pages into the surplus state independent of the
1340 * overcommit value, we are allowing the surplus pool size to
1341 * exceed overcommit. There are few sane options here. Since
1342 * alloc_buddy_huge_page() is checking the global counter,
1343 * though, we'll note that we're not allowed to exceed surplus
1344 * and won't grow the pool anywhere else. Not until one of the
1345 * sysctls are changed, or the surplus pages go out of use.
1347 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1348 min_count = max(count, min_count);
1349 try_to_free_low(h, min_count, nodes_allowed);
1350 while (min_count < persistent_huge_pages(h)) {
1351 if (!free_pool_huge_page(h, nodes_allowed, 0))
1352 break;
1354 while (count < persistent_huge_pages(h)) {
1355 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1356 break;
1358 out:
1359 ret = persistent_huge_pages(h);
1360 spin_unlock(&hugetlb_lock);
1361 return ret;
1364 #define HSTATE_ATTR_RO(_name) \
1365 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1367 #define HSTATE_ATTR(_name) \
1368 static struct kobj_attribute _name##_attr = \
1369 __ATTR(_name, 0644, _name##_show, _name##_store)
1371 static struct kobject *hugepages_kobj;
1372 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1374 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1376 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1378 int i;
1380 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1381 if (hstate_kobjs[i] == kobj) {
1382 if (nidp)
1383 *nidp = NUMA_NO_NODE;
1384 return &hstates[i];
1387 return kobj_to_node_hstate(kobj, nidp);
1390 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1391 struct kobj_attribute *attr, char *buf)
1393 struct hstate *h;
1394 unsigned long nr_huge_pages;
1395 int nid;
1397 h = kobj_to_hstate(kobj, &nid);
1398 if (nid == NUMA_NO_NODE)
1399 nr_huge_pages = h->nr_huge_pages;
1400 else
1401 nr_huge_pages = h->nr_huge_pages_node[nid];
1403 return sprintf(buf, "%lu\n", nr_huge_pages);
1405 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1406 struct kobject *kobj, struct kobj_attribute *attr,
1407 const char *buf, size_t len)
1409 int err;
1410 int nid;
1411 unsigned long count;
1412 struct hstate *h;
1413 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1415 err = strict_strtoul(buf, 10, &count);
1416 if (err)
1417 return err;
1419 h = kobj_to_hstate(kobj, &nid);
1420 if (nid == NUMA_NO_NODE) {
1422 * global hstate attribute
1424 if (!(obey_mempolicy &&
1425 init_nodemask_of_mempolicy(nodes_allowed))) {
1426 NODEMASK_FREE(nodes_allowed);
1427 nodes_allowed = &node_states[N_HIGH_MEMORY];
1429 } else if (nodes_allowed) {
1431 * per node hstate attribute: adjust count to global,
1432 * but restrict alloc/free to the specified node.
1434 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1435 init_nodemask_of_node(nodes_allowed, nid);
1436 } else
1437 nodes_allowed = &node_states[N_HIGH_MEMORY];
1439 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1441 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1442 NODEMASK_FREE(nodes_allowed);
1444 return len;
1447 static ssize_t nr_hugepages_show(struct kobject *kobj,
1448 struct kobj_attribute *attr, char *buf)
1450 return nr_hugepages_show_common(kobj, attr, buf);
1453 static ssize_t nr_hugepages_store(struct kobject *kobj,
1454 struct kobj_attribute *attr, const char *buf, size_t len)
1456 return nr_hugepages_store_common(false, kobj, attr, buf, len);
1458 HSTATE_ATTR(nr_hugepages);
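/*
 * From userspace this attribute appears under sysfs, e.g. (assuming a
 * 2MB hstate):
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * The store path above parses the count and hands it to
 * set_max_huge_pages(), constrained to all nodes with memory.
 */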
1460 #ifdef CONFIG_NUMA
1463 * hstate attribute for optionally mempolicy-based constraint on persistent
1464 * huge page alloc/free.
1466 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1467 struct kobj_attribute *attr, char *buf)
1469 return nr_hugepages_show_common(kobj, attr, buf);
1472 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1473 struct kobj_attribute *attr, const char *buf, size_t len)
1475 return nr_hugepages_store_common(true, kobj, attr, buf, len);
1477 HSTATE_ATTR(nr_hugepages_mempolicy);
1478 #endif
1481 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1482 struct kobj_attribute *attr, char *buf)
1484 struct hstate *h = kobj_to_hstate(kobj, NULL);
1485 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1487 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1488 struct kobj_attribute *attr, const char *buf, size_t count)
1490 int err;
1491 unsigned long input;
1492 struct hstate *h = kobj_to_hstate(kobj, NULL);
1494 err = strict_strtoul(buf, 10, &input);
1495 if (err)
1496 return err;
1498 spin_lock(&hugetlb_lock);
1499 h->nr_overcommit_huge_pages = input;
1500 spin_unlock(&hugetlb_lock);
1502 return count;
1504 HSTATE_ATTR(nr_overcommit_hugepages);
1506 static ssize_t free_hugepages_show(struct kobject *kobj,
1507 struct kobj_attribute *attr, char *buf)
1509 struct hstate *h;
1510 unsigned long free_huge_pages;
1511 int nid;
1513 h = kobj_to_hstate(kobj, &nid);
1514 if (nid == NUMA_NO_NODE)
1515 free_huge_pages = h->free_huge_pages;
1516 else
1517 free_huge_pages = h->free_huge_pages_node[nid];
1519 return sprintf(buf, "%lu\n", free_huge_pages);
1521 HSTATE_ATTR_RO(free_hugepages);
1523 static ssize_t resv_hugepages_show(struct kobject *kobj,
1524 struct kobj_attribute *attr, char *buf)
1526 struct hstate *h = kobj_to_hstate(kobj, NULL);
1527 return sprintf(buf, "%lu\n", h->resv_huge_pages);
1529 HSTATE_ATTR_RO(resv_hugepages);
1531 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1532 struct kobj_attribute *attr, char *buf)
1534 struct hstate *h;
1535 unsigned long surplus_huge_pages;
1536 int nid;
1538 h = kobj_to_hstate(kobj, &nid);
1539 if (nid == NUMA_NO_NODE)
1540 surplus_huge_pages = h->surplus_huge_pages;
1541 else
1542 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1544 return sprintf(buf, "%lu\n", surplus_huge_pages);
1546 HSTATE_ATTR_RO(surplus_hugepages);
1548 static struct attribute *hstate_attrs[] = {
1549 &nr_hugepages_attr.attr,
1550 &nr_overcommit_hugepages_attr.attr,
1551 &free_hugepages_attr.attr,
1552 &resv_hugepages_attr.attr,
1553 &surplus_hugepages_attr.attr,
1554 #ifdef CONFIG_NUMA
1555 &nr_hugepages_mempolicy_attr.attr,
1556 #endif
1557 NULL,
1560 static struct attribute_group hstate_attr_group = {
1561 .attrs = hstate_attrs,
1564 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1565 struct kobject **hstate_kobjs,
1566 struct attribute_group *hstate_attr_group)
1568 int retval;
1569 int hi = h - hstates;
1571 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1572 if (!hstate_kobjs[hi])
1573 return -ENOMEM;
1575 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1576 if (retval)
1577 kobject_put(hstate_kobjs[hi]);
1579 return retval;
1582 static void __init hugetlb_sysfs_init(void)
1584 struct hstate *h;
1585 int err;
1587 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1588 if (!hugepages_kobj)
1589 return;
1591 for_each_hstate(h) {
1592 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1593 hstate_kobjs, &hstate_attr_group);
1594 if (err)
1595 printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1596 h->name);
1600 #ifdef CONFIG_NUMA
1603 * node_hstate/s - associate per node hstate attributes, via their kobjects,
1604 * with node sysdevs in node_devices[] using a parallel array. The array
1605 * index of a node sysdev or _hstate == node id.
1606 * This is here to avoid any static dependency of the node sysdev driver, in
1607 * the base kernel, on the hugetlb module.
1609 struct node_hstate {
1610 struct kobject *hugepages_kobj;
1611 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1613 struct node_hstate node_hstates[MAX_NUMNODES];
1616 * A subset of global hstate attributes for node sysdevs
1618 static struct attribute *per_node_hstate_attrs[] = {
1619 &nr_hugepages_attr.attr,
1620 &free_hugepages_attr.attr,
1621 &surplus_hugepages_attr.attr,
1622 NULL,
1625 static struct attribute_group per_node_hstate_attr_group = {
1626 .attrs = per_node_hstate_attrs,
1630 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1631 * Returns node id via non-NULL nidp.
1633 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1635 int nid;
1637 for (nid = 0; nid < nr_node_ids; nid++) {
1638 struct node_hstate *nhs = &node_hstates[nid];
1639 int i;
1640 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1641 if (nhs->hstate_kobjs[i] == kobj) {
1642 if (nidp)
1643 *nidp = nid;
1644 return &hstates[i];
1648 BUG();
1649 return NULL;
1653 * Unregister hstate attributes from a single node sysdev.
1654 * No-op if no hstate attributes attached.
1656 void hugetlb_unregister_node(struct node *node)
1658 struct hstate *h;
1659 struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1661 if (!nhs->hugepages_kobj)
1662 return; /* no hstate attributes */
1664 for_each_hstate(h)
1665 if (nhs->hstate_kobjs[h - hstates]) {
1666 kobject_put(nhs->hstate_kobjs[h - hstates]);
1667 nhs->hstate_kobjs[h - hstates] = NULL;
1670 kobject_put(nhs->hugepages_kobj);
1671 nhs->hugepages_kobj = NULL;
1675 * hugetlb module exit: unregister hstate attributes from node sysdevs
1676 * that have them.
1678 static void hugetlb_unregister_all_nodes(void)
1680 int nid;
1683 * disable node sysdev registrations.
1685 register_hugetlbfs_with_node(NULL, NULL);
1688 * remove hstate attributes from any nodes that have them.
1690 for (nid = 0; nid < nr_node_ids; nid++)
1691 hugetlb_unregister_node(&node_devices[nid]);
1695 * Register hstate attributes for a single node sysdev.
1696 * No-op if attributes already registered.
1698 void hugetlb_register_node(struct node *node)
1700 struct hstate *h;
1701 struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1702 int err;
1704 if (nhs->hugepages_kobj)
1705 return; /* already allocated */
1707 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1708 &node->sysdev.kobj);
1709 if (!nhs->hugepages_kobj)
1710 return;
1712 for_each_hstate(h) {
1713 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1714 nhs->hstate_kobjs,
1715 &per_node_hstate_attr_group);
1716 if (err) {
1717 printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1718 " for node %d\n",
1719 h->name, node->sysdev.id);
1720 hugetlb_unregister_node(node);
1721 break;
1727 * hugetlb init time: register hstate attributes for all registered node
1728 * sysdevs of nodes that have memory. All on-line nodes should have
1729 * registered their associated sysdev by this time.
1731 static void hugetlb_register_all_nodes(void)
1733 int nid;
1735 for_each_node_state(nid, N_HIGH_MEMORY) {
1736 struct node *node = &node_devices[nid];
1737 if (node->sysdev.id == nid)
1738 hugetlb_register_node(node);
1742 * Let the node sysdev driver know we're here so it can
1743 * [un]register hstate attributes on node hotplug.
1745 register_hugetlbfs_with_node(hugetlb_register_node,
1746 hugetlb_unregister_node);
1748 #else /* !CONFIG_NUMA */
1750 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1752 BUG();
1753 if (nidp)
1754 *nidp = -1;
1755 return NULL;
1758 static void hugetlb_unregister_all_nodes(void) { }
1760 static void hugetlb_register_all_nodes(void) { }
1762 #endif
1764 static void __exit hugetlb_exit(void)
1766 struct hstate *h;
1768 hugetlb_unregister_all_nodes();
1770 for_each_hstate(h) {
1771 kobject_put(hstate_kobjs[h - hstates]);
1774 kobject_put(hugepages_kobj);
1776 module_exit(hugetlb_exit);
1778 static int __init hugetlb_init(void)
1780 /* Some platforms decide whether they support huge pages at boot
1781 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1782 * there is no such support
1784 if (HPAGE_SHIFT == 0)
1785 return 0;
1787 if (!size_to_hstate(default_hstate_size)) {
1788 default_hstate_size = HPAGE_SIZE;
1789 if (!size_to_hstate(default_hstate_size))
1790 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1792 default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1793 if (default_hstate_max_huge_pages)
1794 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1796 hugetlb_init_hstates();
1798 gather_bootmem_prealloc();
1800 report_hugepages();
1802 hugetlb_sysfs_init();
1804 hugetlb_register_all_nodes();
1806 return 0;
1808 module_init(hugetlb_init);
1810 /* Should be called on processing a hugepagesz=... option */
1811 void __init hugetlb_add_hstate(unsigned order)
1813 struct hstate *h;
1814 unsigned long i;
1816 if (size_to_hstate(PAGE_SIZE << order)) {
1817 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1818 return;
1820 BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1821 BUG_ON(order == 0);
1822 h = &hstates[max_hstate++];
1823 h->order = order;
1824 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1825 h->nr_huge_pages = 0;
1826 h->free_huge_pages = 0;
1827 for (i = 0; i < MAX_NUMNODES; ++i)
1828 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1829 h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1830 h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1831 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1832 huge_page_size(h)/1024);
1834 parsed_hstate = h;
1837 static int __init hugetlb_nrpages_setup(char *s)
1839 unsigned long *mhp;
1840 static unsigned long *last_mhp;
1843 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1844 * so this hugepages= parameter goes to the "default hstate".
1846 if (!max_hstate)
1847 mhp = &default_hstate_max_huge_pages;
1848 else
1849 mhp = &parsed_hstate->max_huge_pages;
1851 if (mhp == last_mhp) {
1852 printk(KERN_WARNING "hugepages= specified twice without "
1853 "interleaving hugepagesz=, ignoring\n");
1854 return 1;
1857 if (sscanf(s, "%lu", mhp) <= 0)
1858 *mhp = 0;
1861 * Global state is always initialized later in hugetlb_init.
1862 * But we need to allocate >= MAX_ORDER hstates here early to still
1863 * use the bootmem allocator.
1865 if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1866 hugetlb_hstate_alloc_pages(parsed_hstate);
1868 last_mhp = mhp;
1870 return 1;
1872 __setup("hugepages=", hugetlb_nrpages_setup);
1874 static int __init hugetlb_default_setup(char *s)
1876 default_hstate_size = memparse(s, &s);
1877 return 1;
1879 __setup("default_hugepagesz=", hugetlb_default_setup);
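/*
 * Example boot command line combining the three options parsed above
 * (illustrative; which sizes exist depends on the architecture):
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * Each hugepagesz= selects the hstate that subsequent hugepages=
 * parameters apply to; a hugepages= given before any hugepagesz=
 * sizes the default hstate.
 */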
1881 static unsigned int cpuset_mems_nr(unsigned int *array)
1883 int node;
1884 unsigned int nr = 0;
1886 for_each_node_mask(node, cpuset_current_mems_allowed)
1887 nr += array[node];
1889 return nr;
1892 #ifdef CONFIG_SYSCTL
1893 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1894 struct ctl_table *table, int write,
1895 void __user *buffer, size_t *length, loff_t *ppos)
1897 struct hstate *h = &default_hstate;
1898 unsigned long tmp;
1900 if (!write)
1901 tmp = h->max_huge_pages;
1903 table->data = &tmp;
1904 table->maxlen = sizeof(unsigned long);
1905 proc_doulongvec_minmax(table, write, buffer, length, ppos);
1907 if (write) {
1908 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1909 GFP_KERNEL | __GFP_NORETRY);
1910 if (!(obey_mempolicy &&
1911 init_nodemask_of_mempolicy(nodes_allowed))) {
1912 NODEMASK_FREE(nodes_allowed);
1913 nodes_allowed = &node_states[N_HIGH_MEMORY];
1915 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1917 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1918 NODEMASK_FREE(nodes_allowed);
1921 return 0;
1924 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1925 void __user *buffer, size_t *length, loff_t *ppos)
1928 return hugetlb_sysctl_handler_common(false, table, write,
1929 buffer, length, ppos);
1932 #ifdef CONFIG_NUMA
1933 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1934 void __user *buffer, size_t *length, loff_t *ppos)
1936 return hugetlb_sysctl_handler_common(true, table, write,
1937 buffer, length, ppos);
1939 #endif /* CONFIG_NUMA */
1941 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1942 void __user *buffer,
1943 size_t *length, loff_t *ppos)
1945 proc_dointvec(table, write, buffer, length, ppos);
1946 if (hugepages_treat_as_movable)
1947 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1948 else
1949 htlb_alloc_mask = GFP_HIGHUSER;
1950 return 0;
1953 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1954 void __user *buffer,
1955 size_t *length, loff_t *ppos)
1957 struct hstate *h = &default_hstate;
1958 unsigned long tmp;
1960 if (!write)
1961 tmp = h->nr_overcommit_huge_pages;
1963 table->data = &tmp;
1964 table->maxlen = sizeof(unsigned long);
1965 proc_doulongvec_minmax(table, write, buffer, length, ppos);
1967 if (write) {
1968 spin_lock(&hugetlb_lock);
1969 h->nr_overcommit_huge_pages = tmp;
1970 spin_unlock(&hugetlb_lock);
1973 return 0;
1976 #endif /* CONFIG_SYSCTL */
1978 void hugetlb_report_meminfo(struct seq_file *m)
1980 struct hstate *h = &default_hstate;
1981 seq_printf(m,
1982 "HugePages_Total: %5lu\n"
1983 "HugePages_Free: %5lu\n"
1984 "HugePages_Rsvd: %5lu\n"
1985 "HugePages_Surp: %5lu\n"
1986 "Hugepagesize: %8lu kB\n",
1987 h->nr_huge_pages,
1988 h->free_huge_pages,
1989 h->resv_huge_pages,
1990 h->surplus_huge_pages,
1991 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1994 int hugetlb_report_node_meminfo(int nid, char *buf)
1996 struct hstate *h = &default_hstate;
1997 return sprintf(buf,
1998 "Node %d HugePages_Total: %5u\n"
1999 "Node %d HugePages_Free: %5u\n"
2000 "Node %d HugePages_Surp: %5u\n",
2001 nid, h->nr_huge_pages_node[nid],
2002 nid, h->free_huge_pages_node[nid],
2003 nid, h->surplus_huge_pages_node[nid]);
2006 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2007 unsigned long hugetlb_total_pages(void)
2009 struct hstate *h = &default_hstate;
2010 return h->nr_huge_pages * pages_per_huge_page(h);
2013 static int hugetlb_acct_memory(struct hstate *h, long delta)
2015 int ret = -ENOMEM;
2017 spin_lock(&hugetlb_lock);
2019 * When cpuset is configured, it breaks the strict hugetlb page
2020 * reservation as the accounting is done on a global variable. Such
2021 * reservation is completely rubbish in the presence of cpuset because
2022 * the reservation is not checked against page availability for the
2023 * current cpuset. Applications can still potentially be OOM'ed by the
2024 * kernel for lack of free htlb pages in the cpuset that the task is in.
2025 * Attempt to enforce strict accounting with cpuset is almost
2026 * impossible (or too ugly) because cpusets are so fluid that a
2027 * task or memory node can be dynamically moved between cpusets.
2029 * The change of semantics for shared hugetlb mapping with cpuset is
2030 * undesirable. However, in order to preserve some of the semantics,
2031 * we fall back to check against current free page availability as
2032 * a best attempt and hopefully to minimize the impact of changing
2033 * semantics that cpuset has.
2035 if (delta > 0) {
2036 if (gather_surplus_pages(h, delta) < 0)
2037 goto out;
2039 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2040 return_unused_surplus_pages(h, delta);
2041 goto out;
2045 ret = 0;
2046 if (delta < 0)
2047 return_unused_surplus_pages(h, (unsigned long) -delta);
2049 out:
2050 spin_unlock(&hugetlb_lock);
2051 return ret;
2054 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2056 struct resv_map *reservations = vma_resv_map(vma);
2059 * This new VMA should share its sibling's reservation map if present.
2060 * The VMA will only ever have a valid reservation map pointer where
2061 * it is being copied for another still existing VMA. As that VMA
2062 * has a reference to the reservation map, it cannot disappear until
2063 * after this open call completes. It is therefore safe to take a
2064 * new reference here without additional locking.
2066 if (reservations)
2067 kref_get(&reservations->refs);
2070 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2072 struct hstate *h = hstate_vma(vma);
2073 struct resv_map *reservations = vma_resv_map(vma);
2074 unsigned long reserve;
2075 unsigned long start;
2076 unsigned long end;
2078 if (reservations) {
2079 start = vma_hugecache_offset(h, vma, vma->vm_start);
2080 end = vma_hugecache_offset(h, vma, vma->vm_end);
2082 reserve = (end - start) -
2083 region_count(&reservations->regions, start, end);
2085 kref_put(&reservations->refs, resv_map_release);
2087 if (reserve) {
2088 hugetlb_acct_memory(h, -reserve);
2089 hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
2095 * We cannot handle pagefaults against hugetlb pages at all. They cause
2096 * handle_mm_fault() to try to instantiate regular-sized pages in the
2097 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
2098 * this far.
2100 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2102 BUG();
2103 return 0;
2106 const struct vm_operations_struct hugetlb_vm_ops = {
2107 .fault = hugetlb_vm_op_fault,
2108 .open = hugetlb_vm_op_open,
2109 .close = hugetlb_vm_op_close,
2112 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2113 int writable)
2115 pte_t entry;
2117 if (writable) {
2118 entry =
2119 pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2120 } else {
2121 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2123 entry = pte_mkyoung(entry);
2124 entry = pte_mkhuge(entry);
2126 return entry;
2129 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2130 unsigned long address, pte_t *ptep)
2132 pte_t entry;
2134 entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2135 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
2136 update_mmu_cache(vma, address, ptep);
2141 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2142 struct vm_area_struct *vma)
2144 pte_t *src_pte, *dst_pte, entry;
2145 struct page *ptepage;
2146 unsigned long addr;
2147 int cow;
2148 struct hstate *h = hstate_vma(vma);
2149 unsigned long sz = huge_page_size(h);
2151 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2153 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2154 src_pte = huge_pte_offset(src, addr);
2155 if (!src_pte)
2156 continue;
2157 dst_pte = huge_pte_alloc(dst, addr, sz);
2158 if (!dst_pte)
2159 goto nomem;
2161 /* If the pagetables are shared don't copy or take references */
2162 if (dst_pte == src_pte)
2163 continue;
2165 spin_lock(&dst->page_table_lock);
2166 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2167 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2168 if (cow)
2169 huge_ptep_set_wrprotect(src, addr, src_pte);
2170 entry = huge_ptep_get(src_pte);
2171 ptepage = pte_page(entry);
2172 get_page(ptepage);
2173 page_dup_rmap(ptepage);
2174 set_huge_pte_at(dst, addr, dst_pte, entry);
2176 spin_unlock(&src->page_table_lock);
2177 spin_unlock(&dst->page_table_lock);
2179 return 0;
2181 nomem:
2182 return -ENOMEM;
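For context, this is reached from the fork path: copy_page_range() in mm/memory.c special-cases hugetlb VMAs before walking ordinary page tables, roughly as sketched here (paraphrased, not part of this file):

    if (is_vm_hugetlb_page(vma))
            return copy_hugetlb_page_range(dst_mm, src_mm, vma);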
2185 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2187 swp_entry_t swp;
2189 if (huge_pte_none(pte) || pte_present(pte))
2190 return 0;
2191 swp = pte_to_swp_entry(pte);
2192 if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
2193 return 1;
2194 } else
2195 return 0;
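The entries tested here are planted by the memory-failure path when it unmaps a poisoned page. A minimal sketch of the encoding side, assuming the swapops.h helpers of this era:

    swp_entry_t entry = make_hwpoison_entry(page);  /* tag the pfn as hwpoisoned */
    set_pte_at(mm, address, ptep, swp_entry_to_pte(entry)); /* non-present PTE */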
2198 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2199 unsigned long end, struct page *ref_page)
2201 struct mm_struct *mm = vma->vm_mm;
2202 unsigned long address;
2203 pte_t *ptep;
2204 pte_t pte;
2205 struct page *page;
2206 struct page *tmp;
2207 struct hstate *h = hstate_vma(vma);
2208 unsigned long sz = huge_page_size(h);
2211 * A page gathering list, protected by the per-file i_mmap_lock. The
2212 * lock is used to avoid list corruption from multiple unmapping
2213 * of the same page since we are using page->lru.
2215 LIST_HEAD(page_list);
2217 WARN_ON(!is_vm_hugetlb_page(vma));
2218 BUG_ON(start & ~huge_page_mask(h));
2219 BUG_ON(end & ~huge_page_mask(h));
2221 mmu_notifier_invalidate_range_start(mm, start, end);
2222 spin_lock(&mm->page_table_lock);
2223 for (address = start; address < end; address += sz) {
2224 ptep = huge_pte_offset(mm, address);
2225 if (!ptep)
2226 continue;
2228 if (huge_pmd_unshare(mm, &address, ptep))
2229 continue;
2232 * If a reference page is supplied, it is because a specific
2233 * page is being unmapped, not a range. Ensure the page we
2234 * are about to unmap is the actual page of interest.
2236 if (ref_page) {
2237 pte = huge_ptep_get(ptep);
2238 if (huge_pte_none(pte))
2239 continue;
2240 page = pte_page(pte);
2241 if (page != ref_page)
2242 continue;
2245 * Mark the VMA as having unmapped its page so that
2246 * future faults in this VMA will fail rather than
2247 * looking like data was lost
2249 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2252 pte = huge_ptep_get_and_clear(mm, address, ptep);
2253 if (huge_pte_none(pte))
2254 continue;
2257 * An HWPoisoned hugepage has already been unmapped and its reference dropped.
2259 if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2260 continue;
2262 page = pte_page(pte);
2263 if (pte_dirty(pte))
2264 set_page_dirty(page);
2265 list_add(&page->lru, &page_list);
2267 spin_unlock(&mm->page_table_lock);
2268 flush_tlb_range(vma, start, end);
2269 mmu_notifier_invalidate_range_end(mm, start, end);
2270 list_for_each_entry_safe(page, tmp, &page_list, lru) {
2271 page_remove_rmap(page);
2272 list_del(&page->lru);
2273 put_page(page);
2277 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2278 unsigned long end, struct page *ref_page)
2280 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2281 __unmap_hugepage_range(vma, start, end, ref_page);
2282 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2286 * This is called when the original mapper is failing to COW a MAP_PRIVATE
2287 * mapping it owns the reserve page for. The intention is to unmap the page
2288 * from other VMAs and let the children be SIGKILLed if they are faulting the
2289 * same region.
2291 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2292 struct page *page, unsigned long address)
2294 struct hstate *h = hstate_vma(vma);
2295 struct vm_area_struct *iter_vma;
2296 struct address_space *mapping;
2297 struct prio_tree_iter iter;
2298 pgoff_t pgoff;
2301 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2302 * from the page cache lookup, which is in HPAGE_SIZE units.
2304 address = address & huge_page_mask(h);
2305 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2306 + (vma->vm_pgoff >> PAGE_SHIFT);
2307 mapping = (struct address_space *)page_private(page);
2310 * Take the mapping lock for the duration of the table walk. As
2311 * this mapping should be shared between all the VMAs,
2312 * __unmap_hugepage_range() is called with the lock already held.
2314 spin_lock(&mapping->i_mmap_lock);
2315 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2316 /* Do not unmap the current VMA */
2317 if (iter_vma == vma)
2318 continue;
2321 * Unmap the page from other VMAs without their own reserves.
2322 * They get marked to be SIGKILLed if they fault in these
2323 * areas. This is because a future no-page fault on this VMA
2324 * could insert a zeroed page instead of the data existing
2325 * from the time of fork. This would look like data corruption
2327 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2328 __unmap_hugepage_range(iter_vma,
2329 address, address + huge_page_size(h),
2330 page);
2332 spin_unlock(&mapping->i_mmap_lock);
2334 return 1;
2338 * Hugetlb_cow() should be called with page lock of the original hugepage held.
2340 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2341 unsigned long address, pte_t *ptep, pte_t pte,
2342 struct page *pagecache_page)
2344 struct hstate *h = hstate_vma(vma);
2345 struct page *old_page, *new_page;
2346 int avoidcopy;
2347 int outside_reserve = 0;
2349 old_page = pte_page(pte);
2351 retry_avoidcopy:
2352 /* If no-one else is actually using this page, avoid the copy
2353 * and just make the page writable */
2354 avoidcopy = (page_mapcount(old_page) == 1);
2355 if (avoidcopy) {
2356 if (PageAnon(old_page))
2357 page_move_anon_rmap(old_page, vma, address);
2358 set_huge_ptep_writable(vma, address, ptep);
2359 return 0;
2363 * If the process that created a MAP_PRIVATE mapping is about to
2364 * perform a COW due to a shared page count, attempt to satisfy
2365 * the allocation without using the existing reserves. The pagecache
2366 * page is used to determine if the reserve at this address was
2367 * consumed or not. If reserves were used, a partial faulted mapping
2368 * at the time of fork() could consume its reserves on COW instead
2369 * of the full address range.
2371 if (!(vma->vm_flags & VM_MAYSHARE) &&
2372 is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2373 old_page != pagecache_page)
2374 outside_reserve = 1;
2376 page_cache_get(old_page);
2378 /* Drop page_table_lock as buddy allocator may be called */
2379 spin_unlock(&mm->page_table_lock);
2380 new_page = alloc_huge_page(vma, address, outside_reserve);
2382 if (IS_ERR(new_page)) {
2383 page_cache_release(old_page);
2386 * If a process owning a MAP_PRIVATE mapping fails to COW,
2387 * it is due to references held by a child and an insufficient
2388 * huge page pool. To guarantee the original mapper's
2389 * reliability, unmap the page from child processes. The child
2390 * may get SIGKILLed if it later faults.
2392 if (outside_reserve) {
2393 BUG_ON(huge_pte_none(pte));
2394 if (unmap_ref_private(mm, vma, old_page, address)) {
2395 BUG_ON(page_count(old_page) != 1);
2396 BUG_ON(huge_pte_none(pte));
2397 spin_lock(&mm->page_table_lock);
2398 goto retry_avoidcopy;
2400 WARN_ON_ONCE(1);
2403 /* Caller expects lock to be held */
2404 spin_lock(&mm->page_table_lock);
2405 return -PTR_ERR(new_page);
2409 * When the original hugepage is a shared one, it does not have
2410 * an anon_vma prepared.
2412 if (unlikely(anon_vma_prepare(vma)))
2413 return VM_FAULT_OOM;
2415 copy_huge_page(new_page, old_page, address, vma);
2416 __SetPageUptodate(new_page);
2419 * Retake the page_table_lock to check for racing updates
2420 * before the page tables are altered
2422 spin_lock(&mm->page_table_lock);
2423 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2424 if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2425 /* Break COW */
2426 mmu_notifier_invalidate_range_start(mm,
2427 address & huge_page_mask(h),
2428 (address & huge_page_mask(h)) + huge_page_size(h));
2429 huge_ptep_clear_flush(vma, address, ptep);
2430 set_huge_pte_at(mm, address, ptep,
2431 make_huge_pte(vma, new_page, 1));
2432 page_remove_rmap(old_page);
2433 hugepage_add_new_anon_rmap(new_page, vma, address);
2434 /* Make the old page be freed below */
2435 new_page = old_page;
2436 mmu_notifier_invalidate_range_end(mm,
2437 address & huge_page_mask(h),
2438 (address & huge_page_mask(h)) + huge_page_size(h));
2440 page_cache_release(new_page);
2441 page_cache_release(old_page);
2442 return 0;
2445 /* Return the pagecache page at a given address within a VMA */
2446 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2447 struct vm_area_struct *vma, unsigned long address)
2449 struct address_space *mapping;
2450 pgoff_t idx;
2452 mapping = vma->vm_file->f_mapping;
2453 idx = vma_hugecache_offset(h, vma, address);
2455 return find_lock_page(mapping, idx);
2459 * Return whether there is a pagecache page to back given address within VMA.
2460 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2462 static bool hugetlbfs_pagecache_present(struct hstate *h,
2463 struct vm_area_struct *vma, unsigned long address)
2465 struct address_space *mapping;
2466 pgoff_t idx;
2467 struct page *page;
2469 mapping = vma->vm_file->f_mapping;
2470 idx = vma_hugecache_offset(h, vma, address);
2472 page = find_get_page(mapping, idx);
2473 if (page)
2474 put_page(page);
2475 return page != NULL;
2478 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2479 unsigned long address, pte_t *ptep, unsigned int flags)
2481 struct hstate *h = hstate_vma(vma);
2482 int ret = VM_FAULT_SIGBUS;
2483 pgoff_t idx;
2484 unsigned long size;
2485 struct page *page;
2486 struct address_space *mapping;
2487 pte_t new_pte;
2490 * Currently, we are forced to kill the process in the event the
2491 * original mapper has unmapped pages from the child due to a failed
2492 * COW. Warn that such a situation has occurred as it may not be obvious.
2494 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2495 printk(KERN_WARNING
2496 "PID %d killed due to inadequate hugepage pool\n",
2497 current->pid);
2498 return ret;
2501 mapping = vma->vm_file->f_mapping;
2502 idx = vma_hugecache_offset(h, vma, address);
2505 * Use page lock to guard against racing truncation
2506 * before we get page_table_lock.
2508 retry:
2509 page = find_lock_page(mapping, idx);
2510 if (!page) {
2511 size = i_size_read(mapping->host) >> huge_page_shift(h);
2512 if (idx >= size)
2513 goto out;
2514 page = alloc_huge_page(vma, address, 0);
2515 if (IS_ERR(page)) {
2516 ret = -PTR_ERR(page);
2517 goto out;
2519 clear_huge_page(page, address, huge_page_size(h));
2520 __SetPageUptodate(page);
2522 if (vma->vm_flags & VM_MAYSHARE) {
2523 int err;
2524 struct inode *inode = mapping->host;
2526 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2527 if (err) {
2528 put_page(page);
2529 if (err == -EEXIST)
2530 goto retry;
2531 goto out;
2534 spin_lock(&inode->i_lock);
2535 inode->i_blocks += blocks_per_huge_page(h);
2536 spin_unlock(&inode->i_lock);
2537 page_dup_rmap(page);
2538 } else {
2539 lock_page(page);
2540 if (unlikely(anon_vma_prepare(vma))) {
2541 ret = VM_FAULT_OOM;
2542 goto backout_unlocked;
2544 hugepage_add_new_anon_rmap(page, vma, address);
2546 } else {
2548 * If a memory error occurs between mmap() and fault, some processes
2549 * won't have a hwpoisoned swap entry for the errored virtual address.
2550 * So we need to block hugepage fault by PG_hwpoison bit check.
2552 if (unlikely(PageHWPoison(page))) {
2553 ret = VM_FAULT_HWPOISON;
2554 goto backout_unlocked;
2556 page_dup_rmap(page);
2560 * If we are going to COW a private mapping later, we examine the
2561 * pending reservations for this page now. This will ensure that
2562 * any allocations necessary to record that reservation occur outside
2563 * the spinlock.
2565 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2566 if (vma_needs_reservation(h, vma, address) < 0) {
2567 ret = VM_FAULT_OOM;
2568 goto backout_unlocked;
2571 spin_lock(&mm->page_table_lock);
2572 size = i_size_read(mapping->host) >> huge_page_shift(h);
2573 if (idx >= size)
2574 goto backout;
2576 ret = 0;
2577 if (!huge_pte_none(huge_ptep_get(ptep)))
2578 goto backout;
2580 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2581 && (vma->vm_flags & VM_SHARED)));
2582 set_huge_pte_at(mm, address, ptep, new_pte);
2584 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2585 /* Optimization, do the COW without a second fault */
2586 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2589 spin_unlock(&mm->page_table_lock);
2590 unlock_page(page);
2591 out:
2592 return ret;
2594 backout:
2595 spin_unlock(&mm->page_table_lock);
2596 backout_unlocked:
2597 unlock_page(page);
2598 put_page(page);
2599 goto out;
2602 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2603 unsigned long address, unsigned int flags)
2605 pte_t *ptep;
2606 pte_t entry;
2607 int ret;
2608 struct page *page = NULL;
2609 struct page *pagecache_page = NULL;
2610 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2611 struct hstate *h = hstate_vma(vma);
2613 ptep = huge_pte_offset(mm, address);
2614 if (ptep) {
2615 entry = huge_ptep_get(ptep);
2616 if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2617 return VM_FAULT_HWPOISON;
2620 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2621 if (!ptep)
2622 return VM_FAULT_OOM;
2625 * Serialize hugepage allocation and instantiation, so that we don't
2626 * get spurious allocation failures if two CPUs race to instantiate
2627 * the same page in the page cache.
2629 mutex_lock(&hugetlb_instantiation_mutex);
2630 entry = huge_ptep_get(ptep);
2631 if (huge_pte_none(entry)) {
2632 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2633 goto out_mutex;
2636 ret = 0;
2639 * If we are going to COW the mapping later, we examine the pending
2640 * reservations for this page now. This will ensure that any
2641 * allocations necessary to record that reservation occur outside the
2642 * spinlock. For private mappings, we also lookup the pagecache
2643 * page now as it is used to determine if a reservation has been
2644 * consumed.
2646 if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2647 if (vma_needs_reservation(h, vma, address) < 0) {
2648 ret = VM_FAULT_OOM;
2649 goto out_mutex;
2652 if (!(vma->vm_flags & VM_MAYSHARE))
2653 pagecache_page = hugetlbfs_pagecache_page(h,
2654 vma, address);
2658 * hugetlb_cow() requires page locks of pte_page(entry) and
2659 * pagecache_page, so here we need to take the former one
2660 * when page != pagecache_page or !pagecache_page.
2661 * Note that locking order is always pagecache_page -> page,
2662 * so no worry about deadlock.
2664 page = pte_page(entry);
2665 if (page != pagecache_page)
2666 lock_page(page);
2668 spin_lock(&mm->page_table_lock);
2669 /* Check for a racing update before calling hugetlb_cow */
2670 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2671 goto out_page_table_lock;
2674 if (flags & FAULT_FLAG_WRITE) {
2675 if (!pte_write(entry)) {
2676 ret = hugetlb_cow(mm, vma, address, ptep, entry,
2677 pagecache_page);
2678 goto out_page_table_lock;
2680 entry = pte_mkdirty(entry);
2682 entry = pte_mkyoung(entry);
2683 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2684 flags & FAULT_FLAG_WRITE))
2685 update_mmu_cache(vma, address, ptep);
2687 out_page_table_lock:
2688 spin_unlock(&mm->page_table_lock);
2690 if (pagecache_page) {
2691 unlock_page(pagecache_page);
2692 put_page(pagecache_page);
2694 unlock_page(page);
2696 out_mutex:
2697 mutex_unlock(&hugetlb_instantiation_mutex);
2699 return ret;
2702 /* Can be overridden by architectures */
2703 __attribute__((weak)) struct page *
2704 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2705 pud_t *pud, int write)
2707 BUG();
2708 return NULL;
2711 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2712 struct page **pages, struct vm_area_struct **vmas,
2713 unsigned long *position, int *length, int i,
2714 unsigned int flags)
2716 unsigned long pfn_offset;
2717 unsigned long vaddr = *position;
2718 int remainder = *length;
2719 struct hstate *h = hstate_vma(vma);
2721 spin_lock(&mm->page_table_lock);
2722 while (vaddr < vma->vm_end && remainder) {
2723 pte_t *pte;
2724 int absent;
2725 struct page *page;
2728 * Some archs (sparc64, sh*) have multiple pte_ts to
2729 * each hugepage. We have to make sure we get the
2730 * first, for the page indexing below to work.
2732 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2733 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2736 * When coredumping, it suits get_dump_page if we just return
2737 * an error where there's an empty slot with no huge pagecache
2738 * to back it. This way, we avoid allocating a hugepage, and
2739 * the sparse dumpfile avoids allocating disk blocks, but its
2740 * huge holes still show up with zeroes where they need to be.
2742 if (absent && (flags & FOLL_DUMP) &&
2743 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2744 remainder = 0;
2745 break;
2748 if (absent ||
2749 ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2750 int ret;
2752 spin_unlock(&mm->page_table_lock);
2753 ret = hugetlb_fault(mm, vma, vaddr,
2754 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2755 spin_lock(&mm->page_table_lock);
2756 if (!(ret & VM_FAULT_ERROR))
2757 continue;
2759 remainder = 0;
2760 break;
2763 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2764 page = pte_page(huge_ptep_get(pte));
2765 same_page:
2766 if (pages) {
2767 pages[i] = mem_map_offset(page, pfn_offset);
2768 get_page(pages[i]);
2771 if (vmas)
2772 vmas[i] = vma;
2774 vaddr += PAGE_SIZE;
2775 ++pfn_offset;
2776 --remainder;
2777 ++i;
2778 if (vaddr < vma->vm_end && remainder &&
2779 pfn_offset < pages_per_huge_page(h)) {
2781 * We use pfn_offset to avoid touching the pageframes
2782 * of this compound page.
2784 goto same_page;
2787 spin_unlock(&mm->page_table_lock);
2788 *length = remainder;
2789 *position = vaddr;
2791 return i ? i : -EFAULT;
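To make the same_page indexing concrete (illustrative, assuming 4 KB base pages and 2 MB hugepages): pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT yields a subpage index in 0..511, so one huge PTE can satisfy up to 512 consecutive pages[] slots before the loop has to walk the page table again.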
2794 void hugetlb_change_protection(struct vm_area_struct *vma,
2795 unsigned long address, unsigned long end, pgprot_t newprot)
2797 struct mm_struct *mm = vma->vm_mm;
2798 unsigned long start = address;
2799 pte_t *ptep;
2800 pte_t pte;
2801 struct hstate *h = hstate_vma(vma);
2803 BUG_ON(address >= end);
2804 flush_cache_range(vma, address, end);
2806 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2807 spin_lock(&mm->page_table_lock);
2808 for (; address < end; address += huge_page_size(h)) {
2809 ptep = huge_pte_offset(mm, address);
2810 if (!ptep)
2811 continue;
2812 if (huge_pmd_unshare(mm, &address, ptep))
2813 continue;
2814 if (!huge_pte_none(huge_ptep_get(ptep))) {
2815 pte = huge_ptep_get_and_clear(mm, address, ptep);
2816 pte = pte_mkhuge(pte_modify(pte, newprot));
2817 set_huge_pte_at(mm, address, ptep, pte);
2820 spin_unlock(&mm->page_table_lock);
2821 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2823 flush_tlb_range(vma, start, end);
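From userspace this path is driven by mprotect() on an existing hugetlb mapping; a hedged sketch (addr and len describing a hugepage-aligned hugetlbfs mapping are assumptions):

    /* addr/len assumed to cover an existing hugetlb mapping */
    if (mprotect(addr, len, PROT_READ) < 0)
            perror("mprotect");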
2826 int hugetlb_reserve_pages(struct inode *inode,
2827 long from, long to,
2828 struct vm_area_struct *vma,
2829 int acctflag)
2831 long ret, chg;
2832 struct hstate *h = hstate_inode(inode);
2835 * Only apply the hugepage reservation if asked. At fault time, a
2836 * VM_NORESERVE mapping will instead attempt to allocate the page
2837 * and take filesystem quota without using reserves.
2839 if (acctflag & VM_NORESERVE)
2840 return 0;
2843 * Shared mappings base their reservation on the number of pages that
2844 * are already allocated on behalf of the file. Private mappings need
2845 * to reserve the full area even if read-only as mprotect() may be
2846 * called to make the mapping read-write. Assume !vma is a shm mapping
2848 if (!vma || vma->vm_flags & VM_MAYSHARE)
2849 chg = region_chg(&inode->i_mapping->private_list, from, to);
2850 else {
2851 struct resv_map *resv_map = resv_map_alloc();
2852 if (!resv_map)
2853 return -ENOMEM;
2855 chg = to - from;
2857 set_vma_resv_map(vma, resv_map);
2858 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2861 if (chg < 0)
2862 return chg;
2864 /* There must be enough filesystem quota for the mapping */
2865 if (hugetlb_get_quota(inode->i_mapping, chg))
2866 return -ENOSPC;
2869 * Check that enough hugepages are available for the reservation.
2870 * Hand back the quota if there are not.
2872 ret = hugetlb_acct_memory(h, chg);
2873 if (ret < 0) {
2874 hugetlb_put_quota(inode->i_mapping, chg);
2875 return ret;
2879 * Account for the reservations made. Shared mappings record regions
2880 * that have reservations as they are shared by multiple VMAs.
2881 * When the last VMA disappears, the region map says how much
2882 * the reservation was and the page cache tells how much of
2883 * the reservation was consumed. Private mappings are per-VMA and
2884 * only the consumed reservations are tracked. When the VMA
2885 * disappears, the original reservation is the VMA size and the
2886 * consumed reservations are stored in the map. Hence, nothing
2887 * else has to be done for private mappings here
2889 if (!vma || vma->vm_flags & VM_MAYSHARE)
2890 region_add(&inode->i_mapping->private_list, from, to);
2891 return 0;
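Both branches above are exercised at mmap() time. A hedged userspace sketch that reaches the shared-mapping branch (the hugetlbfs mount point /mnt/huge, the file name, and the 2 MB size are assumptions):

    int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600); /* hugetlbfs file */
    /* MAP_SHARED takes the region_chg() branch above;
       MAP_PRIVATE would allocate a private resv_map instead */
    void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, 0);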
2894 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2896 struct hstate *h = hstate_inode(inode);
2897 long chg = region_truncate(&inode->i_mapping->private_list, offset);
2899 spin_lock(&inode->i_lock);
2900 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2901 spin_unlock(&inode->i_lock);
2903 hugetlb_put_quota(inode->i_mapping, (chg - freed));
2904 hugetlb_acct_memory(h, -(chg - freed));
2908 * This function is called from memory failure code.
2909 * Assume the caller holds the page lock of the head page.
2911 void __isolate_hwpoisoned_huge_page(struct page *hpage)
2913 struct hstate *h = page_hstate(hpage);
2914 int nid = page_to_nid(hpage);
2916 spin_lock(&hugetlb_lock);
2917 list_del(&hpage->lru);
2918 h->free_huge_pages--;
2919 h->free_huge_pages_node[nid]--;
2920 spin_unlock(&hugetlb_lock);