/*
 *  Copyright (C) 2009  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is only invoked by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default: collapse hugepages if there is at least one pte mapped, like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;
/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;
	extern int min_free_kbytes;

	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) &&
	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags))
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes)
		min_free_kbytes = recommended_min;
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);
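/*
 * Raising min_free_kbytes keeps enough whole pageblocks free for the
 * hugepage allocations to succeed without deep reclaim; the adjustment
 * is skipped entirely when transparent hugepages are disabled, as the
 * test_bit checks above show.
 */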
static int start_khugepaged(void)
{
	int err = 0;

	if (khugepaged_enabled()) {
		int wakeup;

		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
			err = -ENOMEM;
			goto out;
		}
		mutex_lock(&khugepaged_mutex);
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}
		wakeup = !list_empty(&khugepaged_scan.mm_head);
		mutex_unlock(&khugepaged_mutex);
		if (wakeup)
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else
		/* wakeup to exit */
		wake_up_interruptible(&khugepaged_wait);
out:
	return err;
}
#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err = start_khugepaged();
		if (err)
			ret = err;
	}

	if (ret > 0 &&
	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) ||
	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags)))
		set_recommended_min_free_kbytes();

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long pages;
	int err;

	err = strict_strtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = strict_strtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
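/*
 * The two attribute groups above end up under
 * /sys/kernel/mm/transparent_hugepage/ and
 * /sys/kernel/mm/transparent_hugepage/khugepaged/ once hugepage_init()
 * below registers them against the "transparent_hugepage" kobject.
 */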
static int __init hugepage_init(void)
{
	int err;
	static struct kobject *hugepage_kobj;

	err = -EINVAL;
	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		goto out;
	}

	err = -ENOMEM;
	hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!hugepage_kobj)) {
		printk(KERN_ERR "hugepage: failed kobject create\n");
		goto out;
	}

	err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
		goto out;
	}

	err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
		goto out;
	}

	err = khugepaged_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err) {
		khugepaged_slab_free();
		goto out;
	}

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
		transparent_hugepage_flags = 0;

	start_khugepaged();

	set_recommended_min_free_kbytes();

out:
	return err;
}
module_init(hugepage_init)
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;

	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING
		       "transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
static void prepare_pmd_huge_pte(pgtable_t pgtable,
				 struct mm_struct *mm)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}
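/*
 * prepare_pmd_huge_pte() deposits a preallocated pagetable with the mm;
 * get_pmd_huge_pte() further down withdraws it again when a huge pmd
 * has to be split or zapped and regular ptes are needed.
 */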
static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	int ret = 0;
	pgtable_t pgtable;

	VM_BUG_ON(!PageCompound(page));
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_uncharge_page(page);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(page);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		/*
		 * The spinlocking to take the lru_lock inside
		 * page_add_new_anon_rmap() acts as a full memory
		 * barrier to be sure clear_huge_page writes become
		 * visible after the set_pmd_at() write.
		 */
		page_add_new_anon_rmap(page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		prepare_pmd_huge_pte(pgtable, mm);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		spin_unlock(&mm->page_table_lock);
	}

	return ret;
}
static inline gfp_t alloc_hugepage_gfpmask(int defrag)
{
	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
}

static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr, int nd)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
			       HPAGE_PMD_ORDER, vma, haddr, nd);
}

static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag),
			   HPAGE_PMD_ORDER);
}
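/*
 * With defrag disabled the gfp mask drops __GFP_WAIT, so these
 * allocations never reclaim or compact; the callers simply fall back
 * to regular sized pages when no hugepage is immediately available.
 */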
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pte_t *pte;

	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;
		if (unlikely(khugepaged_enter(vma)))
			return VM_FAULT_OOM;
		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					  vma, haddr, numa_node_id());
		if (unlikely(!page))
			goto out;
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			put_page(page);
			goto out;
		}

		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
	}
out:
	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
	 * materialize from under us from a different thread.
	 */
	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
		return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge pmd
	 * from under us anymore at this point because we hold the mmap_sem
	 * read mode and khugepaged takes it in write mode. So now it's
	 * safe to run pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);
	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	spin_lock(&dst_mm->page_table_lock);
	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(&src_mm->page_table_lock);
		spin_unlock(&dst_mm->page_table_lock);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON(!PageHead(src_page));
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	prepare_pmd_huge_pte(pgtable, dst_mm);

	ret = 0;
out_unlock:
	spin_unlock(&src_mm->page_table_lock);
	spin_unlock(&dst_mm->page_table_lock);
out:
	return ret;
}
/* no "address" argument so destroys page coloring of some arch */
pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_newpage_charge(pages[i], mm,
						       GFP_KERNEL))) {
			if (pages[i])
				put_page(pages[i]);
			mem_cgroup_uncharge_start();
			while (--i >= 0) {
				mem_cgroup_uncharge_page(pages[i]);
				put_page(pages[i]);
			}
			mem_cgroup_uncharge_end();
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE*i, vma);
		__SetPageUptodate(pages[i]);
	}

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON(!PageHead(page));

	pmdp_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = get_pmd_huge_pte(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(&mm->page_table_lock);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(&mm->page_table_lock);
	mem_cgroup_uncharge_start();
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mem_cgroup_uncharge_page(pages[i]);
		put_page(pages[i]);
	}
	mem_cgroup_uncharge_end();
	kfree(pages);
	goto out;
}
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	int ret = 0;
	struct page *page, *new_page;
	unsigned long haddr;

	VM_BUG_ON(!vma->anon_vma);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
	haddr = address & HPAGE_PMD_MASK;
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache(vma, address, entry);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow())
		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					      vma, haddr, numa_node_id());
	else
		new_page = NULL;

	if (unlikely(!new_page)) {
		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
						   pmd, orig_pmd, page, haddr);
		put_page(page);
		goto out;
	}

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);
		put_page(page);
		ret |= VM_FAULT_OOM;
		goto out;
	}

	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	spin_lock(&mm->page_table_lock);
	put_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		mem_cgroup_uncharge_page(new_page);
		put_page(new_page);
	} else {
		pmd_t entry;
		VM_BUG_ON(!PageHead(page));
		entry = mk_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		pmdp_clear_flush_notify(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache(vma, address, entry);
		page_remove_rmap(page);
		put_page(page);
		ret |= VM_FAULT_WRITE;
	}
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}
struct page *follow_trans_huge_pmd(struct mm_struct *mm,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct page *page = NULL;

	assert_spin_locked(&mm->page_table_lock);

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON(!PageHead(page));
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE but
		 * for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit will become meaningful and
		 * we'll only set it with FOLL_WRITE, an atomic
		 * set_bit will be required on the pmd to set the
		 * young bit, instead of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON(!PageCompound(page));
	if (flags & FOLL_GET)
		get_page(page);

out:
	return page;
}
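/*
 * follow_trans_huge_pmd() is the get_user_pages() path for huge pmds:
 * it returns the individual small subpage inside the compound page that
 * corresponds to @addr.
 */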
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd)
{
	int ret = 0;

	spin_lock(&tlb->mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&tlb->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma,
					     pmd);
		} else {
			struct page *page;
			pgtable_t pgtable;
			pgtable = get_pmd_huge_pte(tlb->mm);
			page = pmd_page(*pmd);
			pmd_clear(pmd);
			page_remove_rmap(page);
			VM_BUG_ON(page_mapcount(page) < 0);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
			VM_BUG_ON(!PageHead(page));
			spin_unlock(&tlb->mm->page_table_lock);
			tlb_remove_page(tlb, page);
			pte_free(tlb->mm, pgtable);
			ret = 1;
		}
	} else
		spin_unlock(&tlb->mm->page_table_lock);

	return ret;
}
int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		     unsigned long addr, unsigned long end,
		     unsigned char *vec)
{
	int ret = 0;

	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		ret = !pmd_trans_splitting(*pmd);
		spin_unlock(&vma->vm_mm->page_table_lock);
		if (unlikely(!ret))
			wait_split_huge_page(vma->anon_vma, pmd);
		else {
			/*
			 * All logical pages in the range are present
			 * if backed by a huge page.
			 */
			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		    unsigned long addr, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			pmd_t entry;

			entry = pmdp_get_and_clear(mm, addr, pmd);
			entry = pmd_modify(entry, newprot);
			set_pmd_at(mm, addr, pmd, entry);
			spin_unlock(&vma->vm_mm->page_table_lock);
			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
			ret = 1;
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}
pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, *ret = NULL;

	if (address & ~HPAGE_PMD_MASK)
		goto out;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto out;
	if (pmd_page(*pmd) != page)
		goto out;
	/*
	 * split_vma() may create temporary aliased mappings. There is
	 * no risk as long as all huge pmd are found and have their
	 * splitting bit set before __split_huge_page_refcount
	 * runs. Finding the same huge pmd more than once during the
	 * same rmap walk is not a problem.
	 */
	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
	    pmd_trans_splitting(*pmd))
		goto out;
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		ret = pmd;
	}
out:
	return ret;
}
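/*
 * The flag argument selects which pmd state the caller expects:
 * NOTSPLITTING callers want a huge pmd that is not yet being split,
 * while SPLITTING callers require the splitting bit to already be set.
 */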
static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->lock to
		 * serialize against split_huge_page*.
		 */
		pmdp_splitting_flush_notify(vma, address, pmd);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}
static void __split_huge_page_refcount(struct page *page)
{
	int i;
	unsigned long head_index = page->index;
	struct zone *zone = page_zone(page);
	int zonestat;

	/* prevent PageLRU to go away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	compound_lock(page);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;

		/* tail_page->_count cannot change */
		atomic_sub(atomic_read(&page_tail->_count), &page->_count);
		BUG_ON(page_count(page) <= 0);
		atomic_add(page_mapcount(page) + 1, &page_tail->_count);
		BUG_ON(atomic_read(&page_tail->_count) <= 0);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		/*
		 * retain hwpoison flag of the poisoned tail page:
		 *   fix for the unsuitable process killed on Guest Machine(KVM)
		 *   by the memory-failure.
		 */
		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate)));
		page_tail->flags |= (1L << PG_dirty);

		/*
		 * 1) clear PageTail before overwriting first_page
		 * 2) clear PageTail before clearing PageHead for VM_BUG_ON
		 */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, during the split the
		 * mapcount can't change. But that doesn't mean userland
		 * can't keep changing and reading the page contents while
		 * we transfer the mapcount, so the pmd splitting
		 * status is achieved setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		BUG_ON(page_mapcount(page_tail));
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = ++head_index;

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));

		mem_cgroup_split_huge_fixup(page, page_tail);

		lru_add_page_tail(zone, page, page_tail);
	}

	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	/*
	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
	 * so adjust those appropriately if this page is on the LRU.
	 */
	if (PageLRU(page)) {
		zonestat = NR_LRU_BASE + page_lru(page);
		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
	}

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (now become a regular page) is required
	 * to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}
static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
	if (pmd) {
		pgtable = get_pmd_huge_pte(mm);
		pmd_populate(mm, &_pmd, pgtable);

		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
		     i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has the whole access to the hugepage
		 * during the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPUs don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but it
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLBs are identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously. So instead of
		 * doing "pmd_populate(); flush_tlb_range();" we first
		 * mark the current pmd notpresent (atomically because
		 * here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}
/* must be called with anon_vma->root->lock held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma)
{
	int mapcount, mapcount2;
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page);

	mapcount2 = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}
int split_huge_page(struct page *page)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(!PageAnon(page));
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		goto out;
	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma);

	BUG_ON(PageCompound(page));
out_unlock:
	page_unlock_anon_vma(anon_vma);
out:
	return ret;
}
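/*
 * A split therefore happens in three passes over the anon_vma rmap:
 * mark every mapping pmd as splitting (__split_huge_page_splitting),
 * distribute the head page's refcounts and flags to the tail pages
 * (__split_huge_page_refcount), and finally replace each huge pmd with
 * a regular pagetable (__split_huge_page_map).
 */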
#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
		   VM_HUGETLB|VM_SHARED|VM_MAYSHARE)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting a page fault that
		 * may not happen any time soon.
		 */
		if (unlikely(khugepaged_enter_vma_merge(vma)))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static void __init khugepaged_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	mm_slot_cache = NULL;
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}
static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
	mm_slots_hash = NULL;
}
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, hash) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->hash, bucket);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
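/*
 * mm_users reaching zero means the process is exiting and its
 * pagetables are about to be torn down, so khugepaged must drop the
 * mm_slot instead of scanning the mm.
 */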
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON(khugepaged_test_exit(mm));
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
	/*
	 * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
	 * true too, verify it here.
	 */
	VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma);
	return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}

	if (free) {
		spin_unlock(&khugepaged_mm_lock);
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		spin_unlock(&khugepaged_mm_lock);
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	} else
		spin_unlock(&khugepaged_mm_lock);
}
static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval))
			release_pte_page(pte_page(pteval));
	}
}

static void release_all_pte_pages(pte_t *pte)
{
	release_pte_pages(pte, pte + HPAGE_PMD_NR);
}
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page;
	pte_t *_pte;
	int referenced = 0, isolated = 0, none = 0;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else {
				release_pte_pages(pte, _pte);
				goto out;
			}
		}
		if (!pte_present(pteval) || !pte_write(pteval)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		VM_BUG_ON(PageCompound(page));
		BUG_ON(!PageAnon(page));
		VM_BUG_ON(!PageSwapBacked(page));

		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			release_pte_pages(pte, _pte);
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON(!PageLocked(page));
		VM_BUG_ON(PageLRU(page));

		/* If there is no mapped pte young don't collapse the page */
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (unlikely(!referenced))
		release_all_pte_pages(pte);
	else
		isolated = 1;
out:
	return isolated;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON(page_mapcount(src_page) != 1);
			VM_BUG_ON(page_count(src_page) != 2);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma,
			       int node)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *ptl;
	int isolated;
	unsigned long hstart, hend;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#ifndef CONFIG_NUMA
	new_page = *hpage;
	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		up_read(&mm->mmap_sem);
		return;
	}
#else
	/*
	 * Allocate the page while the vma is still valid and under
	 * the mmap_sem read mode so there is no memory allocation
	 * later when we take the mmap_sem in write mode. This is more
	 * friendly behavior (OTOH it may actually hide bugs) to
	 * filesystems in userland with daemons allocating memory in
	 * the userland I/O paths. Allocating memory with the
	 * mmap_sem in read mode is a good idea also to allow greater
	 * scalability.
	 */
	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
				      node);
	if (unlikely(!new_page)) {
		up_read(&mm->mmap_sem);
		*hpage = ERR_PTR(-ENOMEM);
		return;
	}
	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		up_read(&mm->mmap_sem);
		put_page(new_page);
		return;
	}
#endif

	/* after allocating the hugepage upgrade to mmap_sem write mode */
	up_read(&mm->mmap_sem);

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	vma = find_vma(mm, address);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		goto out;

	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	if (!vma->anon_vma || vma->vm_ops)
		goto out;
	if (is_vma_temporary_stack(vma))
		goto out;
	/*
	 * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
	 * true too, verify it here.
	 */
	VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/* pmd can't go away or become huge under us */
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	anon_vma_lock(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	ptl = pte_lockptr(mm, pmd);

	spin_lock(&mm->page_table_lock); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_clear_flush_notify(vma, address, pmd);
	spin_unlock(&mm->page_table_lock);

	spin_lock(ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(&mm->page_table_lock);
		BUG_ON(!pmd_none(*pmd));
		set_pmd_at(mm, address, pmd, _pmd);
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(vma->anon_vma);
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);
	VM_BUG_ON(page_count(pgtable) != 1);
	VM_BUG_ON(page_mapcount(pgtable) != 0);

	_pmd = mk_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
	_pmd = pmd_mkhuge(_pmd);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache(vma, address, _pmd);
	prepare_pmd_huge_pte(pgtable, mm);
	spin_unlock(&mm->page_table_lock);

#ifndef CONFIG_NUMA
	*hpage = NULL;
#endif
	khugepaged_pages_collapsed++;
out_up_write:
	up_write(&mm->mmap_sem);
	return;

out:
	mem_cgroup_uncharge_page(new_page);
#ifdef CONFIG_NUMA
	put_page(new_page);
#endif
	goto out_up_write;
}
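/*
 * collapse_huge_page() thus works in three steps under mmap_sem write
 * mode: clear and flush the pmd so gup_fast cannot race, isolate and
 * copy the HPAGE_PMD_NR small pages into the preallocated hugepage, and
 * finally install the new huge pmd with set_pmd_at().
 */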
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, referenced = 0, none = 0;
	struct page *page;
	unsigned long _address;
	spinlock_t *ptl;
	int node = -1;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out_unmap;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out_unmap;
		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page))
			goto out_unmap;
		/*
		 * Choose the node of the first page. This could
		 * be more sophisticated and look at more pages,
		 * but isn't for now.
		 */
		if (node == -1)
			node = page_to_nid(page);
		VM_BUG_ON(PageCompound(page));
		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
			goto out_unmap;
		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out_unmap;
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (referenced)
		ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret)
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma, node);
out:
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}

		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
		     !khugepaged_always()) ||
		    (vma->vm_flags & VM_NOHUGEPAGE)) {
			progress++;
			continue;
		}
		if (!vma->anon_vma || vma->vm_ops) {
			progress++;
			continue;
		}
		if (is_vma_temporary_stack(vma)) {
			progress++;
			continue;
		}
		/*
		 * If is_pfn_mapping() is true is_linear_pfn_mapping()
		 * must be true too, verify it here.
		 */
		VM_BUG_ON(is_linear_pfn_mapping(vma) ||
			  vma->vm_flags & VM_NO_THP);

		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (khugepaged_scan.address > hend) {
			progress++;
			continue;
		}
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		!khugepaged_enabled();
}
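/*
 * khugepaged_has_work() says there are mms queued to scan while scanning
 * is enabled; khugepaged_wait_event() is the wakeup condition and also
 * fires when khugepaged is disabled, so the daemon can notice and stop.
 */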
static void khugepaged_do_scan(struct page **hpage)
{
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		cond_resched();

		if (!*hpage) {
			*hpage = alloc_hugepage(khugepaged_defrag());
			if (unlikely(!*hpage))
				break;
		}

		if (unlikely(kthread_should_stop() || freezing(current)))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);
	add_wait_queue(&khugepaged_wait, &wait);
	schedule_timeout_interruptible(
		msecs_to_jiffies(
			khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static struct page *khugepaged_alloc_hugepage(void)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage)
			khugepaged_alloc_sleep();
	} while (unlikely(!hpage) &&
		 likely(khugepaged_enabled()));
	return hpage;
}
static void khugepaged_loop(void)
{
	struct page *hpage = NULL;

	while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
		hpage = khugepaged_alloc_hugepage();
		if (unlikely(!hpage))
			break;
#else
		if (IS_ERR(hpage)) {
			khugepaged_alloc_sleep();
			hpage = NULL;
		}
#endif

		khugepaged_do_scan(&hpage);
#ifndef CONFIG_NUMA
		if (hpage)
			put_page(hpage);
#endif
		try_to_freeze();
		if (unlikely(kthread_should_stop()))
			break;
		if (khugepaged_has_work()) {
			DEFINE_WAIT(wait);
			if (!khugepaged_scan_sleep_millisecs)
				continue;
			add_wait_queue(&khugepaged_wait, &wait);
			schedule_timeout_interruptible(
				msecs_to_jiffies(
					khugepaged_scan_sleep_millisecs));
			remove_wait_queue(&khugepaged_wait, &wait);
		} else if (khugepaged_enabled())
			wait_event_freezable(khugepaged_wait,
					     khugepaged_wait_event());
	}
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, 19);

	/* serialize with start_khugepaged() */
	mutex_lock(&khugepaged_mutex);

	for (;;) {
		mutex_unlock(&khugepaged_mutex);
		VM_BUG_ON(khugepaged_thread != current);
		khugepaged_loop();
		VM_BUG_ON(khugepaged_thread != current);

		mutex_lock(&khugepaged_mutex);
		if (!khugepaged_enabled())
			break;
		if (unlikely(kthread_should_stop()))
			break;
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);

	khugepaged_thread = NULL;
	mutex_unlock(&khugepaged_mutex);

	return 0;
}
void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON(!page_count(page));
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	split_huge_page(page);

	put_page(page);
	BUG_ON(pmd_trans_huge(*pmd));
}
static void split_huge_page_address(struct mm_struct *mm,
				    unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;
	/*
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
	split_huge_page_pmd(mm, pmd);
}
void __vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, start);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, end);

	/*
	 * If we're also updating the vma->vm_next->vm_start, if the new
	 * vm_next->vm_start isn't page aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_page_address(next->vm_mm, nstart);
	}
}