/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is only invoked by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions, to avoid the risk of slowing down short lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default collapse a hugepage if there is at least one pte mapped,
 * as would have happened if the vma had been large enough at page
 * fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);
#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;
/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;
	extern int min_free_kbytes;

	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) &&
	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags))
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes)
		min_free_kbytes = recommended_min;
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);
static int start_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		int wakeup;
		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
			err = -ENOMEM;
			goto out;
		}
		mutex_lock(&khugepaged_mutex);
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}
		wakeup = !list_empty(&khugepaged_scan.mm_head);
		mutex_unlock(&khugepaged_mutex);
		if (wakeup)
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else
		/* wakeup to exit */
		wake_up_interruptible(&khugepaged_wait);
out:
	return err;
}
#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err = start_khugepaged();
		if (err)
			ret = err;
	}

	if (ret > 0 &&
	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) ||
	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags)))
		set_recommended_min_free_kbytes();

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
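
/*
 * Usage sketch (illustrative, not part of this file): once
 * hugepage_init() has registered the group, the attribute above shows
 * up as /sys/kernel/mm/transparent_hugepage/enabled and follows the
 * double_flag format:
 *
 *	$ cat /sys/kernel/mm/transparent_hugepage/enabled
 *	[always] madvise never
 *	$ echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * Writing any of "always", "madvise" or "never" flips the two flag
 * bits through double_flag_store() above.
 */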
static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	if (test_bit(flag, &transparent_hugepage_flags))
		return sprintf(buf, "[yes] no\n");
	else
		return sprintf(buf, "yes [no]\n");
}
static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	if (!memcmp("yes", buf,
		    min(sizeof("yes")-1, count))) {
		set_bit(flag, &transparent_hugepage_flags);
	} else if (!memcmp("no", buf,
			   min(sizeof("no")-1, count))) {
		clear_bit(flag, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
/*
 * Currently defrag only toggles __GFP_WAIT in the allocation mask. A
 * blind __GFP_REPEAT would be too aggressive: it's never worth swapping
 * tons of memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}
static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}
static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long pages;
	int err;

	err = strict_strtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = strict_strtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
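
/*
 * Example (assuming 2MiB hugepages over 4KiB base pages, so
 * HPAGE_PMD_NR == 512): the default max_ptes_none of HPAGE_PMD_NR-1 =
 * 511 lets khugepaged collapse a range with a single mapped pte and
 * 511 holes, growing the rss by up to 511 pages per collapse; writing
 * 0 to this file makes khugepaged collapse only fully populated
 * ranges, so its scan never consumes additional free memory.
 */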
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
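
/*
 * Usage sketch (illustrative): because .name above is "khugepaged",
 * these attributes land in a subdirectory of the hugepage kobject,
 * e.g.:
 *
 *	$ echo 100 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *	$ cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */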
static int __init hugepage_init(void)
{
	int err;
	static struct kobject *hugepage_kobj;

	err = -ENOMEM;
	hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!hugepage_kobj)) {
		printk(KERN_ERR "hugepage: failed to create kobject\n");
		goto out;
	}

	err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
		goto out;
	}

	err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
		goto out;
	}

	err = khugepaged_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err) {
		khugepaged_slab_free();
		goto out;
	}

	start_khugepaged();

	set_recommended_min_free_kbytes();

out:
	return err;
}
module_init(hugepage_init)
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING
		       "transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
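
/*
 * Example (illustrative): the same three policies can be chosen at
 * boot, before the sysfs files exist, by passing e.g.
 * "transparent_hugepage=madvise" on the kernel command line; the
 * string is parsed by setup_transparent_hugepage() above.
 */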
static void prepare_pmd_huge_pte(pgtable_t pgtable,
				 struct mm_struct *mm)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}
static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	int ret = 0;
	pgtable_t pgtable;

	VM_BUG_ON(!PageCompound(page));
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_uncharge_page(page);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(page);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		/*
		 * The spinlocking to take the lru_lock inside
		 * page_add_new_anon_rmap() acts as a full memory
		 * barrier to be sure clear_huge_page writes become
		 * visible before the set_pmd_at() write.
		 */
		page_add_new_anon_rmap(page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		prepare_pmd_huge_pte(pgtable, mm);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		spin_unlock(&mm->page_table_lock);
	}

	return ret;
}
static inline gfp_t alloc_hugepage_gfpmask(int defrag)
{
	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
}
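
/*
 * Illustration (assuming the GFP_TRANSHUGE definition in this tree
 * includes __GFP_WAIT): alloc_hugepage_gfpmask(1) returns GFP_TRANSHUGE
 * unchanged, so the allocator may block, reclaim and compact, while
 * alloc_hugepage_gfpmask(0) clears __GFP_WAIT so a fragmented zone
 * fails the hugepage allocation immediately instead of stalling the
 * fault.
 */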
static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
			       HPAGE_PMD_ORDER, vma, haddr);
}

static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag),
			   HPAGE_PMD_ORDER);
}
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pte_t *pte;

	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;
		if (unlikely(khugepaged_enter(vma)))
			return VM_FAULT_OOM;
		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					  vma, haddr);
		if (unlikely(!page))
			goto out;
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			put_page(page);
			goto out;
		}

		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
	}
out:
	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
	 * materialize from under us from a different thread.
	 */
	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
		return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge pmd
	 * from under us anymore at this point because we hold the mmap_sem
	 * in read mode and khugepaged takes it in write mode. So now it's
	 * safe to run pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);
	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
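
/*
 * Worked example (assuming 4KiB pages and 2MiB pmd-sized hugepages, so
 * HPAGE_PMD_MASK clears the low 21 bits): a fault at address
 * 0x7f12345678 rounds down to haddr == 0x7f12200000, and the hugepage
 * path above is taken only if [haddr, haddr + 2MiB) lies entirely
 * inside the vma; otherwise we fall back to the ordinary 4KiB
 * handle_pte_fault() path.
 */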
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	spin_lock(&dst_mm->page_table_lock);
	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(&src_mm->page_table_lock);
		spin_unlock(&dst_mm->page_table_lock);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON(!PageHead(src_page));
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	prepare_pmd_huge_pte(pgtable, dst_mm);

	ret = 0;
out_unlock:
	spin_unlock(&src_mm->page_table_lock);
	spin_unlock(&dst_mm->page_table_lock);
out:
	return ret;
}
/* no "address" argument, so it destroys page coloring on some archs */
pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
					  vma, address);
		if (unlikely(!pages[i] ||
			     mem_cgroup_newpage_charge(pages[i], mm,
						       GFP_KERNEL))) {
			if (pages[i])
				put_page(pages[i]);
			mem_cgroup_uncharge_start();
			while (--i >= 0) {
				mem_cgroup_uncharge_page(pages[i]);
				put_page(pages[i]);
			}
			mem_cgroup_uncharge_end();
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE*i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON(!PageHead(page));

	pmdp_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = get_pmd_huge_pte(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(&mm->page_table_lock);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(&mm->page_table_lock);
	mem_cgroup_uncharge_start();
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mem_cgroup_uncharge_page(pages[i]);
		put_page(pages[i]);
	}
	mem_cgroup_uncharge_end();
	kfree(pages);
	goto out;
}
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	int ret = 0;
	struct page *page, *new_page;
	unsigned long haddr;

	VM_BUG_ON(!vma->anon_vma);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
	haddr = address & HPAGE_PMD_MASK;
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache(vma, address, entry);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow())
		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					      vma, haddr);
	else
		new_page = NULL;

	if (unlikely(!new_page)) {
		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
						   pmd, orig_pmd, page, haddr);
		put_page(page);
		goto out;
	}

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);
		put_page(page);
		ret |= VM_FAULT_OOM;
		goto out;
	}

	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	spin_lock(&mm->page_table_lock);
	put_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		mem_cgroup_uncharge_page(new_page);
		put_page(new_page);
	} else {
		pmd_t entry;
		VM_BUG_ON(!PageHead(page));
		entry = mk_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		pmdp_clear_flush_notify(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache(vma, address, entry);
		page_remove_rmap(page);
		put_page(page);
		ret |= VM_FAULT_WRITE;
	}
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}
struct page *follow_trans_huge_pmd(struct mm_struct *mm,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct page *page = NULL;

	assert_spin_locked(&mm->page_table_lock);

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON(!PageHead(page));
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE but
		 * for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit will become meaningful and
		 * we'll only set it with FOLL_WRITE, an atomic
		 * set_bit will be required on the pmd to set the
		 * young bit, instead of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON(!PageCompound(page));
	if (flags & FOLL_GET)
		get_page(page);

out:
	return page;
}
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd)
{
	int ret = 0;

	spin_lock(&tlb->mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&tlb->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma,
					     pmd);
		} else {
			struct page *page;
			pgtable_t pgtable;
			pgtable = get_pmd_huge_pte(tlb->mm);
			page = pmd_page(*pmd);
			pmd_clear(pmd);
			page_remove_rmap(page);
			VM_BUG_ON(page_mapcount(page) < 0);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
			VM_BUG_ON(!PageHead(page));
			spin_unlock(&tlb->mm->page_table_lock);
			tlb_remove_page(tlb, page);
			pte_free(tlb->mm, pgtable);
			ret = 1;
		}
	} else
		spin_unlock(&tlb->mm->page_table_lock);

	return ret;
}
int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		     unsigned long addr, unsigned long end,
		     unsigned char *vec)
{
	int ret = 0;

	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		ret = !pmd_trans_splitting(*pmd);
		spin_unlock(&vma->vm_mm->page_table_lock);
		if (unlikely(!ret))
			wait_split_huge_page(vma->anon_vma, pmd);
		else {
			/*
			 * All logical pages in the range are present
			 * if backed by a huge page.
			 */
			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		    unsigned long addr, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			pmd_t entry;

			entry = pmdp_get_and_clear(mm, addr, pmd);
			entry = pmd_modify(entry, newprot);
			set_pmd_at(mm, addr, pmd, entry);
			spin_unlock(&vma->vm_mm->page_table_lock);
			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
			ret = 1;
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}
pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, *ret = NULL;

	if (address & ~HPAGE_PMD_MASK)
		goto out;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto out;
	if (pmd_page(*pmd) != page)
		goto out;
	VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
		  pmd_trans_splitting(*pmd));
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		ret = pmd;
	}
out:
	return ret;
}
static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->lock to
		 * serialize against split_huge_page*.
		 */
		pmdp_splitting_flush_notify(vma, address, pmd);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}
static void __split_huge_page_refcount(struct page *page)
{
	int i;
	unsigned long head_index = page->index;
	struct zone *zone = page_zone(page);

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	compound_lock(page);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;

		/* tail_page->_count cannot change */
		atomic_sub(atomic_read(&page_tail->_count), &page->_count);
		BUG_ON(page_count(page) <= 0);
		atomic_add(page_mapcount(page) + 1, &page_tail->_count);
		BUG_ON(atomic_read(&page_tail->_count) <= 0);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate)));
		page_tail->flags |= (1L << PG_dirty);

		/*
		 * 1) clear PageTail before overwriting first_page
		 * 2) clear PageTail before clearing PageHead for VM_BUG_ON
		 */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, the mapcount can't change
		 * during the split. But that doesn't mean userland
		 * can't keep changing and reading the page contents
		 * while we transfer the mapcount, so the pmd splitting
		 * status is achieved by setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		BUG_ON(page_mapcount(page_tail));
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = ++head_index;

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));

		lru_add_page_tail(zone, page, page_tail);
	}

	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (which has now become a regular page) is
	 * required to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}
static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
	if (pmd) {
		pgtable = get_pmd_huge_pte(mm);
		pmd_populate(mm, &_pmd, pgtable);

		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
		     i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has the whole access to the hugepage
		 * during the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPUs don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but it
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLBs are identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously. So instead of
		 * doing "pmd_populate(); flush_tlb_range();" we first
		 * mark the current pmd notpresent (atomically because
		 * here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}
/* must be called with anon_vma->root->lock held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma)
{
	int mapcount, mapcount2;
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page);

	mapcount2 = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}
int split_huge_page(struct page *page)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(!PageAnon(page));
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		goto out;
	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma);

	BUG_ON(PageCompound(page));
out_unlock:
	page_unlock_anon_vma(anon_vma);
out:
	return ret;
}
int hugepage_madvise(unsigned long *vm_flags)
{
	/*
	 * Be somewhat over-protective like KSM for now!
	 */
	if (*vm_flags & (VM_HUGEPAGE | VM_SHARED | VM_MAYSHARE |
			 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
			 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
			 VM_MIXEDMAP | VM_SAO))
		return -EINVAL;

	*vm_flags |= VM_HUGEPAGE;

	return 0;
}
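
/*
 * Userspace sketch (illustrative): with the "madvise" policy an
 * application opts a region in with, e.g.:
 *
 *	buf = mmap(NULL, 16UL << 20, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, 16UL << 20, MADV_HUGEPAGE);
 *
 * which reaches this function and sets VM_HUGEPAGE on the vma, making
 * it eligible for hugepage faults and khugepaged collapse.
 */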
static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static void __init khugepaged_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	mm_slot_cache = NULL;
}
static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}
static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
	mm_slots_hash = NULL;
}
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, hash) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}
static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->hash, bucket);
}
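
/*
 * Bucket math example (illustrative; the sizes are assumptions): the
 * hash key is simply the mm pointer scaled down by
 * sizeof(struct mm_struct). If sizeof(struct mm_struct) were 896 bytes
 * and mm == 0xffff880012345600, the bucket would be
 * (0xffff880012345600 / 896) % 1024. Since mm_structs come from the
 * same slab, consecutive allocations tend to spread evenly across the
 * 1024 buckets.
 */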
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON(khugepaged_test_exit(mm));
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down first.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_file || vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma);
	return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}

	if (free) {
		spin_unlock(&khugepaged_mm_lock);
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		spin_unlock(&khugepaged_mm_lock);
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	} else
		spin_unlock(&khugepaged_mm_lock);
}
static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval))
			release_pte_page(pte_page(pteval));
	}
}

static void release_all_pte_pages(pte_t *pte)
{
	release_pte_pages(pte, pte + HPAGE_PMD_NR);
}
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page;
	pte_t *_pte;
	int referenced = 0, isolated = 0, none = 0;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else {
				release_pte_pages(pte, _pte);
				goto out;
			}
		}
		if (!pte_present(pteval) || !pte_write(pteval)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		VM_BUG_ON(PageCompound(page));
		BUG_ON(!PageAnon(page));
		VM_BUG_ON(!PageSwapBacked(page));

		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			release_pte_pages(pte, _pte);
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON(!PageLocked(page));
		VM_BUG_ON(PageLRU(page));

		/* If there is no mapped pte young don't collapse the page */
		if (pte_young(pteval))
			referenced = 1;
	}
	if (unlikely(!referenced))
		release_all_pte_pages(pte);
	else
		isolated = 1;
out:
	return isolated;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON(page_mapcount(src_page) != 1);
			VM_BUG_ON(page_count(src_page) != 2);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *ptl;
	int isolated;
	unsigned long hstart, hend;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	vma = find_vma(mm, address);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		goto out;

	if (!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		goto out;

	/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
		goto out;
	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/* pmd can't go away or become huge under us */
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
	if (unlikely(!new_page)) {
		*hpage = ERR_PTR(-ENOMEM);
		goto out;
	}
	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
		goto out;

	anon_vma_lock(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	ptl = pte_lockptr(mm, pmd);

	spin_lock(&mm->page_table_lock); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_clear_flush_notify(vma, address, pmd);
	spin_unlock(&mm->page_table_lock);

	spin_lock(ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(ptl);
	pte_unmap(pte);

	if (unlikely(!isolated)) {
		spin_lock(&mm->page_table_lock);
		BUG_ON(!pmd_none(*pmd));
		set_pmd_at(mm, address, pmd, _pmd);
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(vma->anon_vma);
		mem_cgroup_uncharge_page(new_page);
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);
	VM_BUG_ON(page_count(pgtable) != 1);
	VM_BUG_ON(page_mapcount(pgtable) != 0);

	_pmd = mk_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
	_pmd = pmd_mkhuge(_pmd);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache(vma, address, _pmd);
	prepare_pmd_huge_pte(pgtable, mm);
	spin_unlock(&mm->page_table_lock);

	khugepaged_pages_collapsed++;
out:
	up_write(&mm->mmap_sem);
}
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, referenced = 0, none = 0;
	struct page *page;
	unsigned long _address;
	spinlock_t *ptl;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out_unmap;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out_unmap;
		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page))
			goto out_unmap;
		VM_BUG_ON(PageCompound(page));
		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
			goto out_unmap;
		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out_unmap;
		if (pte_young(pteval))
			referenced = 1;
	}
	if (referenced)
		ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		up_read(&mm->mmap_sem);
		collapse_huge_page(mm, address, hpage);
	}
out:
	return ret;
}
*mm_slot
)
1878 struct mm_struct
*mm
= mm_slot
->mm
;
1880 VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock
));
1882 if (khugepaged_test_exit(mm
)) {
1884 hlist_del(&mm_slot
->hash
);
1885 list_del(&mm_slot
->mm_node
);
1888 * Not strictly needed because the mm exited already.
1890 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1893 /* khugepaged_mm_lock actually not necessary for the below */
1894 free_mm_slot(mm_slot
);
1899 static unsigned int khugepaged_scan_mm_slot(unsigned int pages
,
1900 struct page
**hpage
)
1902 struct mm_slot
*mm_slot
;
1903 struct mm_struct
*mm
;
1904 struct vm_area_struct
*vma
;
1908 VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock
));
1910 if (khugepaged_scan
.mm_slot
)
1911 mm_slot
= khugepaged_scan
.mm_slot
;
1913 mm_slot
= list_entry(khugepaged_scan
.mm_head
.next
,
1914 struct mm_slot
, mm_node
);
1915 khugepaged_scan
.address
= 0;
1916 khugepaged_scan
.mm_slot
= mm_slot
;
1918 spin_unlock(&khugepaged_mm_lock
);
1921 down_read(&mm
->mmap_sem
);
1922 if (unlikely(khugepaged_test_exit(mm
)))
1925 vma
= find_vma(mm
, khugepaged_scan
.address
);
1928 for (; vma
; vma
= vma
->vm_next
) {
1929 unsigned long hstart
, hend
;
1932 if (unlikely(khugepaged_test_exit(mm
))) {
1937 if (!(vma
->vm_flags
& VM_HUGEPAGE
) &&
1938 !khugepaged_always()) {
1943 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1944 if (!vma
->anon_vma
|| vma
->vm_ops
|| vma
->vm_file
) {
1945 khugepaged_scan
.address
= vma
->vm_end
;
1949 VM_BUG_ON(is_linear_pfn_mapping(vma
) || is_pfn_mapping(vma
));
1951 hstart
= (vma
->vm_start
+ ~HPAGE_PMD_MASK
) & HPAGE_PMD_MASK
;
1952 hend
= vma
->vm_end
& HPAGE_PMD_MASK
;
1953 if (hstart
>= hend
) {
1957 if (khugepaged_scan
.address
< hstart
)
1958 khugepaged_scan
.address
= hstart
;
1959 if (khugepaged_scan
.address
> hend
) {
1960 khugepaged_scan
.address
= hend
+ HPAGE_PMD_SIZE
;
1964 BUG_ON(khugepaged_scan
.address
& ~HPAGE_PMD_MASK
);
1966 while (khugepaged_scan
.address
< hend
) {
1969 if (unlikely(khugepaged_test_exit(mm
)))
1970 goto breakouterloop
;
1972 VM_BUG_ON(khugepaged_scan
.address
< hstart
||
1973 khugepaged_scan
.address
+ HPAGE_PMD_SIZE
>
1975 ret
= khugepaged_scan_pmd(mm
, vma
,
1976 khugepaged_scan
.address
,
1978 /* move to next address */
1979 khugepaged_scan
.address
+= HPAGE_PMD_SIZE
;
1980 progress
+= HPAGE_PMD_NR
;
1982 /* we released mmap_sem so break loop */
1983 goto breakouterloop_mmap_sem
;
1984 if (progress
>= pages
)
1985 goto breakouterloop
;
1989 up_read(&mm
->mmap_sem
); /* exit_mmap will destroy ptes after this */
1990 breakouterloop_mmap_sem
:
1992 spin_lock(&khugepaged_mm_lock
);
1993 BUG_ON(khugepaged_scan
.mm_slot
!= mm_slot
);
1995 * Release the current mm_slot if this mm is about to die, or
1996 * if we scanned all vmas of this mm.
1998 if (khugepaged_test_exit(mm
) || !vma
) {
2000 * Make sure that if mm_users is reaching zero while
2001 * khugepaged runs here, khugepaged_exit will find
2002 * mm_slot not pointing to the exiting mm.
2004 if (mm_slot
->mm_node
.next
!= &khugepaged_scan
.mm_head
) {
2005 khugepaged_scan
.mm_slot
= list_entry(
2006 mm_slot
->mm_node
.next
,
2007 struct mm_slot
, mm_node
);
2008 khugepaged_scan
.address
= 0;
2010 khugepaged_scan
.mm_slot
= NULL
;
2011 khugepaged_full_scans
++;
2014 collect_mm_slot(mm_slot
);
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		!khugepaged_enabled();
}
static void khugepaged_do_scan(struct page **hpage)
{
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		cond_resched();

		if (!*hpage) {
			*hpage = alloc_hugepage(khugepaged_defrag());
			if (unlikely(!*hpage))
				break;
		}

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);
	add_wait_queue(&khugepaged_wait, &wait);
	schedule_timeout_interruptible(
		msecs_to_jiffies(
			khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}
static struct page *khugepaged_alloc_hugepage(void)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage)
			khugepaged_alloc_sleep();
	} while (unlikely(!hpage) &&
		 likely(khugepaged_enabled()));
	return hpage;
}
static void khugepaged_loop(void)
{
	struct page *hpage;

	while (likely(khugepaged_enabled())) {
		hpage = khugepaged_alloc_hugepage();
		if (unlikely(!hpage))
			break;
		if (IS_ERR(hpage)) {
			khugepaged_alloc_sleep();
			hpage = NULL;
		}

		khugepaged_do_scan(&hpage);
		if (hpage && !IS_ERR(hpage))
			put_page(hpage);
		if (khugepaged_has_work()) {
			DEFINE_WAIT(wait);
			if (!khugepaged_scan_sleep_millisecs)
				continue;
			add_wait_queue(&khugepaged_wait, &wait);
			schedule_timeout_interruptible(
				msecs_to_jiffies(
					khugepaged_scan_sleep_millisecs));
			remove_wait_queue(&khugepaged_wait, &wait);
		} else if (khugepaged_enabled())
			wait_event_interruptible(khugepaged_wait,
						 khugepaged_wait_event());
	}
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_user_nice(current, 19);

	/* serialize with start_khugepaged() */
	mutex_lock(&khugepaged_mutex);

	for (;;) {
		mutex_unlock(&khugepaged_mutex);
		BUG_ON(khugepaged_thread != current);
		khugepaged_loop();
		BUG_ON(khugepaged_thread != current);

		mutex_lock(&khugepaged_mutex);
		if (!khugepaged_enabled())
			break;
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);

	khugepaged_thread = NULL;
	mutex_unlock(&khugepaged_mutex);

	return 0;
}
void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON(!page_count(page));
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	split_huge_page(page);
	put_page(page);
	BUG_ON(pmd_trans_huge(*pmd));
}