// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/pgalloc.h>
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
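/*
 * SCAN_* values such as the ones above are the status codes khugepaged
 * stores in "result" while scanning and collapsing; they are reported
 * through the huge_memory tracepoints included below.
 */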
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

/* default scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;
/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long pages;
	int err;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages over
 * any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
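/*
 * khugepaged_attr_group is registered by the THP sysfs setup code under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, so the knobs above can be
 * read and tuned from userspace, e.g. (illustrative values only):
 *
 *	cat  /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 *	echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 */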
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
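/*
 * Userspace opts a mapping in or out of collapsing with madvise(2); e.g.
 * (illustrative, from a process' own code):
 *
 *	madvise(addr, length, MADV_HUGEPAGE);	// allow khugepaged here
 *	madvise(addr, length, MADV_NOHUGEPAGE);	// and forbid it again
 */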
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}
static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}
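/*
 * An mm counts as "exiting" once its last user has dropped mm_users: at that
 * point exit_mmap() is about to tear the page tables down, so khugepaged
 * must stop touching this mm.
 */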
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
		/* khugepaged not yet working on file or special mappings */
		return 0;
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}
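/*
 * __collapse_huge_page_isolate() checks the HPAGE_PMD_NR small pages behind
 * one pmd under the pte lock: it counts none/zero ptes against
 * max_ptes_none, refuses pages with extra (e.g. gup) references, and takes
 * each remaining page off the LRU with its page lock held.  On any failure
 * it rolls back via release_pte_pages() above and returns 0.
 */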
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(PageCompound(page), page);
		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing an hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young pte to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}
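/*
 * __collapse_huge_page_copy() runs after isolation succeeded: it copies (or
 * clears, for none/zero ptes) each small page into the matching subpage of
 * the new huge page, clears the old ptes, removes the old pages from rmap
 * and frees them.
 */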
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}
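/*
 * GFP_TRANSHUGE permits direct reclaim/compaction while GFP_TRANSHUGE_LIGHT
 * does not, so the "defrag" knob above decides how aggressively khugepaged's
 * huge page allocations may behave.
 */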
#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif
static bool hugepage_vma_check(struct vm_area_struct *vma)
{
	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	if (shmem_file(vma->vm_file)) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
}
/*
 * If mmap_sem temporarily dropped, revalidate vma
 * before taking mmap_sem.
 * Return 0 if succeeds, otherwise return a non-zero SCAN_* code.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma))
		return SCAN_VMA_CHECK;
	return 0;
}
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0, ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swapin, if there is enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
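/*
 * collapse_huge_page() is the anonymous-memory collapse step proper: it is
 * entered with mmap_sem held for read, drops it to allocate the huge page,
 * re-takes it (finally for write), isolates and copies the small pages and
 * installs the huge pmd.  On every path it returns with mmap_sem released,
 * which is why khugepaged_scan_pmd() treats a collapse attempt as
 * "mmap_sem gone".
 */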
static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}
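/*
 * khugepaged_scan_pmd() scans one pmd-sized, pmd-aligned range: it counts
 * none/zero and swapped-out ptes against the max_ptes_none/max_ptes_swap
 * limits, rejects unsuitable pages (non-anon, locked, off-LRU, extra
 * references) and, if the range qualifies and enough ptes are young, calls
 * collapse_huge_page() for it.
 */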
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* probably overkill */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		pmd = mm_find_pmd(vma->vm_mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract page table.
		 * If trylock fails we would end up with pte-mapped THP after
		 * re-fault. Not ideal, but it's more important to not disturb
		 * the system too much.
		 */
		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			spin_unlock(ptl);
			up_write(&vma->vm_mm->mmap_sem);
			mm_dec_nr_ptes(vma->vm_mm);
			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
		}
	}
	i_mmap_unlock_write(mapping);
}
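/*
 * Retracting the pte page tables above is what lets a later fault on the
 * range map the new compound page with a huge pmd instead of re-using the
 * old pte mappings of the small pages.
 */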
/*
 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and freeze a new huge page;
 *  - scan over radix tree replacing old pages with the new one
 *    + swap in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeded:
 *    + copy data over;
 *    + free old pages;
 *    + unfreeze huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the radix-tree;
 *    + free huge page;
 */
static void collapse_shmem(struct mm_struct *mm,
		struct address_space *mapping, pgoff_t start,
		struct page **hpage, int node)
{
	gfp_t gfp;
	struct page *page, *new_page, *tmp;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	struct radix_tree_iter iter;
	void **slot;
	int nr_none = 0, result = SCAN_SUCCEED;

	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	new_page->index = start;
	new_page->mapping = mapping;
	__SetPageSwapBacked(new_page);
	__SetPageLocked(new_page);
	BUG_ON(!page_ref_freeze(new_page, 1));

	/*
	 * At this point the new_page is 'frozen' (page_count() is zero), locked
	 * and not up-to-date. It's safe to insert it into radix tree, because
	 * nobody would be able to map it or use it in other way until we
	 * unfreeze it.
	 */

	index = start;
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		int n = min(iter.index, end) - index;

		/*
		 * Handle holes in the radix tree: charge it from shmem and
		 * insert relevant subpage of new_page into the radix-tree.
		 */
		if (n && !shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			break;
		}
		nr_none += n;
		for (; index < min(iter.index, end); index++) {
			radix_tree_insert(&mapping->page_tree, index,
					new_page + (index % HPAGE_PMD_NR));
		}

		/* We are done. */
		if (index >= end)
			break;

		page = radix_tree_deref_slot_protected(slot,
				&mapping->tree_lock);
		if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
			spin_unlock_irq(&mapping->tree_lock);
			/* swap in or instantiate fallocated page */
			if (shmem_getpage(mapping->host, index, &page,
						SGP_NOHUGE)) {
				result = SCAN_FAIL;
				goto tree_unlocked;
			}
			spin_lock_irq(&mapping->tree_lock);
		} else if (trylock_page(page)) {
			get_page(page);
		} else {
			result = SCAN_PAGE_LOCK;
			break;
		}

		/*
		 * The page must be locked, so we can drop the tree_lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageUptodate(page), page);
		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}
		spin_unlock_irq(&mapping->tree_lock);

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_isolate_failed;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		spin_lock_irq(&mapping->tree_lock);

		slot = radix_tree_lookup_slot(&mapping->page_tree, index);
		VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
					&mapping->tree_lock), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from radix tree;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			goto out_lru;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something go wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		radix_tree_replace_slot(&mapping->page_tree, slot,
				new_page + (index % HPAGE_PMD_NR));

		slot = radix_tree_iter_resume(slot, &iter);
		index++;
		continue;
out_lru:
		spin_unlock_irq(&mapping->tree_lock);
		putback_lru_page(page);
out_isolate_failed:
		unlock_page(page);
		put_page(page);
		goto tree_unlocked;
out_unlock:
		unlock_page(page);
		put_page(page);
		break;
	}

	/*
	 * Handle hole in radix tree at the end of the range.
	 * This code only triggers if there's nothing in radix tree
	 * beyond 'end'.
	 */
	if (result == SCAN_SUCCEED && index < end) {
		int n = end - index;

		if (!shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}

		for (; index < end; index++) {
			radix_tree_insert(&mapping->page_tree, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;
	}

tree_locked:
	spin_unlock_irq(&mapping->tree_lock);
tree_unlocked:

	if (result == SCAN_SUCCEED) {
		unsigned long flags;
		struct zone *zone = page_zone(new_page);

		/*
		 * Replacing old pages with the new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			unlock_page(page);
			page_ref_unfreeze(page, 1);
			page->mapping = NULL;
			ClearPageActive(page);
			ClearPageUnevictable(page);
			put_page(page);
		}

		local_irq_save(flags);
		__inc_node_page_state(new_page, NR_SHMEM_THPS);
		if (nr_none) {
			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
		}
		local_irq_restore(flags);

		/*
		 * Remove pte page tables, so we can re-fault
		 * the page as huge.
		 */
		retract_page_tables(mapping, start);

		/* Everything is ready, let's unfreeze the new_page */
		set_page_dirty(new_page);
		SetPageUptodate(new_page);
		page_ref_unfreeze(new_page, HPAGE_PMD_NR);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_anon(new_page);
		unlock_page(new_page);

		*hpage = NULL;
	} else {
		/* Something went wrong: rollback changes to the radix-tree */
		shmem_uncharge(mapping->host, nr_none);
		spin_lock_irq(&mapping->tree_lock);
		radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
				start) {
			if (iter.index >= end)
				break;
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || iter.index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				radix_tree_delete(&mapping->page_tree,
						  iter.index);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != iter.index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			radix_tree_replace_slot(&mapping->page_tree,
						slot, page);
			slot = radix_tree_iter_resume(slot, &iter);
			spin_unlock_irq(&mapping->tree_lock);
			putback_lru_page(page);
			unlock_page(page);
			spin_lock_irq(&mapping->tree_lock);
		}
		VM_BUG_ON(nr_none);
		spin_unlock_irq(&mapping->tree_lock);

		/* Unfreeze new_page, caller would take care about freeing it */
		page_ref_unfreeze(new_page, 1);
		mem_cgroup_cancel_charge(new_page, memcg, true);
		unlock_page(new_page);
		new_page->mapping = NULL;
	}
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}
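/*
 * khugepaged_scan_shmem() below is the shmem counterpart of
 * khugepaged_scan_pmd(): it walks the radix tree over one aligned
 * HPAGE_PMD_NR range, applies the max_ptes_swap/max_ptes_none style limits
 * to swapped-out and absent entries, and calls collapse_shmem() when the
 * range looks worth collapsing.
 */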
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct radix_tree_iter iter;
	void **slot;
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		if (iter.index >= start + HPAGE_PMD_NR)
			break;

		page = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exception(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) != 1 + page_mapcount(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_shmem(mm, mapping, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}
#endif
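/*
 * khugepaged_scan_mm_slot() drives one batch of work: it picks up (or
 * continues) the current mm, walks its vmas from the saved address and scans
 * up to "pages" ptes, temporarily dropping khugepaged_mm_lock while it holds
 * that mm's mmap_sem.  It advances the global khugepaged_scan cursor so the
 * next call resumes where this one stopped.
 */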
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times).  Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
		goto breakouterloop_mmap_sem;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (shmem_file(vma->vm_file)) {
				struct file *file;
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);
				if (!shmem_huge_enabled(vma))
					goto skip;
				file = get_file(vma->vm_file);
				up_read(&mm->mmap_sem);
				ret = 1;
				khugepaged_scan_shmem(mm, file->f_mapping,
						pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}
int start_stop_khugepaged(void)
{
	static struct task_struct *khugepaged_thread __read_mostly;
	static DEFINE_MUTEX(khugepaged_mutex);
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}