mm/khugepaged.c
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
22 #include <asm/tlb.h>
23 #include <asm/pgalloc.h>
24 #include "internal.h"
26 enum scan_result {
27 SCAN_FAIL,
28 SCAN_SUCCEED,
29 SCAN_PMD_NULL,
30 SCAN_EXCEED_NONE_PTE,
31 SCAN_PTE_NON_PRESENT,
32 SCAN_PAGE_RO,
33 SCAN_LACK_REFERENCED_PAGE,
34 SCAN_PAGE_NULL,
35 SCAN_SCAN_ABORT,
36 SCAN_PAGE_COUNT,
37 SCAN_PAGE_LRU,
38 SCAN_PAGE_LOCK,
39 SCAN_PAGE_ANON,
40 SCAN_PAGE_COMPOUND,
41 SCAN_ANY_PROCESS,
42 SCAN_VMA_NULL,
43 SCAN_VMA_CHECK,
44 SCAN_ADDRESS_RANGE,
45 SCAN_SWAP_CACHE_PAGE,
46 SCAN_DEL_PAGE_LRU,
47 SCAN_ALLOC_HUGE_PAGE_FAIL,
48 SCAN_CGROUP_CHARGE_FAIL,
49 SCAN_EXCEED_SWAP_PTE,
50 SCAN_TRUNCATED,
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/huge_memory.h>
 56 /* default scan 8*512 pte (or vmas) every 30 seconds */
57 static unsigned int khugepaged_pages_to_scan __read_mostly;
58 static unsigned int khugepaged_pages_collapsed;
59 static unsigned int khugepaged_full_scans;
60 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
61 /* during fragmentation poll the hugepage allocator once every minute */
62 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
63 static unsigned long khugepaged_sleep_expire;
64 static DEFINE_SPINLOCK(khugepaged_mm_lock);
65 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
67 * default collapse hugepages if there is at least one pte mapped like
68 * it would have happened if the vma was large enough during page
69 * fault.
71 static unsigned int khugepaged_max_ptes_none __read_mostly;
72 static unsigned int khugepaged_max_ptes_swap __read_mostly;
74 #define MM_SLOTS_HASH_BITS 10
75 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
77 static struct kmem_cache *mm_slot_cache __read_mostly;
79 /**
80 * struct mm_slot - hash lookup from mm to mm_slot
81 * @hash: hash collision list
82 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
83 * @mm: the mm that this information is valid for
85 struct mm_slot {
86 struct hlist_node hash;
87 struct list_head mm_node;
88 struct mm_struct *mm;
91 /**
92 * struct khugepaged_scan - cursor for scanning
93 * @mm_head: the head of the mm list to scan
94 * @mm_slot: the current mm_slot we are scanning
95 * @address: the next address inside that to be scanned
97 * There is only the one khugepaged_scan instance of this cursor structure.
99 struct khugepaged_scan {
100 struct list_head mm_head;
101 struct mm_slot *mm_slot;
102 unsigned long address;
105 static struct khugepaged_scan khugepaged_scan = {
106 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
109 #ifdef CONFIG_SYSFS
110 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
111 struct kobj_attribute *attr,
112 char *buf)
114 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
117 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
118 struct kobj_attribute *attr,
119 const char *buf, size_t count)
121 unsigned long msecs;
122 int err;
124 err = kstrtoul(buf, 10, &msecs);
125 if (err || msecs > UINT_MAX)
126 return -EINVAL;
128 khugepaged_scan_sleep_millisecs = msecs;
129 khugepaged_sleep_expire = 0;
130 wake_up_interruptible(&khugepaged_wait);
132 return count;
134 static struct kobj_attribute scan_sleep_millisecs_attr =
135 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
136 scan_sleep_millisecs_store);
138 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
139 struct kobj_attribute *attr,
140 char *buf)
142 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
145 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
146 struct kobj_attribute *attr,
147 const char *buf, size_t count)
149 unsigned long msecs;
150 int err;
152 err = kstrtoul(buf, 10, &msecs);
153 if (err || msecs > UINT_MAX)
154 return -EINVAL;
156 khugepaged_alloc_sleep_millisecs = msecs;
157 khugepaged_sleep_expire = 0;
158 wake_up_interruptible(&khugepaged_wait);
160 return count;
162 static struct kobj_attribute alloc_sleep_millisecs_attr =
163 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
164 alloc_sleep_millisecs_store);
166 static ssize_t pages_to_scan_show(struct kobject *kobj,
167 struct kobj_attribute *attr,
168 char *buf)
170 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
172 static ssize_t pages_to_scan_store(struct kobject *kobj,
173 struct kobj_attribute *attr,
174 const char *buf, size_t count)
176 int err;
177 unsigned long pages;
179 err = kstrtoul(buf, 10, &pages);
180 if (err || !pages || pages > UINT_MAX)
181 return -EINVAL;
183 khugepaged_pages_to_scan = pages;
185 return count;
187 static struct kobj_attribute pages_to_scan_attr =
188 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
189 pages_to_scan_store);
191 static ssize_t pages_collapsed_show(struct kobject *kobj,
192 struct kobj_attribute *attr,
193 char *buf)
195 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
197 static struct kobj_attribute pages_collapsed_attr =
198 __ATTR_RO(pages_collapsed);
200 static ssize_t full_scans_show(struct kobject *kobj,
201 struct kobj_attribute *attr,
202 char *buf)
204 return sprintf(buf, "%u\n", khugepaged_full_scans);
206 static struct kobj_attribute full_scans_attr =
207 __ATTR_RO(full_scans);
209 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
210 struct kobj_attribute *attr, char *buf)
212 return single_hugepage_flag_show(kobj, attr, buf,
213 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
215 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
216 struct kobj_attribute *attr,
217 const char *buf, size_t count)
219 return single_hugepage_flag_store(kobj, attr, buf, count,
220 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
222 static struct kobj_attribute khugepaged_defrag_attr =
223 __ATTR(defrag, 0644, khugepaged_defrag_show,
224 khugepaged_defrag_store);
 227 * max_ptes_none controls whether khugepaged should collapse hugepages over
 228 * any unmapped ptes, in turn potentially increasing the memory
 229 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 230 * reduce the available free memory in the system as it
 231 * runs. Increasing max_ptes_none will instead potentially reduce the
 232 * free memory in the system during the khugepaged scan.
234 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
235 struct kobj_attribute *attr,
236 char *buf)
238 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
240 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
241 struct kobj_attribute *attr,
242 const char *buf, size_t count)
244 int err;
245 unsigned long max_ptes_none;
247 err = kstrtoul(buf, 10, &max_ptes_none);
248 if (err || max_ptes_none > HPAGE_PMD_NR-1)
249 return -EINVAL;
251 khugepaged_max_ptes_none = max_ptes_none;
253 return count;
255 static struct kobj_attribute khugepaged_max_ptes_none_attr =
256 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
257 khugepaged_max_ptes_none_store);
259 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
260 struct kobj_attribute *attr,
261 char *buf)
263 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
266 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
267 struct kobj_attribute *attr,
268 const char *buf, size_t count)
270 int err;
271 unsigned long max_ptes_swap;
273 err = kstrtoul(buf, 10, &max_ptes_swap);
274 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
275 return -EINVAL;
277 khugepaged_max_ptes_swap = max_ptes_swap;
279 return count;
282 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
283 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
284 khugepaged_max_ptes_swap_store);
286 static struct attribute *khugepaged_attr[] = {
287 &khugepaged_defrag_attr.attr,
288 &khugepaged_max_ptes_none_attr.attr,
289 &pages_to_scan_attr.attr,
290 &pages_collapsed_attr.attr,
291 &full_scans_attr.attr,
292 &scan_sleep_millisecs_attr.attr,
293 &alloc_sleep_millisecs_attr.attr,
294 &khugepaged_max_ptes_swap_attr.attr,
295 NULL,
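/* Tunables above are exported through sysfs in the "khugepaged" attribute group. */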
298 struct attribute_group khugepaged_attr_group = {
299 .attrs = khugepaged_attr,
300 .name = "khugepaged",
302 #endif /* CONFIG_SYSFS */
304 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
306 int hugepage_madvise(struct vm_area_struct *vma,
307 unsigned long *vm_flags, int advice)
309 switch (advice) {
310 case MADV_HUGEPAGE:
311 #ifdef CONFIG_S390
313 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
314 * can't handle this properly after s390_enable_sie, so we simply
315 * ignore the madvise to prevent qemu from causing a SIGSEGV.
317 if (mm_has_pgste(vma->vm_mm))
318 return 0;
319 #endif
320 *vm_flags &= ~VM_NOHUGEPAGE;
321 *vm_flags |= VM_HUGEPAGE;
 323 * If the vma becomes good for khugepaged to scan,
 324 * register it here without waiting for a page fault that
 325 * may not happen any time soon.
327 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
328 khugepaged_enter_vma_merge(vma, *vm_flags))
329 return -ENOMEM;
330 break;
331 case MADV_NOHUGEPAGE:
332 *vm_flags &= ~VM_HUGEPAGE;
333 *vm_flags |= VM_NOHUGEPAGE;
 335 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
 336 * this vma even if the mm stays registered in khugepaged
 337 * (it may have been registered before VM_NOHUGEPAGE was set).
339 break;
342 return 0;
345 int __init khugepaged_init(void)
347 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
348 sizeof(struct mm_slot),
349 __alignof__(struct mm_slot), 0, NULL);
350 if (!mm_slot_cache)
351 return -ENOMEM;
353 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
354 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
355 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
357 return 0;
360 void __init khugepaged_destroy(void)
362 kmem_cache_destroy(mm_slot_cache);
365 static inline struct mm_slot *alloc_mm_slot(void)
367 if (!mm_slot_cache) /* initialization failed */
368 return NULL;
369 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
372 static inline void free_mm_slot(struct mm_slot *mm_slot)
374 kmem_cache_free(mm_slot_cache, mm_slot);
377 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
379 struct mm_slot *mm_slot;
381 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
382 if (mm == mm_slot->mm)
383 return mm_slot;
385 return NULL;
388 static void insert_to_mm_slots_hash(struct mm_struct *mm,
389 struct mm_slot *mm_slot)
391 mm_slot->mm = mm;
392 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
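/*
 * The mm counts as exiting once mm_users has dropped to zero;
 * khugepaged must stop touching its page tables at that point.
 */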
395 static inline int khugepaged_test_exit(struct mm_struct *mm)
397 return atomic_read(&mm->mm_users) == 0;
400 int __khugepaged_enter(struct mm_struct *mm)
402 struct mm_slot *mm_slot;
403 int wakeup;
405 mm_slot = alloc_mm_slot();
406 if (!mm_slot)
407 return -ENOMEM;
409 /* __khugepaged_exit() must not run from under us */
410 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
411 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
412 free_mm_slot(mm_slot);
413 return 0;
416 spin_lock(&khugepaged_mm_lock);
417 insert_to_mm_slots_hash(mm, mm_slot);
419 * Insert just behind the scanning cursor, to let the area settle
420 * down a little.
422 wakeup = list_empty(&khugepaged_scan.mm_head);
423 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
424 spin_unlock(&khugepaged_mm_lock);
426 mmgrab(mm);
427 if (wakeup)
428 wake_up_interruptible(&khugepaged_wait);
430 return 0;
433 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
434 unsigned long vm_flags)
436 unsigned long hstart, hend;
437 if (!vma->anon_vma)
439 * Not yet faulted in so we will register later in the
440 * page fault if needed.
442 return 0;
443 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
444 /* khugepaged not yet working on file or special mappings */
445 return 0;
446 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
447 hend = vma->vm_end & HPAGE_PMD_MASK;
448 if (hstart < hend)
449 return khugepaged_enter(vma, vm_flags);
450 return 0;
453 void __khugepaged_exit(struct mm_struct *mm)
455 struct mm_slot *mm_slot;
456 int free = 0;
458 spin_lock(&khugepaged_mm_lock);
459 mm_slot = get_mm_slot(mm);
460 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
461 hash_del(&mm_slot->hash);
462 list_del(&mm_slot->mm_node);
463 free = 1;
465 spin_unlock(&khugepaged_mm_lock);
467 if (free) {
468 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
469 free_mm_slot(mm_slot);
470 mmdrop(mm);
471 } else if (mm_slot) {
473 * This is required to serialize against
474 * khugepaged_test_exit() (which is guaranteed to run
475 * under mmap sem read mode). Stop here (after we
476 * return all pagetables will be destroyed) until
477 * khugepaged has finished working on the pagetables
478 * under the mmap_sem.
480 down_write(&mm->mmap_sem);
481 up_write(&mm->mmap_sem);
485 static void release_pte_page(struct page *page)
487 dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
488 unlock_page(page);
489 putback_lru_page(page);
492 static void release_pte_pages(pte_t *pte, pte_t *_pte)
494 while (--_pte >= pte) {
495 pte_t pteval = *_pte;
496 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
497 release_pte_page(pte_page(pteval));
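/*
 * Check every pte in the range, lock its page and isolate it from the
 * LRU.  Returns 1 if all pages are suitable for collapse; on failure
 * everything isolated so far is put back via release_pte_pages().
 */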
501 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
502 unsigned long address,
503 pte_t *pte)
505 struct page *page = NULL;
506 pte_t *_pte;
507 int none_or_zero = 0, result = 0, referenced = 0;
508 bool writable = false;
510 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
511 _pte++, address += PAGE_SIZE) {
512 pte_t pteval = *_pte;
513 if (pte_none(pteval) || (pte_present(pteval) &&
514 is_zero_pfn(pte_pfn(pteval)))) {
515 if (!userfaultfd_armed(vma) &&
516 ++none_or_zero <= khugepaged_max_ptes_none) {
517 continue;
518 } else {
519 result = SCAN_EXCEED_NONE_PTE;
520 goto out;
523 if (!pte_present(pteval)) {
524 result = SCAN_PTE_NON_PRESENT;
525 goto out;
527 page = vm_normal_page(vma, address, pteval);
528 if (unlikely(!page)) {
529 result = SCAN_PAGE_NULL;
530 goto out;
533 /* TODO: teach khugepaged to collapse THP mapped with pte */
534 if (PageCompound(page)) {
535 result = SCAN_PAGE_COMPOUND;
536 goto out;
539 VM_BUG_ON_PAGE(!PageAnon(page), page);
542 * We can do it before isolate_lru_page because the
543 * page can't be freed from under us. NOTE: PG_lock
544 * is needed to serialize against split_huge_page
545 * when invoked from the VM.
547 if (!trylock_page(page)) {
548 result = SCAN_PAGE_LOCK;
549 goto out;
553 * cannot use mapcount: can't collapse if there's a gup pin.
554 * The page must only be referenced by the scanned process
555 * and page swap cache.
557 if (page_count(page) != 1 + PageSwapCache(page)) {
558 unlock_page(page);
559 result = SCAN_PAGE_COUNT;
560 goto out;
562 if (pte_write(pteval)) {
563 writable = true;
564 } else {
565 if (PageSwapCache(page) &&
566 !reuse_swap_page(page, NULL)) {
567 unlock_page(page);
568 result = SCAN_SWAP_CACHE_PAGE;
569 goto out;
572 * Page is not in the swap cache. It can be collapsed
573 * into a THP.
 578 * Isolate the page to avoid collapsing a hugepage
 579 * currently in use by the VM.
581 if (isolate_lru_page(page)) {
582 unlock_page(page);
583 result = SCAN_DEL_PAGE_LRU;
584 goto out;
586 inc_node_page_state(page,
587 NR_ISOLATED_ANON + page_is_file_cache(page));
588 VM_BUG_ON_PAGE(!PageLocked(page), page);
589 VM_BUG_ON_PAGE(PageLRU(page), page);
 591 /* There should be enough young ptes to collapse the page */
592 if (pte_young(pteval) ||
593 page_is_young(page) || PageReferenced(page) ||
594 mmu_notifier_test_young(vma->vm_mm, address))
595 referenced++;
597 if (likely(writable)) {
598 if (likely(referenced)) {
599 result = SCAN_SUCCEED;
600 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
601 referenced, writable, result);
602 return 1;
604 } else {
605 result = SCAN_PAGE_RO;
608 out:
609 release_pte_pages(pte, _pte);
610 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
611 referenced, writable, result);
612 return 0;
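/*
 * Copy the contents of the isolated small pages into the new huge page
 * and tear down the old ptes; pte_none and zero-pfn entries simply
 * become cleared pages in the copy.
 */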
615 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
616 struct vm_area_struct *vma,
617 unsigned long address,
618 spinlock_t *ptl)
620 pte_t *_pte;
621 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
622 _pte++, page++, address += PAGE_SIZE) {
623 pte_t pteval = *_pte;
624 struct page *src_page;
626 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
627 clear_user_highpage(page, address);
628 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
629 if (is_zero_pfn(pte_pfn(pteval))) {
631 * ptl mostly unnecessary.
633 spin_lock(ptl);
635 * paravirt calls inside pte_clear here are
636 * superfluous.
638 pte_clear(vma->vm_mm, address, _pte);
639 spin_unlock(ptl);
641 } else {
642 src_page = pte_page(pteval);
643 copy_user_highpage(page, src_page, address, vma);
644 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
645 release_pte_page(src_page);
647 * ptl mostly unnecessary, but preempt has to
648 * be disabled to update the per-cpu stats
649 * inside page_remove_rmap().
651 spin_lock(ptl);
653 * paravirt calls inside pte_clear here are
654 * superfluous.
656 pte_clear(vma->vm_mm, address, _pte);
657 page_remove_rmap(src_page, false);
658 spin_unlock(ptl);
659 free_page_and_swap_cache(src_page);
664 static void khugepaged_alloc_sleep(void)
666 DEFINE_WAIT(wait);
668 add_wait_queue(&khugepaged_wait, &wait);
669 freezable_schedule_timeout_interruptible(
670 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
671 remove_wait_queue(&khugepaged_wait, &wait);
674 static int khugepaged_node_load[MAX_NUMNODES];
676 static bool khugepaged_scan_abort(int nid)
678 int i;
681 * If node_reclaim_mode is disabled, then no extra effort is made to
682 * allocate memory locally.
684 if (!node_reclaim_mode)
685 return false;
687 /* If there is a count for this node already, it must be acceptable */
688 if (khugepaged_node_load[nid])
689 return false;
691 for (i = 0; i < MAX_NUMNODES; i++) {
692 if (!khugepaged_node_load[i])
693 continue;
694 if (node_distance(nid, i) > RECLAIM_DISTANCE)
695 return true;
697 return false;
700 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
701 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
703 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
706 #ifdef CONFIG_NUMA
707 static int khugepaged_find_target_node(void)
709 static int last_khugepaged_target_node = NUMA_NO_NODE;
710 int nid, target_node = 0, max_value = 0;
 712 /* find the first node with the highest hit count of normal pages */
713 for (nid = 0; nid < MAX_NUMNODES; nid++)
714 if (khugepaged_node_load[nid] > max_value) {
715 max_value = khugepaged_node_load[nid];
716 target_node = nid;
 719 /* do some balancing if several nodes have the same hit count */
720 if (target_node <= last_khugepaged_target_node)
721 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
722 nid++)
723 if (max_value == khugepaged_node_load[nid]) {
724 target_node = nid;
725 break;
728 last_khugepaged_target_node = target_node;
729 return target_node;
732 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
734 if (IS_ERR(*hpage)) {
735 if (!*wait)
736 return false;
738 *wait = false;
739 *hpage = NULL;
740 khugepaged_alloc_sleep();
741 } else if (*hpage) {
742 put_page(*hpage);
743 *hpage = NULL;
746 return true;
749 static struct page *
750 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
752 VM_BUG_ON_PAGE(*hpage, *hpage);
754 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
755 if (unlikely(!*hpage)) {
756 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
757 *hpage = ERR_PTR(-ENOMEM);
758 return NULL;
761 prep_transhuge_page(*hpage);
762 count_vm_event(THP_COLLAPSE_ALLOC);
763 return *hpage;
765 #else
766 static int khugepaged_find_target_node(void)
768 return 0;
771 static inline struct page *alloc_khugepaged_hugepage(void)
773 struct page *page;
775 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
776 HPAGE_PMD_ORDER);
777 if (page)
778 prep_transhuge_page(page);
779 return page;
782 static struct page *khugepaged_alloc_hugepage(bool *wait)
784 struct page *hpage;
786 do {
787 hpage = alloc_khugepaged_hugepage();
788 if (!hpage) {
789 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
790 if (!*wait)
791 return NULL;
793 *wait = false;
794 khugepaged_alloc_sleep();
795 } else
796 count_vm_event(THP_COLLAPSE_ALLOC);
797 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
799 return hpage;
802 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
804 if (!*hpage)
805 *hpage = khugepaged_alloc_hugepage(wait);
807 if (unlikely(!*hpage))
808 return false;
810 return true;
813 static struct page *
814 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
816 VM_BUG_ON(!*hpage);
818 return *hpage;
820 #endif
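/*
 * Decide whether a vma is eligible for khugepaged: THP must be allowed
 * for both the vma and the mm, shmem mappings need suitable alignment,
 * and anything else must be a plain anonymous mapping.
 */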
822 static bool hugepage_vma_check(struct vm_area_struct *vma)
824 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
825 (vma->vm_flags & VM_NOHUGEPAGE) ||
826 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
827 return false;
828 if (shmem_file(vma->vm_file)) {
829 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
830 return false;
831 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
832 HPAGE_PMD_NR);
834 if (!vma->anon_vma || vma->vm_ops)
835 return false;
836 if (is_vma_temporary_stack(vma))
837 return false;
838 return !(vma->vm_flags & VM_NO_KHUGEPAGED);
 842 * If the mmap_sem was temporarily dropped, revalidate the vma
 843 * after re-taking it.
 844 * Returns 0 on success, otherwise a non-zero
 845 * value (scan code).
848 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
849 struct vm_area_struct **vmap)
851 struct vm_area_struct *vma;
852 unsigned long hstart, hend;
854 if (unlikely(khugepaged_test_exit(mm)))
855 return SCAN_ANY_PROCESS;
857 *vmap = vma = find_vma(mm, address);
858 if (!vma)
859 return SCAN_VMA_NULL;
861 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
862 hend = vma->vm_end & HPAGE_PMD_MASK;
863 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
864 return SCAN_ADDRESS_RANGE;
865 if (!hugepage_vma_check(vma))
866 return SCAN_VMA_CHECK;
867 return 0;
871 * Bring missing pages in from swap, to complete THP collapse.
872 * Only done if khugepaged_scan_pmd believes it is worthwhile.
874 * Called and returns without pte mapped or spinlocks held,
875 * but with mmap_sem held to protect against vma changes.
878 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
879 struct vm_area_struct *vma,
880 unsigned long address, pmd_t *pmd,
881 int referenced)
883 int swapped_in = 0, ret = 0;
884 struct vm_fault vmf = {
885 .vma = vma,
886 .address = address,
887 .flags = FAULT_FLAG_ALLOW_RETRY,
888 .pmd = pmd,
889 .pgoff = linear_page_index(vma, address),
 892 /* we only decide to swap in if there are enough young ptes */
893 if (referenced < HPAGE_PMD_NR/2) {
894 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
895 return false;
897 vmf.pte = pte_offset_map(pmd, address);
898 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
899 vmf.pte++, vmf.address += PAGE_SIZE) {
900 vmf.orig_pte = *vmf.pte;
901 if (!is_swap_pte(vmf.orig_pte))
902 continue;
903 swapped_in++;
904 ret = do_swap_page(&vmf);
906 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
907 if (ret & VM_FAULT_RETRY) {
908 down_read(&mm->mmap_sem);
909 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
910 /* vma is no longer available, don't continue to swapin */
911 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
912 return false;
914 /* check if the pmd is still valid */
915 if (mm_find_pmd(mm, address) != pmd) {
916 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
917 return false;
920 if (ret & VM_FAULT_ERROR) {
921 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
922 return false;
924 /* pte is unmapped now, we need to map it */
925 vmf.pte = pte_offset_map(pmd, vmf.address);
927 vmf.pte--;
928 pte_unmap(vmf.pte);
929 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
930 return true;
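/*
 * Collapse a pmd-sized anonymous range into one huge page: allocate and
 * charge the new page, swap in missing ptes if worthwhile, then with
 * mmap_sem held for write isolate the old pages, copy them over and
 * install the huge pmd.
 */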
933 static void collapse_huge_page(struct mm_struct *mm,
934 unsigned long address,
935 struct page **hpage,
936 int node, int referenced)
938 pmd_t *pmd, _pmd;
939 pte_t *pte;
940 pgtable_t pgtable;
941 struct page *new_page;
942 spinlock_t *pmd_ptl, *pte_ptl;
943 int isolated = 0, result = 0;
944 struct mem_cgroup *memcg;
945 struct vm_area_struct *vma;
946 unsigned long mmun_start; /* For mmu_notifiers */
947 unsigned long mmun_end; /* For mmu_notifiers */
948 gfp_t gfp;
950 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
952 /* Only allocate from the target node */
953 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
956 * Before allocating the hugepage, release the mmap_sem read lock.
957 * The allocation can take potentially a long time if it involves
958 * sync compaction, and we do not need to hold the mmap_sem during
959 * that. We will recheck the vma after taking it again in write mode.
961 up_read(&mm->mmap_sem);
962 new_page = khugepaged_alloc_page(hpage, gfp, node);
963 if (!new_page) {
964 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
965 goto out_nolock;
968 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
969 result = SCAN_CGROUP_CHARGE_FAIL;
970 goto out_nolock;
973 down_read(&mm->mmap_sem);
974 result = hugepage_vma_revalidate(mm, address, &vma);
975 if (result) {
976 mem_cgroup_cancel_charge(new_page, memcg, true);
977 up_read(&mm->mmap_sem);
978 goto out_nolock;
981 pmd = mm_find_pmd(mm, address);
982 if (!pmd) {
983 result = SCAN_PMD_NULL;
984 mem_cgroup_cancel_charge(new_page, memcg, true);
985 up_read(&mm->mmap_sem);
986 goto out_nolock;
990 * __collapse_huge_page_swapin always returns with mmap_sem locked.
991 * If it fails, we release mmap_sem and jump out_nolock.
992 * Continuing to collapse causes inconsistency.
994 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
995 mem_cgroup_cancel_charge(new_page, memcg, true);
996 up_read(&mm->mmap_sem);
997 goto out_nolock;
1000 up_read(&mm->mmap_sem);
1002 * Prevent all access to pagetables with the exception of
1003 * gup_fast later handled by the ptep_clear_flush and the VM
1004 * handled by the anon_vma lock + PG_lock.
1006 down_write(&mm->mmap_sem);
1007 result = hugepage_vma_revalidate(mm, address, &vma);
1008 if (result)
1009 goto out;
1010 /* check if the pmd is still valid */
1011 if (mm_find_pmd(mm, address) != pmd)
1012 goto out;
1014 anon_vma_lock_write(vma->anon_vma);
1016 pte = pte_offset_map(pmd, address);
1017 pte_ptl = pte_lockptr(mm, pmd);
1019 mmun_start = address;
1020 mmun_end = address + HPAGE_PMD_SIZE;
1021 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1022 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1024 * After this gup_fast can't run anymore. This also removes
1025 * any huge TLB entry from the CPU so we won't allow
1026 * huge and small TLB entries for the same virtual address
1027 * to avoid the risk of CPU bugs in that area.
1029 _pmd = pmdp_collapse_flush(vma, address, pmd);
1030 spin_unlock(pmd_ptl);
1031 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1033 spin_lock(pte_ptl);
1034 isolated = __collapse_huge_page_isolate(vma, address, pte);
1035 spin_unlock(pte_ptl);
1037 if (unlikely(!isolated)) {
1038 pte_unmap(pte);
1039 spin_lock(pmd_ptl);
1040 BUG_ON(!pmd_none(*pmd));
 1042 * We can only use set_pmd_at when establishing
 1043 * hugepmds and never for establishing regular pmds that
 1044 * point to regular pagetables. Use pmd_populate for that.
1046 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1047 spin_unlock(pmd_ptl);
1048 anon_vma_unlock_write(vma->anon_vma);
1049 result = SCAN_FAIL;
1050 goto out;
1054 * All pages are isolated and locked so anon_vma rmap
1055 * can't run anymore.
1057 anon_vma_unlock_write(vma->anon_vma);
1059 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1060 pte_unmap(pte);
1061 __SetPageUptodate(new_page);
1062 pgtable = pmd_pgtable(_pmd);
1064 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1065 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
 1068 * spin_lock() below is not the equivalent of smp_wmb(), so
 1069 * this is needed to keep the copy_huge_page writes from becoming
 1070 * visible after the set_pmd_at() write.
1072 smp_wmb();
1074 spin_lock(pmd_ptl);
1075 BUG_ON(!pmd_none(*pmd));
1076 page_add_new_anon_rmap(new_page, vma, address, true);
1077 mem_cgroup_commit_charge(new_page, memcg, false, true);
1078 lru_cache_add_active_or_unevictable(new_page, vma);
1079 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1080 set_pmd_at(mm, address, pmd, _pmd);
1081 update_mmu_cache_pmd(vma, address, pmd);
1082 spin_unlock(pmd_ptl);
1084 *hpage = NULL;
1086 khugepaged_pages_collapsed++;
1087 result = SCAN_SUCCEED;
1088 out_up_write:
1089 up_write(&mm->mmap_sem);
1090 out_nolock:
1091 trace_mm_collapse_huge_page(mm, isolated, result);
1092 return;
1093 out:
1094 mem_cgroup_cancel_charge(new_page, memcg, true);
1095 goto out_up_write;
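/*
 * Scan one pmd-sized range and decide whether it is worth collapsing.
 * Returns 1 (and calls collapse_huge_page(), which drops mmap_sem) if a
 * collapse was attempted, 0 otherwise.
 */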
1098 static int khugepaged_scan_pmd(struct mm_struct *mm,
1099 struct vm_area_struct *vma,
1100 unsigned long address,
1101 struct page **hpage)
1103 pmd_t *pmd;
1104 pte_t *pte, *_pte;
1105 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1106 struct page *page = NULL;
1107 unsigned long _address;
1108 spinlock_t *ptl;
1109 int node = NUMA_NO_NODE, unmapped = 0;
1110 bool writable = false;
1112 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1114 pmd = mm_find_pmd(mm, address);
1115 if (!pmd) {
1116 result = SCAN_PMD_NULL;
1117 goto out;
1120 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1121 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1122 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1123 _pte++, _address += PAGE_SIZE) {
1124 pte_t pteval = *_pte;
1125 if (is_swap_pte(pteval)) {
1126 if (++unmapped <= khugepaged_max_ptes_swap) {
1127 continue;
1128 } else {
1129 result = SCAN_EXCEED_SWAP_PTE;
1130 goto out_unmap;
1133 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1134 if (!userfaultfd_armed(vma) &&
1135 ++none_or_zero <= khugepaged_max_ptes_none) {
1136 continue;
1137 } else {
1138 result = SCAN_EXCEED_NONE_PTE;
1139 goto out_unmap;
1142 if (!pte_present(pteval)) {
1143 result = SCAN_PTE_NON_PRESENT;
1144 goto out_unmap;
1146 if (pte_write(pteval))
1147 writable = true;
1149 page = vm_normal_page(vma, _address, pteval);
1150 if (unlikely(!page)) {
1151 result = SCAN_PAGE_NULL;
1152 goto out_unmap;
1155 /* TODO: teach khugepaged to collapse THP mapped with pte */
1156 if (PageCompound(page)) {
1157 result = SCAN_PAGE_COMPOUND;
1158 goto out_unmap;
 1162 * Record which node the original page is from and save this
 1163 * information to khugepaged_node_load[].
 1164 * Khugepaged will allocate the hugepage from the node with the
 1165 * highest hit count.
1167 node = page_to_nid(page);
1168 if (khugepaged_scan_abort(node)) {
1169 result = SCAN_SCAN_ABORT;
1170 goto out_unmap;
1172 khugepaged_node_load[node]++;
1173 if (!PageLRU(page)) {
1174 result = SCAN_PAGE_LRU;
1175 goto out_unmap;
1177 if (PageLocked(page)) {
1178 result = SCAN_PAGE_LOCK;
1179 goto out_unmap;
1181 if (!PageAnon(page)) {
1182 result = SCAN_PAGE_ANON;
1183 goto out_unmap;
1187 * cannot use mapcount: can't collapse if there's a gup pin.
1188 * The page must only be referenced by the scanned process
1189 * and page swap cache.
1191 if (page_count(page) != 1 + PageSwapCache(page)) {
1192 result = SCAN_PAGE_COUNT;
1193 goto out_unmap;
1195 if (pte_young(pteval) ||
1196 page_is_young(page) || PageReferenced(page) ||
1197 mmu_notifier_test_young(vma->vm_mm, address))
1198 referenced++;
1200 if (writable) {
1201 if (referenced) {
1202 result = SCAN_SUCCEED;
1203 ret = 1;
1204 } else {
1205 result = SCAN_LACK_REFERENCED_PAGE;
1207 } else {
1208 result = SCAN_PAGE_RO;
1210 out_unmap:
1211 pte_unmap_unlock(pte, ptl);
1212 if (ret) {
1213 node = khugepaged_find_target_node();
1214 /* collapse_huge_page will return with the mmap_sem released */
1215 collapse_huge_page(mm, address, hpage, node, referenced);
1217 out:
1218 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1219 none_or_zero, result, unmapped);
1220 return ret;
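/*
 * Drop an mm_slot whose mm has exited: unhash it, free it and release
 * the mm reference taken in __khugepaged_enter().
 */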
1223 static void collect_mm_slot(struct mm_slot *mm_slot)
1225 struct mm_struct *mm = mm_slot->mm;
1227 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1229 if (khugepaged_test_exit(mm)) {
1230 /* free mm_slot */
1231 hash_del(&mm_slot->hash);
1232 list_del(&mm_slot->mm_node);
1235 * Not strictly needed because the mm exited already.
1237 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1240 /* khugepaged_mm_lock actually not necessary for the below */
1241 free_mm_slot(mm_slot);
1242 mmdrop(mm);
1246 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
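/*
 * Drop the pte page tables of vmas that map the collapsed range, so the
 * next fault can install a huge pmd instead of the old small ptes.
 */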
1247 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1249 struct vm_area_struct *vma;
1250 unsigned long addr;
1251 pmd_t *pmd, _pmd;
1253 i_mmap_lock_write(mapping);
1254 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1255 /* probably overkill */
1256 if (vma->anon_vma)
1257 continue;
1258 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1259 if (addr & ~HPAGE_PMD_MASK)
1260 continue;
1261 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1262 continue;
1263 pmd = mm_find_pmd(vma->vm_mm, addr);
1264 if (!pmd)
1265 continue;
1267 * We need exclusive mmap_sem to retract page table.
1268 * If trylock fails we would end up with pte-mapped THP after
1269 * re-fault. Not ideal, but it's more important to not disturb
1270 * the system too much.
1272 if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1273 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1274 /* assume page table is clear */
1275 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1276 spin_unlock(ptl);
1277 up_write(&vma->vm_mm->mmap_sem);
1278 mm_dec_nr_ptes(vma->vm_mm);
1279 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1282 i_mmap_unlock_write(mapping);
 1286 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
 1288 * The basic scheme is simple, the details are more complex:
 1289 * - allocate and freeze a new huge page;
 1290 * - scan over the radix tree, replacing old pages with the new one:
 1291 *   + swap in pages if necessary;
 1292 *   + fill in gaps;
 1293 *   + keep old pages around in case rollback is required;
 1294 * - if replacing succeeds:
 1295 *   + copy data over;
 1296 *   + free old pages;
 1297 *   + unfreeze huge page;
 1298 * - if replacing fails:
 1299 *   + put all pages back and unfreeze them;
 1300 *   + restore gaps in the radix-tree;
 1301 *   + free huge page;
1303 static void collapse_shmem(struct mm_struct *mm,
1304 struct address_space *mapping, pgoff_t start,
1305 struct page **hpage, int node)
1307 gfp_t gfp;
1308 struct page *page, *new_page, *tmp;
1309 struct mem_cgroup *memcg;
1310 pgoff_t index, end = start + HPAGE_PMD_NR;
1311 LIST_HEAD(pagelist);
1312 struct radix_tree_iter iter;
1313 void **slot;
1314 int nr_none = 0, result = SCAN_SUCCEED;
1316 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1318 /* Only allocate from the target node */
1319 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1321 new_page = khugepaged_alloc_page(hpage, gfp, node);
1322 if (!new_page) {
1323 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1324 goto out;
1327 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1328 result = SCAN_CGROUP_CHARGE_FAIL;
1329 goto out;
1332 new_page->index = start;
1333 new_page->mapping = mapping;
1334 __SetPageSwapBacked(new_page);
1335 __SetPageLocked(new_page);
1336 BUG_ON(!page_ref_freeze(new_page, 1));
 1340 * At this point the new_page is 'frozen' (page_count() is zero), locked
 1341 * and not up-to-date. It's safe to insert it into the radix tree, because
 1342 * nobody would be able to map it or use it in any other way until we
 1343 * unfreeze it.
1346 index = start;
1347 xa_lock_irq(&mapping->i_pages);
1348 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1349 int n = min(iter.index, end) - index;
 1352 * Handle holes in the radix tree: charge them to shmem and
 1353 * insert the relevant subpages of new_page into the radix-tree.
1355 if (n && !shmem_charge(mapping->host, n)) {
1356 result = SCAN_FAIL;
1357 break;
1359 nr_none += n;
1360 for (; index < min(iter.index, end); index++) {
1361 radix_tree_insert(&mapping->i_pages, index,
1362 new_page + (index % HPAGE_PMD_NR));
1365 /* We are done. */
1366 if (index >= end)
1367 break;
1369 page = radix_tree_deref_slot_protected(slot,
1370 &mapping->i_pages.xa_lock);
1371 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1372 xa_unlock_irq(&mapping->i_pages);
1373 /* swap in or instantiate fallocated page */
1374 if (shmem_getpage(mapping->host, index, &page,
1375 SGP_NOHUGE)) {
1376 result = SCAN_FAIL;
1377 goto tree_unlocked;
1379 xa_lock_irq(&mapping->i_pages);
1380 } else if (trylock_page(page)) {
1381 get_page(page);
1382 } else {
1383 result = SCAN_PAGE_LOCK;
1384 break;
1388 * The page must be locked, so we can drop the i_pages lock
1389 * without racing with truncate.
1391 VM_BUG_ON_PAGE(!PageLocked(page), page);
1392 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1393 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1395 if (page_mapping(page) != mapping) {
1396 result = SCAN_TRUNCATED;
1397 goto out_unlock;
1399 xa_unlock_irq(&mapping->i_pages);
1401 if (isolate_lru_page(page)) {
1402 result = SCAN_DEL_PAGE_LRU;
1403 goto out_isolate_failed;
1406 if (page_mapped(page))
1407 unmap_mapping_pages(mapping, index, 1, false);
1409 xa_lock_irq(&mapping->i_pages);
1411 slot = radix_tree_lookup_slot(&mapping->i_pages, index);
1412 VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1413 &mapping->i_pages.xa_lock), page);
1414 VM_BUG_ON_PAGE(page_mapped(page), page);
1417 * The page is expected to have page_count() == 3:
1418 * - we hold a pin on it;
1419 * - one reference from radix tree;
1420 * - one from isolate_lru_page;
1422 if (!page_ref_freeze(page, 3)) {
1423 result = SCAN_PAGE_COUNT;
1424 goto out_lru;
 1428 * Add the page to the list to be able to undo the collapse if
 1429 * something goes wrong.
1431 list_add_tail(&page->lru, &pagelist);
1433 /* Finally, replace with the new page. */
1434 radix_tree_replace_slot(&mapping->i_pages, slot,
1435 new_page + (index % HPAGE_PMD_NR));
1437 slot = radix_tree_iter_resume(slot, &iter);
1438 index++;
1439 continue;
1440 out_lru:
1441 xa_unlock_irq(&mapping->i_pages);
1442 putback_lru_page(page);
1443 out_isolate_failed:
1444 unlock_page(page);
1445 put_page(page);
1446 goto tree_unlocked;
1447 out_unlock:
1448 unlock_page(page);
1449 put_page(page);
1450 break;
 1454 * Handle the hole in the radix tree at the end of the range.
 1455 * This code only triggers if there's nothing in the radix tree
 1456 * beyond 'end'.
1458 if (result == SCAN_SUCCEED && index < end) {
1459 int n = end - index;
1461 if (!shmem_charge(mapping->host, n)) {
1462 result = SCAN_FAIL;
1463 goto tree_locked;
1466 for (; index < end; index++) {
1467 radix_tree_insert(&mapping->i_pages, index,
1468 new_page + (index % HPAGE_PMD_NR));
1470 nr_none += n;
1473 tree_locked:
1474 xa_unlock_irq(&mapping->i_pages);
1475 tree_unlocked:
1477 if (result == SCAN_SUCCEED) {
1478 unsigned long flags;
1479 struct zone *zone = page_zone(new_page);
 1482 * Replacing the old pages with the new one has succeeded; now we need
 1483 * to copy the content and free the old pages.
1485 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1486 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1487 page);
1488 list_del(&page->lru);
1489 unlock_page(page);
1490 page_ref_unfreeze(page, 1);
1491 page->mapping = NULL;
1492 ClearPageActive(page);
1493 ClearPageUnevictable(page);
1494 put_page(page);
1497 local_irq_save(flags);
1498 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1499 if (nr_none) {
1500 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1501 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1503 local_irq_restore(flags);
 1506 * Remove pte page tables, so we can re-fault
 1507 * the page as huge.
1509 retract_page_tables(mapping, start);
1511 /* Everything is ready, let's unfreeze the new_page */
1512 set_page_dirty(new_page);
1513 SetPageUptodate(new_page);
1514 page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1515 mem_cgroup_commit_charge(new_page, memcg, false, true);
1516 lru_cache_add_anon(new_page);
1517 unlock_page(new_page);
1519 *hpage = NULL;
1520 } else {
1521 /* Something went wrong: rollback changes to the radix-tree */
1522 shmem_uncharge(mapping->host, nr_none);
1523 xa_lock_irq(&mapping->i_pages);
1524 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1525 if (iter.index >= end)
1526 break;
1527 page = list_first_entry_or_null(&pagelist,
1528 struct page, lru);
1529 if (!page || iter.index < page->index) {
1530 if (!nr_none)
1531 break;
1532 nr_none--;
1533 /* Put holes back where they were */
1534 radix_tree_delete(&mapping->i_pages, iter.index);
1535 continue;
1538 VM_BUG_ON_PAGE(page->index != iter.index, page);
1540 /* Unfreeze the page. */
1541 list_del(&page->lru);
1542 page_ref_unfreeze(page, 2);
1543 radix_tree_replace_slot(&mapping->i_pages, slot, page);
1544 slot = radix_tree_iter_resume(slot, &iter);
1545 xa_unlock_irq(&mapping->i_pages);
1546 putback_lru_page(page);
1547 unlock_page(page);
1548 xa_lock_irq(&mapping->i_pages);
1550 VM_BUG_ON(nr_none);
1551 xa_unlock_irq(&mapping->i_pages);
 1553 /* Unfreeze new_page; the caller will take care of freeing it */
1554 page_ref_unfreeze(new_page, 1);
1555 mem_cgroup_cancel_charge(new_page, memcg, true);
1556 unlock_page(new_page);
1557 new_page->mapping = NULL;
1559 out:
1560 VM_BUG_ON(!list_empty(&pagelist));
1561 /* TODO: tracepoints */
1564 static void khugepaged_scan_shmem(struct mm_struct *mm,
1565 struct address_space *mapping,
1566 pgoff_t start, struct page **hpage)
1568 struct page *page = NULL;
1569 struct radix_tree_iter iter;
1570 void **slot;
1571 int present, swap;
1572 int node = NUMA_NO_NODE;
1573 int result = SCAN_SUCCEED;
1575 present = 0;
1576 swap = 0;
1577 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1578 rcu_read_lock();
1579 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1580 if (iter.index >= start + HPAGE_PMD_NR)
1581 break;
1583 page = radix_tree_deref_slot(slot);
1584 if (radix_tree_deref_retry(page)) {
1585 slot = radix_tree_iter_retry(&iter);
1586 continue;
1589 if (radix_tree_exception(page)) {
1590 if (++swap > khugepaged_max_ptes_swap) {
1591 result = SCAN_EXCEED_SWAP_PTE;
1592 break;
1594 continue;
1597 if (PageTransCompound(page)) {
1598 result = SCAN_PAGE_COMPOUND;
1599 break;
1602 node = page_to_nid(page);
1603 if (khugepaged_scan_abort(node)) {
1604 result = SCAN_SCAN_ABORT;
1605 break;
1607 khugepaged_node_load[node]++;
1609 if (!PageLRU(page)) {
1610 result = SCAN_PAGE_LRU;
1611 break;
1614 if (page_count(page) != 1 + page_mapcount(page)) {
1615 result = SCAN_PAGE_COUNT;
1616 break;
1620 * We probably should check if the page is referenced here, but
1621 * nobody would transfer pte_young() to PageReferenced() for us.
1622 * And rmap walk here is just too costly...
1625 present++;
1627 if (need_resched()) {
1628 slot = radix_tree_iter_resume(slot, &iter);
1629 cond_resched_rcu();
1632 rcu_read_unlock();
1634 if (result == SCAN_SUCCEED) {
1635 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1636 result = SCAN_EXCEED_NONE_PTE;
1637 } else {
1638 node = khugepaged_find_target_node();
1639 collapse_shmem(mm, mapping, start, hpage, node);
1643 /* TODO: tracepoints */
1645 #else
1646 static void khugepaged_scan_shmem(struct mm_struct *mm,
1647 struct address_space *mapping,
1648 pgoff_t start, struct page **hpage)
1650 BUILD_BUG();
1652 #endif
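/*
 * Scan up to 'pages' pages starting from the saved cursor, moving on to
 * the next mm_slot when a mm is exhausted or exiting.  Called and
 * returns with khugepaged_mm_lock held; the lock is dropped while the
 * vmas themselves are scanned.
 */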
1654 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1655 struct page **hpage)
1656 __releases(&khugepaged_mm_lock)
1657 __acquires(&khugepaged_mm_lock)
1659 struct mm_slot *mm_slot;
1660 struct mm_struct *mm;
1661 struct vm_area_struct *vma;
1662 int progress = 0;
1664 VM_BUG_ON(!pages);
1665 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1667 if (khugepaged_scan.mm_slot)
1668 mm_slot = khugepaged_scan.mm_slot;
1669 else {
1670 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1671 struct mm_slot, mm_node);
1672 khugepaged_scan.address = 0;
1673 khugepaged_scan.mm_slot = mm_slot;
1675 spin_unlock(&khugepaged_mm_lock);
1677 mm = mm_slot->mm;
1679 * Don't wait for semaphore (to avoid long wait times). Just move to
1680 * the next mm on the list.
1682 vma = NULL;
1683 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1684 goto breakouterloop_mmap_sem;
1685 if (likely(!khugepaged_test_exit(mm)))
1686 vma = find_vma(mm, khugepaged_scan.address);
1688 progress++;
1689 for (; vma; vma = vma->vm_next) {
1690 unsigned long hstart, hend;
1692 cond_resched();
1693 if (unlikely(khugepaged_test_exit(mm))) {
1694 progress++;
1695 break;
1697 if (!hugepage_vma_check(vma)) {
1698 skip:
1699 progress++;
1700 continue;
1702 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1703 hend = vma->vm_end & HPAGE_PMD_MASK;
1704 if (hstart >= hend)
1705 goto skip;
1706 if (khugepaged_scan.address > hend)
1707 goto skip;
1708 if (khugepaged_scan.address < hstart)
1709 khugepaged_scan.address = hstart;
1710 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1712 while (khugepaged_scan.address < hend) {
1713 int ret;
1714 cond_resched();
1715 if (unlikely(khugepaged_test_exit(mm)))
1716 goto breakouterloop;
1718 VM_BUG_ON(khugepaged_scan.address < hstart ||
1719 khugepaged_scan.address + HPAGE_PMD_SIZE >
1720 hend);
1721 if (shmem_file(vma->vm_file)) {
1722 struct file *file;
1723 pgoff_t pgoff = linear_page_index(vma,
1724 khugepaged_scan.address);
1725 if (!shmem_huge_enabled(vma))
1726 goto skip;
1727 file = get_file(vma->vm_file);
1728 up_read(&mm->mmap_sem);
1729 ret = 1;
1730 khugepaged_scan_shmem(mm, file->f_mapping,
1731 pgoff, hpage);
1732 fput(file);
1733 } else {
1734 ret = khugepaged_scan_pmd(mm, vma,
1735 khugepaged_scan.address,
1736 hpage);
1738 /* move to next address */
1739 khugepaged_scan.address += HPAGE_PMD_SIZE;
1740 progress += HPAGE_PMD_NR;
1741 if (ret)
1742 /* we released mmap_sem so break loop */
1743 goto breakouterloop_mmap_sem;
1744 if (progress >= pages)
1745 goto breakouterloop;
1748 breakouterloop:
1749 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1750 breakouterloop_mmap_sem:
1752 spin_lock(&khugepaged_mm_lock);
1753 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1755 * Release the current mm_slot if this mm is about to die, or
1756 * if we scanned all vmas of this mm.
1758 if (khugepaged_test_exit(mm) || !vma) {
1760 * Make sure that if mm_users is reaching zero while
1761 * khugepaged runs here, khugepaged_exit will find
1762 * mm_slot not pointing to the exiting mm.
1764 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1765 khugepaged_scan.mm_slot = list_entry(
1766 mm_slot->mm_node.next,
1767 struct mm_slot, mm_node);
1768 khugepaged_scan.address = 0;
1769 } else {
1770 khugepaged_scan.mm_slot = NULL;
1771 khugepaged_full_scans++;
1774 collect_mm_slot(mm_slot);
1777 return progress;
1780 static int khugepaged_has_work(void)
1782 return !list_empty(&khugepaged_scan.mm_head) &&
1783 khugepaged_enabled();
1786 static int khugepaged_wait_event(void)
1788 return !list_empty(&khugepaged_scan.mm_head) ||
1789 kthread_should_stop();
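/*
 * One scan pass of the khugepaged thread: preallocate a huge page when
 * needed and scan up to khugepaged_pages_to_scan pages, with at most
 * two passes through the head of the mm list.
 */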
1792 static void khugepaged_do_scan(void)
1794 struct page *hpage = NULL;
1795 unsigned int progress = 0, pass_through_head = 0;
1796 unsigned int pages = khugepaged_pages_to_scan;
1797 bool wait = true;
1799 barrier(); /* write khugepaged_pages_to_scan to local stack */
1801 while (progress < pages) {
1802 if (!khugepaged_prealloc_page(&hpage, &wait))
1803 break;
1805 cond_resched();
1807 if (unlikely(kthread_should_stop() || try_to_freeze()))
1808 break;
1810 spin_lock(&khugepaged_mm_lock);
1811 if (!khugepaged_scan.mm_slot)
1812 pass_through_head++;
1813 if (khugepaged_has_work() &&
1814 pass_through_head < 2)
1815 progress += khugepaged_scan_mm_slot(pages - progress,
1816 &hpage);
1817 else
1818 progress = pages;
1819 spin_unlock(&khugepaged_mm_lock);
1822 if (!IS_ERR_OR_NULL(hpage))
1823 put_page(hpage);
1826 static bool khugepaged_should_wakeup(void)
1828 return kthread_should_stop() ||
1829 time_after_eq(jiffies, khugepaged_sleep_expire);
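/*
 * Sleep between passes: a freezable timeout while there is pending
 * work, otherwise wait until woken up or asked to stop.
 */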
1832 static void khugepaged_wait_work(void)
1834 if (khugepaged_has_work()) {
1835 const unsigned long scan_sleep_jiffies =
1836 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1838 if (!scan_sleep_jiffies)
1839 return;
1841 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1842 wait_event_freezable_timeout(khugepaged_wait,
1843 khugepaged_should_wakeup(),
1844 scan_sleep_jiffies);
1845 return;
1848 if (khugepaged_enabled())
1849 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1852 static int khugepaged(void *none)
1854 struct mm_slot *mm_slot;
1856 set_freezable();
1857 set_user_nice(current, MAX_NICE);
1859 while (!kthread_should_stop()) {
1860 khugepaged_do_scan();
1861 khugepaged_wait_work();
1864 spin_lock(&khugepaged_mm_lock);
1865 mm_slot = khugepaged_scan.mm_slot;
1866 khugepaged_scan.mm_slot = NULL;
1867 if (mm_slot)
1868 collect_mm_slot(mm_slot);
1869 spin_unlock(&khugepaged_mm_lock);
1870 return 0;
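/*
 * Raise min_free_kbytes so that enough whole pageblocks stay free for
 * huge page allocations to have a reasonable chance of succeeding.
 */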
1873 static void set_recommended_min_free_kbytes(void)
1875 struct zone *zone;
1876 int nr_zones = 0;
1877 unsigned long recommended_min;
1879 for_each_populated_zone(zone) {
1881 * We don't need to worry about fragmentation of
1882 * ZONE_MOVABLE since it only has movable pages.
1884 if (zone_idx(zone) > gfp_zone(GFP_USER))
1885 continue;
1887 nr_zones++;
1890 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1891 recommended_min = pageblock_nr_pages * nr_zones * 2;
 1894 * Make sure that on average at least two pageblocks are almost free
 1895 * of another type, one for a migratetype to fall back to and a
 1896 * second to avoid subsequent fallbacks of other types. There are 3
 1897 * MIGRATE_TYPES we care about.
1899 recommended_min += pageblock_nr_pages * nr_zones *
1900 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
 1902 /* don't ever allow reserving more than 5% of the lowmem */
1903 recommended_min = min(recommended_min,
1904 (unsigned long) nr_free_buffer_pages() / 20);
1905 recommended_min <<= (PAGE_SHIFT-10);
1907 if (recommended_min > min_free_kbytes) {
1908 if (user_min_free_kbytes >= 0)
1909 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1910 min_free_kbytes, recommended_min);
1912 min_free_kbytes = recommended_min;
1914 setup_per_zone_wmarks();
1917 int start_stop_khugepaged(void)
1919 static struct task_struct *khugepaged_thread __read_mostly;
1920 static DEFINE_MUTEX(khugepaged_mutex);
1921 int err = 0;
1923 mutex_lock(&khugepaged_mutex);
1924 if (khugepaged_enabled()) {
1925 if (!khugepaged_thread)
1926 khugepaged_thread = kthread_run(khugepaged, NULL,
1927 "khugepaged");
1928 if (IS_ERR(khugepaged_thread)) {
1929 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1930 err = PTR_ERR(khugepaged_thread);
1931 khugepaged_thread = NULL;
1932 goto fail;
1935 if (!list_empty(&khugepaged_scan.mm_head))
1936 wake_up_interruptible(&khugepaged_wait);
1938 set_recommended_min_free_kbytes();
1939 } else if (khugepaged_thread) {
1940 kthread_stop(khugepaged_thread);
1941 khugepaged_thread = NULL;
1943 fail:
1944 mutex_unlock(&khugepaged_mutex);
1945 return err;