/*
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 *
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 *
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 *
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */
#include <linux/kernel_stat.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);
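/*
 * Illustrative note: booting with "norandmaps" on the kernel command line
 * runs the handler registered above and turns address-space randomization
 * off for every process.
 */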
unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);
#if defined(SPLIT_RSS_COUNTING)

void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */
#ifdef HAVE_GENERIC_MMU_GATHER

static int tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return 1;
	}

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return 0;

	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return 1;
}
/*
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @fullmm argument is used when @mm is without
 * users and we're going to destroy the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
{
	tlb->mm = mm;

	tlb->fullmm     = fullmm;
	tlb->fast_mode  = (num_possible_cpus() == 1);
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}
void tlb_flush_mmu(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif

	if (tlb_fast_mode(tlb))
		return;

	for (batch = &tlb->local; batch; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}
/*
 * Called at the end of the shootdown operation to free up any resources
 * that were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	struct mmu_gather_batch *batch, *next;

	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}
/*
 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 * handling the additional races in SMP caused by other CPUs caching valid
 * mappings in their TLBs. Returns the number of free page slots left.
 * When out of page slots we must call tlb_flush_mmu().
 */
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->need_flush);

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return 1; /* avoid calling tlb_flush_mmu() */
	}

	batch = tlb->active;
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return 0;
		batch = tlb->active;
	}
	VM_BUG_ON(batch->nr > batch->max);

	return batch->max - batch->nr;
}

#endif /* HAVE_GENERIC_MMU_GATHER */
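/*
 * Illustrative sketch only (it mirrors zap_page_range() further down in
 * this file): the usual mmu_gather life cycle batches page freeing
 * between tlb_gather_mmu() and tlb_finish_mmu():
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	update_hiwater_rss(mm);
 *	unmap_vmas(&tlb, vma, start, end);
 *	tlb_finish_mmu(&tlb, start, end);
 */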
#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	/*
	 * When there's less than two users of this mm there cannot be a
	 * concurrent page-table walk.
	 */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		__tlb_remove_table(table);
		return;
	}

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
}
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	ceiling &= PGDIR_MASK;
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
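	/*
	 * Worked example of the "- 1" trick above (illustrative note only):
	 * with ceiling == 0, meaning "the top of the address space",
	 * ceiling - 1 is ~0UL, so "end - 1 > ceiling - 1" can never be true
	 * and end is left alone; any non-zero ceiling can genuinely clip end.
	 */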
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	int wait_split_huge_page;
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	wait_split_huge_page = 0;
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate(mm, pmd, new);
		new = NULL;
	} else if (unlikely(pmd_trans_splitting(*pmd)))
		wait_split_huge_page = 1;
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);
	if (wait_split_huge_page)
		wait_split_huge_page(vma->anon_vma, pmd);
	return 0;
}
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	} else
		VM_BUG_ON(pmd_trans_splitting(*pmd));
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}
static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}
/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
				"BUG: Bad page map: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	printk(KERN_ALERT
		"BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		current->comm,
		(long long)pte_val(pte), (long long)pmd_val(*pmd));
	printk(KERN_ALERT
		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	/*
	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
	 */
	if (vma->vm_ops)
		print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
				(unsigned long)vma->vm_ops->fault);
	if (vma->vm_file && vma->vm_file->f_op)
		print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
				(unsigned long)vma->vm_file->f_op->mmap);
	dump_stack();
	add_taint(TAINT_BAD_PAGE);
}
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
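/*
 * Illustrative note: the test above reads "VM_MAYWRITE set but VM_SHARED
 * clear", i.e. ordinary MAP_PRIVATE (anonymous or file-backed) mappings
 * are COW candidates, while MAP_SHARED mappings never are.
 */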
/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (!is_zero_pfn(pfn))
			print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;
check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
827 unsigned long vm_flags
= vma
->vm_flags
;
828 pte_t pte
= *src_pte
;
831 /* pte contains position in swap or file, so copy. */
832 if (unlikely(!pte_present(pte
))) {
833 if (!pte_file(pte
)) {
834 swp_entry_t entry
= pte_to_swp_entry(pte
);
836 if (swap_duplicate(entry
) < 0)
839 /* make sure dst_mm is on swapoff's mmlist. */
840 if (unlikely(list_empty(&dst_mm
->mmlist
))) {
841 spin_lock(&mmlist_lock
);
842 if (list_empty(&dst_mm
->mmlist
))
843 list_add(&dst_mm
->mmlist
,
845 spin_unlock(&mmlist_lock
);
847 if (likely(!non_swap_entry(entry
)))
849 else if (is_migration_entry(entry
)) {
850 page
= migration_entry_to_page(entry
);
857 if (is_write_migration_entry(entry
) &&
858 is_cow_mapping(vm_flags
)) {
860 * COW mappings require pages in both
861 * parent and child to be set to read.
863 make_migration_entry_read(&entry
);
864 pte
= swp_entry_to_pte(entry
);
865 set_pte_at(src_mm
, addr
, src_pte
, pte
);
873 * If it's a COW mapping, write protect it both
874 * in the parent and the child
876 if (is_cow_mapping(vm_flags
)) {
877 ptep_set_wrprotect(src_mm
, addr
, src_pte
);
878 pte
= pte_wrprotect(pte
);
882 * If it's a shared mapping, mark it clean in
885 if (vm_flags
& VM_SHARED
)
886 pte
= pte_mkclean(pte
);
887 pte
= pte_mkold(pte
);
889 page
= vm_normal_page(vma
, addr
, pte
);
900 set_pte_at(dst_mm
, addr
, dst_pte
, pte
);
904 int copy_pte_range(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
905 pmd_t
*dst_pmd
, pmd_t
*src_pmd
, struct vm_area_struct
*vma
,
906 unsigned long addr
, unsigned long end
)
908 pte_t
*orig_src_pte
, *orig_dst_pte
;
909 pte_t
*src_pte
, *dst_pte
;
910 spinlock_t
*src_ptl
, *dst_ptl
;
912 int rss
[NR_MM_COUNTERS
];
913 swp_entry_t entry
= (swp_entry_t
){0};
918 dst_pte
= pte_alloc_map_lock(dst_mm
, dst_pmd
, addr
, &dst_ptl
);
921 src_pte
= pte_offset_map(src_pmd
, addr
);
922 src_ptl
= pte_lockptr(src_mm
, src_pmd
);
923 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
924 orig_src_pte
= src_pte
;
925 orig_dst_pte
= dst_pte
;
926 arch_enter_lazy_mmu_mode();
930 * We are holding two locks at this point - either of them
931 * could generate latencies in another task on another CPU.
933 if (progress
>= 32) {
935 if (need_resched() ||
936 spin_needbreak(src_ptl
) || spin_needbreak(dst_ptl
))
939 if (pte_none(*src_pte
)) {
943 entry
.val
= copy_one_pte(dst_mm
, src_mm
, dst_pte
, src_pte
,
948 } while (dst_pte
++, src_pte
++, addr
+= PAGE_SIZE
, addr
!= end
);
950 arch_leave_lazy_mmu_mode();
951 spin_unlock(src_ptl
);
952 pte_unmap(orig_src_pte
);
953 add_mm_rss_vec(dst_mm
, rss
);
954 pte_unmap_unlock(orig_dst_pte
, dst_ptl
);
958 if (add_swap_count_continuation(entry
, GFP_KERNEL
) < 0)
967 static inline int copy_pmd_range(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
968 pud_t
*dst_pud
, pud_t
*src_pud
, struct vm_area_struct
*vma
,
969 unsigned long addr
, unsigned long end
)
971 pmd_t
*src_pmd
, *dst_pmd
;
974 dst_pmd
= pmd_alloc(dst_mm
, dst_pud
, addr
);
977 src_pmd
= pmd_offset(src_pud
, addr
);
979 next
= pmd_addr_end(addr
, end
);
980 if (pmd_trans_huge(*src_pmd
)) {
982 VM_BUG_ON(next
-addr
!= HPAGE_PMD_SIZE
);
983 err
= copy_huge_pmd(dst_mm
, src_mm
,
984 dst_pmd
, src_pmd
, addr
, vma
);
991 if (pmd_none_or_clear_bad(src_pmd
))
993 if (copy_pte_range(dst_mm
, src_mm
, dst_pmd
, src_pmd
,
996 } while (dst_pmd
++, src_pmd
++, addr
= next
, addr
!= end
);
1000 static inline int copy_pud_range(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
1001 pgd_t
*dst_pgd
, pgd_t
*src_pgd
, struct vm_area_struct
*vma
,
1002 unsigned long addr
, unsigned long end
)
1004 pud_t
*src_pud
, *dst_pud
;
1007 dst_pud
= pud_alloc(dst_mm
, dst_pgd
, addr
);
1010 src_pud
= pud_offset(src_pgd
, addr
);
1012 next
= pud_addr_end(addr
, end
);
1013 if (pud_none_or_clear_bad(src_pud
))
1015 if (copy_pmd_range(dst_mm
, src_mm
, dst_pud
, src_pud
,
1018 } while (dst_pud
++, src_pud
++, addr
= next
, addr
!= end
);
1022 int copy_page_range(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
1023 struct vm_area_struct
*vma
)
1025 pgd_t
*src_pgd
, *dst_pgd
;
1027 unsigned long addr
= vma
->vm_start
;
1028 unsigned long end
= vma
->vm_end
;
1029 unsigned long mmun_start
; /* For mmu_notifiers */
1030 unsigned long mmun_end
; /* For mmu_notifiers */
1035 * Don't copy ptes where a page fault will fill them correctly.
1036 * Fork becomes much lighter when there are big shared or private
1037 * readonly mappings. The tradeoff is that copy_page_range is more
1038 * efficient than faulting.
1040 if (!(vma
->vm_flags
& (VM_HUGETLB
| VM_NONLINEAR
|
1041 VM_PFNMAP
| VM_MIXEDMAP
))) {
1046 if (is_vm_hugetlb_page(vma
))
1047 return copy_hugetlb_page_range(dst_mm
, src_mm
, vma
);
1049 if (unlikely(vma
->vm_flags
& VM_PFNMAP
)) {
1051 * We do not free on error cases below as remove_vma
1052 * gets called on error from higher level routine
1054 ret
= track_pfn_copy(vma
);
1060 * We need to invalidate the secondary MMU mappings only when
1061 * there could be a permission downgrade on the ptes of the
1062 * parent mm. And a permission downgrade will only happen if
1063 * is_cow_mapping() returns true.
1065 is_cow
= is_cow_mapping(vma
->vm_flags
);
1069 mmu_notifier_invalidate_range_start(src_mm
, mmun_start
,
1073 dst_pgd
= pgd_offset(dst_mm
, addr
);
1074 src_pgd
= pgd_offset(src_mm
, addr
);
1076 next
= pgd_addr_end(addr
, end
);
1077 if (pgd_none_or_clear_bad(src_pgd
))
1079 if (unlikely(copy_pud_range(dst_mm
, src_mm
, dst_pgd
, src_pgd
,
1080 vma
, addr
, next
))) {
1084 } while (dst_pgd
++, src_pgd
++, addr
= next
, addr
!= end
);
1087 mmu_notifier_invalidate_range_end(src_mm
, mmun_start
, mmun_end
);
1091 static unsigned long zap_pte_range(struct mmu_gather
*tlb
,
1092 struct vm_area_struct
*vma
, pmd_t
*pmd
,
1093 unsigned long addr
, unsigned long end
,
1094 struct zap_details
*details
)
1096 struct mm_struct
*mm
= tlb
->mm
;
1097 int force_flush
= 0;
1098 int rss
[NR_MM_COUNTERS
];
1105 start_pte
= pte_offset_map_lock(mm
, pmd
, addr
, &ptl
);
1107 arch_enter_lazy_mmu_mode();
1110 if (pte_none(ptent
)) {
1114 if (pte_present(ptent
)) {
1117 page
= vm_normal_page(vma
, addr
, ptent
);
1118 if (unlikely(details
) && page
) {
1120 * unmap_shared_mapping_pages() wants to
1121 * invalidate cache without truncating:
1122 * unmap shared but keep private pages.
1124 if (details
->check_mapping
&&
1125 details
->check_mapping
!= page
->mapping
)
1128 * Each page->index must be checked when
1129 * invalidating or truncating nonlinear.
1131 if (details
->nonlinear_vma
&&
1132 (page
->index
< details
->first_index
||
1133 page
->index
> details
->last_index
))
1136 ptent
= ptep_get_and_clear_full(mm
, addr
, pte
,
1138 tlb_remove_tlb_entry(tlb
, pte
, addr
);
1139 if (unlikely(!page
))
1141 if (unlikely(details
) && details
->nonlinear_vma
1142 && linear_page_index(details
->nonlinear_vma
,
1143 addr
) != page
->index
)
1144 set_pte_at(mm
, addr
, pte
,
1145 pgoff_to_pte(page
->index
));
1147 rss
[MM_ANONPAGES
]--;
1149 if (pte_dirty(ptent
))
1150 set_page_dirty(page
);
1151 if (pte_young(ptent
) &&
1152 likely(!VM_SequentialReadHint(vma
)))
1153 mark_page_accessed(page
);
1154 rss
[MM_FILEPAGES
]--;
1156 page_remove_rmap(page
);
1157 if (unlikely(page_mapcount(page
) < 0))
1158 print_bad_pte(vma
, addr
, ptent
, page
);
1159 force_flush
= !__tlb_remove_page(tlb
, page
);
1165 * If details->check_mapping, we leave swap entries;
1166 * if details->nonlinear_vma, we leave file entries.
1168 if (unlikely(details
))
1170 if (pte_file(ptent
)) {
1171 if (unlikely(!(vma
->vm_flags
& VM_NONLINEAR
)))
1172 print_bad_pte(vma
, addr
, ptent
, NULL
);
1174 swp_entry_t entry
= pte_to_swp_entry(ptent
);
1176 if (!non_swap_entry(entry
))
1178 else if (is_migration_entry(entry
)) {
1181 page
= migration_entry_to_page(entry
);
1184 rss
[MM_ANONPAGES
]--;
1186 rss
[MM_FILEPAGES
]--;
1188 if (unlikely(!free_swap_and_cache(entry
)))
1189 print_bad_pte(vma
, addr
, ptent
, NULL
);
1191 pte_clear_not_present_full(mm
, addr
, pte
, tlb
->fullmm
);
1192 } while (pte
++, addr
+= PAGE_SIZE
, addr
!= end
);
1194 add_mm_rss_vec(mm
, rss
);
1195 arch_leave_lazy_mmu_mode();
1196 pte_unmap_unlock(start_pte
, ptl
);
1199 * mmu_gather ran out of room to batch pages, we break out of
1200 * the PTE lock to avoid doing the potential expensive TLB invalidate
1201 * and page-free while holding it.
1206 #ifdef HAVE_GENERIC_MMU_GATHER
1218 static inline unsigned long zap_pmd_range(struct mmu_gather
*tlb
,
1219 struct vm_area_struct
*vma
, pud_t
*pud
,
1220 unsigned long addr
, unsigned long end
,
1221 struct zap_details
*details
)
1226 pmd
= pmd_offset(pud
, addr
);
1228 next
= pmd_addr_end(addr
, end
);
1229 if (pmd_trans_huge(*pmd
)) {
1230 if (next
- addr
!= HPAGE_PMD_SIZE
) {
1231 #ifdef CONFIG_DEBUG_VM
1232 if (!rwsem_is_locked(&tlb
->mm
->mmap_sem
)) {
1233 pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
1234 __func__
, addr
, end
,
1240 split_huge_page_pmd(vma
, addr
, pmd
);
1241 } else if (zap_huge_pmd(tlb
, vma
, pmd
, addr
))
1246 * Here there can be other concurrent MADV_DONTNEED or
1247 * trans huge page faults running, and if the pmd is
1248 * none or trans huge it can change under us. This is
1249 * because MADV_DONTNEED holds the mmap_sem in read
1252 if (pmd_none_or_trans_huge_or_clear_bad(pmd
))
1254 next
= zap_pte_range(tlb
, vma
, pmd
, addr
, next
, details
);
1257 } while (pmd
++, addr
= next
, addr
!= end
);
1262 static inline unsigned long zap_pud_range(struct mmu_gather
*tlb
,
1263 struct vm_area_struct
*vma
, pgd_t
*pgd
,
1264 unsigned long addr
, unsigned long end
,
1265 struct zap_details
*details
)
1270 pud
= pud_offset(pgd
, addr
);
1272 next
= pud_addr_end(addr
, end
);
1273 if (pud_none_or_clear_bad(pud
))
1275 next
= zap_pmd_range(tlb
, vma
, pud
, addr
, next
, details
);
1276 } while (pud
++, addr
= next
, addr
!= end
);
1281 static void unmap_page_range(struct mmu_gather
*tlb
,
1282 struct vm_area_struct
*vma
,
1283 unsigned long addr
, unsigned long end
,
1284 struct zap_details
*details
)
1289 if (details
&& !details
->check_mapping
&& !details
->nonlinear_vma
)
1292 BUG_ON(addr
>= end
);
1293 mem_cgroup_uncharge_start();
1294 tlb_start_vma(tlb
, vma
);
1295 pgd
= pgd_offset(vma
->vm_mm
, addr
);
1297 next
= pgd_addr_end(addr
, end
);
1298 if (pgd_none_or_clear_bad(pgd
))
1300 next
= zap_pud_range(tlb
, vma
, pgd
, addr
, next
, details
);
1301 } while (pgd
++, addr
= next
, addr
!= end
);
1302 tlb_end_vma(tlb
, vma
);
1303 mem_cgroup_uncharge_end();
1307 static void unmap_single_vma(struct mmu_gather
*tlb
,
1308 struct vm_area_struct
*vma
, unsigned long start_addr
,
1309 unsigned long end_addr
,
1310 struct zap_details
*details
)
1312 unsigned long start
= max(vma
->vm_start
, start_addr
);
1315 if (start
>= vma
->vm_end
)
1317 end
= min(vma
->vm_end
, end_addr
);
1318 if (end
<= vma
->vm_start
)
1322 uprobe_munmap(vma
, start
, end
);
1324 if (unlikely(vma
->vm_flags
& VM_PFNMAP
))
1325 untrack_pfn(vma
, 0, 0);
1328 if (unlikely(is_vm_hugetlb_page(vma
))) {
1330 * It is undesirable to test vma->vm_file as it
1331 * should be non-null for valid hugetlb area.
1332 * However, vm_file will be NULL in the error
1333 * cleanup path of do_mmap_pgoff. When
1334 * hugetlbfs ->mmap method fails,
1335 * do_mmap_pgoff() nullifies vma->vm_file
1336 * before calling this function to clean up.
1337 * Since no pte has actually been setup, it is
1338 * safe to do nothing in this case.
1341 mutex_lock(&vma
->vm_file
->f_mapping
->i_mmap_mutex
);
1342 __unmap_hugepage_range_final(tlb
, vma
, start
, end
, NULL
);
1343 mutex_unlock(&vma
->vm_file
->f_mapping
->i_mmap_mutex
);
1346 unmap_page_range(tlb
, vma
, start
, end
, details
);
/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlb: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 *
 * Unmap all pages in the vma list.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
void unmap_vmas(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
}
/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @start: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Caller must protect the VMA list
 */
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long end = start + size;

	tlb_gather_mmu(&tlb, mm, 0);
	update_hiwater_rss(mm);
	mmu_notifier_invalidate_range_start(mm, start, end);
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
		unmap_single_vma(&tlb, vma, start, end, details);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);
}
/**
 * zap_page_range_single - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * The range must fit into one VMA.
 */
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long end = address + size;

	tlb_gather_mmu(&tlb, mm, 0);
	update_hiwater_rss(mm);
	mmu_notifier_invalidate_range_start(mm, address, end);
	unmap_single_vma(&tlb, vma, address, end, details);
	mmu_notifier_invalidate_range_end(mm, address, end);
	tlb_finish_mmu(&tlb, address, end);
}
/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
			!(vma->vm_flags & VM_PFNMAP))
		return -1;
	zap_page_range_single(vma, address, size, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
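/*
 * Illustrative use only: a driver that established a VM_PFNMAP mapping in
 * its mmap handler could later tear down the ptes it inserted with e.g.
 *
 *	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 */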
/**
 * follow_page - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
1475 struct mm_struct
*mm
= vma
->vm_mm
;
1477 page
= follow_huge_addr(mm
, address
, flags
& FOLL_WRITE
);
1478 if (!IS_ERR(page
)) {
1479 BUG_ON(flags
& FOLL_GET
);
1484 pgd
= pgd_offset(mm
, address
);
1485 if (pgd_none(*pgd
) || unlikely(pgd_bad(*pgd
)))
1488 pud
= pud_offset(pgd
, address
);
1491 if (pud_huge(*pud
) && vma
->vm_flags
& VM_HUGETLB
) {
1492 BUG_ON(flags
& FOLL_GET
);
1493 page
= follow_huge_pud(mm
, address
, pud
, flags
& FOLL_WRITE
);
1496 if (unlikely(pud_bad(*pud
)))
1499 pmd
= pmd_offset(pud
, address
);
1502 if (pmd_huge(*pmd
) && vma
->vm_flags
& VM_HUGETLB
) {
1503 BUG_ON(flags
& FOLL_GET
);
1504 page
= follow_huge_pmd(mm
, address
, pmd
, flags
& FOLL_WRITE
);
1507 if ((flags
& FOLL_NUMA
) && pmd_numa(*pmd
))
1509 if (pmd_trans_huge(*pmd
)) {
1510 if (flags
& FOLL_SPLIT
) {
1511 split_huge_page_pmd(vma
, address
, pmd
);
1512 goto split_fallthrough
;
1514 spin_lock(&mm
->page_table_lock
);
1515 if (likely(pmd_trans_huge(*pmd
))) {
1516 if (unlikely(pmd_trans_splitting(*pmd
))) {
1517 spin_unlock(&mm
->page_table_lock
);
1518 wait_split_huge_page(vma
->anon_vma
, pmd
);
1520 page
= follow_trans_huge_pmd(vma
, address
,
1522 spin_unlock(&mm
->page_table_lock
);
1526 spin_unlock(&mm
->page_table_lock
);
1530 if (unlikely(pmd_bad(*pmd
)))
1533 ptep
= pte_offset_map_lock(mm
, pmd
, address
, &ptl
);
1536 if (!pte_present(pte
))
1538 if ((flags
& FOLL_NUMA
) && pte_numa(pte
))
1540 if ((flags
& FOLL_WRITE
) && !pte_write(pte
))
1543 page
= vm_normal_page(vma
, address
, pte
);
1544 if (unlikely(!page
)) {
1545 if ((flags
& FOLL_DUMP
) ||
1546 !is_zero_pfn(pte_pfn(pte
)))
1548 page
= pte_page(pte
);
1551 if (flags
& FOLL_GET
)
1552 get_page_foll(page
);
1553 if (flags
& FOLL_TOUCH
) {
1554 if ((flags
& FOLL_WRITE
) &&
1555 !pte_dirty(pte
) && !PageDirty(page
))
1556 set_page_dirty(page
);
1558 * pte_mkyoung() would be more correct here, but atomic care
1559 * is needed to avoid losing the dirty bit: it is easier to use
1560 * mark_page_accessed().
1562 mark_page_accessed(page
);
1564 if ((flags
& FOLL_MLOCK
) && (vma
->vm_flags
& VM_LOCKED
)) {
1566 * The preliminary mapping check is mainly to avoid the
1567 * pointless overhead of lock_page on the ZERO_PAGE
1568 * which might bounce very badly if there is contention.
1570 * If the page is already locked, we don't need to
1571 * handle it now - vmscan will handle it later if and
1572 * when it attempts to reclaim the page.
1574 if (page
->mapping
&& trylock_page(page
)) {
1575 lru_add_drain(); /* push cached pages to LRU */
1577 * Because we lock page here, and migration is
1578 * blocked by the pte's page reference, and we
1579 * know the page is still mapped, we don't even
1580 * need to check for file-cache page truncation.
1582 mlock_vma_page(page
);
1587 pte_unmap_unlock(ptep
, ptl
);
1592 pte_unmap_unlock(ptep
, ptl
);
1593 return ERR_PTR(-EFAULT
);
1596 pte_unmap_unlock(ptep
, ptl
);
1602 * When core dumping an enormous anonymous area that nobody
1603 * has touched so far, we don't want to allocate unnecessary pages or
1604 * page tables. Return error instead of NULL to skip handle_mm_fault,
1605 * then get_dump_page() will return NULL to leave a hole in the dump.
1606 * But we can only make this optimization where a hole would surely
1607 * be zero-filled if handle_mm_fault() actually did handle it.
1609 if ((flags
& FOLL_DUMP
) &&
1610 (!vma
->vm_ops
|| !vma
->vm_ops
->fault
))
1611 return ERR_PTR(-EFAULT
);
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
	return stack_guard_page_start(vma, addr) ||
	       stack_guard_page_end(vma, addr+PAGE_SIZE);
}
/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
1670 int __get_user_pages(struct task_struct
*tsk
, struct mm_struct
*mm
,
1671 unsigned long start
, int nr_pages
, unsigned int gup_flags
,
1672 struct page
**pages
, struct vm_area_struct
**vmas
,
1676 unsigned long vm_flags
;
1681 VM_BUG_ON(!!pages
!= !!(gup_flags
& FOLL_GET
));
1684 * Require read or write permissions.
1685 * If FOLL_FORCE is set, we only require the "MAY" flags.
1687 vm_flags
= (gup_flags
& FOLL_WRITE
) ?
1688 (VM_WRITE
| VM_MAYWRITE
) : (VM_READ
| VM_MAYREAD
);
1689 vm_flags
&= (gup_flags
& FOLL_FORCE
) ?
1690 (VM_MAYREAD
| VM_MAYWRITE
) : (VM_READ
| VM_WRITE
);
1693 * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
1694 * would be called on PROT_NONE ranges. We must never invoke
1695 * handle_mm_fault on PROT_NONE ranges or the NUMA hinting
1696 * page faults would unprotect the PROT_NONE ranges if
1697 * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
1698 * bitflag. So to avoid that, don't set FOLL_NUMA if
1699 * FOLL_FORCE is set.
1701 if (!(gup_flags
& FOLL_FORCE
))
1702 gup_flags
|= FOLL_NUMA
;
1707 struct vm_area_struct
*vma
;
1709 vma
= find_extend_vma(mm
, start
);
1710 if (!vma
&& in_gate_area(mm
, start
)) {
1711 unsigned long pg
= start
& PAGE_MASK
;
1717 /* user gate pages are read-only */
1718 if (gup_flags
& FOLL_WRITE
)
1719 return i
? : -EFAULT
;
1721 pgd
= pgd_offset_k(pg
);
1723 pgd
= pgd_offset_gate(mm
, pg
);
1724 BUG_ON(pgd_none(*pgd
));
1725 pud
= pud_offset(pgd
, pg
);
1726 BUG_ON(pud_none(*pud
));
1727 pmd
= pmd_offset(pud
, pg
);
1729 return i
? : -EFAULT
;
1730 VM_BUG_ON(pmd_trans_huge(*pmd
));
1731 pte
= pte_offset_map(pmd
, pg
);
1732 if (pte_none(*pte
)) {
1734 return i
? : -EFAULT
;
1736 vma
= get_gate_vma(mm
);
1740 page
= vm_normal_page(vma
, start
, *pte
);
1742 if (!(gup_flags
& FOLL_DUMP
) &&
1743 is_zero_pfn(pte_pfn(*pte
)))
1744 page
= pte_page(*pte
);
1747 return i
? : -EFAULT
;
1758 (vma
->vm_flags
& (VM_IO
| VM_PFNMAP
)) ||
1759 !(vm_flags
& vma
->vm_flags
))
1760 return i
? : -EFAULT
;
1762 if (is_vm_hugetlb_page(vma
)) {
1763 i
= follow_hugetlb_page(mm
, vma
, pages
, vmas
,
1764 &start
, &nr_pages
, i
, gup_flags
);
1770 unsigned int foll_flags
= gup_flags
;
1773 * If we have a pending SIGKILL, don't keep faulting
1774 * pages and potentially allocating memory.
1776 if (unlikely(fatal_signal_pending(current
)))
1777 return i
? i
: -ERESTARTSYS
;
1780 while (!(page
= follow_page(vma
, start
, foll_flags
))) {
1782 unsigned int fault_flags
= 0;
1784 /* For mlock, just skip the stack guard page. */
1785 if (foll_flags
& FOLL_MLOCK
) {
1786 if (stack_guard_page(vma
, start
))
1789 if (foll_flags
& FOLL_WRITE
)
1790 fault_flags
|= FAULT_FLAG_WRITE
;
1792 fault_flags
|= FAULT_FLAG_ALLOW_RETRY
;
1793 if (foll_flags
& FOLL_NOWAIT
)
1794 fault_flags
|= (FAULT_FLAG_ALLOW_RETRY
| FAULT_FLAG_RETRY_NOWAIT
);
1796 ret
= handle_mm_fault(mm
, vma
, start
,
1799 if (ret
& VM_FAULT_ERROR
) {
1800 if (ret
& VM_FAULT_OOM
)
1801 return i
? i
: -ENOMEM
;
1802 if (ret
& (VM_FAULT_HWPOISON
|
1803 VM_FAULT_HWPOISON_LARGE
)) {
1806 else if (gup_flags
& FOLL_HWPOISON
)
1811 if (ret
& VM_FAULT_SIGBUS
)
1812 return i
? i
: -EFAULT
;
1817 if (ret
& VM_FAULT_MAJOR
)
1823 if (ret
& VM_FAULT_RETRY
) {
1830 * The VM_FAULT_WRITE bit tells us that
1831 * do_wp_page has broken COW when necessary,
1832 * even if maybe_mkwrite decided not to set
1833 * pte_write. We can thus safely do subsequent
1834 * page lookups as if they were reads. But only
1835 * do so when looping for pte_write is futile:
1836 * in some cases userspace may also be wanting
1837 * to write to the gotten user page, which a
1838 * read fault here might prevent (a readonly
1839 * page might get reCOWed by userspace write).
1841 if ((ret
& VM_FAULT_WRITE
) &&
1842 !(vma
->vm_flags
& VM_WRITE
))
1843 foll_flags
&= ~FOLL_WRITE
;
1848 return i
? i
: PTR_ERR(page
);
1852 flush_anon_page(vma
, page
, start
);
1853 flush_dcache_page(page
);
1861 } while (nr_pages
&& start
< vma
->vm_end
);
1865 EXPORT_SYMBOL(__get_user_pages
);
/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This should be called with the mm_sem held for read.
 */
1894 int fixup_user_fault(struct task_struct
*tsk
, struct mm_struct
*mm
,
1895 unsigned long address
, unsigned int fault_flags
)
1897 struct vm_area_struct
*vma
;
1900 vma
= find_extend_vma(mm
, address
);
1901 if (!vma
|| address
< vma
->vm_start
)
1904 ret
= handle_mm_fault(mm
, vma
, address
, fault_flags
);
1905 if (ret
& VM_FAULT_ERROR
) {
1906 if (ret
& VM_FAULT_OOM
)
1908 if (ret
& (VM_FAULT_HWPOISON
| VM_FAULT_HWPOISON_LARGE
))
1910 if (ret
& VM_FAULT_SIGBUS
)
1915 if (ret
& VM_FAULT_MAJOR
)
/**
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force write access even if user mapping is
 *		readonly. This will result in the page being COWed even
 *		in MAP_SHARED mappings. You do not want this.
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
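/*
 * Illustrative use only (not a code path in this file): a typical caller
 * pins one page for reading roughly like this, and must drop the
 * reference with put_page() when done; uaddr here is a placeholder:
 *
 *	struct page *page;
 *	int ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
 *			     1, 0, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret == 1)
 *		put_page(page);
 */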
/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
2021 pte_t
*__get_locked_pte(struct mm_struct
*mm
, unsigned long addr
,
2024 pgd_t
* pgd
= pgd_offset(mm
, addr
);
2025 pud_t
* pud
= pud_alloc(mm
, pgd
, addr
);
2027 pmd_t
* pmd
= pmd_alloc(mm
, pud
, addr
);
2029 VM_BUG_ON(pmd_trans_huge(*pmd
));
2030 return pte_alloc_map_lock(mm
, pmd
, addr
, ptl
);
2037 * This is the old fallback for page remapping.
2039 * For historical reasons, it only allows reserved pages. Only
2040 * old drivers should use this, and they needed to mark their
2041 * pages reserved for the old functions anyway.
2043 static int insert_page(struct vm_area_struct
*vma
, unsigned long addr
,
2044 struct page
*page
, pgprot_t prot
)
2046 struct mm_struct
*mm
= vma
->vm_mm
;
2055 flush_dcache_page(page
);
2056 pte
= get_locked_pte(mm
, addr
, &ptl
);
2060 if (!pte_none(*pte
))
2063 /* Ok, finally just insert the thing.. */
2065 inc_mm_counter_fast(mm
, MM_FILEPAGES
);
2066 page_add_file_rmap(page
);
2067 set_pte_at(mm
, addr
, pte
, mk_pte(page
, prot
));
2070 pte_unmap_unlock(pte
, ptl
);
2073 pte_unmap_unlock(pte
, ptl
);
/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 *
 * Usually this function is called from f_op->mmap() handler
 * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
 * Caller must set VM_MIXEDMAP on vma if it wants to call this
 * function from other places, for example from page-fault handler.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	if (!(vma->vm_flags & VM_MIXEDMAP)) {
		BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
		BUG_ON(vma->vm_flags & VM_PFNMAP);
		vma->vm_flags |= VM_MIXEDMAP;
	}
	return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
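/*
 * Illustrative use only: a driver's ->mmap handler that wants to expose a
 * kernel page it allocated (non-compound, e.g. from alloc_page()) could do
 *
 *	err = vm_insert_page(vma, vma->vm_start, my_page);
 *
 * where my_page is a placeholder for the driver's own page.
 */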
2121 static int insert_pfn(struct vm_area_struct
*vma
, unsigned long addr
,
2122 unsigned long pfn
, pgprot_t prot
)
2124 struct mm_struct
*mm
= vma
->vm_mm
;
2130 pte
= get_locked_pte(mm
, addr
, &ptl
);
2134 if (!pte_none(*pte
))
2137 /* Ok, finally just insert the thing.. */
2138 entry
= pte_mkspecial(pfn_pte(pfn
, prot
));
2139 set_pte_at(mm
, addr
, pte
, entry
);
2140 update_mmu_cache(vma
, addr
, pte
); /* XXX: why not for insert_page? */
2144 pte_unmap_unlock(pte
, ptl
);
/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 *
 * vma cannot be a COW mapping.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
2166 int vm_insert_pfn(struct vm_area_struct
*vma
, unsigned long addr
,
2170 pgprot_t pgprot
= vma
->vm_page_prot
;
2172 * Technically, architectures with pte_special can avoid all these
2173 * restrictions (same for remap_pfn_range). However we would like
2174 * consistency in testing and feature parity among all, so we should
2175 * try to keep these invariants in place for everybody.
2177 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)));
2178 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
2179 (VM_PFNMAP
|VM_MIXEDMAP
));
2180 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
2181 BUG_ON((vma
->vm_flags
& VM_MIXEDMAP
) && pfn_valid(pfn
));
2183 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
2185 if (track_pfn_insert(vma
, &pgprot
, pfn
))
2188 ret
= insert_pfn(vma
, addr
, pfn
, pgprot
);
2192 EXPORT_SYMBOL(vm_insert_pfn
);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would there be refcounted as a normal page.
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);
/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 *  Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page" associated
	 *	with them.
	 *   VM_DONTEXPAND
	 *	Disable vma merging and expanding with mremap().
	 *   VM_DONTDUMP
	 *	Omit vma from core dump, even when VM_IO turned off.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 * See vm_normal_page() for details.
	 */
	if (is_cow_mapping(vma->vm_flags)) {
		if (addr != vma->vm_start || end != vma->vm_end)
			return -EINVAL;
		vma->vm_pgoff = pfn;
	}

	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
	if (err)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (err)
		untrack_pfn(vma, pfn, PAGE_ALIGN(size));

	return err;
}
EXPORT_SYMBOL(remap_pfn_range);
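
/*
 * Example (illustrative sketch, not part of this file): the classic
 * user is a driver ->mmap() handler exposing a physically contiguous
 * buffer or MMIO region.  "mydev_buf_phys" is a hypothetical physical
 * address owned by the driver.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       mydev_buf_phys >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 */
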
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pte_t *pte;
	int err;
	pgtable_t token;
	spinlock_t *uninitialized_var(ptl);

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	BUG_ON(pmd_huge(*pmd));

	arch_enter_lazy_mmu_mode();

	token = pmd_pgtable(*pmd);

	do {
		err = fn(pte++, token, addr, data);
		if (err)
			break;
	} while (addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();

	if (mm != &init_mm)
		pte_unmap_unlock(pte-1, ptl);
	return err;
}
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pmd_t *pmd;
	unsigned long next;
	int err;

	BUG_ON(pud_huge(*pud));

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);
	return err;
}
static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pud_t *pud;
	unsigned long next;
	int err;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);
	return err;
}
/*
 * Scan a region of virtual memory, filling in page tables as necessary
 * and calling a provided function on each leaf page table.
 */
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
			unsigned long size, pte_fn_t fn, void *data)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);
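
/*
 * Example (illustrative sketch, not part of this file): the callback
 * receives each leaf pte in the range.  A caller could, for instance,
 * count the populated entries over a kernel virtual range; the names
 * below are hypothetical.
 *
 *	static int count_present(pte_t *pte, pgtable_t token,
 *				 unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (!pte_none(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	apply_to_page_range(&init_mm, start, size, count_present, &count);
 */
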
/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically.  Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			clear_page(kaddr);
		kunmap_atomic(kaddr);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
}
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
2524 static int do_wp_page(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
2525 unsigned long address
, pte_t
*page_table
, pmd_t
*pmd
,
2526 spinlock_t
*ptl
, pte_t orig_pte
)
2529 struct page
*old_page
, *new_page
= NULL
;
2532 int page_mkwrite
= 0;
2533 struct page
*dirty_page
= NULL
;
2534 unsigned long mmun_start
= 0; /* For mmu_notifiers */
2535 unsigned long mmun_end
= 0; /* For mmu_notifiers */
2537 old_page
= vm_normal_page(vma
, address
, orig_pte
);
2540 * VM_MIXEDMAP !pfn_valid() case
2542 * We should not cow pages in a shared writeable mapping.
2543 * Just mark the pages writable as we can't do any dirty
2544 * accounting on raw pfn maps.
2546 if ((vma
->vm_flags
& (VM_WRITE
|VM_SHARED
)) ==
2547 (VM_WRITE
|VM_SHARED
))
2553 * Take out anonymous pages first, anonymous shared vmas are
2554 * not dirty accountable.
2556 if (PageAnon(old_page
) && !PageKsm(old_page
)) {
2557 if (!trylock_page(old_page
)) {
2558 page_cache_get(old_page
);
2559 pte_unmap_unlock(page_table
, ptl
);
2560 lock_page(old_page
);
2561 page_table
= pte_offset_map_lock(mm
, pmd
, address
,
2563 if (!pte_same(*page_table
, orig_pte
)) {
2564 unlock_page(old_page
);
2567 page_cache_release(old_page
);
2569 if (reuse_swap_page(old_page
)) {
2571 * The page is all ours. Move it to our anon_vma so
2572 * the rmap code will not search our parent or siblings.
2573 * Protected against the rmap code by the page lock.
2575 page_move_anon_rmap(old_page
, vma
, address
);
2576 unlock_page(old_page
);
2579 unlock_page(old_page
);
2580 } else if (unlikely((vma
->vm_flags
& (VM_WRITE
|VM_SHARED
)) ==
2581 (VM_WRITE
|VM_SHARED
))) {
2583 * Only catch write-faults on shared writable pages,
2584 * read-only shared pages can get COWed by
2585 * get_user_pages(.write=1, .force=1).
2587 if (vma
->vm_ops
&& vma
->vm_ops
->page_mkwrite
) {
2588 struct vm_fault vmf
;
2591 vmf
.virtual_address
= (void __user
*)(address
&
2593 vmf
.pgoff
= old_page
->index
;
2594 vmf
.flags
= FAULT_FLAG_WRITE
|FAULT_FLAG_MKWRITE
;
2595 vmf
.page
= old_page
;
2598 * Notify the address space that the page is about to
2599 * become writable so that it can prohibit this or wait
2600 * for the page to get into an appropriate state.
2602 * We do this without the lock held, so that it can
2603 * sleep if it needs to.
2605 page_cache_get(old_page
);
2606 pte_unmap_unlock(page_table
, ptl
);
2608 tmp
= vma
->vm_ops
->page_mkwrite(vma
, &vmf
);
2610 (VM_FAULT_ERROR
| VM_FAULT_NOPAGE
))) {
2612 goto unwritable_page
;
2614 if (unlikely(!(tmp
& VM_FAULT_LOCKED
))) {
2615 lock_page(old_page
);
2616 if (!old_page
->mapping
) {
2617 ret
= 0; /* retry the fault */
2618 unlock_page(old_page
);
2619 goto unwritable_page
;
2622 VM_BUG_ON(!PageLocked(old_page
));
2625 * Since we dropped the lock we need to revalidate
2626 * the PTE as someone else may have changed it. If
2627 * they did, we just return, as we can count on the
2628 * MMU to tell us if they didn't also make it writable.
2630 page_table
= pte_offset_map_lock(mm
, pmd
, address
,
2632 if (!pte_same(*page_table
, orig_pte
)) {
2633 unlock_page(old_page
);
2639 dirty_page
= old_page
;
2640 get_page(dirty_page
);
2643 flush_cache_page(vma
, address
, pte_pfn(orig_pte
));
2644 entry
= pte_mkyoung(orig_pte
);
2645 entry
= maybe_mkwrite(pte_mkdirty(entry
), vma
);
2646 if (ptep_set_access_flags(vma
, address
, page_table
, entry
,1))
2647 update_mmu_cache(vma
, address
, page_table
);
2648 pte_unmap_unlock(page_table
, ptl
);
2649 ret
|= VM_FAULT_WRITE
;
2655 * Yes, Virginia, this is actually required to prevent a race
2656 * with clear_page_dirty_for_io() from clearing the page dirty
2657 * bit after it clear all dirty ptes, but before a racing
2658 * do_wp_page installs a dirty pte.
2660 * __do_fault is protected similarly.
2662 if (!page_mkwrite
) {
2663 wait_on_page_locked(dirty_page
);
2664 set_page_dirty_balance(dirty_page
, page_mkwrite
);
2665 /* file_update_time outside page_lock */
2667 file_update_time(vma
->vm_file
);
2669 put_page(dirty_page
);
2671 struct address_space
*mapping
= dirty_page
->mapping
;
2673 set_page_dirty(dirty_page
);
2674 unlock_page(dirty_page
);
2675 page_cache_release(dirty_page
);
2678 * Some device drivers do not set page.mapping
2679 * but still dirty their pages
2681 balance_dirty_pages_ratelimited(mapping
);
2689 * Ok, we need to copy. Oh, well..
2691 page_cache_get(old_page
);
2693 pte_unmap_unlock(page_table
, ptl
);
2695 if (unlikely(anon_vma_prepare(vma
)))
2698 if (is_zero_pfn(pte_pfn(orig_pte
))) {
2699 new_page
= alloc_zeroed_user_highpage_movable(vma
, address
);
2703 new_page
= alloc_page_vma(GFP_HIGHUSER_MOVABLE
, vma
, address
);
2706 cow_user_page(new_page
, old_page
, address
, vma
);
2708 __SetPageUptodate(new_page
);
2710 if (mem_cgroup_newpage_charge(new_page
, mm
, GFP_KERNEL
))
2713 mmun_start
= address
& PAGE_MASK
;
2714 mmun_end
= mmun_start
+ PAGE_SIZE
;
2715 mmu_notifier_invalidate_range_start(mm
, mmun_start
, mmun_end
);
2718 * Re-check the pte - we dropped the lock
2720 page_table
= pte_offset_map_lock(mm
, pmd
, address
, &ptl
);
2721 if (likely(pte_same(*page_table
, orig_pte
))) {
2723 if (!PageAnon(old_page
)) {
2724 dec_mm_counter_fast(mm
, MM_FILEPAGES
);
2725 inc_mm_counter_fast(mm
, MM_ANONPAGES
);
2728 inc_mm_counter_fast(mm
, MM_ANONPAGES
);
2729 flush_cache_page(vma
, address
, pte_pfn(orig_pte
));
2730 entry
= mk_pte(new_page
, vma
->vm_page_prot
);
2731 entry
= maybe_mkwrite(pte_mkdirty(entry
), vma
);
2733 * Clear the pte entry and flush it first, before updating the
2734 * pte with the new entry. This will avoid a race condition
2735 * seen in the presence of one thread doing SMC and another
2738 ptep_clear_flush(vma
, address
, page_table
);
2739 page_add_new_anon_rmap(new_page
, vma
, address
);
2741 * We call the notify macro here because, when using secondary
2742 * mmu page tables (such as kvm shadow page tables), we want the
2743 * new page to be mapped directly into the secondary page table.
2745 set_pte_at_notify(mm
, address
, page_table
, entry
);
2746 update_mmu_cache(vma
, address
, page_table
);
2749 * Only after switching the pte to the new page may
2750 * we remove the mapcount here. Otherwise another
2751 * process may come and find the rmap count decremented
2752 * before the pte is switched to the new page, and
2753 * "reuse" the old page writing into it while our pte
2754 * here still points into it and can be read by other
2757 * The critical issue is to order this
2758 * page_remove_rmap with the ptp_clear_flush above.
2759 * Those stores are ordered by (if nothing else,)
2760 * the barrier present in the atomic_add_negative
2761 * in page_remove_rmap.
2763 * Then the TLB flush in ptep_clear_flush ensures that
2764 * no process can access the old page before the
2765 * decremented mapcount is visible. And the old page
2766 * cannot be reused until after the decremented
2767 * mapcount is visible. So transitively, TLBs to
2768 * old page will be flushed before it can be reused.
2770 page_remove_rmap(old_page
);
2773 /* Free the old page.. */
2774 new_page
= old_page
;
2775 ret
|= VM_FAULT_WRITE
;
2777 mem_cgroup_uncharge_page(new_page
);
2780 page_cache_release(new_page
);
2782 pte_unmap_unlock(page_table
, ptl
);
2783 if (mmun_end
> mmun_start
)
2784 mmu_notifier_invalidate_range_end(mm
, mmun_start
, mmun_end
);
2787 * Don't let another task, with possibly unlocked vma,
2788 * keep the mlocked page.
2790 if ((ret
& VM_FAULT_WRITE
) && (vma
->vm_flags
& VM_LOCKED
)) {
2791 lock_page(old_page
); /* LRU manipulation */
2792 munlock_vma_page(old_page
);
2793 unlock_page(old_page
);
2795 page_cache_release(old_page
);
2799 page_cache_release(new_page
);
2802 page_cache_release(old_page
);
2803 return VM_FAULT_OOM
;
2806 page_cache_release(old_page
);
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
}

static inline void unmap_mapping_range_tree(struct rb_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	pgoff_t vba, vea, zba, zea;

	vma_interval_tree_foreach(vma, root,
			details->first_index, details->last_index) {

		vba = vma->vm_pgoff;
		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details);
	}
}

static inline void unmap_mapping_range_list(struct list_head *head,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;

	/*
	 * In nonlinear VMAs there is no correspondence between virtual address
	 * offset and file offset.  So we must perform an exhaustive search
	 * across *all* the pages in each nonlinear VMA, not just the pages
	 * whose virtual address lies outside the file truncation point.
	 */
	list_for_each_entry(vma, head, shared.nonlinear) {
		details->nonlinear_vma = vma;
		unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
	}
}
/**
 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
 * @mapping: the address space containing mmaps to be unmapped.
 * @holebegin: byte in first page to unmap, relative to the start of
 * the underlying file.  This will be rounded down to a PAGE_SIZE
 * boundary.  Note that this is different from truncate_pagecache(), which
 * must keep the partial page.  In contrast, we must get rid of
 * partial pages.
 * @holelen: size of prospective hole in bytes.  This will be rounded
 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
 * end of the file.
 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
 * but 0 when invalidating pagecache, don't throw away private data.
 */
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen,
		int even_cows)
{
	struct zap_details details;
	pgoff_t hba = holebegin >> PAGE_SHIFT;
	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Check for overflow. */
	if (sizeof(holelen) > sizeof(hlen)) {
		long long holeend =
			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (holeend & ~(long long)ULONG_MAX)
			hlen = ULONG_MAX - hba + 1;
	}

	details.check_mapping = even_cows ? NULL : mapping;
	details.nonlinear_vma = NULL;
	details.first_index = hba;
	details.last_index = hba + hlen - 1;
	if (details.last_index < details.first_index)
		details.last_index = ULONG_MAX;

	mutex_lock(&mapping->i_mmap_mutex);
	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
		unmap_mapping_range_tree(&mapping->i_mmap, &details);
	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
	mutex_unlock(&mapping->i_mmap_mutex);
}
EXPORT_SYMBOL(unmap_mapping_range);
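
/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * truncating an inode down to "newsize" zaps every mapping of the
 * now-stale tail before dropping the page cache, along the lines of
 * truncate_pagecache():
 *
 *	loff_t holebegin = round_up(newsize, PAGE_SIZE);
 *
 *	unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
 *	truncate_inode_pages(inode->i_mapping, newsize);
 */
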
/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
2911 static int do_swap_page(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
2912 unsigned long address
, pte_t
*page_table
, pmd_t
*pmd
,
2913 unsigned int flags
, pte_t orig_pte
)
2916 struct page
*page
, *swapcache
= NULL
;
2920 struct mem_cgroup
*ptr
;
2924 if (!pte_unmap_same(mm
, pmd
, page_table
, orig_pte
))
2927 entry
= pte_to_swp_entry(orig_pte
);
2928 if (unlikely(non_swap_entry(entry
))) {
2929 if (is_migration_entry(entry
)) {
2930 migration_entry_wait(mm
, pmd
, address
);
2931 } else if (is_hwpoison_entry(entry
)) {
2932 ret
= VM_FAULT_HWPOISON
;
2934 print_bad_pte(vma
, address
, orig_pte
, NULL
);
2935 ret
= VM_FAULT_SIGBUS
;
2939 delayacct_set_flag(DELAYACCT_PF_SWAPIN
);
2940 page
= lookup_swap_cache(entry
);
2942 page
= swapin_readahead(entry
,
2943 GFP_HIGHUSER_MOVABLE
, vma
, address
);
2946 * Back out if somebody else faulted in this pte
2947 * while we released the pte lock.
2949 page_table
= pte_offset_map_lock(mm
, pmd
, address
, &ptl
);
2950 if (likely(pte_same(*page_table
, orig_pte
)))
2952 delayacct_clear_flag(DELAYACCT_PF_SWAPIN
);
2956 /* Had to read the page from swap area: Major fault */
2957 ret
= VM_FAULT_MAJOR
;
2958 count_vm_event(PGMAJFAULT
);
2959 mem_cgroup_count_vm_event(mm
, PGMAJFAULT
);
2960 } else if (PageHWPoison(page
)) {
2962 * hwpoisoned dirty swapcache pages are kept for killing
2963 * owner processes (which may be unknown at hwpoison time)
2965 ret
= VM_FAULT_HWPOISON
;
2966 delayacct_clear_flag(DELAYACCT_PF_SWAPIN
);
2970 locked
= lock_page_or_retry(page
, mm
, flags
);
2972 delayacct_clear_flag(DELAYACCT_PF_SWAPIN
);
2974 ret
|= VM_FAULT_RETRY
;
2979 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
2980 * release the swapcache from under us. The page pin, and pte_same
2981 * test below, are not enough to exclude that. Even if it is still
2982 * swapcache, we need to check that the page's swap has not changed.
2984 if (unlikely(!PageSwapCache(page
) || page_private(page
) != entry
.val
))
2987 if (ksm_might_need_to_copy(page
, vma
, address
)) {
2989 page
= ksm_does_need_to_copy(page
, vma
, address
);
2991 if (unlikely(!page
)) {
2999 if (mem_cgroup_try_charge_swapin(mm
, page
, GFP_KERNEL
, &ptr
)) {
3005 * Back out if somebody else already faulted in this pte.
3007 page_table
= pte_offset_map_lock(mm
, pmd
, address
, &ptl
);
3008 if (unlikely(!pte_same(*page_table
, orig_pte
)))
3011 if (unlikely(!PageUptodate(page
))) {
3012 ret
= VM_FAULT_SIGBUS
;
3017 * The page isn't present yet, go ahead with the fault.
3019 * Be careful about the sequence of operations here.
3020 * To get its accounting right, reuse_swap_page() must be called
3021 * while the page is counted on swap but not yet in mapcount i.e.
3022 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3023 * must be called after the swap_free(), or it will never succeed.
3024 * Because delete_from_swap_page() may be called by reuse_swap_page(),
3025 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
3026 * in page->private. In this case, a record in swap_cgroup is silently
3027 * discarded at swap_free().
3030 inc_mm_counter_fast(mm
, MM_ANONPAGES
);
3031 dec_mm_counter_fast(mm
, MM_SWAPENTS
);
3032 pte
= mk_pte(page
, vma
->vm_page_prot
);
3033 if ((flags
& FAULT_FLAG_WRITE
) && reuse_swap_page(page
)) {
3034 pte
= maybe_mkwrite(pte_mkdirty(pte
), vma
);
3035 flags
&= ~FAULT_FLAG_WRITE
;
3036 ret
|= VM_FAULT_WRITE
;
3039 flush_icache_page(vma
, page
);
3040 set_pte_at(mm
, address
, page_table
, pte
);
3041 do_page_add_anon_rmap(page
, vma
, address
, exclusive
);
3042 /* It's better to call commit-charge after rmap is established */
3043 mem_cgroup_commit_charge_swapin(page
, ptr
);
3046 if (vm_swap_full() || (vma
->vm_flags
& VM_LOCKED
) || PageMlocked(page
))
3047 try_to_free_swap(page
);
3051 * Hold the lock to avoid the swap entry to be reused
3052 * until we take the PT lock for the pte_same() check
3053 * (to avoid false positives from pte_same). For
3054 * further safety release the lock after the swap_free
3055 * so that the swap count won't change under a
3056 * parallel locked swapcache.
3058 unlock_page(swapcache
);
3059 page_cache_release(swapcache
);
3062 if (flags
& FAULT_FLAG_WRITE
) {
3063 ret
|= do_wp_page(mm
, vma
, address
, page_table
, pmd
, ptl
, pte
);
3064 if (ret
& VM_FAULT_ERROR
)
3065 ret
&= VM_FAULT_ERROR
;
3069 /* No need to invalidate - it was non-present before */
3070 update_mmu_cache(vma
, address
, page_table
);
3072 pte_unmap_unlock(page_table
, ptl
);
3076 mem_cgroup_cancel_charge_swapin(ptr
);
3077 pte_unmap_unlock(page_table
, ptl
);
3081 page_cache_release(page
);
3083 unlock_page(swapcache
);
3084 page_cache_release(swapcache
);
/*
 * This is like a special single-page "expand_{down|up}wards()",
 * except we must first make sure that 'address{-|+}PAGE_SIZE'
 * doesn't hit another vma.
 */
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vm_area_struct *prev = vma->vm_prev;

		/*
		 * Is there a mapping abutting this one below?
		 *
		 * That's only ok if it's the same stack mapping
		 * that has gotten split..
		 */
		if (prev && prev->vm_end == address)
			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;

		expand_downwards(vma, address - PAGE_SIZE);
	}
	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
		struct vm_area_struct *next = vma->vm_next;

		/* As VM_GROWSDOWN but s/below/above/ */
		if (next && next->vm_start == address + PAGE_SIZE)
			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;

		expand_upwards(vma, address + PAGE_SIZE);
	}
	return 0;
}
/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	pte_unmap(page_table);

	/* Check if we need to add a guard page to the stack */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGBUS;

	/* Use the zero-page for reads */
	if (!(flags & FAULT_FLAG_WRITE)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
						vma->vm_page_prot));
		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
		if (!pte_none(*page_table))
			goto unlock;
		goto setpte;
	}

	/* Allocate our own private page. */
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		goto oom;
	__SetPageUptodate(page);

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		goto oom_free_page;

	entry = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*page_table))
		goto release;

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address);
setpte:
	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return 0;
release:
	mem_cgroup_uncharge_page(page);
	page_cache_release(page);
	goto unlock;
oom_free_page:
	page_cache_release(page);
oom:
	return VM_FAULT_OOM;
}
/*
 * __do_fault() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
 * the next page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte neither mapped nor locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
3204 static int __do_fault(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
3205 unsigned long address
, pmd_t
*pmd
,
3206 pgoff_t pgoff
, unsigned int flags
, pte_t orig_pte
)
3211 struct page
*cow_page
;
3214 struct page
*dirty_page
= NULL
;
3215 struct vm_fault vmf
;
3217 int page_mkwrite
= 0;
3220 * If we do COW later, allocate page befor taking lock_page()
3221 * on the file cache page. This will reduce lock holding time.
3223 if ((flags
& FAULT_FLAG_WRITE
) && !(vma
->vm_flags
& VM_SHARED
)) {
3225 if (unlikely(anon_vma_prepare(vma
)))
3226 return VM_FAULT_OOM
;
3228 cow_page
= alloc_page_vma(GFP_HIGHUSER_MOVABLE
, vma
, address
);
3230 return VM_FAULT_OOM
;
3232 if (mem_cgroup_newpage_charge(cow_page
, mm
, GFP_KERNEL
)) {
3233 page_cache_release(cow_page
);
3234 return VM_FAULT_OOM
;
3239 vmf
.virtual_address
= (void __user
*)(address
& PAGE_MASK
);
3244 ret
= vma
->vm_ops
->fault(vma
, &vmf
);
3245 if (unlikely(ret
& (VM_FAULT_ERROR
| VM_FAULT_NOPAGE
|
3249 if (unlikely(PageHWPoison(vmf
.page
))) {
3250 if (ret
& VM_FAULT_LOCKED
)
3251 unlock_page(vmf
.page
);
3252 ret
= VM_FAULT_HWPOISON
;
3257 * For consistency in subsequent calls, make the faulted page always
3260 if (unlikely(!(ret
& VM_FAULT_LOCKED
)))
3261 lock_page(vmf
.page
);
3263 VM_BUG_ON(!PageLocked(vmf
.page
));
3266 * Should we do an early C-O-W break?
3269 if (flags
& FAULT_FLAG_WRITE
) {
3270 if (!(vma
->vm_flags
& VM_SHARED
)) {
3273 copy_user_highpage(page
, vmf
.page
, address
, vma
);
3274 __SetPageUptodate(page
);
3277 * If the page will be shareable, see if the backing
3278 * address space wants to know that the page is about
3279 * to become writable
3281 if (vma
->vm_ops
->page_mkwrite
) {
3285 vmf
.flags
= FAULT_FLAG_WRITE
|FAULT_FLAG_MKWRITE
;
3286 tmp
= vma
->vm_ops
->page_mkwrite(vma
, &vmf
);
3288 (VM_FAULT_ERROR
| VM_FAULT_NOPAGE
))) {
3290 goto unwritable_page
;
3292 if (unlikely(!(tmp
& VM_FAULT_LOCKED
))) {
3294 if (!page
->mapping
) {
3295 ret
= 0; /* retry the fault */
3297 goto unwritable_page
;
3300 VM_BUG_ON(!PageLocked(page
));
3307 page_table
= pte_offset_map_lock(mm
, pmd
, address
, &ptl
);
3310 * This silly early PAGE_DIRTY setting removes a race
3311 * due to the bad i386 page protection. But it's valid
3312 * for other architectures too.
3314 * Note that if FAULT_FLAG_WRITE is set, we either now have
3315 * an exclusive copy of the page, or this is a shared mapping,
3316 * so we can make it writable and dirty to avoid having to
3317 * handle that later.
3319 /* Only go through if we didn't race with anybody else... */
3320 if (likely(pte_same(*page_table
, orig_pte
))) {
3321 flush_icache_page(vma
, page
);
3322 entry
= mk_pte(page
, vma
->vm_page_prot
);
3323 if (flags
& FAULT_FLAG_WRITE
)
3324 entry
= maybe_mkwrite(pte_mkdirty(entry
), vma
);
3326 inc_mm_counter_fast(mm
, MM_ANONPAGES
);
3327 page_add_new_anon_rmap(page
, vma
, address
);
3329 inc_mm_counter_fast(mm
, MM_FILEPAGES
);
3330 page_add_file_rmap(page
);
3331 if (flags
& FAULT_FLAG_WRITE
) {
3333 get_page(dirty_page
);
3336 set_pte_at(mm
, address
, page_table
, entry
);
3338 /* no need to invalidate: a not-present page won't be cached */
3339 update_mmu_cache(vma
, address
, page_table
);
3342 mem_cgroup_uncharge_page(cow_page
);
3344 page_cache_release(page
);
3346 anon
= 1; /* no anon but release faulted_page */
3349 pte_unmap_unlock(page_table
, ptl
);
3352 struct address_space
*mapping
= page
->mapping
;
3355 if (set_page_dirty(dirty_page
))
3357 unlock_page(dirty_page
);
3358 put_page(dirty_page
);
3359 if ((dirtied
|| page_mkwrite
) && mapping
) {
3361 * Some device drivers do not set page.mapping but still
3364 balance_dirty_pages_ratelimited(mapping
);
3367 /* file_update_time outside page_lock */
3368 if (vma
->vm_file
&& !page_mkwrite
)
3369 file_update_time(vma
->vm_file
);
3371 unlock_page(vmf
.page
);
3373 page_cache_release(vmf
.page
);
3379 page_cache_release(page
);
3382 /* fs's fault handler get error */
3384 mem_cgroup_uncharge_page(cow_page
);
3385 page_cache_release(cow_page
);
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	pte_unmap(page_table);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * non-linear vmas.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
3410 static int do_nonlinear_fault(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
3411 unsigned long address
, pte_t
*page_table
, pmd_t
*pmd
,
3412 unsigned int flags
, pte_t orig_pte
)
3416 flags
|= FAULT_FLAG_NONLINEAR
;
3418 if (!pte_unmap_same(mm
, pmd
, page_table
, orig_pte
))
3421 if (unlikely(!(vma
->vm_flags
& VM_NONLINEAR
))) {
3423 * Page table corrupted: show pte and kill process.
3425 print_bad_pte(vma
, address
, orig_pte
, NULL
);
3426 return VM_FAULT_SIGBUS
;
3429 pgoff
= pte_to_pgoff(orig_pte
);
3430 return __do_fault(mm
, vma
, address
, pmd
, pgoff
, flags
, orig_pte
);
3433 int numa_migrate_prep(struct page
*page
, struct vm_area_struct
*vma
,
3434 unsigned long addr
, int current_nid
)
3438 count_vm_numa_event(NUMA_HINT_FAULTS
);
3439 if (current_nid
== numa_node_id())
3440 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL
);
3442 return mpol_misplaced(page
, vma
, addr
);
3445 int do_numa_page(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
3446 unsigned long addr
, pte_t pte
, pte_t
*ptep
, pmd_t
*pmd
)
3448 struct page
*page
= NULL
;
3450 int current_nid
= -1;
3452 bool migrated
= false;
3455 * The "pte" at this point cannot be used safely without
3456 * validation through pte_unmap_same(). It's of NUMA type but
3457 * the pfn may be screwed if the read is non atomic.
3459 * ptep_modify_prot_start is not called as this is clearing
3460 * the _PAGE_NUMA bit and it is not really expected that there
3461 * would be concurrent hardware modifications to the PTE.
3463 ptl
= pte_lockptr(mm
, pmd
);
3465 if (unlikely(!pte_same(*ptep
, pte
))) {
3466 pte_unmap_unlock(ptep
, ptl
);
3470 pte
= pte_mknonnuma(pte
);
3471 set_pte_at(mm
, addr
, ptep
, pte
);
3472 update_mmu_cache(vma
, addr
, ptep
);
3474 page
= vm_normal_page(vma
, addr
, pte
);
3476 pte_unmap_unlock(ptep
, ptl
);
3480 current_nid
= page_to_nid(page
);
3481 target_nid
= numa_migrate_prep(page
, vma
, addr
, current_nid
);
3482 pte_unmap_unlock(ptep
, ptl
);
3483 if (target_nid
== -1) {
3485 * Account for the fault against the current node if it not
3486 * being replaced regardless of where the page is located.
3488 current_nid
= numa_node_id();
3493 /* Migrate to the requested node */
3494 migrated
= migrate_misplaced_page(page
, target_nid
);
3496 current_nid
= target_nid
;
3499 if (current_nid
!= -1)
3500 task_numa_fault(current_nid
, 1, migrated
);
3504 /* NUMA hinting page fault entry point for regular pmds */
3505 #ifdef CONFIG_NUMA_BALANCING
3506 static int do_pmd_numa_page(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
3507 unsigned long addr
, pmd_t
*pmdp
)
3510 pte_t
*pte
, *orig_pte
;
3511 unsigned long _addr
= addr
& PMD_MASK
;
3512 unsigned long offset
;
3515 int local_nid
= numa_node_id();
3517 spin_lock(&mm
->page_table_lock
);
3519 if (pmd_numa(pmd
)) {
3520 set_pmd_at(mm
, _addr
, pmdp
, pmd_mknonnuma(pmd
));
3523 spin_unlock(&mm
->page_table_lock
);
3528 /* we're in a page fault so some vma must be in the range */
3530 BUG_ON(vma
->vm_start
>= _addr
+ PMD_SIZE
);
3531 offset
= max(_addr
, vma
->vm_start
) & ~PMD_MASK
;
3532 VM_BUG_ON(offset
>= PMD_SIZE
);
3533 orig_pte
= pte
= pte_offset_map_lock(mm
, pmdp
, _addr
, &ptl
);
3534 pte
+= offset
>> PAGE_SHIFT
;
3535 for (addr
= _addr
+ offset
; addr
< _addr
+ PMD_SIZE
; pte
++, addr
+= PAGE_SIZE
) {
3536 pte_t pteval
= *pte
;
3538 int curr_nid
= local_nid
;
3541 if (!pte_present(pteval
))
3543 if (!pte_numa(pteval
))
3545 if (addr
>= vma
->vm_end
) {
3546 vma
= find_vma(mm
, addr
);
3547 /* there's a pte present so there must be a vma */
3549 BUG_ON(addr
< vma
->vm_start
);
3551 if (pte_numa(pteval
)) {
3552 pteval
= pte_mknonnuma(pteval
);
3553 set_pte_at(mm
, addr
, pte
, pteval
);
3555 page
= vm_normal_page(vma
, addr
, pteval
);
3556 if (unlikely(!page
))
3558 /* only check non-shared pages */
3559 if (unlikely(page_mapcount(page
) != 1))
3563 * Note that the NUMA fault is later accounted to either
3564 * the node that is currently running or where the page is
3567 curr_nid
= local_nid
;
3568 target_nid
= numa_migrate_prep(page
, vma
, addr
,
3570 if (target_nid
== -1) {
3575 /* Migrate to the requested node */
3576 pte_unmap_unlock(pte
, ptl
);
3577 migrated
= migrate_misplaced_page(page
, target_nid
);
3579 curr_nid
= target_nid
;
3580 task_numa_fault(curr_nid
, 1, migrated
);
3582 pte
= pte_offset_map_lock(mm
, pmdp
, addr
, &ptl
);
3584 pte_unmap_unlock(orig_pte
, ptl
);
3589 static int do_pmd_numa_page(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
3590 unsigned long addr
, pmd_t
*pmdp
)
3595 #endif /* CONFIG_NUMA_BALANCING */
/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
3610 int handle_pte_fault(struct mm_struct
*mm
,
3611 struct vm_area_struct
*vma
, unsigned long address
,
3612 pte_t
*pte
, pmd_t
*pmd
, unsigned int flags
)
3618 if (!pte_present(entry
)) {
3619 if (pte_none(entry
)) {
3621 if (likely(vma
->vm_ops
->fault
))
3622 return do_linear_fault(mm
, vma
, address
,
3623 pte
, pmd
, flags
, entry
);
3625 return do_anonymous_page(mm
, vma
, address
,
3628 if (pte_file(entry
))
3629 return do_nonlinear_fault(mm
, vma
, address
,
3630 pte
, pmd
, flags
, entry
);
3631 return do_swap_page(mm
, vma
, address
,
3632 pte
, pmd
, flags
, entry
);
3635 if (pte_numa(entry
))
3636 return do_numa_page(mm
, vma
, address
, entry
, pte
, pmd
);
3638 ptl
= pte_lockptr(mm
, pmd
);
3640 if (unlikely(!pte_same(*pte
, entry
)))
3642 if (flags
& FAULT_FLAG_WRITE
) {
3643 if (!pte_write(entry
))
3644 return do_wp_page(mm
, vma
, address
,
3645 pte
, pmd
, ptl
, entry
);
3646 entry
= pte_mkdirty(entry
);
3648 entry
= pte_mkyoung(entry
);
3649 if (ptep_set_access_flags(vma
, address
, pte
, entry
, flags
& FAULT_FLAG_WRITE
)) {
3650 update_mmu_cache(vma
, address
, pte
);
3653 * This is needed only for protection faults but the arch code
3654 * is not yet telling us if this is a protection fault or not.
3655 * This still avoids useless tlb flushes for .text page faults
3658 if (flags
& FAULT_FLAG_WRITE
)
3659 flush_tlb_fix_spurious_fault(vma
, address
);
3662 pte_unmap_unlock(pte
, ptl
);
3667 * By the time we get here, we already hold the mm semaphore
3669 int handle_mm_fault(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
3670 unsigned long address
, unsigned int flags
)
3677 __set_current_state(TASK_RUNNING
);
3679 count_vm_event(PGFAULT
);
3680 mem_cgroup_count_vm_event(mm
, PGFAULT
);
3682 /* do counter updates before entering really critical section. */
3683 check_sync_rss_stat(current
);
3685 if (unlikely(is_vm_hugetlb_page(vma
)))
3686 return hugetlb_fault(mm
, vma
, address
, flags
);
3689 pgd
= pgd_offset(mm
, address
);
3690 pud
= pud_alloc(mm
, pgd
, address
);
3692 return VM_FAULT_OOM
;
3693 pmd
= pmd_alloc(mm
, pud
, address
);
3695 return VM_FAULT_OOM
;
3696 if (pmd_none(*pmd
) && transparent_hugepage_enabled(vma
)) {
3698 return do_huge_pmd_anonymous_page(mm
, vma
, address
,
3701 pmd_t orig_pmd
= *pmd
;
3705 if (pmd_trans_huge(orig_pmd
)) {
3706 unsigned int dirty
= flags
& FAULT_FLAG_WRITE
;
3708 if (pmd_numa(orig_pmd
))
3709 return do_huge_pmd_numa_page(mm
, vma
, address
,
3712 if (dirty
&& !pmd_write(orig_pmd
)) {
3713 ret
= do_huge_pmd_wp_page(mm
, vma
, address
, pmd
,
3716 * If COW results in an oom, the huge pmd will
3717 * have been split, so retry the fault on the
3718 * pte for a smaller charge.
3720 if (unlikely(ret
& VM_FAULT_OOM
))
3724 huge_pmd_set_accessed(mm
, vma
, address
, pmd
,
3733 return do_pmd_numa_page(mm
, vma
, address
, pmd
);
3736 * Use __pte_alloc instead of pte_alloc_map, because we can't
3737 * run pte_offset_map on the pmd, if an huge pmd could
3738 * materialize from under us from a different thread.
3740 if (unlikely(pmd_none(*pmd
)) &&
3741 unlikely(__pte_alloc(mm
, vma
, pmd
, address
)))
3742 return VM_FAULT_OOM
;
3743 /* if an huge pmd materialized from under us just retry later */
3744 if (unlikely(pmd_trans_huge(*pmd
)))
3747 * A regular pmd is established and it can't morph into a huge pmd
3748 * from under us anymore at this point because we hold the mmap_sem
3749 * read mode and khugepaged takes it in write mode. So now it's
3750 * safe to run pte_offset_map().
3752 pte
= pte_offset_map(pmd
, address
);
3754 return handle_pte_fault(mm
, vma
, address
, pte
, pmd
, flags
);
#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		pud_free(mm, new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
	if (pud_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pud_populate(mm, pud, new);
#else
	if (pgd_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
3810 int make_pages_present(unsigned long addr
, unsigned long end
)
3812 int ret
, len
, write
;
3813 struct vm_area_struct
* vma
;
3815 vma
= find_vma(current
->mm
, addr
);
3819 * We want to touch writable mappings with a write fault in order
3820 * to break COW, except for shared mappings because these don't COW
3821 * and we would not want to dirty them for nothing.
3823 write
= (vma
->vm_flags
& (VM_WRITE
| VM_SHARED
)) == VM_WRITE
;
3824 BUG_ON(addr
>= end
);
3825 BUG_ON(end
> vma
->vm_end
);
3826 len
= DIV_ROUND_UP(end
, PAGE_SIZE
) - addr
/PAGE_SIZE
;
3827 ret
= get_user_pages(current
, current
->mm
, addr
,
3828 len
, write
, 0, NULL
, NULL
);
3831 return ret
== len
? 0 : -EFAULT
;
3834 #if !defined(__HAVE_ARCH_GATE_AREA)
3836 #if defined(AT_SYSINFO_EHDR)
3837 static struct vm_area_struct gate_vma
;
3839 static int __init
gate_vma_init(void)
3841 gate_vma
.vm_mm
= NULL
;
3842 gate_vma
.vm_start
= FIXADDR_USER_START
;
3843 gate_vma
.vm_end
= FIXADDR_USER_END
;
3844 gate_vma
.vm_flags
= VM_READ
| VM_MAYREAD
| VM_EXEC
| VM_MAYEXEC
;
3845 gate_vma
.vm_page_prot
= __P101
;
3849 __initcall(gate_vma_init
);
3852 struct vm_area_struct
*get_gate_vma(struct mm_struct
*mm
)
3854 #ifdef AT_SYSINFO_EHDR
3861 int in_gate_area_no_mm(unsigned long addr
)
3863 #ifdef AT_SYSINFO_EHDR
3864 if ((addr
>= FIXADDR_USER_START
) && (addr
< FIXADDR_USER_END
))
3870 #endif /* __HAVE_ARCH_GATE_AREA */
3872 static int __follow_pte(struct mm_struct
*mm
, unsigned long address
,
3873 pte_t
**ptepp
, spinlock_t
**ptlp
)
3880 pgd
= pgd_offset(mm
, address
);
3881 if (pgd_none(*pgd
) || unlikely(pgd_bad(*pgd
)))
3884 pud
= pud_offset(pgd
, address
);
3885 if (pud_none(*pud
) || unlikely(pud_bad(*pud
)))
3888 pmd
= pmd_offset(pud
, address
);
3889 VM_BUG_ON(pmd_trans_huge(*pmd
));
3890 if (pmd_none(*pmd
) || unlikely(pmd_bad(*pmd
)))
3893 /* We cannot handle huge page PFN maps. Luckily they don't exist. */
3897 ptep
= pte_offset_map_lock(mm
, pmd
, address
, ptlp
);
3900 if (!pte_present(*ptep
))
3905 pte_unmap_unlock(ptep
, *ptlp
);
3910 static inline int follow_pte(struct mm_struct
*mm
, unsigned long address
,
3911 pte_t
**ptepp
, spinlock_t
**ptlp
)
3915 /* (void) is needed to make gcc happy */
3916 (void) __cond_lock(*ptlp
,
3917 !(res
= __follow_pte(mm
, address
, ptepp
, ptlp
)));
/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	int ret = -EINVAL;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return ret;

	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
	if (ret)
		return ret;
	*pfn = pte_pfn(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
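
/*
 * Example (illustrative sketch, not part of this file): code that was
 * handed a user virtual address inside a VM_IO/VM_PFNMAP vma can
 * translate it to a page frame number like this.  The caller must hold
 * mmap_sem; "uaddr" is a hypothetical user address.
 *
 *	unsigned long pfn;
 *	struct vm_area_struct *vma = find_vma(current->mm, uaddr);
 *
 *	if (!vma || vma->vm_start > uaddr)
 *		return -EFAULT;
 *	if (follow_pfn(vma, uaddr, &pfn))
 *		return -EFAULT;
 */
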
3950 #ifdef CONFIG_HAVE_IOREMAP_PROT
3951 int follow_phys(struct vm_area_struct
*vma
,
3952 unsigned long address
, unsigned int flags
,
3953 unsigned long *prot
, resource_size_t
*phys
)
3959 if (!(vma
->vm_flags
& (VM_IO
| VM_PFNMAP
)))
3962 if (follow_pte(vma
->vm_mm
, address
, &ptep
, &ptl
))
3966 if ((flags
& FOLL_WRITE
) && !pte_write(pte
))
3969 *prot
= pgprot_val(pte_pgprot(pte
));
3970 *phys
= (resource_size_t
)pte_pfn(pte
) << PAGE_SHIFT
;
3974 pte_unmap_unlock(ptep
, ptl
);
3979 int generic_access_phys(struct vm_area_struct
*vma
, unsigned long addr
,
3980 void *buf
, int len
, int write
)
3982 resource_size_t phys_addr
;
3983 unsigned long prot
= 0;
3984 void __iomem
*maddr
;
3985 int offset
= addr
& (PAGE_SIZE
-1);
3987 if (follow_phys(vma
, addr
, write
, &prot
, &phys_addr
))
3990 maddr
= ioremap_prot(phys_addr
, PAGE_SIZE
, prot
);
3992 memcpy_toio(maddr
+ offset
, buf
, len
);
3994 memcpy_fromio(buf
, maddr
+ offset
, len
);
/*
 * Access another process' address space as given in mm.  If non-NULL, use the
 * given task for page fault accounting.
 */
4005 static int __access_remote_vm(struct task_struct
*tsk
, struct mm_struct
*mm
,
4006 unsigned long addr
, void *buf
, int len
, int write
)
4008 struct vm_area_struct
*vma
;
4009 void *old_buf
= buf
;
4011 down_read(&mm
->mmap_sem
);
4012 /* ignore errors, just check how much was successfully transferred */
4014 int bytes
, ret
, offset
;
4016 struct page
*page
= NULL
;
4018 ret
= get_user_pages(tsk
, mm
, addr
, 1,
4019 write
, 1, &page
, &vma
);
4022 * Check if this is a VM_IO | VM_PFNMAP VMA, which
4023 * we can access using slightly different code.
4025 #ifdef CONFIG_HAVE_IOREMAP_PROT
4026 vma
= find_vma(mm
, addr
);
4027 if (!vma
|| vma
->vm_start
> addr
)
4029 if (vma
->vm_ops
&& vma
->vm_ops
->access
)
4030 ret
= vma
->vm_ops
->access(vma
, addr
, buf
,
4038 offset
= addr
& (PAGE_SIZE
-1);
4039 if (bytes
> PAGE_SIZE
-offset
)
4040 bytes
= PAGE_SIZE
-offset
;
4044 copy_to_user_page(vma
, page
, addr
,
4045 maddr
+ offset
, buf
, bytes
);
4046 set_page_dirty_lock(page
);
4048 copy_from_user_page(vma
, page
, addr
,
4049 buf
, maddr
+ offset
, bytes
);
4052 page_cache_release(page
);
4058 up_read(&mm
->mmap_sem
);
4060 return buf
- old_buf
;
/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
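
/*
 * Example (illustrative sketch, not part of this file): this is the
 * helper behind interfaces such as /proc/<pid>/environ.  A caller that
 * holds a reference on the target mm can copy a few bytes out of that
 * address space like this:
 *
 *	char buf[64];
 *	int copied = access_remote_vm(mm, start_addr, buf, sizeof(buf), 0);
 *
 * "copied" is the number of bytes actually transferred, which may be
 * smaller than the request if part of the range cannot be faulted in.
 */
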
/*
 * Access another process' address space.
 * Source/target buffer must be kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, int write)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
	mmput(mm);

	return ret;
}
4101 * Print the name of a VMA.
4103 void print_vma_addr(char *prefix
, unsigned long ip
)
4105 struct mm_struct
*mm
= current
->mm
;
4106 struct vm_area_struct
*vma
;
4109 * Do not print if we are in atomic
4110 * contexts (in exception stacks, etc.):
4112 if (preempt_count())
4115 down_read(&mm
->mmap_sem
);
4116 vma
= find_vma(mm
, ip
);
4117 if (vma
&& vma
->vm_file
) {
4118 struct file
*f
= vma
->vm_file
;
4119 char *buf
= (char *)__get_free_page(GFP_KERNEL
);
4123 p
= d_path(&f
->f_path
, buf
, PAGE_SIZE
);
4126 s
= strrchr(p
, '/');
4129 printk("%s%s[%lx+%lx]", prefix
, p
,
4131 vma
->vm_end
- vma
->vm_start
);
4132 free_page((unsigned long)buf
);
4135 up_read(&mm
->mmap_sem
);
4138 #ifdef CONFIG_PROVE_LOCKING
4139 void might_fault(void)
4142 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
4143 * holding the mmap_sem, this is safe because kernel memory doesn't
4144 * get paged out, therefore we'll never actually fault, and the
4145 * below annotations will generate false positives.
4147 if (segment_eq(get_fs(), KERNEL_DS
))
4152 * it would be nicer only to annotate paths which are not under
4153 * pagefault_disable, however that requires a larger audit and
4154 * providing helpers like get_user_atomic.
4156 if (!in_atomic() && current
->mm
)
4157 might_lock_read(¤t
->mm
->mmap_sem
);
4159 EXPORT_SYMBOL(might_fault
);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}
void clear_huge_page(struct page *page,
		     unsigned long addr, unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}
static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */