/*
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 */
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/swapctl.h>
#include <linux/iobuf.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
unsigned long max_mapnr = 0;
unsigned long num_physpages = 0;
void * high_memory = NULL;
struct page *highmem_start_page;
/*
 * We special-case the C-O-W ZERO_PAGE, because it's such
 * a common occurrence (no need to read the page to know
 * that it's zero - better for the cache and memory subsystem).
 */
static inline void copy_cow_page(struct page * from, struct page * to)
{
	if (from == ZERO_PAGE(to)) {
		clear_highpage(to);
		return;
	}
	copy_highpage(to, from);
}

mem_map_t * mem_map = NULL;
/*
 * oom() prints a message (so that the user knows why the process died),
 * and gives the process an untrappable SIGKILL.
 */
void oom(struct task_struct * task)
{
	printk("\nOut of memory for %s.\n", task->comm);
	force_sig(SIGKILL, task);
}
/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}

static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD; j++)
		free_one_pmd(pmd+j);
	pmd_free(pmd);
}
/* Low and high watermarks for page table cache.
   The system should try to have pgt_cache_water[0] <= cache elements <= pgt_cache_water[1]
 */
int pgt_cache_water[2] = { 25, 50 };

/* Returns the number of pages freed */
int check_pgt_cache(void)
{
	return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}
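
/*
 * A minimal sketch of the watermark policy behind check_pgt_cache().
 * The real work happens in the architecture's do_check_pgt_cache(); the
 * shape below is illustrative only, and free_one_cached_page() is a
 * made-up stand-in for the per-arch quicklist freeing:
 *
 *	int do_check_pgt_cache(int low, int high)
 *	{
 *		int freed = 0;
 *
 *		if (pgtable_cache_size > high) {
 *			do {
 *				freed += free_one_cached_page();
 *			} while (pgtable_cache_size > low);
 *		}
 *		return freed;
 *	}
 */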
/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way.
 */
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
	pgd_t * page_dir = mm->pgd;

	page_dir += first;
	do {
		free_one_pgd(page_dir);
		page_dir++;
	} while (--nr);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

#define PTE_TABLE_MASK	((PTRS_PER_PTE-1) * sizeof(pte_t))
#define PMD_TABLE_MASK	((PTRS_PER_PMD-1) * sizeof(pmd_t))
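
/*
 * Worked example: on a typical 32-bit configuration without PAE,
 * PTRS_PER_PTE is 1024 and sizeof(pte_t) is 4, so PTE_TABLE_MASK is
 * (1024-1)*4 = 0xffc.  "(unsigned long)src_pte & PTE_TABLE_MASK" is
 * therefore zero exactly when the pte pointer has wrapped to the start
 * of the next page-table page, which is how the copy loop below knows
 * it is time to advance to the next pmd entry.
 */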
/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 *
 * 08Jan98 Merged into one routine from several inline routines to reduce
 *         variable count and make things faster. -jj
 */
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pgd_t * src_pgd, * dst_pgd;
	unsigned long address = vma->vm_start;
	unsigned long end = vma->vm_end;
	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
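	/*
	 * "cow" is set only for private mappings that may be written:
	 * a VM_SHARED mapping is never copy-on-write, and a mapping
	 * without VM_MAYWRITE never needs write protection either.
	 */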

	src_pgd = pgd_offset(src, address)-1;
	dst_pgd = pgd_offset(dst, address)-1;

	for (;;) {
		pmd_t * src_pmd, * dst_pmd;

		src_pgd++; dst_pgd++;

		if (pgd_none(*src_pgd))
			goto skip_copy_pmd_range;
		if (pgd_bad(*src_pgd)) {
			pgd_ERROR(*src_pgd);
			pgd_clear(src_pgd);
skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
			if (!address || (address >= end))
				goto out;
			continue;
		}
		if (pgd_none(*dst_pgd)) {
			if (!pmd_alloc(dst_pgd, 0))
				goto nomem;
		}

		src_pmd = pmd_offset(src_pgd, address);
		dst_pmd = pmd_offset(dst_pgd, address);

		do {
			pte_t * src_pte, * dst_pte;

			if (pmd_none(*src_pmd))
				goto skip_copy_pte_range;
			if (pmd_bad(*src_pmd)) {
				pmd_ERROR(*src_pmd);
				pmd_clear(src_pmd);
skip_copy_pte_range:		address = (address + PMD_SIZE) & PMD_MASK;
				if (address >= end)
					goto out;
				goto cont_copy_pmd_range;
			}
			if (pmd_none(*dst_pmd)) {
				if (!pte_alloc(dst_pmd, 0))
					goto nomem;
			}

			src_pte = pte_offset(src_pmd, address);
			dst_pte = pte_offset(dst_pmd, address);

			do {
				pte_t pte = *src_pte;
				unsigned long page_nr;

				if (pte_none(pte))
					goto cont_copy_pte_range;
				if (!pte_present(pte)) {
					swap_duplicate(pte_to_swp_entry(pte));
					set_pte(dst_pte, pte);
					goto cont_copy_pte_range;
				}
				page_nr = pte_pagenr(pte);
				if (page_nr >= max_mapnr ||
				    PageReserved(mem_map+page_nr)) {
					set_pte(dst_pte, pte);
					goto cont_copy_pte_range;
				}

				/* If it's a COW mapping, write protect it both in the parent and the child */
				if (cow) {
					pte = pte_wrprotect(pte);
					set_pte(src_pte, pte);
				}

				/* If it's a shared mapping, mark it clean in the child */
				if (vma->vm_flags & VM_SHARED)
					pte = pte_mkclean(pte);
				set_pte(dst_pte, pte_mkold(pte));
				get_page(mem_map + page_nr);

cont_copy_pte_range:		address += PAGE_SIZE;
				if (address >= end)
					goto out;
				src_pte++;
				dst_pte++;
			} while ((unsigned long)src_pte & PTE_TABLE_MASK);

cont_copy_pmd_range:	src_pmd++;
			dst_pmd++;
		} while ((unsigned long)src_pmd & PMD_TABLE_MASK);
	}
out:
	return 0;

nomem:
	return -ENOMEM;
}
/*
 * Return indicates whether a page was freed so caller can adjust rss
 */
static inline int free_pte(pte_t page)
{
	if (pte_present(page)) {
		unsigned long nr = pte_pagenr(page);
		if (nr >= max_mapnr || PageReserved(mem_map+nr))
			return 0;
		/*
		 * free_page() used to be able to clear swap cache
		 * entries.  We may now have to do it manually.
		 */
		free_page_and_swap_cache(mem_map+nr);
		return 1;
	}
	swap_free(pte_to_swp_entry(page));
	return 0;
}

static inline void forget_pte(pte_t page)
{
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		free_pte(page);
	}
}
static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	int freed;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	freed = 0;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = *pte;
		pte_clear(pte);
		pte++;
		size--;
		if (pte_none(page))
			continue;
		freed += free_pte(page);
	}
	return freed;
}
static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	int freed;

	if (pgd_none(*dir))
		return 0;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return 0;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	freed = 0;
	do {
		freed += zap_pte_range(mm, pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return freed;
}
/*
 * remove user pages in a given range.
 */
void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int freed = 0;

	dir = pgd_offset(mm, address);

	/*
	 * This is a long-lived spinlock. That's fine.
	 * There's no contention, because the page table
	 * lock only protects against kswapd anyway, and
	 * even if kswapd happened to be looking at this
	 * process we _want_ it to get stuck.
	 */
	spin_lock(&mm->page_table_lock);
	do {
		freed += zap_pmd_range(mm, dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&mm->page_table_lock);

	/*
	 * Update rss for the mm_struct (not necessarily current->mm)
	 */
	if (mm->rss > 0) {
		mm->rss -= freed;
		if (mm->rss < 0)
			mm->rss = 0;
	}
}
/*
 * Do a quick page-table lookup for a single page.
 */
static struct page * follow_page(unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(current->mm, address);
	pmd = pmd_offset(pgd, address);
	if (pmd) {
		pte_t * pte = pte_offset(pmd, address);
		if (pte && pte_present(*pte))
			return pte_page(*pte);
	}

	printk(KERN_ERR "Missing page in follow_page\n");
	return NULL;
}

/*
 * Given a physical address, is there a useful struct page pointing to it?
 */
struct page * get_page_map(struct page *page)
{
	if (MAP_NR(page) >= max_mapnr)
		return NULL;
	if (page == ZERO_PAGE(page))
		return NULL;
	if (PageReserved(page))
		return NULL;
	return page;
}
/*
 * Force in an entire range of pages from the current process's user VA,
 * and pin and lock the pages for IO.
 */

#define dprintk(x...)
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	unsigned long		ptr, end;
	int			err;
	struct mm_struct *	mm;
	struct vm_area_struct *	vma = 0;
	struct page *		map;
	int			i;
	int			doublepage = 0;

	/* Make sure the iobuf is not already mapped somewhere. */
	if (iobuf->nr_pages)
		return -EINVAL;

	mm = current->mm;
	dprintk ("map_user_kiobuf: begin\n");

	ptr = va & PAGE_MASK;
	end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
	err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
	if (err)
		return err;

 repeat:
	down(&mm->mmap_sem);

	err = -EFAULT;
	iobuf->locked = 1;
	iobuf->offset = va & ~PAGE_MASK;
	iobuf->length = len;

	i = 0;

	/*
	 * First of all, try to fault in all of the necessary pages
	 */
	while (ptr < end) {
		if (!vma || ptr >= vma->vm_end) {
			vma = find_vma(current->mm, ptr);
			if (!vma)
				goto out_unlock;
		}
		if (handle_mm_fault(current, vma, ptr, (rw==READ)) <= 0)
			goto out_unlock;
		spin_lock(&mm->page_table_lock);
		map = follow_page(ptr);
		if (!map) {
			spin_unlock(&mm->page_table_lock);
			dprintk (KERN_ERR "Missing page in map_user_kiobuf\n");
			goto out_unlock;
		}
		map = get_page_map(map);
		if (map) {
			if (TryLockPage(map))
				goto retry;
			atomic_inc(&map->count);
		}
		spin_unlock(&mm->page_table_lock);
		iobuf->maplist[i] = map;
		iobuf->nr_pages = ++i;

		ptr += PAGE_SIZE;
	}

	up(&mm->mmap_sem);
	dprintk ("map_user_kiobuf: end OK\n");
	return 0;

 out_unlock:
	up(&mm->mmap_sem);
	unmap_kiobuf(iobuf);
	dprintk ("map_user_kiobuf: end %d\n", err);
	return err;

 retry:
	/*
	 * Undo the locking so far, wait on the page we got to, and try again.
	 */
	spin_unlock(&mm->page_table_lock);
	unmap_kiobuf(iobuf);
	up(&mm->mmap_sem);

	/*
	 * Did the release also unlock the page we got stuck on?
	 */
	if (!PageLocked(map)) {
		/* If so, we may well have the page mapped twice
		 * in the IO address range.  Bad news.  Of
		 * course, it _might_ just be a coincidence, but
		 * if it happens more than once, chances are we
		 * have a double-mapped page. */
		if (++doublepage >= 3)
			return -EINVAL;
	}

	/*
	 * Try again from the top...
	 */
	wait_on_page(map);
	ptr = va & PAGE_MASK;
	goto repeat;
}
/*
 * Unmap all of the pages referenced by a kiobuf.  We release the pages,
 * and unlock them if they were locked.
 */
void unmap_kiobuf (struct kiobuf *iobuf)
{
	int i;
	struct page *map;

	for (i = 0; i < iobuf->nr_pages; i++) {
		map = iobuf->maplist[i];

		if (map && iobuf->locked) {
			UnlockPage(map);
			__free_page(map);
		}
	}

	iobuf->nr_pages = 0;
	iobuf->locked = 0;
}
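
/*
 * Typical calling pattern, as a sketch only (alloc_kiovec() and
 * free_kiovec() come from the kiobuf code in fs/iobuf.c; user_addr and
 * len are whatever the caller is transferring):
 *
 *	struct kiobuf *iobuf;
 *
 *	if (alloc_kiovec(1, &iobuf))
 *		return -ENOMEM;
 *	if (!map_user_kiobuf(READ, iobuf, user_addr, len)) {
 *		... do the actual I/O against iobuf->maplist ...
 *		unmap_kiobuf(iobuf);
 *	}
 *	free_kiovec(1, &iobuf);
 */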
static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
		pte_t oldpage = *pte;
		set_pte(pte, zero_pte);
		forget_pte(oldpage);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
static inline int zeromap_pmd_range(pmd_t * pmd, unsigned long address,
                                    unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		zeromap_pte_range(pte, address, end - address, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = address;
	unsigned long end = address + size;

	dir = pgd_offset(current->mm, address);
	flush_cache_range(current->mm, beg, end);
	do {
		pmd_t *pmd = pmd_alloc(dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = zeromap_pmd_range(pmd, address, end - address, prot);
		if (error)
			break;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;
	unsigned long mapnr;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		pte_clear(pte);

		mapnr = MAP_NR(__va(phys_addr));
		if (mapnr >= max_mapnr || PageReserved(mem_map+mapnr))
			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
static inline int remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;

	phys_addr -= from;
	dir = pgd_offset(current->mm, from);
	flush_cache_range(current->mm, beg, end);
	do {
		pmd_t *pmd = pmd_alloc(dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = remap_pmd_range(pmd, from, end - from, phys_addr + from, prot);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (from && (from < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}
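
/*
 * The classic user of remap_page_range() is a driver's mmap() method
 * mapping device memory into a vma; a hedged sketch (the physical base
 * "phys" is whatever the device provides, foo_mmap() is made up):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		if (remap_page_range(vma->vm_start, phys, size, vma->vm_page_prot))
 *			return -EAGAIN;
 *		return 0;
 *	}
 */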
/*
 * This routine is used to map a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 */
struct page * put_dirty_page(struct task_struct * tsk, struct page *page,
	unsigned long address)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte;

	if (page_count(page) != 1)
		printk("mem_map disagrees with %p at %08lx\n", page, address);
	pgd = pgd_offset(tsk->mm, address);
	pmd = pmd_alloc(pgd, address);
	if (!pmd) {
		__free_page(page);
		force_sig(SIGKILL, tsk);
		return 0;
	}
	pte = pte_alloc(pmd, address);
	if (!pte) {
		__free_page(page);
		force_sig(SIGKILL, tsk);
		return 0;
	}
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		__free_page(page);
		return 0;
	}
	flush_page_to_ram(page);
	set_pte(pte, pte_mkwrite(page_pte_prot(page, PAGE_COPY)));
	/* no need for flush_tlb */
	return page;
}
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with the page table read-lock held, and need to exit without
 * it.
 */
static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, pte_t *page_table, pte_t pte)
{
	unsigned long map_nr;
	struct page *old_page, *new_page;

	map_nr = pte_pagenr(pte);
	if (map_nr >= max_mapnr)
		goto bad_wp_page;
	old_page = mem_map + map_nr;

	/*
	 * We can avoid the copy if:
	 * - we're the only user (count == 1)
	 * - the only other user is the swap cache,
	 *   and the only swap cache user is itself,
	 *   in which case we can remove the page
	 *   from the swap cache.
	 */
	switch (page_count(old_page)) {
	case 2:
		if (!PageSwapCache(old_page))
			break;
		if (swap_count(old_page) != 1)
			break;
		delete_from_swap_cache(old_page);
		/* FallThrough */
	case 1:
		flush_cache_page(vma, address);
		set_pte(page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
		flush_tlb_page(vma, address);
		spin_unlock(&tsk->mm->page_table_lock);
		return 1;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	spin_unlock(&tsk->mm->page_table_lock);
	new_page = get_free_highpage(GFP_HIGHUSER);
	if (!new_page)
		return -1;
	spin_lock(&tsk->mm->page_table_lock);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	if (pte_val(*page_table) == pte_val(pte)) {
		if (PageReserved(old_page))
			++vma->vm_mm->rss;
		copy_cow_page(old_page, new_page);
		flush_page_to_ram(new_page);
		flush_cache_page(vma, address);
		set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
		flush_tlb_page(vma, address);

		/* Free the old page.. */
		new_page = old_page;
	}
	spin_unlock(&tsk->mm->page_table_lock);
	__free_page(new_page);
	return 1;

bad_wp_page:
	spin_unlock(&tsk->mm->page_table_lock);
	printk("do_wp_page: bogus page at address %08lx (nr %ld)\n",address,map_nr);
	return -1;
}
/*
 * This function zeroes out partial mmap'ed pages at truncation time..
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
	unsigned long offset;
	struct page *page;
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;

	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		return;
	if (pgd_bad(*page_dir)) {
		pgd_ERROR(*page_dir);
		pgd_clear(page_dir);
		return;
	}
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		return;
	if (pmd_bad(*page_middle)) {
		pmd_ERROR(*page_middle);
		pmd_clear(page_middle);
		return;
	}
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		return;
	flush_cache_page(vma, address);
	page = pte_page(pte);
	if (page-mem_map >= max_mapnr)
		return;
	offset = address & ~PAGE_MASK;
	memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
}
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
void vmtruncate(struct inode * inode, unsigned long offset)
{
	unsigned long partial, pgoff;
	struct vm_area_struct * mpnt;

	truncate_inode_pages(inode, offset);
	spin_lock(&inode->i_shared_lock);
	if (!inode->i_mmap)
		goto out_unlock;

	partial = offset & (PAGE_CACHE_SIZE - 1);
	pgoff = offset >> PAGE_CACHE_SHIFT;
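	/*
	 * Example: with 4K pages, truncating to offset 0x1800 gives
	 * pgoff = 1 (the first page index that is no longer fully
	 * backed by the file) and partial = 0x800 (the bytes of that
	 * last page which are still valid).
	 */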

	mpnt = inode->i_mmap;
	do {
		struct mm_struct *mm = mpnt->vm_mm;
		unsigned long start = mpnt->vm_start;
		unsigned long end = mpnt->vm_end;
		unsigned long len = end - start;
		unsigned long diff;

		/* mapping wholly truncated? */
		if (mpnt->vm_pgoff >= pgoff) {
			flush_cache_range(mm, start, end);
			zap_page_range(mm, start, len);
			flush_tlb_range(mm, start, end);
			continue;
		}

		/* mapping wholly unaffected? */
		len = len >> PAGE_SHIFT;
		diff = pgoff - mpnt->vm_pgoff;
		if (diff >= len)
			continue;

		/* Ok, partially affected.. */
		start += diff << PAGE_SHIFT;
		len = (len - diff) << PAGE_SHIFT;
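		/*
		 * Example: a vma with vm_pgoff 0 covering 10 pages,
		 * truncated at pgoff 4, gives diff = 4; start advances
		 * by 4 pages and len shrinks to the remaining 6 pages,
		 * so only the now-invalid tail of the mapping is zapped.
		 */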
		if (start & ~PAGE_MASK) {
			partial_clear(mpnt, start);
			start = (start + ~PAGE_MASK) & PAGE_MASK;
		}
		flush_cache_range(mm, start, end);
		zap_page_range(mm, start, len);
		flush_tlb_range(mm, start, end);
	} while ((mpnt = mpnt->vm_next_share) != NULL);

out_unlock:
	spin_unlock(&inode->i_shared_lock);
}
/*
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 */
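
/*
 * Worked example of the alignment below: with page_cluster = 4, an
 * entry at swap offset 0x1234 is rounded down to 0x1230, and up to
 * 1 << 4 = 16 consecutive entries are read starting there, including
 * the one that was originally requested.
 */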
void swapin_readahead(swp_entry_t entry)
{
	int i;
	struct page *new_page;
	unsigned long offset = SWP_OFFSET(entry);
	struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;

	offset = (offset >> page_cluster) << page_cluster;

	i = 1 << page_cluster;
	do {
		/* Don't read-ahead past the end of the swap area */
		if (offset >= swapdev->max)
			break;
		/* Don't block on I/O for read-ahead */
		if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster)
			break;
		/* Don't read in bad or busy pages */
		if (!swapdev->swap_map[offset])
			break;
		if (swapdev->swap_map[offset] == SWAP_MAP_BAD)
			break;

		/* Ok, do the async read-ahead now */
		new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
		if (new_page != NULL)
			__free_page(new_page);
		offset++;
	} while (--i);
}
static int do_swap_page(struct task_struct * tsk,
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, swp_entry_t entry, int write_access)
{
	pte_t pte;
	struct page *page = lookup_swap_cache(entry);

	if (!page) {
		swapin_readahead(entry);
		page = read_swap_cache(entry);
		if (!page)
			return -1;
	}

	flush_page_to_ram(page);

	pte = mk_pte(page, vma->vm_page_prot);

	set_bit(PG_swap_entry, &page->flags);
	if (write_access && !is_page_shared(page)) {
		delete_from_swap_cache(page);
		page = replace_with_highmem(page);
		pte = mk_pte(page, vma->vm_page_prot);
		pte = pte_mkwrite(pte_mkdirty(pte));
	}

	set_pte(page_table, pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	return 1;	/* Minor fault */
}
/*
 * This only needs the MM semaphore
 */
static int do_anonymous_page(struct task_struct * tsk, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
	struct page *page = NULL;
	pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));

	if (write_access) {
		page = get_free_highpage(GFP_HIGHUSER);
		if (!page)
			return -1;
		if (PageHighMem(page))
			clear_highpage(page);
		else
			clear_page(page_address(page));
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		flush_page_to_ram(page);
	}

	set_pte(page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, entry);
	return 1;	/* Minor fault */
}
/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore and the kernel lock held.
 * We need to release the kernel lock as soon as possible..
 */
static int do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access, pte_t *page_table)
{
	struct page * new_page;
	pte_t entry;

	if (!vma->vm_ops || !vma->vm_ops->nopage)
		return do_anonymous_page(tsk, vma, page_table, write_access, address);

	/*
	 * The third argument is "no_share", which tells the low-level code
	 * to copy, not share the page even if sharing is possible.  It's
	 * essentially an early COW detection.
	 */
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
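	/*
	 * For ordinary file-backed mappings the nopage hook is normally
	 * the page cache's filemap_nopage(); anonymous memory never gets
	 * this far because of the vm_ops check above.
	 */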
	if (new_page == NULL)
		return 0;	/* SIGBUS - but we _really_ should know whether it is OOM or SIGBUS */
	if (new_page == (struct page *)-1)
		return -1;	/* OOM */

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	flush_page_to_ram(new_page);
	entry = mk_pte(new_page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (page_count(new_page) > 1 &&
		   !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	set_pte(page_table, entry);

	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	return 2;	/* Major fault */
}
/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * Note the "page_table_lock". It is to protect against kswapd removing
 * pages from under us. Note that kswapd only ever _removes_ pages, never
 * adds them. As such, once we have noticed that the page is not present,
 * we can drop the lock early.
 *
 * The adding of pages is protected by the MM semaphore (which we hold),
 * so we don't need to worry about a page suddenly being added into
 * our VM.
 */
static inline int handle_pte_fault(struct task_struct *tsk,
	struct vm_area_struct * vma, unsigned long address,
	int write_access, pte_t * pte)
{
	pte_t entry;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry))
			return do_no_page(tsk, vma, address, write_access, pte);
		return do_swap_page(tsk, vma, address, pte, pte_to_swp_entry(entry), write_access);
	}

	/*
	 * Ok, the entry was present, we need to get the page table
	 * lock to synchronize with kswapd, and verify that the entry
	 * didn't change from under us..
	 */
	spin_lock(&tsk->mm->page_table_lock);
	if (pte_val(entry) == pte_val(*pte)) {
		if (write_access) {
			if (!pte_write(entry))
				return do_wp_page(tsk, vma, address, pte, entry);

			entry = pte_mkdirty(entry);
		}
		entry = pte_mkyoung(entry);
		set_pte(pte, entry);
		flush_tlb_page(vma, address);
		update_mmu_cache(vma, address, entry);
	}
	spin_unlock(&tsk->mm->page_table_lock);
	return 1;
}
/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	int ret = -1;
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	pmd = pmd_alloc(pgd, address);
	if (pmd) {
		pte_t * pte = pte_alloc(pmd, address);
		if (pte)
			ret = handle_pte_fault(tsk, vma, address, write_access, pte);
	}
	return ret;
}
/*
 * Simplistic page force-in..
 */
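
/*
 * Callers such as mlock() typically use this to pre-fault every page in
 * a range, so the pages are resident by the time the system call returns.
 */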
int make_pages_present(unsigned long addr, unsigned long end)
{
	int write;
	struct task_struct *tsk = current;
	struct vm_area_struct * vma;

	vma = find_vma(tsk->mm, addr);
	write = (vma->vm_flags & VM_WRITE) != 0;
	do {
		if (handle_mm_fault(tsk, vma, addr, write) < 0)
			return -1;
		addr += PAGE_SIZE;
	} while (addr < end);
	return 0;
}