/*
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/swapctl.h>
#include <linux/iobuf.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

unsigned long max_mapnr;
unsigned long num_physpages;
struct page *highmem_start_page;

/*
 * We special-case the C-O-W ZERO_PAGE, because it's such
 * a common occurrence (no need to read the page to know
 * that it's zero - better for the cache and memory subsystem).
 */
static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
{
	if (from == ZERO_PAGE(address)) {
		clear_user_highpage(to, address);
		return;
	}
	copy_user_highpage(to, from, address);
}

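/*
 * mem_map is the global array of struct page descriptors, one per
 * physical page frame.  It is allocated and filled in by the boot-time
 * memory setup code (free_area_init and friends), not in this file.
 */
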
mem_map_t * mem_map = NULL;

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	/* ... pmd_none/pmd_bad sanity checks ... */
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}

static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	/* ... pgd_none/pgd_bad sanity checks ... */
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD ; j++)
		free_one_pmd(pmd+j);
	pmd_free(pmd);
}

/* Low and high watermarks for page table cache.
   The system should try to have pgt_cache_water[0] <= cache elements <= pgt_cache_water[1]
 */
int pgt_cache_water[2] = { 25, 50 };

/* Returns the number of pages freed */
int check_pgt_cache(void)
{
	return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}

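/*
 * check_pgt_cache() is what trims the cache back under the high
 * watermark: it is called from clear_page_tables() below and, on most
 * architectures, from the idle loop; once the cache has grown past
 * pgt_cache_water[1] entries, do_check_pgt_cache() frees cached page
 * tables back down to pgt_cache_water[0].
 */
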
/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way.
 */
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
	pgd_t * page_dir = mm->pgd;

	page_dir += first;
	do {
		free_one_pgd(page_dir);
		page_dir++;
	} while (--nr);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

#define PTE_TABLE_MASK	((PTRS_PER_PTE-1) * sizeof(pte_t))
#define PMD_TABLE_MASK	((PTRS_PER_PMD-1) * sizeof(pmd_t))

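/*
 * These masks isolate the offset of a pte/pmd pointer within its table
 * page; copy_page_range() below uses them to detect when an incremented
 * table pointer has wrapped around to the start of the next table.
 */
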
/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 *
 * 08Jan98 Merged into one routine from several inline routines to reduce
 *         variable count and make things faster. -jj
 */
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pgd_t * src_pgd, * dst_pgd;
	unsigned long address = vma->vm_start;
	unsigned long end = vma->vm_end;
	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	src_pgd = pgd_offset(src, address)-1;
	dst_pgd = pgd_offset(dst, address)-1;

	for (;;) {
		pmd_t * src_pmd, * dst_pmd;

		src_pgd++; dst_pgd++;

		if (pgd_none(*src_pgd))
			goto skip_copy_pmd_range;
		if (pgd_bad(*src_pgd)) {
			pgd_ERROR(*src_pgd);
			pgd_clear(src_pgd);
skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
			if (!address || (address >= end))
				goto out;
			continue;
		}
		if (pgd_none(*dst_pgd)) {
			if (!pmd_alloc(dst_pgd, 0))
				goto nomem;
		}

		src_pmd = pmd_offset(src_pgd, address);
		dst_pmd = pmd_offset(dst_pgd, address);

		do {
			pte_t * src_pte, * dst_pte;

			if (pmd_none(*src_pmd))
				goto skip_copy_pte_range;
			if (pmd_bad(*src_pmd)) {
				pmd_ERROR(*src_pmd);
				pmd_clear(src_pmd);
skip_copy_pte_range:		address = (address + PMD_SIZE) & PMD_MASK;
				if (address >= end)
					goto out;
				goto cont_copy_pmd_range;
			}
			if (pmd_none(*dst_pmd)) {
				if (!pte_alloc(dst_pmd, 0))
					goto nomem;
			}

			src_pte = pte_offset(src_pmd, address);
			dst_pte = pte_offset(dst_pmd, address);

			do {
				pte_t pte = *src_pte;
				struct page *ptepage;

				if (pte_none(pte))
					goto cont_copy_pte_range;
				if (!pte_present(pte)) {
					swap_duplicate(pte_to_swp_entry(pte));
					set_pte(dst_pte, pte);
					goto cont_copy_pte_range;
				}
				ptepage = pte_page(pte);
				if ((!VALID_PAGE(ptepage)) ||
				    PageReserved(ptepage)) {
					set_pte(dst_pte, pte);
					goto cont_copy_pte_range;
				}

				/* If it's a COW mapping, write protect it both in the parent and the child */
				if (cow) {
					pte = pte_wrprotect(pte);
					set_pte(src_pte, pte);
				}

				/* If it's a shared mapping, mark it clean in the child */
				if (vma->vm_flags & VM_SHARED)
					pte = pte_mkclean(pte);
				set_pte(dst_pte, pte_mkold(pte));
				/* ... reference counting on ptepage for the child ... */

cont_copy_pte_range:		address += PAGE_SIZE;
				if (address >= end)
					goto out;
				src_pte++;
				dst_pte++;
			} while ((unsigned long)src_pte & PTE_TABLE_MASK);

cont_copy_pmd_range:	src_pmd++;
			dst_pmd++;
		} while ((unsigned long)src_pmd & PMD_TABLE_MASK);
	}
out:
	return 0;

nomem:
	return -ENOMEM;
}

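/*
 * Note on the 'cow' test above: a vma is copy-on-write when it is
 * private (VM_SHARED clear) yet could become writable (VM_MAYWRITE
 * set).  That is why both the parent and the child pte are
 * write-protected during the copy, so the first write in either
 * process faults into do_wp_page().
 */
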
/*
 * Return indicates whether a page was freed so caller can adjust rss
 */
static inline int free_pte(pte_t page)
{
	if (pte_present(page)) {
		struct page *ptpage = pte_page(page);
		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
			return 0;
		/*
		 * free_page() used to be able to clear swap cache
		 * entries.  We may now have to do it manually.
		 */
		free_page_and_swap_cache(ptpage);
		return 1;
	}
	swap_free(pte_to_swp_entry(page));
	return 0;
}

static inline void forget_pte(pte_t page)
{
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		free_pte(page);
	}
}

static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	int freed;

	/* ... pmd_none/pmd_bad sanity checks ... */
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	freed = 0;
	/* ... walk the ptes in the range, clearing each one and ... */
		freed += free_pte(page);
	/* ... */
	return freed;
}

static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	int freed;

	/* ... pgd_none/pgd_bad sanity checks ... */
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	freed = 0;
	do {
		freed += zap_pte_range(mm, pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return freed;
}

/*
 * remove user pages in a given range.
 */
void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int freed = 0;

	dir = pgd_offset(mm, address);

	/*
	 * This is a long-lived spinlock. That's fine.
	 * There's no contention, because the page table
	 * lock only protects against kswapd anyway, and
	 * even if kswapd happened to be looking at this
	 * process we _want_ it to get stuck.
	 */
	spin_lock(&mm->page_table_lock);
	do {
		freed += zap_pmd_range(mm, dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&mm->page_table_lock);

	/*
	 * Update rss for the mm_struct (not necessarily current->mm)
	 * Notice that rss is an unsigned long.
	 */
	if (mm->rss > freed)
		mm->rss -= freed;
	else
		mm->rss = 0;
}

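/*
 * vmtruncate() below, as well as the munmap() and exit paths in
 * mm/mmap.c, all funnel through zap_page_range() to tear down a linear
 * range of user mappings.
 */
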
/*
 * Do a quick page-table lookup for a single page.
 */
static struct page * follow_page(unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(current->mm, address);
	pmd = pmd_offset(pgd, address);
	if (pmd) {
		pte_t * pte = pte_offset(pmd, address);
		if (pte && pte_present(*pte))
			return pte_page(*pte);
	}
	return NULL;
}

/*
 * Given a physical address, is there a useful struct page pointing to
 * it?  This may become more complex in the future if we start dealing
 * with IO-aperture pages in kiobufs.
 */
static inline struct page * get_page_map(struct page *page)
{
	if (!VALID_PAGE(page))
		return 0;
	return page;
}

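/*
 * Both helpers above are only used by the kiobuf mapping code below;
 * follow_page() in particular assumes the caller already holds
 * mm->page_table_lock, so the page tables it walks cannot be freed
 * underneath it.
 */
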
/*
 * Force in an entire range of pages from the current process's user VA,
 * and pin them in physical memory.
 */

#define dprintk(x...)
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	unsigned long		ptr, end;
	int			err;
	struct mm_struct *	mm;
	struct vm_area_struct *	vma = 0;
	struct page *		map;
	int			i;
	int			datain = (rw == READ);

	/* Make sure the iobuf is not already mapped somewhere. */
	if (iobuf->nr_pages)
		return -EINVAL;

	mm = current->mm;
	dprintk ("map_user_kiobuf: begin\n");

	ptr = va & PAGE_MASK;
	end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
	err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
	if (err)
		return err;

	down(&mm->mmap_sem);

	err = -EFAULT;
	iobuf->offset = va & ~PAGE_MASK;
	iobuf->length = len;
	i = 0;

	/*
	 * First of all, try to fault in all of the necessary pages
	 */
	while (ptr < end) {
		if (!vma || ptr >= vma->vm_end) {
			vma = find_vma(current->mm, ptr);
			if (!vma)
				goto out_unlock;
			if (vma->vm_start > ptr) {
				if (!(vma->vm_flags & VM_GROWSDOWN))
					goto out_unlock;
				if (expand_stack(vma, ptr))
					goto out_unlock;
			}
			if (((datain) && (!(vma->vm_flags & VM_WRITE))) ||
					(!(vma->vm_flags & VM_READ))) {
				err = -EACCES;
				goto out_unlock;
			}
		}
		if (handle_mm_fault(current->mm, vma, ptr, datain) <= 0)
			goto out_unlock;
		spin_lock(&mm->page_table_lock);
		map = follow_page(ptr);
		if (!map) {
			spin_unlock(&mm->page_table_lock);
			dprintk (KERN_ERR "Missing page in map_user_kiobuf\n");
			goto out_unlock;
		}
		map = get_page_map(map);
		if (map)
			atomic_inc(&map->count);
		else
			printk (KERN_INFO "Mapped page missing [%d]\n", i);
		spin_unlock(&mm->page_table_lock);
		iobuf->maplist[i] = map;
		iobuf->nr_pages = ++i;

		ptr += PAGE_SIZE;
	}

	up(&mm->mmap_sem);
	dprintk ("map_user_kiobuf: end OK\n");
	return 0;

 out_unlock:
	up(&mm->mmap_sem);
	unmap_kiobuf(iobuf);
	dprintk ("map_user_kiobuf: end %d\n", err);
	return err;
}

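/*
 * Typical use (a sketch, not code from this file): a driver doing raw
 * I/O pins the user buffer around the transfer, roughly
 *
 *	struct kiobuf *iobuf;
 *
 *	if (alloc_kiovec(1, &iobuf))
 *		return -ENOMEM;
 *	err = map_user_kiobuf(rw, iobuf, user_addr, count);
 *	if (!err) {
 *		... do the transfer against iobuf->maplist ...
 *		unmap_kiobuf(iobuf);
 *	}
 *	free_kiovec(1, &iobuf);
 *
 * alloc_kiovec()/free_kiovec() live in fs/iobuf.c, not here.
 */
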
/*
 * Unmap all of the pages referenced by a kiobuf.  We release the pages,
 * and unlock them if they were locked.
 */
void unmap_kiobuf (struct kiobuf *iobuf)
{
	int i;
	struct page *map;

	for (i = 0; i < iobuf->nr_pages; i++) {
		map = iobuf->maplist[i];
		/* ... unlock the page if it was locked, then drop our reference ... */
	}

	iobuf->nr_pages = 0;
	iobuf->locked = 0;
}

/*
 * Lock down all of the pages of a kiovec for IO.
 *
 * If any page is mapped twice in the kiovec, we return the error -EINVAL.
 *
 * The optional wait parameter causes the lock call to block until all
 * pages can be locked if set.  If wait==0, the lock operation is
 * aborted if any locked pages are found and -EAGAIN is returned.
 */
int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
{
	struct kiobuf *iobuf;
	int i, j;
	struct page *page, **ppage;
	int doublepage = 0;

 repeat:
	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];
		/* ... skip iobufs that are already locked ... */
		ppage = iobuf->maplist;
		for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
			page = *ppage;
			if (TryLockPage(page))
				goto retry;
		}
	}
	return 0;

	/*
	 * We couldn't lock one of the pages.  Undo the locking so far,
	 * wait on the page we got to, and try again.
	 */
 retry:
	unlock_kiovec(nr, iovec);
	if (!wait)
		return -EAGAIN;

	/*
	 * Did the release also unlock the page we got stuck on?
	 */
	if (!PageLocked(page)) {
		/*
		 * If so, we may well have the page mapped twice
		 * in the IO address range.  Bad news.  Of
		 * course, it _might_ just be a coincidence,
		 * but if it happens more than once, chances
		 * are we have a double-mapped page.
		 */
		if (++doublepage >= 3)
			return -EINVAL;
	}
	/* ... otherwise wait for the page to become unlocked, then retry ... */
	goto repeat;
}

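/*
 * The doublepage counter is a heuristic: finding the page we blocked on
 * already unlocked several times in a row after backing out strongly
 * suggests it really is mapped twice within the kiovec, which is when
 * the -EINVAL promised above is returned.
 */
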
/*
 * Unlock all of the pages of a kiovec after IO.
 */
int unlock_kiovec(int nr, struct kiobuf *iovec[])
{
	struct kiobuf *iobuf;
	int i, j;
	struct page *page, **ppage;

	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];
		/* ... skip iobufs that were never locked ... */
		ppage = iobuf->maplist;
		for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
			/* ... UnlockPage() each mapped page ... */
		}
	}
	return 0;
}

static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
		pte_t oldpage = *pte;
		set_pte(pte, zero_pte);
		forget_pte(oldpage);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline int zeromap_pmd_range(pmd_t * pmd, unsigned long address,
                                    unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		zeromap_pte_range(pte, address, end - address, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = address;
	unsigned long end = address + size;

	dir = pgd_offset(current->mm, address);
	flush_cache_range(current->mm, beg, end);
	do {
		pmd_t *pmd = pmd_alloc(dir, address);
		/* ... bail out with -ENOMEM if the pmd could not be allocated ... */
		error = zeromap_pmd_range(pmd, address, end - address, prot);
		if (error)
			break;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}

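/*
 * zeromap_page_range() wires a whole user range to the shared, read-only
 * ZERO_PAGE.  The first real write to any of those pages then faults
 * into do_wp_page(), which gives the process its own zeroed copy, just
 * like an anonymous mapping that has so far only been read.
 */
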
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page *page;
		pte_t oldpage = *pte;

		page = virt_to_page(__va(phys_addr));
		if ((!VALID_PAGE(page)) || PageReserved(page))
			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline int remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;

	dir = pgd_offset(current->mm, from);
	flush_cache_range(current->mm, beg, end);
	do {
		pmd_t *pmd = pmd_alloc(dir, from);
		/* ... bail out with -ENOMEM if the pmd could not be allocated ... */
		error = remap_pmd_range(pmd, from, end - from, phys_addr + from, prot);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (from && (from < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}

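/*
 * remap_page_range() is what driver mmap() methods call to map device
 * or reserved memory into user space.  Note the test in
 * remap_pte_range(): a pte is only installed when the target frame is
 * invalid or PageReserved, so ordinary RAM pages are deliberately
 * skipped.
 */
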
/*
 * Establish a new mapping:
 *  - flush the old one
 *  - update the page tables
 *  - inform the TLB about the new one
 */
static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
{
	flush_tlb_page(vma, address);
	set_pte(page_table, entry);
	update_mmu_cache(vma, address, entry);
}

static inline void break_cow(struct vm_area_struct * vma, struct page * old_page, struct page * new_page, unsigned long address,
		pte_t *page_table)
{
	copy_cow_page(old_page,new_page,address);
	flush_page_to_ram(new_page);
	flush_cache_page(vma, address);
	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
}

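/*
 * break_cow() is the tail of the copy-on-write path: by the time it
 * runs, copy_cow_page() has already filled new_page, so all that is
 * left is to flush and install a writable, dirty pte pointing at the
 * new page.
 */
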
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with the page table read-lock held, and need to exit without
 * it.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, pte_t *page_table, pte_t pte)
{
	struct page *old_page, *new_page;

	old_page = pte_page(pte);
	if (!VALID_PAGE(old_page))
		goto bad_wp_page;

	/*
	 * We can avoid the copy if:
	 * - we're the only user (count == 1)
	 * - the only other user is the swap cache,
	 *   and the only swap cache user is itself,
	 *   in which case we can remove the page
	 *   from the swap cache.
	 */
	switch (page_count(old_page)) {
	case 2:
		/*
		 * Lock the page so that no one can look it up from
		 * the swap cache, grab a reference and start using it.
		 * Can not do lock_page, holding page_table_lock.
		 */
		if (!PageSwapCache(old_page) || TryLockPage(old_page))
			break;
		if (is_page_shared(old_page)) {
			UnlockPage(old_page);
			break;
		}
		delete_from_swap_cache_nolock(old_page);
		UnlockPage(old_page);
		/* FallThrough */
	case 1:
		flush_cache_page(vma, address);
		establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
		spin_unlock(&mm->page_table_lock);
		return 1;	/* Minor fault */
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	spin_unlock(&mm->page_table_lock);
	new_page = page_cache_alloc();
	if (!new_page)
		return -1;
	spin_lock(&mm->page_table_lock);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	if (pte_val(*page_table) == pte_val(pte)) {
		if (PageReserved(old_page))
			++mm->rss;
		break_cow(vma, old_page, new_page, address, page_table);

		/* Free the old page.. */
		new_page = old_page;
	}
	spin_unlock(&mm->page_table_lock);
	page_cache_release(new_page);
	return 1;	/* Minor fault */

bad_wp_page:
	spin_unlock(&mm->page_table_lock);
	printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
	return -1;
}

/*
 * This function zeroes out partial mmap'ed pages at truncation time..
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
	unsigned int offset;
	struct page *page;
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;

	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		return;
	if (pgd_bad(*page_dir)) {
		pgd_ERROR(*page_dir);
		pgd_clear(page_dir);
		return;
	}
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		return;
	if (pmd_bad(*page_middle)) {
		pmd_ERROR(*page_middle);
		pmd_clear(page_middle);
		return;
	}
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		return;
	flush_cache_page(vma, address);
	page = pte_page(pte);
	if ((!VALID_PAGE(page)) || PageReserved(page))
		return;
	offset = address & ~PAGE_MASK;
	memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
}

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
void vmtruncate(struct inode * inode, loff_t offset)
{
	unsigned long partial, pgoff;
	struct vm_area_struct * mpnt;
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	inode->i_size = offset;
	truncate_inode_pages(mapping, offset);
	spin_lock(&mapping->i_shared_lock);
	if (!mapping->i_mmap)
		goto out_unlock;

	pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	partial = (unsigned long)offset & (PAGE_CACHE_SIZE - 1);

	mpnt = mapping->i_mmap;
	do {
		struct mm_struct *mm = mpnt->vm_mm;
		unsigned long start = mpnt->vm_start;
		unsigned long end = mpnt->vm_end;
		unsigned long len = end - start;
		unsigned long diff;

		/* mapping wholly truncated? */
		if (mpnt->vm_pgoff >= pgoff) {
			flush_cache_range(mm, start, end);
			zap_page_range(mm, start, len);
			flush_tlb_range(mm, start, end);
			continue;
		}

		/* mapping wholly unaffected? */
		len = len >> PAGE_SHIFT;
		diff = pgoff - mpnt->vm_pgoff;
		if (diff >= len)
			continue;

		/* Ok, partially affected.. */
		start += diff << PAGE_SHIFT;
		len = (len - diff) << PAGE_SHIFT;
		if (start & ~PAGE_MASK) {
			partial_clear(mpnt, start);
			start = (start + ~PAGE_MASK) & PAGE_MASK;
		}
		flush_cache_range(mm, start, end);
		zap_page_range(mm, start, len);
		flush_tlb_range(mm, start, end);
	} while ((mpnt = mpnt->vm_next_share) != NULL);
out_unlock:
	spin_unlock(&mapping->i_shared_lock);

	/* this should go into ->truncate */
	inode->i_size = offset;
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return;

do_expand:
	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY) {
		if (inode->i_size >= limit) {
			send_sig(SIGXFSZ, current, 0);
			goto out;
		}
		if (offset > limit) {
			send_sig(SIGXFSZ, current, 0);
			offset = limit;
		}
	}
	inode->i_size = offset;
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
out:
	return;
}

/*
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 */
void swapin_readahead(swp_entry_t entry)
{
	int i, num;
	struct page *new_page;
	unsigned long offset;

	/*
	 * Get the number of handles we should do readahead io to. Also,
	 * grab temporary references on them, releasing them as io completes.
	 */
	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		/* Don't block on I/O for read-ahead */
		if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster) {
			while (i++ < num)
				swap_free(SWP_ENTRY(SWP_TYPE(entry), offset++));
			break;
		}
		/* Ok, do the async read-ahead now */
		new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
		if (new_page != NULL)
			page_cache_release(new_page);
		swap_free(SWP_ENTRY(SWP_TYPE(entry), offset));
	}
	return;
}

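/*
 * Every reference taken here is dropped again immediately: the pages
 * stay in the swap cache, so the read_swap_cache() call in
 * do_swap_page() below finds them without having to do synchronous I/O.
 */
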
static int do_swap_page(struct mm_struct * mm,
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, swp_entry_t entry, int write_access)
{
	struct page *page = lookup_swap_cache(entry);
	pte_t pte;

	if (!page) {
		swapin_readahead(entry);
		page = read_swap_cache(entry);
		if (!page)
			return -1;

		flush_page_to_ram(page);
		flush_icache_page(vma, page, address);
	}

	mm->rss++;

	pte = mk_pte(page, vma->vm_page_prot);

	/*
	 * Freeze the "shared"ness of the page, ie page_count + swap_count.
	 * Must lock page before transferring our swap count to already
	 * obtained page count.
	 */
	lock_page(page);
	swap_free(entry);
	if (write_access && !is_page_shared(page)) {
		delete_from_swap_cache_nolock(page);
		UnlockPage(page);
		page = replace_with_highmem(page);
		pte = mk_pte(page, vma->vm_page_prot);
		pte = pte_mkwrite(pte_mkdirty(pte));
	} else
		UnlockPage(page);

	set_pte(page_table, pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	return 1;	/* Minor fault */
}

/*
 * This only needs the MM semaphore
 */
static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
	struct page *page = NULL;
	pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));

	if (write_access) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			return -1;
		if (PageHighMem(page)) {
			/* ... highmem bookkeeping ... */
		}
		clear_user_highpage(page, addr);
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		mm->rss++;
		flush_page_to_ram(page);
	}

	set_pte(page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, entry);
	return 1;	/* Minor fault */
}

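/*
 * Note the asymmetry above: a read fault on anonymous memory just maps
 * the global ZERO_PAGE read-only, so no memory is allocated until the
 * first write, which either comes straight here with write_access set
 * or goes through do_wp_page() later.
 */
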
/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore held.
 */
static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
	unsigned long address, int write_access, pte_t *page_table)
{
	struct page * new_page;
	pte_t entry;

	if (!vma->vm_ops || !vma->vm_ops->nopage)
		return do_anonymous_page(mm, vma, page_table, write_access, address);

	/*
	 * The third argument is "no_share", which tells the low-level code
	 * to copy, not share the page even if sharing is possible.  It's
	 * essentially an early COW detection.
	 */
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
	if (new_page == NULL)	/* no page was available -- SIGBUS */
		return 0;
	if (new_page == NOPAGE_OOM)
		return -1;
	++mm->rss;

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	flush_page_to_ram(new_page);
	flush_icache_page(vma, new_page, address);
	entry = mk_pte(new_page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (page_count(new_page) > 1 &&
		   !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	set_pte(page_table, entry);

	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	return 2;	/* Major fault */
}

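/*
 * Return convention shared by the fault handlers above: 1 means a
 * minor fault was serviced, 2 a major fault (real I/O was needed), 0
 * tells the caller to raise SIGBUS and a negative value signals an
 * out-of-memory condition; handle_mm_fault() passes this straight back
 * to the architecture's page fault handler.
 */
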
/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * Note the "page_table_lock". It is to protect against kswapd removing
 * pages from under us. Note that kswapd only ever _removes_ pages, never
 * adds them. As such, once we have noticed that the page is not present,
 * we can drop the lock early.
 *
 * The adding of pages is protected by the MM semaphore (which we hold),
 * so we don't need to worry about a page being suddenly been added into
 * our VM.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
	struct vm_area_struct * vma, unsigned long address,
	int write_access, pte_t * pte)
{
	pte_t entry;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry))
			return do_no_page(mm, vma, address, write_access, pte);
		return do_swap_page(mm, vma, address, pte, pte_to_swp_entry(entry), write_access);
	}

	/*
	 * Ok, the entry was present, we need to get the page table
	 * lock to synchronize with kswapd, and verify that the entry
	 * didn't change from under us..
	 */
	spin_lock(&mm->page_table_lock);
	if (pte_val(entry) == pte_val(*pte)) {
		if (write_access) {
			if (!pte_write(entry))
				return do_wp_page(mm, vma, address, pte, entry);

			entry = pte_mkdirty(entry);
		}
		entry = pte_mkyoung(entry);
		establish_pte(vma, address, pte, entry);
	}
	spin_unlock(&mm->page_table_lock);
	return 1;
}

/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	int ret = -1;
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(mm, address);
	pmd = pmd_alloc(pgd, address);

	if (pmd) {
		pte_t * pte = pte_alloc(pmd, address);
		if (pte)
			ret = handle_pte_fault(mm, vma, address, write_access, pte);
	}
	return ret;
}

/*
 * Simplistic page force-in..
 */
int make_pages_present(unsigned long addr, unsigned long end)
{
	int write;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;

	vma = find_vma(mm, addr);
	write = (vma->vm_flags & VM_WRITE) != 0;
	do {
		if (handle_mm_fault(mm, vma, addr, write) < 0)
			return -1;
		addr += PAGE_SIZE;
	} while (addr < end);
	return 0;
}