/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/swapctl.h>
#include <linux/iobuf.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

unsigned long max_mapnr;
unsigned long num_physpages;
void * high_memory;
struct page *highmem_start_page;

/*
 * We special-case the C-O-W ZERO_PAGE, because it's such
 * a common occurrence (no need to read the page to know
 * that it's zero - better for the cache and memory subsystem).
 */
static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
{
	if (from == ZERO_PAGE(address)) {
		clear_user_highpage(to, address);
		return;
	}
	copy_user_highpage(to, from, address);
}

mem_map_t * mem_map;

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}

static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD ; j++)
		free_one_pmd(pmd+j);
	pmd_free(pmd);
}

/* Low and high watermarks for page table cache.
   The system should try to have pgt_water[0] <= cache elements <= pgt_water[1]
 */
int pgt_cache_water[2] = { 25, 50 };

/* Returns the number of pages freed */
int check_pgt_cache(void)
{
	return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}

/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way.
 */
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
	pgd_t * page_dir = mm->pgd;

	page_dir += first;
	do {
		free_one_pgd(page_dir);
		page_dir++;
	} while (--nr);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

#define PTE_TABLE_MASK	((PTRS_PER_PTE-1) * sizeof(pte_t))
#define PMD_TABLE_MASK	((PTRS_PER_PMD-1) * sizeof(pmd_t))

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 *
 * 08Jan98 Merged into one routine from several inline routines to reduce
 *         variable count and make things faster. -jj
 */
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pgd_t * src_pgd, * dst_pgd;
	unsigned long address = vma->vm_start;
	unsigned long end = vma->vm_end;
	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	src_pgd = pgd_offset(src, address)-1;
	dst_pgd = pgd_offset(dst, address)-1;

	for (;;) {
		pmd_t * src_pmd, * dst_pmd;

		src_pgd++; dst_pgd++;

		/* copy_pmd_range */

		if (pgd_none(*src_pgd))
			goto skip_copy_pmd_range;
		if (pgd_bad(*src_pgd)) {
			pgd_ERROR(*src_pgd);
			pgd_clear(src_pgd);
skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
			if (!address || (address >= end))
				goto out;
			continue;
		}
		if (pgd_none(*dst_pgd)) {
			if (!pmd_alloc(dst_pgd, 0))
				goto nomem;
		}

		src_pmd = pmd_offset(src_pgd, address);
		dst_pmd = pmd_offset(dst_pgd, address);

		do {
			pte_t * src_pte, * dst_pte;

			/* copy_pte_range */

			if (pmd_none(*src_pmd))
				goto skip_copy_pte_range;
			if (pmd_bad(*src_pmd)) {
				pmd_ERROR(*src_pmd);
				pmd_clear(src_pmd);
skip_copy_pte_range:		address = (address + PMD_SIZE) & PMD_MASK;
				if (address >= end)
					goto out;
				goto cont_copy_pmd_range;
			}
			if (pmd_none(*dst_pmd)) {
				if (!pte_alloc(dst_pmd, 0))
					goto nomem;
			}

			src_pte = pte_offset(src_pmd, address);
			dst_pte = pte_offset(dst_pmd, address);

			do {
				pte_t pte = *src_pte;
				struct page *ptepage;

				/* copy_one_pte */

				if (pte_none(pte))
					goto cont_copy_pte_range_noset;
				if (!pte_present(pte)) {
					swap_duplicate(pte_to_swp_entry(pte));
					goto cont_copy_pte_range;
				}
				ptepage = pte_page(pte);
				if ((!VALID_PAGE(ptepage)) ||
				    PageReserved(ptepage))
					goto cont_copy_pte_range;

				/* If it's a COW mapping, write protect it both in the parent and the child */
				if (cow) {
					ptep_clear_wrprotect(src_pte);
					pte = *src_pte;
				}

				/* If it's a shared mapping, mark it clean in the child */
				if (vma->vm_flags & VM_SHARED)
					pte = pte_mkclean(pte);
				pte = pte_mkold(pte);
				get_page(ptepage);

cont_copy_pte_range:		set_pte(dst_pte, pte);
cont_copy_pte_range_noset:	address += PAGE_SIZE;
				if (address >= end)
					goto out;
				src_pte++;
				dst_pte++;
			} while ((unsigned long)src_pte & PTE_TABLE_MASK);

cont_copy_pmd_range:	src_pmd++;
			dst_pmd++;
		} while ((unsigned long)src_pmd & PMD_TABLE_MASK);

out:
	return 0;

nomem:
	return -ENOMEM;
}

/*
 * Return indicates whether a page was freed so caller can adjust rss
 */
static inline int free_pte(pte_t page)
{
	if (pte_present(page)) {
		struct page *ptpage = pte_page(page);
		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
			return 0;
		/*
		 * free_page() used to be able to clear swap cache
		 * entries. We may now have to do it manually.
		 */
		if (pte_dirty(page))
			SetPageDirty(ptpage);
		free_page_and_swap_cache(ptpage);
		return 1;
	}
	swap_free(pte_to_swp_entry(page));
	return 0;
}

static inline void forget_pte(pte_t page)
{
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		free_pte(page);
	}
}

static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	int freed;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	freed = 0;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = ptep_get_and_clear(pte);
		pte++;
		size--;
		if (pte_none(page))
			continue;
		freed += free_pte(page);
	}
	return freed;
}

static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	int freed;

	if (pgd_none(*dir))
		return 0;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return 0;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	freed = 0;
	do {
		freed += zap_pte_range(mm, pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return freed;
}

/*
 * remove user pages in a given range.
 */
void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int freed = 0;

	dir = pgd_offset(mm, address);

	/*
	 * This is a long-lived spinlock. That's fine.
	 * There's no contention, because the page table
	 * lock only protects against kswapd anyway, and
	 * even if kswapd happened to be looking at this
	 * process we _want_ it to get stuck.
	 */
	if (address >= end)
		BUG();
	spin_lock(&mm->page_table_lock);
	do {
		freed += zap_pmd_range(mm, dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&mm->page_table_lock);
	/*
	 * Update rss for the mm_struct (not necessarily current->mm)
	 * Notice that rss is an unsigned long.
	 */
	if (mm->rss > freed)
		mm->rss -= freed;
	else
		mm->rss = 0;
}

/*
 * Do a quick page-table lookup for a single page.
 */
static struct page * follow_page(unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(current->mm, address);
	pmd = pmd_offset(pgd, address);
	if (pmd) {
		pte_t * pte = pte_offset(pmd, address);
		if (pte && pte_present(*pte))
			return pte_page(*pte);
	}

	return NULL;
}

/*
 * Given a physical address, is there a useful struct page pointing to
 * it?  This may become more complex in the future if we start dealing
 * with IO-aperture pages in kiobufs.
 */
static inline struct page * get_page_map(struct page *page)
{
	if (!VALID_PAGE(page))
		return 0;
	return page;
}

/*
 * Force in an entire range of pages from the current process's user VA,
 * and pin them in physical memory.
 */

#define dprintk(x...)
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	unsigned long		ptr, end;
	int			err;
	struct mm_struct *	mm;
	struct vm_area_struct *	vma = 0;
	struct page *		map;
	int			i;
	int			datain = (rw == READ);

	/* Make sure the iobuf is not already mapped somewhere. */
	if (iobuf->nr_pages)
		return -EINVAL;

	mm = current->mm;
	dprintk ("map_user_kiobuf: begin\n");

	ptr = va & PAGE_MASK;
	end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
	err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
	if (err)
		return err;

	down(&mm->mmap_sem);

	err = -EFAULT;
	iobuf->locked = 0;
	iobuf->offset = va & ~PAGE_MASK;
	iobuf->length = len;

	i = 0;

	/*
	 * First of all, try to fault in all of the necessary pages
	 */
	while (ptr < end) {
		if (!vma || ptr >= vma->vm_end) {
			vma = find_vma(current->mm, ptr);
			if (!vma)
				goto out_unlock;
			if (vma->vm_start > ptr) {
				if (!(vma->vm_flags & VM_GROWSDOWN))
					goto out_unlock;
				if (expand_stack(vma, ptr))
					goto out_unlock;
			}
			if (((datain) && (!(vma->vm_flags & VM_WRITE))) ||
					(!(vma->vm_flags & VM_READ))) {
				err = -EACCES;
				goto out_unlock;
			}
		}
		if (handle_mm_fault(current->mm, vma, ptr, datain) <= 0)
			goto out_unlock;
		spin_lock(&mm->page_table_lock);
		map = follow_page(ptr);
		if (!map) {
			spin_unlock(&mm->page_table_lock);
			dprintk (KERN_ERR "Missing page in map_user_kiobuf\n");
			goto out_unlock;
		}
		map = get_page_map(map);
		if (map)
			atomic_inc(&map->count);
		else
			printk (KERN_INFO "Mapped page missing [%d]\n", i);
		spin_unlock(&mm->page_table_lock);
		iobuf->maplist[i] = map;
		iobuf->nr_pages = ++i;

		ptr += PAGE_SIZE;
	}

	up(&mm->mmap_sem);
	dprintk ("map_user_kiobuf: end OK\n");
	return 0;

 out_unlock:
	up(&mm->mmap_sem);
	unmap_kiobuf(iobuf);
	dprintk ("map_user_kiobuf: end %d\n", err);
	return err;
}
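
#if 0
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might pin a user buffer with the functions above.  This assumes the
 * 2.4-era kiobuf helpers alloc_kiovec()/free_kiovec() declared in
 * <linux/iobuf.h>; the function name and buffer handling are made up.
 */
static int example_pin_user_buffer(unsigned long uaddr, size_t len)
{
	struct kiobuf *iobuf;
	int err;

	/* Allocate one kiobuf, then map and pin the user range. */
	err = alloc_kiovec(1, &iobuf);
	if (err)
		return err;
	err = map_user_kiobuf(READ, iobuf, uaddr, len);
	if (err)
		goto out_free;

	/* ... perform I/O against iobuf->maplist[0..nr_pages-1] ... */

	unmap_kiobuf(iobuf);
 out_free:
	free_kiovec(1, &iobuf);
	return err;
}
#endif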

/*
 * Unmap all of the pages referenced by a kiobuf.  We release the pages,
 * and unlock them if they were locked.
 */
void unmap_kiobuf (struct kiobuf *iobuf)
{
	int i;
	struct page *map;

	for (i = 0; i < iobuf->nr_pages; i++) {
		map = iobuf->maplist[i];
		if (map) {
			if (iobuf->locked)
				UnlockPage(map);
			__free_page(map);
		}
	}

	iobuf->nr_pages = 0;
	iobuf->locked = 0;
}

/*
 * Lock down all of the pages of a kiovec for IO.
 *
 * If any page is mapped twice in the kiovec, we return the error -EINVAL.
 *
 * The optional wait parameter causes the lock call to block until all
 * pages can be locked if set.  If wait==0, the lock operation is
 * aborted if any locked pages are found and -EAGAIN is returned.
 */
int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
{
	struct kiobuf *iobuf;
	int i, j;
	struct page *page, **ppage;
	int doublepage = 0;
	int repeat = 0;

 repeat:

	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];

		if (iobuf->locked)
			continue;
		iobuf->locked = 1;

		ppage = iobuf->maplist;
		for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
			page = *ppage;
			if (!page)
				continue;

			if (TryLockPage(page))
				goto retry;
		}
	}

	return 0;

 retry:

	/*
	 * We couldn't lock one of the pages.  Undo the locking so far,
	 * wait on the page we got to, and try again.
	 */
	unlock_kiovec(nr, iovec);
	if (!wait)
		return -EAGAIN;

	/*
	 * Did the release also unlock the page we got stuck on?
	 */
	if (!PageLocked(page)) {
		/*
		 * If so, we may well have the page mapped twice
		 * in the IO address range.  Bad news.  Of
		 * course, it _might_ just be a coincidence,
		 * but if it happens more than once, chances
		 * are we have a double-mapped page.
		 */
		if (++doublepage >= 3)
			return -EINVAL;

		/* Try again...  */
		wait_on_page(page);
	}

	if (++repeat < 16)
		goto repeat;
	return -EAGAIN;
}

/*
 * Unlock all of the pages of a kiovec after IO.
 */
int unlock_kiovec(int nr, struct kiobuf *iovec[])
{
	struct kiobuf *iobuf;
	int i, j;
	struct page *page, **ppage;

	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];

		if (!iobuf->locked)
			continue;
		iobuf->locked = 0;

		ppage = iobuf->maplist;
		for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
			page = *ppage;
			if (!page)
				continue;
			UnlockPage(page);
		}
	}

	return 0;
}

static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
		pte_t oldpage = ptep_get_and_clear(pte);
		set_pte(pte, zero_pte);
		forget_pte(oldpage);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline int zeromap_pmd_range(pmd_t * pmd, unsigned long address,
                                    unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		zeromap_pte_range(pte, address, end - address, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = address;
	unsigned long end = address + size;

	dir = pgd_offset(current->mm, address);
	flush_cache_range(current->mm, beg, end);
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = zeromap_pmd_range(pmd, address, end - address, prot);
		if (error)
			break;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page *page;
		pte_t oldpage;
		oldpage = ptep_get_and_clear(pte);

		page = virt_to_page(__va(phys_addr));
		if ((!VALID_PAGE(page)) || PageReserved(page))
			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline int remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

/* Note: this is only safe if the mm semaphore is held when called. */
int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;

	phys_addr -= from;
	dir = pgd_offset(current->mm, from);
	flush_cache_range(current->mm, beg, end);
	if (from >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = remap_pmd_range(pmd, from, end - from, phys_addr + from, prot);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (from && (from < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}
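
#if 0
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * driver of this era calls remap_page_range() from its mmap() file
 * operation to map a physical region into the caller's vma.
 * EXAMPLE_PHYS_BASE and the function name are made-up placeholders;
 * the mm semaphore is already held by the mmap() caller, as the note
 * above requires.
 */
static int example_device_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long phys = EXAMPLE_PHYS_BASE + (vma->vm_pgoff << PAGE_SHIFT);

	/* Install the mapping; the conventional failure code is -EAGAIN. */
	if (remap_page_range(vma->vm_start, phys, size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#endif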

/*
 * Establish a new mapping:
 *  - flush the old one
 *  - update the page tables
 *  - inform the TLB about the new one
 */
static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
{
	set_pte(page_table, entry);
	flush_tlb_page(vma, address);
	update_mmu_cache(vma, address, entry);
}

static inline void break_cow(struct vm_area_struct * vma, struct page * old_page, struct page * new_page, unsigned long address,
		pte_t *page_table)
{
	copy_cow_page(old_page,new_page,address);
	flush_page_to_ram(new_page);
	flush_cache_page(vma, address);
	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with the page table read-lock held, and need to exit without
 * it.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, pte_t *page_table, pte_t pte)
{
	struct page *old_page, *new_page;

	old_page = pte_page(pte);
	if (!VALID_PAGE(old_page))
		goto bad_wp_page;

	/*
	 * We can avoid the copy if:
	 * - we're the only user (count == 1)
	 * - the only other user is the swap cache,
	 *   and the only swap cache user is itself,
	 *   in which case we can just continue to
	 *   use the same swap cache (it will be
	 *   marked dirty).
	 */
	switch (page_count(old_page)) {
	case 2:
		/*
		 * Lock the page so that no one can look it up from
		 * the swap cache, grab a reference and start using it.
		 * Can not do lock_page, holding page_table_lock.
		 */
		if (!PageSwapCache(old_page) || TryLockPage(old_page))
			break;
		if (is_page_shared(old_page)) {
			UnlockPage(old_page);
			break;
		}
		UnlockPage(old_page);
		/* FallThrough */
	case 1:
		flush_cache_page(vma, address);
		establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
		spin_unlock(&mm->page_table_lock);
		return 1;	/* Minor fault */
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	spin_unlock(&mm->page_table_lock);
	new_page = page_cache_alloc();
	if (!new_page)
		return -1;
	spin_lock(&mm->page_table_lock);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	if (pte_same(*page_table, pte)) {
		if (PageReserved(old_page))
			++mm->rss;
		break_cow(vma, old_page, new_page, address, page_table);

		/* Free the old page.. */
		new_page = old_page;
	}
	spin_unlock(&mm->page_table_lock);
	page_cache_release(new_page);
	return 1;	/* Minor fault */

bad_wp_page:
	spin_unlock(&mm->page_table_lock);
	printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
	return -1;
}

/*
 * This function zeroes out partial mmap'ed pages at truncation time..
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
	unsigned int offset;
	struct page *page;
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;

	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		return;
	if (pgd_bad(*page_dir)) {
		pgd_ERROR(*page_dir);
		pgd_clear(page_dir);
		return;
	}
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		return;
	if (pmd_bad(*page_middle)) {
		pmd_ERROR(*page_middle);
		pmd_clear(page_middle);
		return;
	}
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		return;
	flush_cache_page(vma, address);
	page = pte_page(pte);
	if ((!VALID_PAGE(page)) || PageReserved(page))
		return;
	offset = address & ~PAGE_MASK;
	memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
}

static void vmtruncate_list(struct vm_area_struct *mpnt,
			    unsigned long pgoff, unsigned long partial)
{
	do {
		struct mm_struct *mm = mpnt->vm_mm;
		unsigned long start = mpnt->vm_start;
		unsigned long end = mpnt->vm_end;
		unsigned long len = end - start;
		unsigned long diff;

		/* mapping wholly truncated? */
		if (mpnt->vm_pgoff >= pgoff) {
			flush_cache_range(mm, start, end);
			zap_page_range(mm, start, len);
			flush_tlb_range(mm, start, end);
			continue;
		}

		/* mapping wholly unaffected? */
		len = len >> PAGE_SHIFT;
		diff = pgoff - mpnt->vm_pgoff;
		if (diff >= len)
			continue;

		/* Ok, partially affected.. */
		start += diff << PAGE_SHIFT;
		len = (len - diff) << PAGE_SHIFT;
		if (start & ~PAGE_MASK) {
			partial_clear(mpnt, start);
			start = (start + ~PAGE_MASK) & PAGE_MASK;
		}
		flush_cache_range(mm, start, end);
		zap_page_range(mm, start, len);
		flush_tlb_range(mm, start, end);
	} while ((mpnt = mpnt->vm_next_share) != NULL);
}

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
void vmtruncate(struct inode * inode, loff_t offset)
{
	unsigned long partial, pgoff;
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	inode->i_size = offset;
	truncate_inode_pages(mapping, offset);
	spin_lock(&mapping->i_shared_lock);
	if (!mapping->i_mmap && !mapping->i_mmap_shared)
		goto out_unlock;

	pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	partial = (unsigned long)offset & (PAGE_CACHE_SIZE - 1);

	if (mapping->i_mmap != NULL)
		vmtruncate_list(mapping->i_mmap, pgoff, partial);
	if (mapping->i_mmap_shared != NULL)
		vmtruncate_list(mapping->i_mmap_shared, pgoff, partial);

out_unlock:
	spin_unlock(&mapping->i_shared_lock);
	/* this should go into ->truncate */
	inode->i_size = offset;
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return;

do_expand:
	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY) {
		if (inode->i_size >= limit) {
			send_sig(SIGXFSZ, current, 0);
			goto out;
		}
		if (offset > limit) {
			send_sig(SIGXFSZ, current, 0);
			offset = limit;
		}
	}
	inode->i_size = offset;
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
out:
	return;
}

/*
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 */
void swapin_readahead(swp_entry_t entry)
{
	int i, num;
	struct page *new_page;
	unsigned long offset;

	/*
	 * Get the number of handles we should do readahead io to. Also,
	 * grab temporary references on them, releasing them as io completes.
	 */
	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		/* Don't block on I/O for read-ahead */
		if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster
				* (1 << page_cluster)) {
			while (i++ < num)
				swap_free(SWP_ENTRY(SWP_TYPE(entry), offset++));
			break;
		}
		/* Ok, do the async read-ahead now */
		new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
		if (new_page != NULL)
			page_cache_release(new_page);
		swap_free(SWP_ENTRY(SWP_TYPE(entry), offset));
	}
	return;
}

static int do_swap_page(struct mm_struct * mm,
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, swp_entry_t entry, int write_access)
{
	struct page *page = lookup_swap_cache(entry);
	pte_t pte;

	if (!page) {
		lock_kernel();
		swapin_readahead(entry);
		page = read_swap_cache(entry);
		unlock_kernel();
		if (!page)
			return -1;

		flush_page_to_ram(page);
		flush_icache_page(vma, page);
	}

	mm->rss++;

	pte = mk_pte(page, vma->vm_page_prot);

	/*
	 * Freeze the "shared"ness of the page, ie page_count + swap_count.
	 * Must lock page before transferring our swap count to already
	 * obtained page count.
	 */
	lock_page(page);
	swap_free(entry);
	if (write_access && !is_page_shared(page))
		pte = pte_mkwrite(pte_mkdirty(pte));
	UnlockPage(page);

	set_pte(page_table, pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	return 1;	/* Minor fault */
}

/*
 * This only needs the MM semaphore
 */
static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
	struct page *page = NULL;
	pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
	if (write_access) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			return -1;
		clear_user_highpage(page, addr);
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		mm->rss++;
		flush_page_to_ram(page);
	}
	set_pte(page_table, entry);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, entry);
	return 1;	/* Minor fault */
}

/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore held.
 */
static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
	unsigned long address, int write_access, pte_t *page_table)
{
	struct page * new_page;
	pte_t entry;

	if (!vma->vm_ops || !vma->vm_ops->nopage)
		return do_anonymous_page(mm, vma, page_table, write_access, address);

	/*
	 * The third argument is "no_share", which tells the low-level code
	 * to copy, not share the page even if sharing is possible.  It's
	 * essentially an early COW detection.
	 */
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
	if (new_page == NULL)	/* no page was available -- SIGBUS */
		return 0;
	if (new_page == NOPAGE_OOM)
		return -1;
	++mm->rss;
	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	flush_page_to_ram(new_page);
	flush_icache_page(vma, new_page);
	entry = mk_pte(new_page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (page_count(new_page) > 1 &&
		   !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	set_pte(page_table, entry);
	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	return 2;	/* Major fault */
}

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * Note the "page_table_lock". It is to protect against kswapd removing
 * pages from under us. Note that kswapd only ever _removes_ pages, never
 * adds them. As such, once we have noticed that the page is not present,
 * we can drop the lock early.
 *
 * The adding of pages is protected by the MM semaphore (which we hold),
 * so we don't need to worry about a page being suddenly been added into
 * our VM.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
	struct vm_area_struct * vma, unsigned long address,
	int write_access, pte_t * pte)
{
	pte_t entry;

	/*
	 * We need the page table lock to synchronize with kswapd
	 * and the SMP-safe atomic PTE updates.
	 */
	spin_lock(&mm->page_table_lock);
	entry = *pte;
	if (!pte_present(entry)) {
		/*
		 * If it truly wasn't present, we know that kswapd
		 * and the PTE updates will not touch it later. So
		 * drop the lock.
		 */
		spin_unlock(&mm->page_table_lock);
		if (pte_none(entry))
			return do_no_page(mm, vma, address, write_access, pte);
		return do_swap_page(mm, vma, address, pte, pte_to_swp_entry(entry), write_access);
	}

	if (write_access) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address, pte, entry);

		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	establish_pte(vma, address, pte, entry);
	spin_unlock(&mm->page_table_lock);
	return 1;
}

/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	int ret = -1;
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(mm, address);
	pmd = pmd_alloc(pgd, address);

	if (pmd) {
		pte_t * pte = pte_alloc(pmd, address);
		if (pte)
			ret = handle_pte_fault(mm, vma, address, write_access, pte);
	}
	return ret;
}

/*
 * Simplistic page force-in..
 */
int make_pages_present(unsigned long addr, unsigned long end)
{
	int write;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;

	vma = find_vma(mm, addr);
	write = (vma->vm_flags & VM_WRITE) != 0;
	if (addr >= end)
		BUG();
	do {
		if (handle_mm_fault(mm, vma, addr, write) < 0)
			return -1;
		addr += PAGE_SIZE;
	} while (addr < end);
	return 0;
}