/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 */
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/swapctl.h>
#include <linux/iobuf.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
unsigned long max_mapnr = 0;
unsigned long num_physpages = 0;
void * high_memory = NULL;
struct page *highmem_start_page;
/*
 * We special-case the C-O-W ZERO_PAGE, because it's such
 * a common occurrence (no need to read the page to know
 * that it's zero - better for the cache and memory subsystem).
 */
static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
{
	if (from == ZERO_PAGE(address)) {
		clear_highpage(to);
		return;
	}
	copy_highpage(to, from);
}
mem_map_t * mem_map = NULL;
/*
 * oom() prints a message (so that the user knows why the process died),
 * and gives the process an untrappable SIGKILL.
 */
void oom(struct task_struct * task)
{
	printk("\nOut of memory for %s.\n", task->comm);
	force_sig(SIGKILL, task);
}
/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}
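
/*
 * Free every pte table hanging off one pgd entry, then free the pmd
 * table itself.
 */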
static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD ; j++)
		free_one_pmd(pmd+j);
	pmd_free(pmd);
}
/* Low and high watermarks for the page table cache.
   The system should try to have pgt_cache_water[0] <= cache elements <= pgt_cache_water[1]
 */
int pgt_cache_water[2] = { 25, 50 };

/* Returns the number of pages freed */
int check_pgt_cache(void)
{
	return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}
/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way.
 */
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
	pgd_t * page_dir = mm->pgd;

	page_dir += first;
	do {
		free_one_pgd(page_dir);
		page_dir++;
	} while (--nr);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
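
/*
 * Byte offset of a pte/pmd pointer within its table: the copy loops in
 * copy_page_range() below keep going until an incremented table pointer
 * wraps onto the next table, i.e. until this masked offset becomes zero.
 */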
#define PTE_TABLE_MASK	((PTRS_PER_PTE-1) * sizeof(pte_t))
#define PMD_TABLE_MASK	((PTRS_PER_PMD-1) * sizeof(pmd_t))
/*
 * copy one vm_area from one task to the other. Assumes that the page
 * tables in the new task are already cleared throughout the range
 * covered by this vma.
 *
 * 08Jan98 Merged into one routine from several inline routines to reduce
 *         variable count and make things faster. -jj
 */
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pgd_t * src_pgd, * dst_pgd;
	unsigned long address = vma->vm_start;
	unsigned long end = vma->vm_end;
	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	src_pgd = pgd_offset(src, address)-1;
	dst_pgd = pgd_offset(dst, address)-1;

	for (;;) {
		pmd_t * src_pmd, * dst_pmd;

		src_pgd++; dst_pgd++;

		/* copy_pmd_range */

		if (pgd_none(*src_pgd))
			goto skip_copy_pmd_range;
		if (pgd_bad(*src_pgd)) {
			pgd_ERROR(*src_pgd);
			pgd_clear(src_pgd);
skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
			if (!address || (address >= end))
				goto out;
			continue;
		}
		if (pgd_none(*dst_pgd)) {
			if (!pmd_alloc(dst_pgd, 0))
				goto nomem;
		}

		src_pmd = pmd_offset(src_pgd, address);
		dst_pmd = pmd_offset(dst_pgd, address);

		do {
			pte_t * src_pte, * dst_pte;

			/* copy_pte_range */

			if (pmd_none(*src_pmd))
				goto skip_copy_pte_range;
			if (pmd_bad(*src_pmd)) {
				pmd_ERROR(*src_pmd);
				pmd_clear(src_pmd);
skip_copy_pte_range:		address = (address + PMD_SIZE) & PMD_MASK;
				if (address >= end)
					goto out;
				goto cont_copy_pmd_range;
			}
			if (pmd_none(*dst_pmd)) {
				if (!pte_alloc(dst_pmd, 0))
					goto nomem;
			}

			src_pte = pte_offset(src_pmd, address);
			dst_pte = pte_offset(dst_pmd, address);

			do {
				pte_t pte = *src_pte;
				unsigned long page_nr;

				/* copy_one_pte */

				if (pte_none(pte))
					goto cont_copy_pte_range;
				if (!pte_present(pte)) {
					swap_duplicate(pte_to_swp_entry(pte));
					set_pte(dst_pte, pte);
					goto cont_copy_pte_range;
				}
				page_nr = pte_pagenr(pte);
				if (page_nr >= max_mapnr ||
				    PageReserved(mem_map+page_nr)) {
					set_pte(dst_pte, pte);
					goto cont_copy_pte_range;
				}
				/* If it's a COW mapping, write protect it both in the parent and the child */
				if (cow) {
					pte = pte_wrprotect(pte);
					set_pte(src_pte, pte);
				}
				/* If it's a shared mapping, mark it clean in the child */
				if (vma->vm_flags & VM_SHARED)
					pte = pte_mkclean(pte);
				set_pte(dst_pte, pte_mkold(pte));
				get_page(mem_map + page_nr);

cont_copy_pte_range:		address += PAGE_SIZE;
				if (address >= end)
					goto out;
				src_pte++;
				dst_pte++;
			} while ((unsigned long)src_pte & PTE_TABLE_MASK);

cont_copy_pmd_range:	src_pmd++;
			dst_pmd++;
		} while ((unsigned long)src_pmd & PMD_TABLE_MASK);

out:
	return 0;

nomem:
	return -ENOMEM;
}
/*
 * Return indicates whether a page was freed so caller can adjust rss
 */
static inline int free_pte(pte_t page)
{
	if (pte_present(page)) {
		unsigned long nr = pte_pagenr(page);
		if (nr >= max_mapnr || PageReserved(mem_map+nr))
			return 0;
		/*
		 * free_page() used to be able to clear swap cache
		 * entries.  We may now have to do it manually.
		 */
		free_page_and_swap_cache(mem_map+nr);
		return 1;
	}
	swap_free(pte_to_swp_entry(page));
	return 0;
}
static inline void forget_pte(pte_t page)
{
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		free_pte(page);
	}
}
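
/*
 * Clear the ptes covering [address, address+size) under one pmd,
 * freeing the pages they map.  Returns the number of present pages
 * freed, so the caller can adjust mm->rss.
 */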
static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	int freed;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	freed = 0;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = *pte;
		pte++;
		size--;
		pte_clear(pte-1);
		if (pte_none(page))
			continue;
		freed += free_pte(page);
	}
	return freed;
}
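
/*
 * Walk the pmds under one pgd entry and zap each pte range in turn.
 * Returns the total number of pages freed.
 */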
static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	int freed;

	if (pgd_none(*dir))
		return 0;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return 0;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	freed = 0;
	do {
		freed += zap_pte_range(mm, pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return freed;
}
/*
 * remove user pages in a given range.
 */
void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int freed = 0;

	dir = pgd_offset(mm, address);

	/*
	 * This is a long-lived spinlock. That's fine.
	 * There's no contention, because the page table
	 * lock only protects against kswapd anyway, and
	 * even if kswapd happened to be looking at this
	 * process we _want_ it to get stuck.
	 */
	if (address >= end)
		BUG();
	spin_lock(&mm->page_table_lock);
	do {
		freed += zap_pmd_range(mm, dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&mm->page_table_lock);
	/*
	 * Update rss for the mm_struct (not necessarily current->mm)
	 */
	if (mm->rss > 0) {
		mm->rss -= freed;
		if (mm->rss < 0)
			mm->rss = 0;
	}
}
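
/*
 * Note: follow_page() below is called with mm->page_table_lock held
 * (see map_user_kiobuf() further down); nothing in it pins the page
 * it returns.
 */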
/*
 * Do a quick page-table lookup for a single page.
 */
static struct page * follow_page(unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(current->mm, address);
	pmd = pmd_offset(pgd, address);
	if (pmd) {
		pte_t * pte = pte_offset(pmd, address);
		if (pte && pte_present(*pte))
			return pte_page(*pte);
	}

	printk(KERN_ERR "Missing page in follow_page\n");
	return NULL;
}
/*
 * Given a physical address, is there a useful struct page pointing to it?
 */
struct page * get_page_map(struct page *page, unsigned long vaddr)
{
	if (MAP_NR(page) >= max_mapnr)
		return 0;
	if (page == ZERO_PAGE(vaddr))
		return 0;
	if (PageReserved(page))
		return 0;
	return page;
}
/*
 * Force in an entire range of pages from the current process's user VA,
 * and pin and lock the pages for IO.
 */

#define dprintk(x...)
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	unsigned long		ptr, end;
	int			err;
	struct mm_struct *	mm;
	struct vm_area_struct *	vma = 0;
	struct page *		map;
	int			doublepage = 0;
	int			repeat = 0;
	int			i;

	/* Make sure the iobuf is not already mapped somewhere. */
	if (iobuf->nr_pages)
		return -EINVAL;

	mm = current->mm;
	dprintk ("map_user_kiobuf: begin\n");

	ptr = va & PAGE_MASK;
	end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
	err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
	if (err)
		return err;

 repeat:
	down(&mm->mmap_sem);

	err = -EFAULT;
	iobuf->locked = 1;
	iobuf->offset = va & ~PAGE_MASK;
	iobuf->length = len;

	i = 0;

	/*
	 * First of all, try to fault in all of the necessary pages
	 */
	while (ptr < end) {
		if (!vma || ptr >= vma->vm_end) {
			vma = find_vma(current->mm, ptr);
			if (!vma)
				goto out_unlock;
		}
		if (handle_mm_fault(current, vma, ptr, (rw==READ)) <= 0)
			goto out_unlock;
		spin_lock(&mm->page_table_lock);
		map = follow_page(ptr);
		if (!map) {
			dprintk (KERN_ERR "Missing page in map_user_kiobuf\n");
			goto retry;
		}
		map = get_page_map(map, ptr);
		if (map) {
			if (TryLockPage(map)) {
				goto retry;
			}
			atomic_inc(&map->count);
		}
		spin_unlock(&mm->page_table_lock);
		iobuf->maplist[i] = map;
		iobuf->nr_pages = ++i;

		ptr += PAGE_SIZE;
	}

	up(&mm->mmap_sem);
	dprintk ("map_user_kiobuf: end OK\n");
	return 0;

 out_unlock:
	up(&mm->mmap_sem);
	unmap_kiobuf(iobuf);
	dprintk ("map_user_kiobuf: end %d\n", err);
	return err;

 retry:

	/*
	 * Undo the locking so far, wait on the page we got to, and try again.
	 */
	spin_unlock(&mm->page_table_lock);
	unmap_kiobuf(iobuf);
	up(&mm->mmap_sem);

	/*
	 * Did the release also unlock the page we got stuck on?
	 */
	if (map) {
		if (!PageLocked(map)) {
			/* If so, we may well have the page mapped twice
			 * in the IO address range.  Bad news.  Of
			 * course, it _might_ just be a coincidence,
			 * but if it happens more than once, chances
			 * are we have a double-mapped page. */
			if (++doublepage >= 3) {
				return -EINVAL;
			}
		}

		/*
		 * Try again...
		 */
		wait_on_page(map);
	}

	if (++repeat < 16) {
		ptr = va & PAGE_MASK;
		goto repeat;
	}
	return -EAGAIN;
}
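
/*
 * Typical use (a sketch, not taken from any in-tree caller): a driver
 * doing raw I/O on a user buffer pins the pages first, performs its I/O
 * against iobuf->maplist[], then drops the pins:
 *
 *	err = map_user_kiobuf(READ, iobuf, user_addr, user_len);
 *	if (err)
 *		return err;
 *	... submit I/O against iobuf->maplist[0 .. nr_pages-1],
 *	    honouring iobuf->offset and iobuf->length ...
 *	unmap_kiobuf(iobuf);
 *
 * Here "iobuf", "user_addr" and "user_len" are the caller's own
 * variables; the kiobuf itself must have been allocated elsewhere.
 */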
/*
 * Unmap all of the pages referenced by a kiobuf.  We release the pages,
 * and unlock them if they were locked.
 */
void unmap_kiobuf (struct kiobuf *iobuf)
{
	int i;
	struct page *map;

	for (i = 0; i < iobuf->nr_pages; i++) {
		map = iobuf->maplist[i];

		if (map && iobuf->locked) {
			__free_page(map);
			UnlockPage(map);
		}
	}

	iobuf->nr_pages = 0;
	iobuf->locked = 0;
}
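
/*
 * zeromap_page_range() and its helpers below point an address range at
 * the (write-protected) ZERO_PAGE, discarding whatever was mapped there
 * before.
 */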
static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
		pte_t oldpage = *pte;
		set_pte(pte, zero_pte);
		forget_pte(oldpage);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
static inline int zeromap_pmd_range(pmd_t * pmd, unsigned long address,
                                    unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		zeromap_pte_range(pte, address, end - address, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = address;
	unsigned long end = address + size;

	dir = pgd_offset(current->mm, address);
	flush_cache_range(current->mm, beg, end);
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = zeromap_pmd_range(pmd, address, end - address, prot);
		if (error)
			break;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}
/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long mapnr;
		pte_t oldpage = *pte;
		pte_clear(pte);

		mapnr = MAP_NR(__va(phys_addr));
		if (mapnr >= max_mapnr || PageReserved(mem_map+mapnr))
			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
static inline int remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;

	phys_addr -= from;
	dir = pgd_offset(current->mm, from);
	flush_cache_range(current->mm, beg, end);
	if (from >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = remap_pmd_range(pmd, from, end - from, phys_addr + from, prot);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (from && (from < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}
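
/*
 * The usual caller of remap_page_range() is a device driver's mmap()
 * method, mapping device or reserved memory into user space.  A minimal
 * sketch (the physical base address and the driver name are made up
 * purely for illustration):
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		if (remap_page_range(vma->vm_start,
 *				     MYDEV_PHYS_BASE + (vma->vm_pgoff << PAGE_SHIFT),
 *				     size, vma->vm_page_prot))
 *			return -EAGAIN;
 *		return 0;
 *	}
 */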
/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 */
struct page * put_dirty_page(struct task_struct * tsk, struct page *page,
	unsigned long address)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte;

	if (page_count(page) != 1)
		printk("mem_map disagrees with %p at %08lx\n", page, address);
	pgd = pgd_offset(tsk->mm, address);
	pmd = pmd_alloc(pgd, address);
	if (!pmd) {
		__free_page(page);
		oom(tsk);
		return 0;
	}
	pte = pte_alloc(pmd, address);
	if (!pte) {
		__free_page(page);
		oom(tsk);
		return 0;
	}
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		__free_page(page);
		return 0;
	}
	flush_page_to_ram(page);
	set_pte(pte, pte_mkwrite(mk_pte(page, PAGE_COPY)));
	/* no need for flush_tlb */
	return page;
}
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with the page table read-lock held, and need to exit without
 * it.
 */
static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, pte_t *page_table, pte_t pte)
{
	unsigned long map_nr;
	struct page *old_page, *new_page;

	map_nr = pte_pagenr(pte);
	if (map_nr >= max_mapnr)
		goto bad_wp_page;
	tsk->min_flt++;
	old_page = mem_map + map_nr;

	/*
	 * We can avoid the copy if:
	 * - we're the only user (count == 1)
	 * - the only other user is the swap cache,
	 *   and the only swap cache user is itself,
	 *   in which case we can remove the page
	 *   from the swap cache.
	 */
	switch (page_count(old_page)) {
	case 2:
		/*
		 * Lock the page so that no one can look it up from
		 * the swap cache, grab a reference and start using it.
		 * Can not do lock_page, holding page_table_lock.
		 */
		if (!PageSwapCache(old_page) || TryLockPage(old_page))
			break;
		if (is_page_shared(old_page)) {
			UnlockPage(old_page);
			break;
		}
		delete_from_swap_cache_nolock(old_page);
		UnlockPage(old_page);
		/* FallThrough */
	case 1:
		flush_cache_page(vma, address);
		set_pte(page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
		flush_tlb_page(vma, address);
		spin_unlock(&tsk->mm->page_table_lock);
		return 1;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	spin_unlock(&tsk->mm->page_table_lock);
	new_page = alloc_page(GFP_HIGHUSER);
	if (!new_page)
		return -1;
	spin_lock(&tsk->mm->page_table_lock);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	if (pte_val(*page_table) == pte_val(pte)) {
		if (PageReserved(old_page))
			++vma->vm_mm->rss;
		copy_cow_page(old_page, new_page, address);
		flush_page_to_ram(new_page);
		flush_cache_page(vma, address);
		set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
		flush_tlb_page(vma, address);

		/* Free the old page.. */
		new_page = old_page;
	}
	spin_unlock(&tsk->mm->page_table_lock);
	__free_page(new_page);
	return 1;

bad_wp_page:
	spin_unlock(&tsk->mm->page_table_lock);
	printk("do_wp_page: bogus page at address %08lx (nr %ld)\n",address,map_nr);
	return -1;
}
/*
 * This function zeroes out partial mmap'ed pages at truncation time..
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
	unsigned int offset;
	struct page *page;
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;

	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		return;
	if (pgd_bad(*page_dir)) {
		pgd_ERROR(*page_dir);
		pgd_clear(page_dir);
		return;
	}
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		return;
	if (pmd_bad(*page_middle)) {
		pmd_ERROR(*page_middle);
		pmd_clear(page_middle);
		return;
	}
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		return;
	flush_cache_page(vma, address);
	page = pte_page(pte);
	if (page-mem_map >= max_mapnr)
		return;
	offset = address & ~PAGE_MASK;
	memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
}
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
void vmtruncate(struct inode * inode, loff_t offset)
{
	unsigned long partial, pgoff;
	struct vm_area_struct * mpnt;

	truncate_inode_pages(inode, offset);
	spin_lock(&inode->i_shared_lock);
	if (!inode->i_mmap)
		goto out_unlock;

	pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	partial = (unsigned long)offset & (PAGE_CACHE_SIZE - 1);

	mpnt = inode->i_mmap;
	do {
		struct mm_struct *mm = mpnt->vm_mm;
		unsigned long start = mpnt->vm_start;
		unsigned long end = mpnt->vm_end;
		unsigned long len = end - start;
		unsigned long diff;

		/* mapping wholly truncated? */
		if (mpnt->vm_pgoff >= pgoff) {
			flush_cache_range(mm, start, end);
			zap_page_range(mm, start, len);
			flush_tlb_range(mm, start, end);
			continue;
		}

		/* mapping wholly unaffected? */
		len = len >> PAGE_SHIFT;
		diff = pgoff - mpnt->vm_pgoff;
		if (diff >= len)
			continue;

		/* Ok, partially affected.. */
		start += diff << PAGE_SHIFT;
		len = (len - diff) << PAGE_SHIFT;
		if (start & ~PAGE_MASK) {
			partial_clear(mpnt, start);
			start = (start + ~PAGE_MASK) & PAGE_MASK;
		}
		flush_cache_range(mm, start, end);
		zap_page_range(mm, start, len);
		flush_tlb_range(mm, start, end);
	} while ((mpnt = mpnt->vm_next_share) != NULL);
out_unlock:
	spin_unlock(&inode->i_shared_lock);
}
/*
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 */
void swapin_readahead(swp_entry_t entry)
{
	int i, num;
	struct page *new_page;
	unsigned long offset;

	/*
	 * Get the number of handles we should do readahead io to. Also,
	 * grab temporary references on them, releasing them as io completes.
	 */
	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		/* Don't block on I/O for read-ahead */
		if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster) {
			while (i++ < num)
				swap_free(SWP_ENTRY(SWP_TYPE(entry), offset++));
			break;
		}
		/* Ok, do the async read-ahead now */
		new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
		if (new_page != NULL)
			__free_page(new_page);
		swap_free(SWP_ENTRY(SWP_TYPE(entry), offset));
	}
	return;
}
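
/*
 * Fault in a page that has been swapped out: look it up in (or read it
 * into) the swap cache, and if this is a private write fault with no
 * other users, take it out of the swap cache and map it writable.
 */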
static int do_swap_page(struct task_struct * tsk,
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, swp_entry_t entry, int write_access)
{
	int dograb = 0;
	struct page *page = lookup_swap_cache(entry);
	pte_t pte;

	if (!page) {
		lock_kernel();
		swapin_readahead(entry);
		page = read_swap_cache(entry);
		unlock_kernel();
		if (!page)
			return -1;

		flush_page_to_ram(page);
	}

	vma->vm_mm->rss++;
	tsk->min_flt++;

	pte = mk_pte(page, vma->vm_page_prot);

	set_bit(PG_swap_entry, &page->flags);

	/*
	 * Freeze the "shared"ness of the page, ie page_count + swap_count.
	 * Must lock page before transferring our swap count to already
	 * obtained page count.
	 */
	lock_page(page);
	swap_free(entry);
	if (write_access && !is_page_shared(page)) {
		delete_from_swap_cache_nolock(page);
		page = replace_with_highmem(page);
		pte = mk_pte(page, vma->vm_page_prot);
		pte = pte_mkwrite(pte_mkdirty(pte));
	}
	UnlockPage(page);

	set_pte(page_table, pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	return 1;
}
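
/*
 * Anonymous (no vm_ops->nopage) fault: a read fault maps the global
 * ZERO_PAGE read-only, a write fault allocates and zeroes a fresh page.
 */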
/*
 * This only needs the MM semaphore
 */
static int do_anonymous_page(struct task_struct * tsk, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
	int high = 0;
	struct page *page = NULL;
	pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
	if (write_access) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			return -1;
		if (PageHighMem(page))
			high = 1;
		clear_highpage(page);
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		vma->vm_mm->rss++;
		tsk->min_flt++;
		flush_page_to_ram(page);
	}
	set_pte(page_table, entry);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, entry);
	return 1;
}
/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore held.
 */
static int do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access, pte_t *page_table)
{
	struct page * new_page;
	pte_t entry;

	if (!vma->vm_ops || !vma->vm_ops->nopage)
		return do_anonymous_page(tsk, vma, page_table, write_access, address);

	/*
	 * The third argument is "no_share", which tells the low-level code
	 * to copy, not share the page even if sharing is possible.  It's
	 * essentially an early COW detection.
	 */
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
	if (new_page == NULL)	/* no page was available -- SIGBUS */
		return 0;
	if (new_page == NOPAGE_OOM)
		return -1;
	++tsk->maj_flt;
	++vma->vm_mm->rss;
	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	flush_page_to_ram(new_page);
	entry = mk_pte(new_page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (page_count(new_page) > 1 &&
		   !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	set_pte(page_table, entry);
	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	return 1;
}
/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * Note the "page_table_lock". It is to protect against kswapd removing
 * pages from under us. Note that kswapd only ever _removes_ pages, never
 * adds them. As such, once we have noticed that the page is not present,
 * we can drop the lock early.
 *
 * The adding of pages is protected by the MM semaphore (which we hold),
 * so we don't need to worry about a page suddenly being added into
 * our VM.
 */
static inline int handle_pte_fault(struct task_struct *tsk,
	struct vm_area_struct * vma, unsigned long address,
	int write_access, pte_t * pte)
{
	pte_t entry;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry))
			return do_no_page(tsk, vma, address, write_access, pte);
		return do_swap_page(tsk, vma, address, pte, pte_to_swp_entry(entry), write_access);
	}

	/*
	 * Ok, the entry was present, we need to get the page table
	 * lock to synchronize with kswapd, and verify that the entry
	 * didn't change from under us..
	 */
	spin_lock(&tsk->mm->page_table_lock);
	if (pte_val(entry) == pte_val(*pte)) {
		if (write_access) {
			if (!pte_write(entry))
				return do_wp_page(tsk, vma, address, pte, entry);

			entry = pte_mkdirty(entry);
		}
		entry = pte_mkyoung(entry);
		set_pte(pte, entry);
		flush_tlb_page(vma, address);
		update_mmu_cache(vma, address, entry);
	}
	spin_unlock(&tsk->mm->page_table_lock);
	return 1;
}
/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	int ret = -1;
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	pmd = pmd_alloc(pgd, address);

	if (pmd) {
		pte_t * pte = pte_alloc(pmd, address);
		if (pte)
			ret = handle_pte_fault(tsk, vma, address, write_access, pte);
	}
	return ret;
}
/*
 * Simplistic page force-in..
 */
int make_pages_present(unsigned long addr, unsigned long end)
{
	int write;
	struct task_struct *tsk = current;
	struct vm_area_struct * vma;

	vma = find_vma(tsk->mm, addr);
	write = (vma->vm_flags & VM_WRITE) != 0;
	if (addr >= end)
		BUG();
	do {
		if (handle_mm_fault(tsk, vma, addr, write) < 0)
			return -1;
		addr += PAGE_SIZE;
	} while (addr < end);
	return 0;
}