/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/swapctl.h>
#include <linux/iobuf.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

unsigned long max_mapnr = 0;
unsigned long num_physpages = 0;
void * high_memory = NULL;
struct page *highmem_start_page;

/*
 * We special-case the C-O-W ZERO_PAGE, because it's such
 * a common occurrence (no need to read the page to know
 * that it's zero - better for the cache and memory subsystem).
 */
static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
{
	if (from == ZERO_PAGE(address)) {
		clear_highpage(to);
		return;
	}
	copy_highpage(to, from);
}

mem_map_t * mem_map = NULL;

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}

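/*
 * Free one page directory slot: clear the pgd entry and release the
 * pmd page below it, dropping each pmd's pte page on the way.
 */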
static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD ; j++)
		free_one_pmd(pmd+j);
	pmd_free(pmd);
}

/* Low and high watermarks for the page table cache.
   The system should try to have
   pgt_cache_water[0] <= cache elements <= pgt_cache_water[1]
 */
int pgt_cache_water[2] = { 25, 50 };

/* Returns the number of pages freed */
int check_pgt_cache(void)
{
	return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}

/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way.
 */
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
	pgd_t * page_dir = mm->pgd;

	page_dir += first;
	do {
		free_one_pgd(page_dir);
		page_dir++;
	} while (--nr);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

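/*
 * These masks are non-zero as long as a pte/pmd pointer is still inside
 * its page table page; copy_page_range() uses them to detect when a
 * walk crosses into the next table.
 */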
#define PTE_TABLE_MASK	((PTRS_PER_PTE-1) * sizeof(pte_t))
#define PMD_TABLE_MASK	((PTRS_PER_PMD-1) * sizeof(pmd_t))

/*
 * copy one vm_area from one task to the other. It assumes that the page
 * tables already present in the new task have been cleared in the whole
 * range covered by this vma.
 *
 * 08Jan98 Merged into one routine from several inline routines to reduce
 *         variable count and make things faster. -jj
 */
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pgd_t * src_pgd, * dst_pgd;
	unsigned long address = vma->vm_start;
	unsigned long end = vma->vm_end;
	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	src_pgd = pgd_offset(src, address)-1;
	dst_pgd = pgd_offset(dst, address)-1;

	for (;;) {
		pmd_t * src_pmd, * dst_pmd;

		src_pgd++; dst_pgd++;

		/* copy_pmd_range */

		if (pgd_none(*src_pgd))
			goto skip_copy_pmd_range;
		if (pgd_bad(*src_pgd)) {
			pgd_ERROR(*src_pgd);
			pgd_clear(src_pgd);
skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
			if (!address || (address >= end))
				goto out;
			continue;
		}
		if (pgd_none(*dst_pgd)) {
			if (!pmd_alloc(dst_pgd, 0))
				goto nomem;
		}

		src_pmd = pmd_offset(src_pgd, address);
		dst_pmd = pmd_offset(dst_pgd, address);

		do {
			pte_t * src_pte, * dst_pte;

			/* copy_pte_range */

			if (pmd_none(*src_pmd))
				goto skip_copy_pte_range;
			if (pmd_bad(*src_pmd)) {
				pmd_ERROR(*src_pmd);
				pmd_clear(src_pmd);
skip_copy_pte_range:		address = (address + PMD_SIZE) & PMD_MASK;
				if (address >= end)
					goto out;
				goto cont_copy_pmd_range;
			}
			if (pmd_none(*dst_pmd)) {
				if (!pte_alloc(dst_pmd, 0))
					goto nomem;
			}

			src_pte = pte_offset(src_pmd, address);
			dst_pte = pte_offset(dst_pmd, address);

			do {
				pte_t pte = *src_pte;
				unsigned long page_nr;

				/* copy_one_pte */

				if (pte_none(pte))
					goto cont_copy_pte_range;
				if (!pte_present(pte)) {
					swap_duplicate(pte_to_swp_entry(pte));
					set_pte(dst_pte, pte);
					goto cont_copy_pte_range;
				}
				page_nr = pte_pagenr(pte);
				if (page_nr >= max_mapnr ||
				    PageReserved(mem_map+page_nr)) {
					set_pte(dst_pte, pte);
					goto cont_copy_pte_range;
				}
				/* If it's a COW mapping, write protect it both in the parent and the child */
				if (cow) {
					pte = pte_wrprotect(pte);
					set_pte(src_pte, pte);
				}
				/* If it's a shared mapping, mark it clean in the child */
				if (vma->vm_flags & VM_SHARED)
					pte = pte_mkclean(pte);
				set_pte(dst_pte, pte_mkold(pte));
				get_page(mem_map + page_nr);

cont_copy_pte_range:		address += PAGE_SIZE;
				if (address >= end)
					goto out;
				src_pte++;
				dst_pte++;
			} while ((unsigned long)src_pte & PTE_TABLE_MASK);

cont_copy_pmd_range:	src_pmd++;
			dst_pmd++;
		} while ((unsigned long)src_pmd & PMD_TABLE_MASK);

out:
	return 0;

nomem:
	return -ENOMEM;
}

/*
 * Return indicates whether a page was freed so caller can adjust rss
 */
static inline int free_pte(pte_t page)
{
	if (pte_present(page)) {
		unsigned long nr = pte_pagenr(page);
		if (nr >= max_mapnr || PageReserved(mem_map+nr))
			return 0;
		/*
		 * free_page() used to be able to clear swap cache
		 * entries. We may now have to do it manually.
		 */
		free_page_and_swap_cache(mem_map+nr);
		return 1;
	}
	swap_free(pte_to_swp_entry(page));
	return 0;
}

static inline void forget_pte(pte_t page)
{
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		free_pte(page);
	}
}

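/*
 * Clear the ptes covering [address, address+size) within one pte table
 * and free the pages they mapped. Returns the number of pages freed so
 * the caller can adjust the mm's rss.
 */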
static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	int freed;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	freed = 0;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = *pte;
		pte++;
		size--;
		pte_clear(pte-1);
		if (pte_none(page))
			continue;
		freed += free_pte(page);
	}
	return freed;
}

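/*
 * Walk the pmd entries under one page directory slot, calling
 * zap_pte_range() for each, and accumulate the freed-page count.
 */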
static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	int freed;

	if (pgd_none(*dir))
		return 0;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return 0;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	freed = 0;
	do {
		freed += zap_pte_range(mm, pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return freed;
}

/*
 * remove user pages in a given range.
 */
void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int freed = 0;

	dir = pgd_offset(mm, address);

	/*
	 * This is a long-lived spinlock. That's fine.
	 * There's no contention, because the page table
	 * lock only protects against kswapd anyway, and
	 * even if kswapd happened to be looking at this
	 * process we _want_ it to get stuck.
	 */
	if (address >= end)
		BUG();
	spin_lock(&mm->page_table_lock);
	do {
		freed += zap_pmd_range(mm, dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&mm->page_table_lock);
	/*
	 * Update rss for the mm_struct (not necessarily current->mm)
	 */
	if (mm->rss > 0) {
		mm->rss -= freed;
		if (mm->rss < 0)
			mm->rss = 0;
	}
}

/*
 * Do a quick page-table lookup for a single page.
 */
static struct page * follow_page(unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(current->mm, address);
	pmd = pmd_offset(pgd, address);
	if (pmd) {
		pte_t * pte = pte_offset(pmd, address);
		if (pte && pte_present(*pte))
			return pte_page(*pte);
	}

	printk(KERN_ERR "Missing page in follow_page\n");
	return NULL;
}

/*
 * Given a physical address, is there a useful struct page pointing to it?
 */
struct page * get_page_map(struct page *page, unsigned long vaddr)
{
	if (MAP_NR(vaddr) >= max_mapnr)
		return 0;
	if (page == ZERO_PAGE(vaddr))
		return 0;
	if (PageReserved(page))
		return 0;
	return page;
}

/*
 * Force in an entire range of pages from the current process's user VA,
 * and pin and lock the pages for IO.
 */

#define dprintk(x...)
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	unsigned long ptr, end;
	int err;
	struct mm_struct * mm;
	struct vm_area_struct * vma = 0;
	struct page * map;
	int doublepage = 0;
	int repeat = 0;
	int i;

	/* Make sure the iobuf is not already mapped somewhere. */
	if (iobuf->nr_pages)
		return -EINVAL;

	mm = current->mm;
	dprintk ("map_user_kiobuf: begin\n");

	ptr = va & PAGE_MASK;
	end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
	err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
	if (err)
		return err;

 repeat:
	down(&mm->mmap_sem);

	err = -EFAULT;
	iobuf->locked = 1;
	iobuf->offset = va & ~PAGE_MASK;
	iobuf->length = len;

	i = 0;

	/*
	 * First of all, try to fault in all of the necessary pages
	 */
	while (ptr < end) {
		if (!vma || ptr >= vma->vm_end) {
			vma = find_vma(current->mm, ptr);
			if (!vma)
				goto out_unlock;
		}
		if (handle_mm_fault(current, vma, ptr, (rw==READ)) <= 0)
			goto out_unlock;
		spin_lock(&mm->page_table_lock);
		map = follow_page(ptr);
		if (!map) {
			dprintk (KERN_ERR "Missing page in map_user_kiobuf\n");
			goto retry;
		}
		map = get_page_map(map, ptr);
		if (map) {
			if (TryLockPage(map)) {
				goto retry;
			}
			atomic_inc(&map->count);
		}
		spin_unlock(&mm->page_table_lock);
		iobuf->maplist[i] = map;
		iobuf->nr_pages = ++i;

		ptr += PAGE_SIZE;
	}

	up(&mm->mmap_sem);
	dprintk ("map_user_kiobuf: end OK\n");
	return 0;

 out_unlock:
	up(&mm->mmap_sem);
	unmap_kiobuf(iobuf);
	dprintk ("map_user_kiobuf: end %d\n", err);
	return err;

 retry:

	/*
	 * Undo the locking so far, wait on the page we got to, and try again.
	 */
	spin_unlock(&mm->page_table_lock);
	unmap_kiobuf(iobuf);
	up(&mm->mmap_sem);

	/*
	 * Did the release also unlock the page we got stuck on?
	 */
	if (map) {
		if (!PageLocked(map)) {
			/* If so, we may well have the page mapped twice
			 * in the IO address range.  Bad news.  Of
			 * course, it _might_ just be a coincidence,
			 * but if it happens more than once, chances
			 * are we have a double-mapped page. */
			if (++doublepage >= 3) {
				return -EINVAL;
			}
		}

		/*
		 * Try again...
		 */
		wait_on_page(map);
	}

	if (++repeat < 16) {
		ptr = va & PAGE_MASK;
		goto repeat;
	}
	return -EAGAIN;
}

/*
 * Unmap all of the pages referenced by a kiobuf.  We release the pages,
 * and unlock them if they were locked.
 */
void unmap_kiobuf (struct kiobuf *iobuf)
{
	int i;
	struct page *map;

	for (i = 0; i < iobuf->nr_pages; i++) {
		map = iobuf->maplist[i];

		if (map && iobuf->locked) {
			UnlockPage(map);
			__free_page(map);
		}
	}

	iobuf->nr_pages = 0;
	iobuf->locked = 0;
}

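/*
 * Point every pte in [address, address+size) within one pte table at the
 * (write-protected) ZERO_PAGE, discarding whatever mapping was there.
 */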
static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
		pte_t oldpage = *pte;
		set_pte(pte, zero_pte);
		forget_pte(oldpage);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

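/*
 * Allocate pte tables as needed under one pmd and zero-map each of them
 * in turn. Returns -ENOMEM if a pte table cannot be allocated.
 */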
static inline int zeromap_pmd_range(pmd_t * pmd, unsigned long address,
                                    unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		zeromap_pte_range(pte, address, end - address, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

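/*
 * Map an entire range of the current process's address space to the
 * zero page with the given protection, flushing caches and the TLB
 * for the affected range.
 */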
int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = address;
	unsigned long end = address + size;

	dir = pgd_offset(current->mm, address);
	flush_cache_range(current->mm, beg, end);
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = zeromap_pmd_range(pmd, address, end - address, prot);
		if (error)
			break;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}

/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long mapnr;
		pte_t oldpage = *pte;
		pte_clear(pte);

		mapnr = MAP_NR(__va(phys_addr));
		if (mapnr >= max_mapnr || PageReserved(mem_map+mapnr))
			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

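/*
 * Allocate pte tables as needed under one pmd and remap each of them;
 * phys_addr is pre-biased by -address so the pte level can simply add
 * the current address back onto it.
 */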
static inline int remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

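/*
 * Remap "size" bytes of physical memory starting at phys_addr into the
 * current process's address space at virtual address "from", walking
 * the pgd/pmd levels and flushing caches and the TLB around the update.
 */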
int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;

	phys_addr -= from;
	dir = pgd_offset(current->mm, from);
	flush_cache_range(current->mm, beg, end);
	if (from >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = remap_pmd_range(pmd, from, end - from, phys_addr + from, prot);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (from && (from < end));
	flush_tlb_range(current->mm, beg, end);
	return error;
}

/*
 * Establish a new mapping:
 *  - flush the old one
 *  - update the page tables
 *  - inform the TLB about the new one
 */
static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
{
	flush_tlb_page(vma, address);
	set_pte(page_table, entry);
	update_mmu_cache(vma, address, entry);
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with the page table read-lock held, and need to exit without
 * it.
 */
static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, pte_t *page_table, pte_t pte)
{
	unsigned long map_nr;
	struct page *old_page, *new_page;

	map_nr = pte_pagenr(pte);
	if (map_nr >= max_mapnr)
		goto bad_wp_page;
	tsk->min_flt++;
	old_page = mem_map + map_nr;

	/*
	 * We can avoid the copy if:
	 * - we're the only user (count == 1)
	 * - the only other user is the swap cache,
	 *   and the only swap cache user is itself,
	 *   in which case we can remove the page
	 *   from the swap cache.
	 */
	switch (page_count(old_page)) {
	case 2:
		/*
		 * Lock the page so that no one can look it up from
		 * the swap cache, grab a reference and start using it.
		 * Can not do lock_page, holding page_table_lock.
		 */
		if (!PageSwapCache(old_page) || TryLockPage(old_page))
			break;
		if (is_page_shared(old_page)) {
			UnlockPage(old_page);
			break;
		}
		delete_from_swap_cache_nolock(old_page);
		UnlockPage(old_page);
		/* FallThrough */
	case 1:
		flush_cache_page(vma, address);
		establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
		spin_unlock(&tsk->mm->page_table_lock);
		return 1;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	spin_unlock(&tsk->mm->page_table_lock);
	new_page = alloc_page(GFP_HIGHUSER);
	if (!new_page)
		return -1;
	spin_lock(&tsk->mm->page_table_lock);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	if (pte_val(*page_table) == pte_val(pte)) {
		if (PageReserved(old_page))
			++vma->vm_mm->rss;
		copy_cow_page(old_page, new_page, address);
		flush_page_to_ram(new_page);
		flush_cache_page(vma, address);
		establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));

		/* Free the old page.. */
		new_page = old_page;
	}
	spin_unlock(&tsk->mm->page_table_lock);
	__free_page(new_page);
	return 1;

bad_wp_page:
	spin_unlock(&tsk->mm->page_table_lock);
	printk("do_wp_page: bogus page at address %08lx (nr %ld)\n", address, map_nr);
	return -1;
}

/*
 * This function zeroes out partial mmap'ed pages at truncation time..
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
	unsigned int offset;
	struct page *page;
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;

	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		return;
	if (pgd_bad(*page_dir)) {
		pgd_ERROR(*page_dir);
		pgd_clear(page_dir);
		return;
	}
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		return;
	if (pmd_bad(*page_middle)) {
		pmd_ERROR(*page_middle);
		pmd_clear(page_middle);
		return;
	}
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		return;
	flush_cache_page(vma, address);
	page = pte_page(pte);
	if (page-mem_map >= max_mapnr)
		return;
	offset = address & ~PAGE_MASK;
	memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
}

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
void vmtruncate(struct inode * inode, loff_t offset)
{
	unsigned long partial, pgoff;
	struct vm_area_struct * mpnt;
	struct address_space *mapping = inode->i_mapping;

	if (inode->i_size < offset)
		goto out;
	inode->i_size = offset;
	truncate_inode_pages(mapping, offset);
	spin_lock(&mapping->i_shared_lock);
	if (!mapping->i_mmap)
		goto out_unlock;

	pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	partial = (unsigned long)offset & (PAGE_CACHE_SIZE - 1);

	mpnt = mapping->i_mmap;
	do {
		struct mm_struct *mm = mpnt->vm_mm;
		unsigned long start = mpnt->vm_start;
		unsigned long end = mpnt->vm_end;
		unsigned long len = end - start;
		unsigned long diff;

		/* mapping wholly truncated? */
		if (mpnt->vm_pgoff >= pgoff) {
			flush_cache_range(mm, start, end);
			zap_page_range(mm, start, len);
			flush_tlb_range(mm, start, end);
			continue;
		}

		/* mapping wholly unaffected? */
		len = len >> PAGE_SHIFT;
		diff = pgoff - mpnt->vm_pgoff;
		if (diff >= len)
			continue;

		/* Ok, partially affected.. */
		start += diff << PAGE_SHIFT;
		len = (len - diff) << PAGE_SHIFT;
		if (start & ~PAGE_MASK) {
			partial_clear(mpnt, start);
			start = (start + ~PAGE_MASK) & PAGE_MASK;
		}
		flush_cache_range(mm, start, end);
		zap_page_range(mm, start, len);
		flush_tlb_range(mm, start, end);
	} while ((mpnt = mpnt->vm_next_share) != NULL);
out_unlock:
	spin_unlock(&mapping->i_shared_lock);
out:
	/* this should go into ->truncate */
	inode->i_size = offset;
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
}

/*
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 */
void swapin_readahead(swp_entry_t entry)
{
	int i, num;
	struct page *new_page;
	unsigned long offset;

	/*
	 * Get the number of handles we should do readahead io to. Also,
	 * grab temporary references on them, releasing them as io completes.
	 */
	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		/* Don't block on I/O for read-ahead */
		if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster) {
			while (i++ < num)
				swap_free(SWP_ENTRY(SWP_TYPE(entry), offset++));
			break;
		}
		/* Ok, do the async read-ahead now */
		new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
		if (new_page != NULL)
			__free_page(new_page);
		swap_free(SWP_ENTRY(SWP_TYPE(entry), offset));
	}
	return;
}

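/*
 * Fault in a page that was swapped out: look it up in the swap cache
 * (reading it in, with readahead, if necessary), charge it to the mm's
 * rss, and install the new pte. For a private write fault the page is
 * taken out of the swap cache and made writable and dirty immediately.
 */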
static int do_swap_page(struct task_struct * tsk,
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, swp_entry_t entry, int write_access)
{
	struct page *page = lookup_swap_cache(entry);
	pte_t pte;

	if (!page) {
		lock_kernel();
		swapin_readahead(entry);
		page = read_swap_cache(entry);
		unlock_kernel();
		if (!page)
			return -1;

		flush_page_to_ram(page);
		flush_icache_page(vma, page);
	}

	vma->vm_mm->rss++;
	tsk->min_flt++;

	pte = mk_pte(page, vma->vm_page_prot);

	set_bit(PG_swap_entry, &page->flags);

	/*
	 * Freeze the "shared"ness of the page, ie page_count + swap_count.
	 * Must lock page before transferring our swap count to already
	 * obtained page count.
	 */
	lock_page(page);
	swap_free(entry);
	if (write_access && !is_page_shared(page)) {
		delete_from_swap_cache_nolock(page);
		UnlockPage(page);
		page = replace_with_highmem(page);
		pte = mk_pte(page, vma->vm_page_prot);
		pte = pte_mkwrite(pte_mkdirty(pte));
	} else
		UnlockPage(page);

	set_pte(page_table, pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	return 1;
}

/*
 * This only needs the MM semaphore
 */
static int do_anonymous_page(struct task_struct * tsk, struct vm_area_struct * vma, pte_t *page_table, int write_access, unsigned long addr)
{
	int high = 0;
	struct page *page = NULL;
	pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
	if (write_access) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			return -1;
		if (PageHighMem(page))
			high = 1;
		clear_highpage(page);
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		vma->vm_mm->rss++;
		tsk->min_flt++;
		flush_page_to_ram(page);
	}
	set_pte(page_table, entry);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, entry);
	return 1;
}

/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore held.
 */
static int do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access, pte_t *page_table)
{
	struct page * new_page;
	pte_t entry;

	if (!vma->vm_ops || !vma->vm_ops->nopage)
		return do_anonymous_page(tsk, vma, page_table, write_access, address);

	/*
	 * The third argument is "no_share", which tells the low-level code
	 * to copy, not share the page even if sharing is possible.  It's
	 * essentially an early COW detection.
	 */
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
	if (new_page == NULL)	/* no page was available -- SIGBUS */
		return 0;
	if (new_page == NOPAGE_OOM)
		return -1;
	++tsk->maj_flt;
	++vma->vm_mm->rss;
	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	flush_page_to_ram(new_page);
	flush_icache_page(vma, new_page);
	entry = mk_pte(new_page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (page_count(new_page) > 1 &&
		   !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	set_pte(page_table, entry);
	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	return 1;
}

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * Note the "page_table_lock". It is to protect against kswapd removing
 * pages from under us. Note that kswapd only ever _removes_ pages, never
 * adds them. As such, once we have noticed that the page is not present,
 * we can drop the lock early.
 *
 * The adding of pages is protected by the MM semaphore (which we hold),
 * so we don't need to worry about a page suddenly being added into
 * our VM.
 */
static inline int handle_pte_fault(struct task_struct *tsk,
	struct vm_area_struct * vma, unsigned long address,
	int write_access, pte_t * pte)
{
	pte_t entry;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry))
			return do_no_page(tsk, vma, address, write_access, pte);
		return do_swap_page(tsk, vma, address, pte, pte_to_swp_entry(entry), write_access);
	}

	/*
	 * Ok, the entry was present, we need to get the page table
	 * lock to synchronize with kswapd, and verify that the entry
	 * didn't change from under us..
	 */
	spin_lock(&tsk->mm->page_table_lock);
	if (pte_val(entry) == pte_val(*pte)) {
		if (write_access) {
			if (!pte_write(entry))
				return do_wp_page(tsk, vma, address, pte, entry);

			entry = pte_mkdirty(entry);
		}
		entry = pte_mkyoung(entry);
		establish_pte(vma, address, pte, entry);
	}
	spin_unlock(&tsk->mm->page_table_lock);
	return 1;
}

/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	int ret = -1;
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	pmd = pmd_alloc(pgd, address);

	if (pmd) {
		pte_t * pte = pte_alloc(pmd, address);
		if (pte)
			ret = handle_pte_fault(tsk, vma, address, write_access, pte);
	}
	return ret;
}

/*
 * Simplistic page force-in..
 */
int make_pages_present(unsigned long addr, unsigned long end)
{
	int write;
	struct task_struct *tsk = current;
	struct vm_area_struct * vma;

	vma = find_vma(tsk->mm, addr);
	write = (vma->vm_flags & VM_WRITE) != 0;
	if (addr >= end)
		BUG();
	do {
		if (handle_mm_fault(tsk, vma, addr, write) < 0)
			return -1;
		addr += PAGE_SIZE;
	} while (addr < end);
	return 0;
}