mm/pagewalk.c (from linux-2.6/mini2440.git)
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
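
/*
 * Walk the ptes mapped by one pmd entry: ->pte_entry is invoked once
 * per PAGE_SIZE slot in [addr, end).
 */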
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  const struct mm_walk *walk, void *private)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	do {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private);
		if (err)
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_unmap(pte);
	return err;
}
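
/*
 * Walk the pmds of one pud, reporting unmapped ranges through
 * ->pte_hole and descending into walk_pte_range() only when a
 * ->pte_entry callback was supplied.
 */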
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  const struct mm_walk *walk, void *private)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, private);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, private);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk, private);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
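
/*
 * Walk the puds of one pgd; the structure mirrors walk_pmd_range(),
 * one level up.
 */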
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  const struct mm_walk *walk, void *private)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, private);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, private);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk, private);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}
/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @mm: memory map to walk
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 * @private: private data passed to the callback function
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a caller-supplied private data pointer.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 */
int walk_page_range(const struct mm_struct *mm,
		    unsigned long addr, unsigned long end,
		    const struct mm_walk *walk, void *private)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, private);
			if (err)
				break;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, private);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk, private);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
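
/*
 * Usage sketch (not part of the original file): a caller that counts
 * the pages currently present in a range by supplying only a pte_entry
 * callback, so the walker skips the pgd/pud/pmd hooks and holes. The
 * names count_pte, count_walk and count_present_pages are hypothetical.
 */
static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
		     void *private)
{
	unsigned long *nr = private;	/* caller's counter */

	if (pte_present(*pte))
		(*nr)++;
	return 0;
}

static const struct mm_walk count_walk = {
	.pte_entry	= count_pte,
};

static unsigned long count_present_pages(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	unsigned long nr = 0;

	/* A non-zero return would abort the walk; count_pte never fails. */
	walk_page_range(mm, start, end, &count_walk, &nr);
	return nr;
}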