/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 */
#include <linux/malloc.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

static struct vm_struct * vmlist = NULL;
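
/*
 * Unmap one pte table's worth of a vmalloc area: clear each entry and
 * free the page behind it.  A corrupt pmd entry is reported and
 * cleared instead of being followed.  Kernel pages are never swapped
 * out, so a swap entry here can only mean corruption and is worth a
 * warning.
 */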
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        while (address < end) {
                pte_t page = *pte;
                pte_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        free_page(pte_page(page));
                        continue;
                }
                printk("Whee.. Swapped out page in kernel page table\n");
        }
}
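
/*
 * Walk the pmd entries under one page directory entry, handing each
 * pte table in the range to free_area_pte().
 */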
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                printk("free_area_pmd: bad pgd (%08lx)\n", pgd_val(*dir));
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        while (address < end) {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        }
}
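
/*
 * Tear down the kernel mappings for [address, address + size).  The
 * caches are flushed before the page tables are touched and the TLB
 * afterwards, once the mappings are really gone.
 */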
void vmfree_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        while (address < end) {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_all();
}
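
/*
 * Fill one pte table: back every slot in the range with a freshly
 * allocated physical page mapped with PAGE_KERNEL protections.
 */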
static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        while (address < end) {
                unsigned long page;
                if (!pte_none(*pte))
                        printk("alloc_area_pte: page already exists\n");
                page = __get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                set_pte(pte, mk_pte(page, PAGE_KERNEL));
                address += PAGE_SIZE;
                pte++;
        }
        return 0;
}
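
/*
 * Allocate and fill the pte tables for one page directory entry's
 * worth of the range.
 */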
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        while (address < end) {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        }
        return 0;
}
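
/*
 * Build the kernel page tables for [address, address + size).  If
 * allocating a pmd changed the pgd entry, set_pgdir() propagates the
 * new entry to the other page directories in the system, so the
 * mapping is visible from every process's kernel context.  A partial
 * failure leaves the tables built so far in place; the caller is
 * expected to clean up with vmfree_area_pages() (vmalloc() does this
 * via vfree()).
 */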
int vmalloc_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        while (address < end) {
                pmd_t *pmd;
                pgd_t olddir = *dir;

                pmd = pmd_alloc_kernel(dir, address);
                if (!pmd)
                        return -ENOMEM;
                if (alloc_area_pmd(pmd, address, end - address))
                        return -ENOMEM;
                if (pgd_val(olddir) != pgd_val(*dir))
                        set_pgdir(address, *dir);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_all();
        return 0;
}
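
/*
 * Find a free range of kernel virtual addresses between VMALLOC_START
 * and VMALLOC_END.  The vmlist is kept sorted by address, and every
 * area is followed by an unmapped guard page so that an overrun
 * faults instead of silently corrupting a neighbour.
 */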
struct vm_struct * get_vm_area(unsigned long size)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        size += PAGE_SIZE;      /* include the guard page in the search */
        addr = VMALLOC_START;
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long) tmp->addr)
                        break;
                addr = tmp->size + (unsigned long) tmp->addr;
                if (addr > VMALLOC_END-size) {
                        kfree(area);
                        return NULL;
                }
        }
        area->addr = (void *)addr;
        area->size = size;
        area->next = *p;
        *p = area;
        return area;
}
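
/*
 * Release an area previously obtained from vmalloc(): unlink it from
 * vmlist, free its pages and page table entries, then free the
 * descriptor itself.  A misaligned or unknown pointer only draws a
 * complaint.
 */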
void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk("Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        kfree(tmp);
                        return;
                }
        }
        printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
}
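
/*
 * Allocate virtually contiguous memory backed by individually
 * allocated, not necessarily physically contiguous, pages.  A size of
 * zero, or one larger than all of physical memory, is rejected.
 */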
void * vmalloc(unsigned long size)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || size > (max_mapnr << PAGE_SHIFT))
                return NULL;
        area = get_vm_area(size);
        if (!area)
                return NULL;
        addr = area->addr;
        if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}
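
/*
 * Copy up to count bytes of the vmalloc arena, starting at addr, into
 * a user buffer.  Gaps between areas read back as zeroes and each
 * area's trailing guard page is skipped, so a caller may scan the
 * whole range without faulting.
 */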
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        put_user('\0', buf);
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        put_user(*addr, buf);
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        return buf - buf_start;
}