/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */
#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

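/*
 * vmlist is the list of currently allocated vm areas, kept sorted by
 * start address.  vmlist_lock protects it: lookups (e.g. vread) take it
 * for read, insertions and removals take it for write.
 */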
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist = NULL;

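/*
 * Clear the pte entries covering [address, address + size) within one
 * pmd and free each backing page, unless it lies beyond max_mapnr or
 * is reserved.  A swapped-out entry in a kernel page table should
 * never happen and is only reported.
 */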
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;
		pte_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			unsigned long map_nr = pte_pagenr(page);
			if ((map_nr < max_mapnr) &&
			    (!PageReserved(mem_map + map_nr)))
				__free_page(mem_map + map_nr);
			continue;
		}
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}

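/*
 * Walk the pmd entries covering [address, address + size) under one
 * page-directory entry, handing each pmd's subrange to free_area_pte().
 */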
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		free_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

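/*
 * Tear down the kernel mappings for [address, address + size), freeing
 * the pages behind them.  Caches are flushed before the page tables
 * are touched and the TLB afterwards, so no stale translations survive.
 */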
void vmfree_area_pages(unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		free_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
}

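/*
 * Fill the pte slots covering [address, address + size) within one
 * pmd: allocate a fresh page for each slot and map it with PAGE_KERNEL
 * protections.  An already-present pte is reported (and overwritten).
 * Returns -ENOMEM if a page allocation fails.
 */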
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
			unsigned long size, int gfp_mask)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page * page;
		if (!pte_none(*pte))
			printk(KERN_ERR "alloc_area_pte: page already exists\n");
		page = alloc_page(gfp_mask);
		if (!page)
			return -ENOMEM;
		set_pte(pte, mk_pte(page, PAGE_KERNEL));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}

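/*
 * Populate the pmd entries covering [address, address + size) under
 * one page-directory entry: allocate a kernel pte table for each slot
 * and fill it via alloc_area_pte().
 */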
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		if (alloc_area_pte(pte, address, end - address, gfp_mask))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

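/*
 * Build kernel page tables mapping [address, address + size), backing
 * every pte with a freshly allocated page.  When a new page-directory
 * entry is created, set_pgdir() propagates it to the other page
 * directories in the system.  On failure the partially built mappings
 * are left for the caller to tear down (see __vmalloc below).
 */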
inline int vmalloc_area_pages (unsigned long address, unsigned long size,
			int gfp_mask)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		pmd_t *pmd;
		pgd_t olddir = *dir;

		pmd = pmd_alloc_kernel(dir, address);
		if (!pmd)
			return -ENOMEM;
		if (alloc_area_pmd(pmd, address, end - address, gfp_mask))
			return -ENOMEM;
		if (pgd_val(olddir) != pgd_val(*dir))
			set_pgdir(address, *dir);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return 0;
}

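/*
 * First-fit search of the sorted vmlist for a hole large enough for
 * "size" bytes, recording the new area in the list.  The stored size
 * includes one extra, never-mapped page, so each area is followed by
 * a guard hole that catches overruns with a fault.
 */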
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = VMALLOC_START;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long) tmp->addr)
			break;
		addr = tmp->size + (unsigned long) tmp->addr;
		if (addr > VMALLOC_END-size) {
			write_unlock(&vmlist_lock);
			kfree(area);
			return NULL;
		}
	}
	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size + PAGE_SIZE;
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);
	return area;
}

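/*
 * Release an area previously set up through get_vm_area(): unlink it
 * from vmlist, unmap and free its pages, and free the vm_struct.  The
 * address must be page aligned, exactly as it was handed out.
 */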
void vfree(void * addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			kfree(tmp);
			write_unlock(&vmlist_lock);
			return;
		}
	}
	write_unlock(&vmlist_lock);
	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}

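/*
 * Allocate "size" bytes that are contiguous in kernel virtual space
 * but may be physically scattered.  A minimal usage sketch (callers
 * normally go through the vmalloc() wrapper from <linux/vmalloc.h>,
 * which supplies the gfp mask):
 *
 *	void *p = __vmalloc(8192, GFP_KERNEL);
 *	if (p) {
 *		... use p ...
 *		vfree(p);
 *	}
 */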
void * __vmalloc (unsigned long size, int gfp_mask)
{
	void * addr;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages) {
		BUG();
		return NULL;
	}
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		BUG();
		return NULL;
	}
	addr = area->addr;
	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask)) {
		vfree(addr);
		BUG();
		return NULL;
	}
	return addr;
}

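/*
 * Copy up to "count" bytes of the vmalloc address range starting at
 * "addr" into "buf" (used for reading kernel virtual memory, e.g. via
 * /dev/kmem).  Gaps between areas read as zeroes, the trailing guard
 * page of each area is skipped, and the number of bytes stored is
 * returned.
 */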
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}