 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
8 #include <linux/malloc.h>
9 #include <linux/vmalloc.h>
11 #include <asm/uaccess.h>
12 #include <asm/pgalloc.h>
/*
 * Global singly-linked list of allocated kernel virtual memory areas,
 * chained through vm_struct.next.  Scanned by get_vm_area()/vfree()/vread().
 * NOTE(review): appears to be kept sorted by start address (get_vm_area
 * searches for the first gap) — confirm against the full file.
 */
14 struct vm_struct
* vmlist
= NULL
;
/*
 * free_area_pte - free the physical pages mapped by one pmd's worth of ptes.
 * @pmd:     page-middle-directory entry covering @address
 * @address: start of the virtual range to tear down
 * @size:    length of the range in bytes
 *
 * NOTE(review): this extract is missing several original lines (the local
 * declarations of pte/end/page, the pmd_none/pmd_bad guards, pte_clear,
 * and the do { opener) — consult the full file before changing anything.
 */
16 static inline void free_area_pte(pmd_t
* pmd
, unsigned long address
, unsigned long size
)
/* Locate the first pte entry for @address within this pmd. */
28 pte
= pte_offset(pmd
, address
);
/* For each mapped pte: if the page frame is a real mem_map-covered
 * frame (not a fixmap/reserved mapping), drop its reference. */
40 if (pte_present(page
)) {
41 unsigned long map_nr
= pte_pagenr(page
);
42 if (map_nr
< max_mapnr
)
43 __free_page(mem_map
+ map_nr
);
/* Non-present but non-empty pte: a kernel page can never legitimately
 * be swapped out, so this indicates page-table corruption. */
46 printk(KERN_CRIT
"Whee.. Swapped out page in kernel page table\n");
47 } while (address
< end
);
/*
 * free_area_pmd - walk the pmd entries under one pgd and free their ptes.
 * @dir:     page-global-directory entry covering @address
 * @address: start of the virtual range to tear down
 * @size:    length of the range in bytes
 *
 * NOTE(review): extract is missing the local declarations, the
 * pgd_none/pgd_bad guards, the end-clamping to PGDIR_SIZE and the
 * do { opener — see the full file.
 */
50 static inline void free_area_pmd(pgd_t
* dir
, unsigned long address
, unsigned long size
)
62 pmd
= pmd_offset(dir
, address
);
/* Keep only the offset within this pgd entry's span. */
63 address
&= ~PGDIR_MASK
;
/* Free each pmd's worth of ptes, then advance to the next
 * PMD_SIZE-aligned boundary. */
68 free_area_pte(pmd
, address
, end
- address
);
69 address
= (address
+ PMD_SIZE
) & PMD_MASK
;
71 } while (address
< end
);
/*
 * vmfree_area_pages - unmap and free a range of kernel virtual memory.
 * @address: start of the range (kernel virtual address)
 * @size:    length in bytes
 *
 * Walks the kernel page tables pgd-by-pgd, delegating to free_area_pmd().
 * NOTE(review): extract is missing the pgd_t *dir declaration, the do {
 * opener, the dir++ advance and the trailing TLB flush — see the full file.
 */
74 void vmfree_area_pages(unsigned long address
, unsigned long size
)
77 unsigned long end
= address
+ size
;
79 dir
= pgd_offset_k(address
);
82 free_area_pmd(dir
, address
, end
- address
);
/* Advance to the next PGDIR_SIZE-aligned boundary. */
83 address
= (address
+ PGDIR_SIZE
) & PGDIR_MASK
;
/* The extra "address &&" term stops the loop if address wraps to 0
 * at the top of the address space. */
85 } while (address
&& (address
< end
));
/*
 * alloc_area_pte - populate one pmd's worth of ptes with fresh pages.
 * @pte:     first pte entry to fill
 * @address: start of the virtual range
 * @size:    length in bytes
 *
 * Returns 0 on success, nonzero on allocation failure (per the int return
 * type; the failure return itself is among the lines missing from this
 * extract — NOTE(review): also missing are the local declarations, the
 * do { opener and the pte_none() check guarding the "already exists" path).
 */
89 static inline int alloc_area_pte(pte_t
* pte
, unsigned long address
, unsigned long size
)
/* A pte that is already populated here means someone double-mapped
 * this vmalloc range — report it. */
100 printk(KERN_ERR
"alloc_area_pte: page already exists\n");
/* __GFP_HIGHMEM: vmalloc pages may come from highmem since they are
 * reached through this mapping, not the direct map. */
101 page
= alloc_page(GFP_KERNEL
|__GFP_HIGHMEM
);
104 set_pte(pte
, mk_pte(page
, PAGE_KERNEL
));
105 address
+= PAGE_SIZE
;
107 } while (address
< end
);
/*
 * alloc_area_pmd - allocate pte tables and pages for one pgd's span.
 * @pmd:     first pmd entry to fill
 * @address: start of the virtual range
 * @size:    length in bytes
 *
 * Returns 0 on success, nonzero (presumably -ENOMEM) on failure.
 * NOTE(review): extract is missing the do { opener, the clamp assignment
 * under the PGDIR_SIZE check, the NULL-pte failure return and pmd++ —
 * see the full file.
 */
111 static inline int alloc_area_pmd(pmd_t
* pmd
, unsigned long address
, unsigned long size
)
/* Reduce address to its offset within this pgd entry, and cap the
 * end so we never run past this pgd's PGDIR_SIZE span. */
115 address
&= ~PGDIR_MASK
;
116 end
= address
+ size
;
117 if (end
> PGDIR_SIZE
)
/* Get (or create) the pte table for this pmd entry. */
120 pte_t
* pte
= pte_alloc_kernel(pmd
, address
);
123 if (alloc_area_pte(pte
, address
, end
- address
))
125 address
= (address
+ PMD_SIZE
) & PMD_MASK
;
127 } while (address
< end
);
/*
 * vmalloc_area_pages - back a kernel virtual range with freshly
 * allocated, mapped pages.
 * @address: start of the range (kernel virtual address)
 * @size:    length in bytes
 *
 * Returns 0 on success, nonzero on allocation failure.
 * NOTE(review): extract is missing the dir/olddir declarations, the
 * olddir = *dir snapshot, the do { opener, dir++, the failure return
 * and the final TLB flush — see the full file.
 */
131 int vmalloc_area_pages(unsigned long address
, unsigned long size
)
134 unsigned long end
= address
+ size
;
136 dir
= pgd_offset_k(address
);
142 pmd
= pmd_alloc_kernel(dir
, address
);
145 if (alloc_area_pmd(pmd
, address
, end
- address
))
/* If filling this span installed a new pmd table into the pgd,
 * propagate the updated entry to every task's page directory
 * (set_pgdir) on architectures that need it. */
147 if (pgd_val(olddir
) != pgd_val(*dir
))
148 set_pgdir(address
, *dir
);
149 address
= (address
+ PGDIR_SIZE
) & PGDIR_MASK
;
/* "address &&" guards against wrap-around to 0 at the top of the
 * address space. */
151 } while (address
&& (address
< end
));
/*
 * get_vm_area - reserve a region of kernel virtual address space.
 * @size:  requested usable size in bytes
 * @flags: VM_* flags stored in the new vm_struct
 *
 * Scans vmlist for the first gap in [VMALLOC_START, VMALLOC_END) large
 * enough for @size plus a one-page guard hole, and returns a kmalloc'd
 * vm_struct describing it (NULL on failure).
 * NOTE(review): extract is missing the NULL check after kmalloc, the
 * list-insertion of the new area, the flags assignment, the failure
 * kfree path and the return statements — see the full file.
 */
156 struct vm_struct
* get_vm_area(unsigned long size
, unsigned long flags
)
159 struct vm_struct
**p
, *tmp
, *area
;
161 area
= (struct vm_struct
*) kmalloc(sizeof(*area
), GFP_KERNEL
);
/* First-fit search: walk the (address-ordered) list until the gap
 * before tmp can hold size bytes. */
164 addr
= VMALLOC_START
;
165 for (p
= &vmlist
; (tmp
= *p
) ; p
= &tmp
->next
) {
166 if (size
+ addr
< (unsigned long) tmp
->addr
)
168 addr
= tmp
->size
+ (unsigned long) tmp
->addr
;
/* Ran off the top of the vmalloc arena: no room. */
169 if (addr
> VMALLOC_END
-size
) {
175 area
->addr
= (void *)addr
;
/* The extra PAGE_SIZE is an unmapped guard page after the area,
 * so linear overruns fault instead of corrupting the neighbour. */
176 area
->size
= size
+ PAGE_SIZE
;
/*
 * vfree - release a region previously obtained from vmalloc().
 * @addr: value returned by vmalloc(); NULL/unknown addresses are
 *        reported with printk rather than crashing.
 *
 * Finds the matching vm_struct on vmlist, tears down its page-table
 * mappings and frees the backing pages via vmfree_area_pages().
 * NOTE(review): extract is missing the NULL-addr early return, the
 * list unlink (*p = tmp->next), the kfree of the vm_struct and the
 * return statements — see the full file.  Also note: no locking is
 * visible around the vmlist walk here — confirm the caller context.
 */
182 void vfree(void * addr
)
184 struct vm_struct
**p
, *tmp
;
/* vmalloc results are always page-aligned; anything else is a bug. */
188 if ((PAGE_SIZE
-1) & (unsigned long) addr
) {
189 printk(KERN_ERR
"Trying to vfree() bad address (%p)\n", addr
);
192 for (p
= &vmlist
; (tmp
= *p
) ; p
= &tmp
->next
) {
193 if (tmp
->addr
== addr
) {
/* tmp->size includes the guard page added by get_vm_area(). */
195 vmfree_area_pages(VMALLOC_VMADDR(tmp
->addr
), tmp
->size
);
200 printk(KERN_ERR
"Trying to vfree() nonexistent vm area (%p)\n", addr
);
/*
 * vmalloc - allocate virtually-contiguous (not physically-contiguous)
 * kernel memory.
 * @size: requested size in bytes; rounded up to a whole number of pages.
 *
 * Returns a page-aligned kernel virtual pointer, or NULL on failure.
 * NOTE(review): extract is missing the NULL returns for the failure
 * paths, the addr = area->addr assignment, the vfree-on-failure cleanup
 * and the final return — see the full file.
 */
203 void * vmalloc(unsigned long size
)
206 struct vm_struct
*area
;
208 size
= PAGE_ALIGN(size
);
/* Reject zero-size and absurd requests: asking for more pages than
 * exist in the machine (max_mapnr) can never succeed. */
209 if (!size
|| (size
>> PAGE_SHIFT
) > max_mapnr
) {
213 area
= get_vm_area(size
, VM_ALLOC
);
/* Map fresh pages into the reserved range; nonzero means -ENOMEM. */
219 if (vmalloc_area_pages(VMALLOC_VMADDR(addr
), size
)) {
/*
 * vread - copy data out of the vmalloc address range (e.g. for /proc/kcore).
 * @buf:   destination buffer (copied to with put_user — presumably a
 *         user-space pointer; confirm against callers)
 * @addr:  source address inside the vmalloc arena
 * @count: maximum number of bytes to read
 *
 * Walks every area on vmlist, zero-filling (in the missing lines) any
 * gap before an area and copying bytes one at a time from mapped areas.
 * Returns the number of bytes written to @buf.
 * NOTE(review): extract is missing the count==0 checks, the gap-filling
 * body of the while loop, the per-byte copy loop bounds and the early
 * "finished" exits — see the full file.
 */
227 long vread(char *buf
, char *addr
, unsigned long count
)
229 struct vm_struct
*tmp
;
230 char *vaddr
, *buf_start
= buf
;
233 /* Don't allow overflow */
/* If addr + count wraps, clamp count so the copy stops at the top
 * of the address space. */
234 if ((unsigned long) addr
+ count
< count
)
235 count
= -(unsigned long) addr
;
237 for (tmp
= vmlist
; tmp
; tmp
= tmp
->next
) {
238 vaddr
= (char *) tmp
->addr
;
/* "- PAGE_SIZE" excludes the guard page counted in tmp->size;
 * areas wholly below addr are skipped. */
239 if (addr
>= vaddr
+ tmp
->size
- PAGE_SIZE
)
241 while (addr
< vaddr
) {
/* Bytes of this area still readable from addr onward. */
249 n
= vaddr
+ tmp
->size
- PAGE_SIZE
- addr
;
253 put_user(*addr
, buf
);
/* Total bytes delivered. */
260 return buf
- buf_start
;