/*
 * Copyright (C) 1993 Linus Torvalds
 */
#include <linux/malloc.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
static struct vm_struct * vmlist = NULL;
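/*
 * free_area_pte() releases the ptes mapped by one pmd entry over
 * [address, address+size), freeing every present page.  A swapped-out
 * pte should never appear in the kernel page tables; it is only reported.
 */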
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	while (address < end) {
		pte_t page = *pte;
		pte_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			free_page(pte_page(page));
			continue;
		}
		printk("Whee.. Swapped out page in kernel page table\n");
	}
}
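/*
 * free_area_pmd() walks the pmd entries under one page directory entry
 * and hands each range to free_area_pte().
 */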
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		printk("free_area_pmd: bad pgd (%08lx)\n", pgd_val(*dir));
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	while (address < end) {
		free_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	}
}
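/*
 * vmfree_area_pages() unmaps a kernel virtual range: it walks the kernel
 * page directory and frees every page backing [address, address+size).
 */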
void vmfree_area_pages(unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	while (address < end) {
		free_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
}
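/*
 * alloc_area_pte() allocates one fresh page for every pte in
 * [address, address+size) and maps it with kernel permissions.
 */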
static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	while (address < end) {
		unsigned long page;

		if (!pte_none(*pte))
			printk("alloc_area_pte: page already exists\n");
		page = __get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_pte(pte, mk_pte(page, PAGE_KERNEL));
		address += PAGE_SIZE;
		pte++;
	}
	return 0;
}
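/*
 * alloc_area_pmd() makes sure pte tables exist for every pmd entry in
 * [address, address+size) and populates them via alloc_area_pte().
 */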
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	while (address < end) {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		if (alloc_area_pte(pte, address, end - address))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	}
	return 0;
}
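/*
 * vmalloc_area_pages() backs a kernel virtual range with freshly allocated
 * pages, creating pmd/pte tables as needed.  A changed page directory entry
 * is propagated to all page directories with set_pgdir().
 */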
int vmalloc_area_pages(unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	while (address < end) {
		pgd_t olddir = *dir;
		pmd_t * pmd;

		pmd = pmd_alloc_kernel(dir, address);
		if (!pmd)
			return -ENOMEM;
		if (alloc_area_pmd(pmd, address, end - address))
			return -ENOMEM;
		if (pgd_val(olddir) != pgd_val(*dir))
			set_pgdir(address, *dir);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	return 0;
}
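/*
 * get_vm_area() finds a free range of kernel virtual addresses between
 * VMALLOC_START and VMALLOC_END large enough for size plus one unmapped
 * guard page, and links a new vm_struct describing it into vmlist.
 */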
struct vm_struct * get_vm_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = VMALLOC_START;
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long) tmp->addr)
			break;
		addr = tmp->size + (unsigned long) tmp->addr;
		if (addr > VMALLOC_END-size) {
			kfree(area);
			return NULL;
		}
	}
	area->addr = (void *)addr;
	area->size = size + PAGE_SIZE;
	area->next = *p;
	*p = area;
	return area;
}
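/*
 * vfree() undoes vmalloc(): it finds the vm_struct for this address,
 * unlinks it from vmlist and frees the underlying pages.
 */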
void vfree(void * addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk("Trying to vfree() bad address (%p)\n", addr);
		return;
	}
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			kfree(tmp);
			return;
		}
	}
	printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
}
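/*
 * vmalloc() returns size bytes of virtually contiguous, page-aligned
 * kernel memory; the backing pages need not be physically contiguous.
 * A typical use looks like this (sketch only, the buffer name is
 * illustrative):
 *
 *	char *buf = vmalloc(64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */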
void * vmalloc(unsigned long size)
{
	void * addr;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || size > (max_mapnr << PAGE_SHIFT))
		return NULL;
	area = get_vm_area(size);
	if (!area)
		return NULL;
	addr = area->addr;
	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}
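/*
 * vread() copies up to count bytes starting at the vmalloc address addr
 * into a user buffer, filling the gaps between vmalloc'd areas with zero
 * bytes.  It returns the number of bytes copied.
 */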
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			put_user('\0', buf);
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			put_user(*addr, buf);
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	return buf - buf_start;
}