2 ** Copyright 2001, Travis Geiselbrecht. All rights reserved.
3 ** Distributed under the terms of the NewOS License.
5 #include <kernel/kernel.h>
7 #include <kernel/lock.h>
8 #include <kernel/heap.h>
9 #include <kernel/debug.h>
11 #include <kernel/arch/cpu.h>
13 #include <boot/stage2.h>
18 #define PARANOID_KFREE 1
26 // ripped mostly from nujeffos
// One tracking entry per page in the heap, packed into 16 bits.
struct heap_page {
	unsigned short bin_index : 5;   // index into bins[] this page was carved up for
	unsigned short free_count : 9;  // free element slots remaining on this page
	unsigned short cleaning : 1;
	unsigned short in_use : 1;
};
35 static struct heap_page
*heap_alloc_table
;
36 static addr_t heap_base_ptr
;
37 static addr_t heap_base
;
38 static addr_t heap_size
;
// Per-size-class allocation state.
// NOTE(review): the free_list/raw_list member positions were reconstructed
// from the 7-field bins[] initializers -- confirm against the original header.
struct heap_bin {
	unsigned int element_size;  // size in bytes of each element in this bin
	unsigned int grow_size;     // bytes of raw pages grabbed when the bin runs dry
	unsigned int alloc_count;   // elements currently handed out
	void *free_list;            // singly-linked list of freed elements (link in first word)
	unsigned int free_count;    // number of elements on free_list
	char *raw_list;             // next uncarved element in the raw page run
	unsigned int raw_count;     // elements left in the raw page run
};
49 static struct heap_bin bins
[] = {
50 {16, PAGE_SIZE
, 0, 0, 0, 0, 0},
51 {32, PAGE_SIZE
, 0, 0, 0, 0, 0},
52 {64, PAGE_SIZE
, 0, 0, 0, 0, 0},
53 {128, PAGE_SIZE
, 0, 0, 0, 0, 0},
54 {256, PAGE_SIZE
, 0, 0, 0, 0, 0},
55 {512, PAGE_SIZE
, 0, 0, 0, 0, 0},
56 {1024, PAGE_SIZE
, 0, 0, 0, 0, 0},
57 {2048, PAGE_SIZE
, 0, 0, 0, 0, 0},
58 {0x1000, 0x1000, 0, 0, 0, 0, 0},
59 {0x2000, 0x2000, 0, 0, 0, 0, 0},
60 {0x3000, 0x3000, 0, 0, 0, 0, 0},
61 {0x4000, 0x4000, 0, 0, 0, 0, 0},
62 {0x5000, 0x5000, 0, 0, 0, 0, 0},
63 {0x6000, 0x6000, 0, 0, 0, 0, 0},
64 {0x7000, 0x7000, 0, 0, 0, 0, 0},
65 {0x8000, 0x8000, 0, 0, 0, 0, 0},
66 {0x9000, 0x9000, 0, 0, 0, 0, 0},
67 {0xa000, 0xa000, 0, 0, 0, 0, 0},
68 {0xb000, 0xb000, 0, 0, 0, 0, 0},
69 {0xc000, 0xc000, 0, 0, 0, 0, 0},
70 {0xd000, 0xd000, 0, 0, 0, 0, 0},
71 {0xe000, 0xe000, 0, 0, 0, 0, 0},
72 {0xf000, 0xf000, 0, 0, 0, 0, 0},
73 {0x10000, 0x10000, 0, 0, 0, 0, 0} // 64k
76 static const int bin_count
= sizeof(bins
) / sizeof(struct heap_bin
);
77 static mutex heap_lock
;
79 static void dump_bin(int bin_index
)
81 struct heap_bin
*bin
= &bins
[bin_index
];
84 dprintf("%d:\tesize %d\tgrow_size %d\talloc_count %d\tfree_count %d\traw_count %d\traw_list %p\n",
85 bin_index
, bin
->element_size
, bin
->grow_size
, bin
->alloc_count
, bin
->free_count
, bin
->raw_count
, bin
->raw_list
);
86 dprintf("free_list: ");
87 for(temp
= bin
->free_list
; temp
!= NULL
; temp
= (unsigned int *)*temp
) {
93 static void dump_bin_list(int argc
, char **argv
)
97 dprintf("%d heap bins at %p:\n", bin_count
, bins
);
99 for(i
=0; i
<bin_count
; i
++) {
104 // called from vm_init. The heap should already be mapped in at this point, we just
105 // do a little housekeeping to set up the data structure.
106 int heap_init(addr_t new_heap_base
, unsigned int new_heap_size
)
108 const unsigned int page_entries
= PAGE_SIZE
/ sizeof(struct heap_page
);
109 // set some global pointers
110 heap_alloc_table
= (struct heap_page
*)new_heap_base
;
111 //heap_size = ((uint64)new_heap_size * page_entries / (page_entries + 1)) & ~(PAGE_SIZE-1);
112 heap_size
= new_heap_size
- PAGE_SIZE
; // use above line instead if new_heap_size > sqr(PAGE_SIZE)/2
113 heap_base
= (unsigned int)heap_alloc_table
+ PAGE_ALIGN(heap_size
/ page_entries
);
114 heap_base_ptr
= heap_base
;
115 dprintf("heap_alloc_table = %p, heap_base = 0x%lx, heap_size = 0x%lx\n", heap_alloc_table
, heap_base
, heap_size
);
117 // zero out the heap alloc table at the base of the heap
118 memset((void *)heap_alloc_table
, 0, (heap_size
/ PAGE_SIZE
) * sizeof(struct heap_page
));
120 // pre-init the mutex to at least fall through any semaphore calls
122 heap_lock
.holder
= -1;
124 // set up some debug commands
125 dbg_add_command(&dump_bin_list
, "heap_bindump", "dump stats about bin usage");
130 int heap_init_postsem(kernel_args
*ka
)
132 if(mutex_init(&heap_lock
, "heap_mutex") < 0) {
133 panic("error creating heap mutex\n");
138 static char *raw_alloc(unsigned int size
, int bin_index
)
140 unsigned int new_heap_ptr
;
142 struct heap_page
*page
;
145 new_heap_ptr
= heap_base_ptr
+ PAGE_ALIGN(size
);
146 if(new_heap_ptr
> heap_base
+ heap_size
) {
147 panic("heap overgrew itself!\n");
150 for(addr
= heap_base_ptr
; addr
< new_heap_ptr
; addr
+= PAGE_SIZE
) {
151 page
= &heap_alloc_table
[(addr
- heap_base
) / PAGE_SIZE
];
154 page
->bin_index
= bin_index
;
155 if (bin_index
< bin_count
&& bins
[bin_index
].element_size
< PAGE_SIZE
)
156 page
->free_count
= PAGE_SIZE
/ bins
[bin_index
].element_size
;
158 page
->free_count
= 1;
161 retval
= (char *)heap_base_ptr
;
162 heap_base_ptr
= new_heap_ptr
;
166 void *kmalloc(unsigned int size
)
168 void *address
= NULL
;
171 struct heap_page
*page
;
174 dprintf("kmalloc: asked to allocate size %d\n", size
);
177 mutex_lock(&heap_lock
);
179 for (bin_index
= 0; bin_index
< bin_count
; bin_index
++)
180 if (size
<= bins
[bin_index
].element_size
)
183 if (bin_index
== bin_count
) {
184 // XXX fix the raw alloc later.
185 //address = raw_alloc(size, bin_index);
186 panic("kmalloc: asked to allocate too much for now!\n");
189 if (bins
[bin_index
].free_list
!= NULL
) {
190 address
= bins
[bin_index
].free_list
;
191 bins
[bin_index
].free_list
= (void *)(*(unsigned int *)bins
[bin_index
].free_list
);
192 bins
[bin_index
].free_count
--;
194 if (bins
[bin_index
].raw_count
== 0) {
195 bins
[bin_index
].raw_list
= raw_alloc(bins
[bin_index
].grow_size
, bin_index
);
196 bins
[bin_index
].raw_count
= bins
[bin_index
].grow_size
/ bins
[bin_index
].element_size
;
199 bins
[bin_index
].raw_count
--;
200 address
= bins
[bin_index
].raw_list
;
201 bins
[bin_index
].raw_list
+= bins
[bin_index
].element_size
;
204 bins
[bin_index
].alloc_count
++;
205 page
= &heap_alloc_table
[((unsigned int)address
- heap_base
) / PAGE_SIZE
];
206 page
[0].free_count
--;
208 dprintf("kmalloc0: page 0x%x: bin_index %d, free_count %d\n", page
, page
->bin_index
, page
->free_count
);
210 for(i
= 1; i
< bins
[bin_index
].element_size
/ PAGE_SIZE
; i
++) {
211 page
[i
].free_count
--;
213 dprintf("kmalloc1: page 0x%x: bin_index %d, free_count %d\n", page
[i
], page
[i
].bin_index
, page
[i
].free_count
);
219 mutex_unlock(&heap_lock
);
222 dprintf("kmalloc: asked to allocate size %d, returning ptr = %p\n", size
, address
);
227 void kfree(void *address
)
229 struct heap_page
*page
;
230 struct heap_bin
*bin
;
236 if ((addr_t
)address
< heap_base
|| (addr_t
)address
>= (heap_base
+ heap_size
))
237 panic("kfree: asked to free invalid address %p\n", address
);
239 mutex_lock(&heap_lock
);
242 dprintf("kfree: asked to free at ptr = %p\n", address
);
245 page
= &heap_alloc_table
[((unsigned)address
- heap_base
) / PAGE_SIZE
];
248 dprintf("kfree: page 0x%x: bin_index %d, free_count %d\n", page
, page
->bin_index
, page
->free_count
);
251 if(page
[0].bin_index
>= bin_count
)
252 panic("kfree: page %p: invalid bin_index %d\n", page
, page
->bin_index
);
254 bin
= &bins
[page
[0].bin_index
];
256 if(bin
->element_size
<= PAGE_SIZE
&& (addr_t
)address
% bin
->element_size
!= 0)
257 panic("kfree: passed invalid pointer %p! Supposed to be in bin for esize 0x%x\n", address
, bin
->element_size
);
259 for(i
= 0; i
< bin
->element_size
/ PAGE_SIZE
; i
++) {
260 if(page
[i
].bin_index
!= page
[0].bin_index
)
261 panic("kfree: not all pages in allocation match bin_index\n");
262 page
[i
].free_count
++;
266 // walk the free list on this bin to make sure this address doesn't exist already
269 for(temp
= bin
->free_list
; temp
!= NULL
; temp
= (unsigned int *)*temp
) {
270 if(temp
== (unsigned int *)address
) {
271 panic("kfree: address %p already exists in bin free list\n", address
);
277 memset(address
, 0x99, bin
->element_size
);
280 *(unsigned int *)address
= (unsigned int)bin
->free_list
;
281 bin
->free_list
= address
;
285 mutex_unlock(&heap_lock
);
288 char *kstrdup(const char *text
)
290 char *buf
= (char *)kmalloc(strlen(text
) + 1);