/*
 * Copyright (c) 2008 Joshua Phillips. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "mm/allocation.h"
#include "mm/paging.h" // for PAGE_TAB
#include "stdlib.h" // for malloc!
// Boundaries of contiguous free memory.
// This is far from ideal.
static uintptr_t freemem_low_bound;
static uintptr_t freemem_high_bound;

// Physical buddy allocator
static struct buddy *physical_buddy = NULL;

// Kernel virtual page allocator + buddy
struct vpallocator kvpa;

// Base of physical memory that we're allocating
static uintptr_t phys_base;
void pmem_init_set_freemem_base(uintptr_t freemem_base)
{
    freemem_low_bound = alignup(freemem_base, PAGE_SIZE);
}
void pmem_init_mark_free(uintptr_t region_start, uintptr_t region_end)
{
    // Don't use non-contiguous parts of memory. This might throw away
    // large chunks of physical memory that we'll never use!
    // It's easier this way for now. At least print a warning if any
    // memory gets discarded.
    if (region_start <= freemem_low_bound){
        freemem_high_bound = region_end;
    } else {
        TRACE("Throwing away %u MiB of memory!",
            uldivru(region_end - region_start, 1024 * 1024));
    }
}
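/*
 * For example (the figures below are hypothetical, not from the boot code):
 * if free RAM is reported as [1 MiB, 64 MiB) and a second region at
 * [68 MiB, 128 MiB), only the first region -- the one containing
 * freemem_low_bound -- is kept, with freemem_high_bound set to 64 MiB.
 * The second call just prints the warning above and 60 MiB is ignored.
 */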
size_t pmem_get_size(void)
{
    return freemem_high_bound - freemem_low_bound;
}
void mm_allocation_init(void)
{
    size_t n_chunks;
    unsigned int height;
    size_t struct_size, old_struct_size;
    uintptr_t new_freemem_low_bound;

    // create the virtual buddy allocator (for kernel address space):
    // one chunk per page, covering everything above KERNEL_VIRT_BASE
    n_chunks = (0 - KERNEL_VIRT_BASE) / PAGE_SIZE;

    // calculate the size the buddy structures will take
    get_buddy_size(n_chunks, &height, &struct_size);
    TRACE("Buddy data for kernel's virtual addr space: %u kiB",
        uldivru(struct_size, 1024));
    // TODO: make sure struct_size doesn't exceed the amount of memory mapped
    // after the kernel image at 0xC0000000

    // Here, we can convert a physical address to a virtual address just by
    // adding. This only works during bootstrapping, due to the simple page
    // tables set up by the boot code.
    kvpa.buddy = (struct buddy *) ((freemem_low_bound - KERNEL_PHYS_BASE)
        + KERNEL_VIRT_BASE);
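    // A worked example of the conversion above (the values are hypothetical;
    // KERNEL_PHYS_BASE is assumed to be 0x00100000 here, and the TODO above
    // suggests KERNEL_VIRT_BASE is 0xC0000000):
    //   phys 0x00400000 -> (0x00400000 - 0x00100000) + 0xC0000000
    //                    = 0xC0300000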
    // initialize the buddy structure
    buddy_init(kvpa.buddy, n_chunks, height, struct_size);
    kvpa.base = KERNEL_VIRT_BASE;

    // Now that we've set up the virtual buddy structure, there's
    // even less free memory.
    struct_size = alignup(struct_size, PAGE_SIZE);
    new_freemem_low_bound = freemem_low_bound + struct_size;
    // Create the physical buddy, to manage all normal physical memory,
    // with a granularity of one page.
    old_struct_size = 0;
    for (;;){
        // how many pages (allocator 'chunks') do we need to manage?
        n_chunks = ((freemem_high_bound - new_freemem_low_bound)
            - alignup(old_struct_size, PAGE_SIZE)) / PAGE_SIZE;
        // calculate amount of data needed for buddy allocator
        get_buddy_size(n_chunks, &height, &struct_size);
        if (struct_size != old_struct_size){
            // We don't need to manage the memory that holds the memory
            // management data itself, so we calculate the size again, and
            // again, until it settles. This only actually takes about two
            // iterations. There is probably a more direct calculation. :)
            old_struct_size = struct_size;
        } else {
            // struct_size hasn't changed. Stop iterating.
            break;
        }
    }

    TRACE("Buddy data for %u MiB physical addr space: %u kiB",
        uldivru(freemem_high_bound - freemem_low_bound, 1024 * 1024),
        uldivru(struct_size, 1024));
    // TODO: make sure struct_size doesn't exceed the amount of memory mapped
    // after the kernel image at 0xC0000000
    physical_buddy = (struct buddy *) ((new_freemem_low_bound - KERNEL_PHYS_BASE)
        + KERNEL_VIRT_BASE);
    phys_base = new_freemem_low_bound + alignup(struct_size, PAGE_SIZE);
    buddy_init(physical_buddy, n_chunks, height, struct_size);
    TRACE("Buddy allocators initialized.");

    // Mark the kernel as used kernel virtual address space (because it is).
    // This range also covers the buddy data placed just after the kernel image.
    buddy_mark_used(kvpa.buddy, 0, uldivru(phys_base - KERNEL_PHYS_BASE, PAGE_SIZE));
}
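/*
 * A rough sketch of the intended boot-time call order. The caller and the
 * source of the memory map are assumptions, not part of this file; only the
 * three functions themselves are defined here:
 *
 *    pmem_init_set_freemem_base(end_of_kernel_image);
 *    pmem_init_mark_free(region_start, region_end);  // once per reported RAM region
 *    mm_allocation_init();                           // allocators usable from here on
 */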
// Allocate between min_pages and max_pages contiguous physical pages.
// Returns the number of pages allocated; the physical start address is
// stored in *start_addr.
size_t ppalloc(size_t min_pages, size_t max_pages, uintptr_t *start_addr)
{
    size_t result;
    size_t allocated_pages;

    result = buddy_alloc(physical_buddy, min_pages, max_pages, &allocated_pages);
    *start_addr = phys_base + (result * PAGE_SIZE);
    return allocated_pages;
}
void ppfree(uintptr_t start_addr, size_t n_pages)
{
    buddy_free(physical_buddy, (start_addr - phys_base) / PAGE_SIZE, n_pages);
}
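/*
 * A minimal usage sketch for the physical page allocator. The variable names
 * are hypothetical, and it assumes a return value of 0 signals failure:
 *
 *    uintptr_t phys;
 *    size_t got = ppalloc(1, 4, &phys);    // ask for 1..4 contiguous pages
 *    if (got > 0){
 *        // ... map and use [phys, phys + got * PAGE_SIZE) ...
 *        ppfree(phys, got);
 *    }
 */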
int vpcreate(struct vpallocator *restrict vp, uintptr_t base, size_t extent)
{
    size_t n_chunks;
    unsigned int height;
    size_t struct_size;

    vp->base = base & ~(PAGE_SIZE - 1);
    // extent is the number of pages to manage: one chunk per page
    n_chunks = extent;

    // calculate the size the buddy structure will take
    get_buddy_size(extent, &height, &struct_size);

    // allocate the buddy structure, using malloc
    // since we're using malloc, we can't use vpcreate
    // for the kernel virtual address space ^_^
    vp->buddy = malloc(struct_size);
    if (!vp->buddy){
        TRACE("memory full (struct_size = %u)", struct_size);
        return -1;
    }

    // initialize the buddy structure
    buddy_init(vp->buddy, n_chunks, height, struct_size);

    // thank you, caller. Enjoy your allocator!
    return 0;
}
// Allocate between min_pages and max_pages of contiguous virtual address
// space from vp. Returns the number of pages allocated; the start address
// is stored in *start_addr.
size_t vpalloc(struct vpallocator *restrict vp, size_t min_pages, size_t max_pages,
    uintptr_t *start_addr)
{
    size_t result;
    size_t allocated_pages;

    result = buddy_alloc(vp->buddy, min_pages, max_pages, &allocated_pages);
    *start_addr = vp->base + (result * PAGE_SIZE);
    return allocated_pages;
}
void vpfree(struct vpallocator *restrict vp, uintptr_t start_addr, size_t n_pages)
{
    buddy_free(vp->buddy, (start_addr - vp->base) / PAGE_SIZE, n_pages);
}
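/*
 * A minimal usage sketch for a per-address-space allocator. The base address
 * and variable names are hypothetical, and the error handling assumes the
 * conventions used above: vpcreate() returns 0 on success, and vpalloc()
 * returns the number of pages it allocated:
 *
 *    struct vpallocator vpa;
 *    uintptr_t va;
 *    if (vpcreate(&vpa, 0x40000000, 0x1000) == 0){   // manage 0x1000 pages at 0x40000000
 *        size_t got = vpalloc(&vpa, 1, 1, &va);      // one page of address space
 *        if (got == 1){
 *            // ... map physical pages at va and use them ...
 *            vpfree(&vpa, va, got);
 *        }
 *    }
 */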