Moved PAGE_SIZE to kernel/archinf.h
[marionette.git] / kernel / mm / allocation.c
/*
 * Copyright (c) 2008 Joshua Phillips. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "mm/allocation.h"
#include "mm/buddy.h"
#include "stdlib.h" // for malloc!
#include "trace.h"
#include "extlib.h"
#include "archinf.h"
// Boundaries of contiguous free memory.
// This is far from ideal.
static uintptr_t freemem_low_bound;
static uintptr_t freemem_high_bound;

// Physical buddy allocator
static struct buddy *physical_buddy = NULL;

// Kernel virtual page allocator + buddy
struct vpallocator kvpa;

// Base of physical memory that we're allocating
static uintptr_t phys_base;
void pmem_init_set_freemem_base(uintptr_t freemem_base)
{
    freemem_low_bound = alignup(freemem_base, PAGE_SIZE);
}
void pmem_init_mark_free(uintptr_t region_start, uintptr_t region_end)
{
    // Don't use non-contiguous parts of memory. This might throw away
    // large chunks of physical memory that we'll never use!
    // It's easier this way for now. At least print a warning if any
    // gets wasted.
    if (region_start <= freemem_low_bound){
        freemem_high_bound = region_end;
    } else {
        TRACE("Throwing away %u MiB of memory!",
            uldivru(region_end - region_start, 1024 * 1024));
    }
}
size_t pmem_get_size(void)
{
    return freemem_high_bound - freemem_low_bound;
}
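
/*
 * Example (illustrative sketch only; the symbol and addresses below are
 * hypothetical and not defined in this file): early boot code is expected
 * to call the pmem_init_* functions above before mm_allocation_init(),
 * e.g. while walking the bootloader's memory map:
 *
 *     pmem_init_set_freemem_base(kernel_image_end_phys);   // hypothetical symbol
 *     pmem_init_mark_free(0x00100000, 0x07FE0000);         // one RAM region from the map
 *     TRACE("managing %u MiB", uldivru(pmem_get_size(), 1024 * 1024));
 */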
void mm_allocation_init(void)
{
    size_t n_chunks;
    int height;
    size_t struct_size, old_struct_size;
    uintptr_t new_freemem_low_bound;

    // create the virtual buddy allocator (for kernel address space)
    n_chunks = (0 - KERNEL_VIRT_BASE) / PAGE_SIZE;

    // calculate the size the buddy structures will take
    get_buddy_size(n_chunks, &height, &struct_size);
    TRACE("Buddy data for kernel's virtual addr space: %u kiB",
        uldivru(struct_size, 1024));
    // TODO: make sure struct_size doesn't exceed the amount of memory mapped
    // after the kernel image at 0xC0000000

    // Here, we can convert a physical address to a virtual address just by
    // adding. This only works during bootstrapping, due to the simple page
    // mapping!
    kvpa.buddy = (struct buddy *) ((freemem_low_bound - KERNEL_PHYS_BASE) + KERNEL_VIRT_BASE);

    // initialize the buddy structure
    buddy_init(kvpa.buddy, n_chunks, height, struct_size);
    kvpa.base = KERNEL_VIRT_BASE;

    // Now that we've set up the virtual buddy structure, there's
    // even less free memory.
    struct_size = alignup(struct_size, PAGE_SIZE);
    new_freemem_low_bound = freemem_low_bound + struct_size;

    // Create the physical buddy, to manage all normal physical memory,
    // with a granularity of one page.
    old_struct_size = 0;
    for (;;){
        // how many pages (allocator 'chunks') do we need to manage?
        n_chunks = ((freemem_high_bound - new_freemem_low_bound)
            - alignup(old_struct_size, PAGE_SIZE)) / PAGE_SIZE;
        // calculate amount of data needed for buddy allocator
        get_buddy_size(n_chunks, &height, &struct_size);
        if (struct_size != old_struct_size){
            // The pages holding the buddy's own bookkeeping data don't
            // need to be managed by it, so recalculate the size over and
            // over until it stops changing. In practice this settles after
            // about two iterations; a closed-form calculation would also
            // be possible. :)
            old_struct_size = struct_size;
        } else {
            // the struct_size hasn't changed. Stop iterating.
            break;
        }
    }
    TRACE("Buddy data for %u MiB physical addr space: %u kiB",
        uldivru(freemem_high_bound - freemem_low_bound, 1024 * 1024),
        uldivru(struct_size, 1024));
    // TODO: make sure struct_size doesn't exceed the amount of memory mapped
    // after the kernel image at 0xC0000000
    physical_buddy = (struct buddy *) ((new_freemem_low_bound - KERNEL_PHYS_BASE)
        + KERNEL_VIRT_BASE);
    phys_base = new_freemem_low_bound + alignup(struct_size, PAGE_SIZE);
    buddy_init(physical_buddy, n_chunks, height, struct_size);
    TRACE("Buddy allocators initialized.");
    // Mark the kernel image, and the allocator data placed just after it,
    // as used kernel virtual address space (because they are).
    buddy_mark_used(kvpa.buddy, 0, uldivru(phys_base - KERNEL_PHYS_BASE, PAGE_SIZE));
}
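
/*
 * Rough picture of low physical memory after mm_allocation_init()
 * (a summary of the code above, not an authoritative map):
 *
 *     freemem_low_bound        kvpa.buddy data (kernel virtual-space buddy)
 *     new_freemem_low_bound    physical_buddy data
 *     phys_base                first page handed out by ppalloc()
 *     freemem_high_bound       end of the managed contiguous region
 */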
size_t ppalloc(size_t min_pages, size_t max_pages, uintptr_t *start_addr)
{
    unsigned int result;
    size_t allocated_pages;
    result = buddy_alloc(physical_buddy, min_pages, max_pages, &allocated_pages);
    if (result == -1){
        return 0;
    } else {
        *start_addr = phys_base + (result * PAGE_SIZE);
        return allocated_pages;
    }
}
void ppfree(uintptr_t start_addr, size_t n_pages)
{
    buddy_free(physical_buddy, (start_addr - phys_base) / PAGE_SIZE, n_pages);
}
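
/*
 * Example (illustrative sketch): a caller that wants between 4 and 16
 * contiguous physical pages, and gives them back when done:
 *
 *     uintptr_t phys;
 *     size_t got = ppalloc(4, 16, &phys);
 *     if (got == 0){
 *         // no contiguous run of at least 4 pages was available
 *     } else {
 *         // use [phys, phys + got * PAGE_SIZE), then:
 *         ppfree(phys, got);
 *     }
 */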
int vpcreate(struct vpallocator *restrict vp, uintptr_t base, size_t extent)
{
    int height;
    size_t struct_size;
    size_t n_chunks;

    vp->base = base & ~(PAGE_SIZE - 1);

    // one buddy chunk per page; extent is the number of pages to manage
    n_chunks = extent;

    // calculate the size the buddy structure will take
    get_buddy_size(n_chunks, &height, &struct_size);

    // allocate the buddy structure, using malloc
    // since we're using malloc, we can't use vpcreate
    // for the kernel virtual address space ^_^
    vp->buddy = malloc(struct_size);
    if (!vp->buddy){
        TRACE("memory full (struct_size = %u)", struct_size);
        return 1;
    }

    // initialize the buddy structure
    buddy_init(vp->buddy, n_chunks, height, struct_size);

    // thank you, caller. Enjoy your allocator!
    return 0;
}
size_t vpalloc(struct vpallocator *restrict vp, size_t min_pages, size_t max_pages, uintptr_t *start_addr)
{
    unsigned int result;
    size_t allocated_pages;
    result = buddy_alloc(vp->buddy, min_pages, max_pages, &allocated_pages);
    if (result == -1){
        return 0;
    } else {
        *start_addr = vp->base + (result * PAGE_SIZE);
        return allocated_pages;
    }
}
void vpfree(struct vpallocator *restrict vp, uintptr_t start_addr, size_t n_pages)
{
    buddy_free(vp->buddy, (start_addr - vp->base) / PAGE_SIZE, n_pages);
}
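
/*
 * Example (illustrative sketch; the base address and extent are made up,
 * and extent is taken to be a page count as in vpcreate() above): giving
 * some other address space its own page allocator:
 *
 *     struct vpallocator vpa;
 *     uintptr_t va;
 *     size_t n;
 *
 *     if (vpcreate(&vpa, 0x40000000, 1024) != 0){  // 1024 pages from 0x40000000
 *         // not enough memory for the allocator's bookkeeping
 *     }
 *     n = vpalloc(&vpa, 1, 4, &va);                // ask for 1..4 pages
 *     if (n > 0){
 *         // map something at [va, va + n * PAGE_SIZE), then later:
 *         vpfree(&vpa, va, n);
 *     }
 */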