kernel/mm/kvmalloc.c
/*
 * Copyright (c) 2009 Joshua Phillips. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Allocates kernel virtual memory, and physical memory, in units of pages,
 * and performs a mapping. (Like malloc, but for pages.)
 */
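/*
 * Example usage (illustrative sketch, not part of the original file;
 * the array size and error handling here are only an assumption about
 * how a caller might use the interface):
 *
 *     void *buf;
 *     uintptr_t phys[4];
 *     size_t got = kvmalloc(1, 4, &buf, phys);   // want 1..4 pages
 *     if (got == 0){
 *         // allocation failed
 *     } else {
 *         // ... use 'got' pages of writable memory at 'buf' ...
 *         kvfree(buf, got);
 *     }
 */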
#include "mm/kvmalloc.h"
#include "mm/allocation.h"
#include "mm/paging.h"
#include "archinf.h"
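/*
 * Allocate between min_pages and max_pages of kernel virtual memory,
 * backed by physical pages and mapped present and writable.
 * The virtual address is stored in *ptr_out; if phys_array is non-NULL,
 * the physical address of each allocated page is stored in it.
 * Returns the number of pages allocated, or 0 on failure.
 */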
size_t kvmalloc(size_t min_pages, size_t max_pages,
        void **ptr_out, uintptr_t *phys_array)
{
    // TODO: call alloc_pages repeatedly to try to get up to max_pages
    uintptr_t phys_addr, virt_addr;
    size_t n_phys, n_virt;
    unsigned int i;

    // allocate physical pages
    n_phys = ppalloc(min_pages, max_pages, &phys_addr);
    if (n_phys == 0){
        // failed
        return 0;
    }
    // allocate virtual pages to match
    n_virt = vpalloc(&kvpa, min_pages, n_phys, &virt_addr);
    if (n_virt == 0){
        // failed
        ppfree(phys_addr, n_phys);
        return 0;
    } else if (n_virt < n_phys){
        // free some of our physical pages
        // XXX: this assumes ppfree won't mind freeing half an
        // allocated region. For my buddy allocator, this is true.
        ppfree(phys_addr + n_virt * PAGE_SIZE, n_phys - n_virt);
        n_phys = n_virt;
    }
    // store the physical addresses
    if (phys_array){
        for (i = 0; i < n_phys; i++){
            phys_array[i] = phys_addr + i * PAGE_SIZE;
        }
    }
    // perform the mapping
    if (map_mem(NULL, phys_addr, virt_addr, n_phys, PTE_PRESENT | PTE_WRITABLE) != 0){
        // oh dear - couldn't even map :(
        ppfree(phys_addr, n_phys);
        vpfree(&kvpa, virt_addr, n_virt);
        return 0;
    }
    *ptr_out = (void *) virt_addr;
    return n_phys;
}
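/*
 * Free pages previously allocated with kvmalloc.
 * n_allocated must be the page count that kvmalloc returned.
 */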
void kvfree(void *ptr, size_t n_allocated)
{
    // free a physical page at a time, getting the address
    // out of the page table
    // XXX: this assumes ppfree won't mind freeing half an
    // allocated region. For my buddy allocator, this is true.
    uintptr_t virt_addr = (uintptr_t) ptr;
    uintptr_t phys_addr;
    size_t i;

    for (i = 0; i < n_allocated; i++){
        // fetch physical address and free it
        phys_addr = xlat_v2p(NULL, virt_addr);
        ppfree(phys_addr, 1);
        // next page
        virt_addr += PAGE_SIZE;
    }
    // now we can perform an unmap
    map_mem(NULL, 0, (uintptr_t) ptr, n_allocated, 0);
    // and finally, free the virtual pages
    vpfree(&kvpa, (uintptr_t) ptr, n_allocated);
}