[helenos.git] / kernel / arch / sparc64 / src / mm / page.c
blob 9b8fa5d2ced8c86ff2553e7c176adc0b91085ae8
/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */
#include <arch/mm/page.h>
#include <arch/mm/tlb.h>
#include <genarch/mm/page_ht.h>
#include <mm/frame.h>
#include <arch/mm/frame.h>
#include <bitops.h>
#include <debug.h>
#include <align.h>
#include <config.h>
#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
 *
 * Application processors need to have the same locked entries
 * in their DTLBs as the bootstrap processor.
 */
static struct {
	uintptr_t virt_page;
	uintptr_t phys_page;
	int pagesize_code;
} bsp_locked_dtlb_entry[DTLB_ENTRY_COUNT];

/** Number of entries in bsp_locked_dtlb_entry array. */
static count_t bsp_locked_dtlb_entries = 0;
#endif /* CONFIG_SMP */
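
/*
 * The entries above are recorded by hw_map() whenever the bootstrap processor
 * locks a device mapping into its DTLB; page_arch_init() below replays them
 * on each application processor so that every CPU ends up with the same set
 * of locked device mappings.
 */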
/** Perform sparc64 specific initialization of paging. */
void page_arch_init(void)
{
	if (config.cpu_active == 1) {
		page_mapping_operations = &ht_mapping_operations;
	} else {

#ifdef CONFIG_SMP
		int i;

		/*
		 * Copy locked DTLB entries from the BSP.
		 */
		for (i = 0; i < bsp_locked_dtlb_entries; i++) {
			dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
			    bsp_locked_dtlb_entry[i].phys_page,
			    bsp_locked_dtlb_entry[i].pagesize_code,
			    true, false);
		}
#endif

	}
}
/** Map memory-mapped device into virtual memory.
 *
 * So far, only the DTLB is used to map devices into memory.
 * Chances are that there will be only a limited number of
 * devices that the kernel itself needs to lock in the DTLB.
 *
 * @param physaddr	Physical address of the page where the device is
 *			located. Must be at least page-aligned.
 * @param size		Size of the device's registers. Must not exceed 8M
 *			and must include extra space caused by the alignment.
 *
 * @return		Virtual address of the page where the device is
 *			mapped.
 */
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
	unsigned int order;
	int i;

	ASSERT(config.cpu_active == 1);

	struct {
		int pagesize_code;
		size_t increment;
		count_t count;
	} sizemap[] = {
		{ PAGESIZE_8K, 0, 1 },			/* 8K */
		{ PAGESIZE_8K, PAGE_SIZE, 2 },		/* 16K */
		{ PAGESIZE_8K, PAGE_SIZE, 4 },		/* 32K */
		{ PAGESIZE_64K, 0, 1 },			/* 64K */
		{ PAGESIZE_64K, 8*PAGE_SIZE, 2 },	/* 128K */
		{ PAGESIZE_64K, 8*PAGE_SIZE, 4 },	/* 256K */
		{ PAGESIZE_512K, 0, 1 },		/* 512K */
		{ PAGESIZE_512K, 64*PAGE_SIZE, 2 },	/* 1M */
		{ PAGESIZE_512K, 64*PAGE_SIZE, 4 },	/* 2M */
		{ PAGESIZE_4M, 0, 1 },			/* 4M */
		{ PAGESIZE_4M, 512*PAGE_SIZE, 2 }	/* 8M */
	};

	ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
	ASSERT(size <= 8*1024*1024);

	if (size <= FRAME_SIZE)
		order = 0;
	else
		order = (fnzb64(size - 1) + 1) - FRAME_WIDTH;
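
	/*
	 * Worked example (illustrative; assumes 8K frames, i.e. FRAME_WIDTH == 13):
	 * for size == 100K, fnzb64(100K - 1) == 16, so order == (16 + 1) - 13 == 4,
	 * which selects sizemap[4] == { PAGESIZE_64K, 8*PAGE_SIZE, 2 }, i.e. the
	 * device is covered by two locked 64K pages (128K in total).
	 */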
	/*
	 * Use virtual addresses that are beyond the limit of physical memory.
	 * Thus, the physical address space will not be wasted by holes created
	 * by frame_alloc().
	 */
	ASSERT(last_frame);
	uintptr_t virtaddr = ALIGN_UP(last_frame, 1<<(order + FRAME_WIDTH));
	last_frame = ALIGN_UP(virtaddr + size, 1<<(order + FRAME_WIDTH));

	for (i = 0; i < sizemap[order].count; i++) {
		/*
		 * First, insert the mapping into DTLB.
		 */
		dtlb_insert_mapping(virtaddr + i*sizemap[order].increment,
		    physaddr + i*sizemap[order].increment,
		    sizemap[order].pagesize_code, true, false);

#ifdef CONFIG_SMP
		/*
		 * Second, save the information about the mapping for APs.
		 */
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
		    virtaddr + i*sizemap[order].increment;
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
		    physaddr + i*sizemap[order].increment;
		bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
		    sizemap[order].pagesize_code;
		bsp_locked_dtlb_entries++;
#endif
	}

	return virtaddr;
}
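
/*
 * Illustrative usage sketch (hypothetical values, not taken from any real
 * driver): a driver whose registers occupy a 16K window at the hypothetical
 * physical address 0x1fe00000000 could obtain a locked, non-cacheable kernel
 * mapping with:
 *
 *	uintptr_t regs = hw_map(0x1fe00000000ULL, 16 * 1024);
 *
 * Per the sizemap table above, such a 16K request is served by two locked
 * 8K DTLB entries starting at the returned virtual address.
 */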
/** @}
 */