arm64: remove printk() before console_init()
[coreboot.git] / src/arch/arm64/armv8/mmu.c
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#include <memrange.h>
#include <arch/mmu.h>
#include <arch/lib_helpers.h>
#include <arch/cache.h>
/* Maximum number of XLAT tables available, based on the ttb buffer size */
static unsigned int max_tables;
/* Address of the ttb buffer */
static uint64_t *xlat_addr;
static const uint64_t level_to_addr_mask[] = {
	L1_ADDR_MASK,
	L2_ADDR_MASK,
	L3_ADDR_MASK,
};
static const uint64_t level_to_addr_shift[] = {
	L1_ADDR_SHIFT,
	L2_ADDR_SHIFT,
	L3_ADDR_SHIFT,
};
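
/*
 * Note: this code assumes the 64KiB translation granule configured in
 * mmu_enable() below (TCR_TG0_64KB). Under that granule the VA splits as
 * bits [15:0] page offset, bits [28:16] L3 index, bits [41:29] L2 index
 * and bits [47:42] L1 index (when the VA size configured via TCR allows
 * addresses that high), which is what the masks and shifts above are
 * expected to encode.
 */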
/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	attr = (tag & MA_NS) ? BLOCK_NS : 0;
	attr |= (tag & MA_RO) ? BLOCK_AP_RO : BLOCK_AP_RW;
	attr |= BLOCK_ACCESS;

	if (tag & MA_MEM) {
		if (tag & MA_MEM_NC)
			attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		else
			attr |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
	} else {
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
	}

	return attr;
}
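
/*
 * The BLOCK_INDEX_* values above are AttrIndx fields: they index the
 * memory-attribute entries programmed into MAIR via MAIR_ATTRIBUTES in
 * mmu_enable() below.
 */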
/* Func : get_index_from_addr
 * Desc : Get index into table at a given level using appropriate bits from the
 * base address
 */
static uint64_t get_index_from_addr(uint64_t addr, uint8_t level)
{
	uint64_t mask = level_to_addr_mask[level - 1];
	uint8_t shift = level_to_addr_shift[level - 1];

	return (addr & mask) >> shift;
}
/* Func : table_desc_valid
 * Desc : Check if a table entry contains a valid table descriptor
 */
static uint64_t table_desc_valid(uint64_t desc)
{
	return (desc & TABLE_DESC) == TABLE_DESC;
}
/* Func : get_new_table
 * Desc : Return the next free XLAT table from the ttb buffer, or NULL if the
 * buffer is exhausted
 */
static uint64_t *get_new_table(void)
{
	/* Table 0 is the root table at xlat_addr, so allocation starts at 1. */
	static unsigned int free_idx = 1;
	uint64_t *new;

	if (free_idx >= max_tables)
		return NULL;

	new = (uint64_t *)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
	free_idx++;

	memset(new, 0, GRANULE_SIZE);

	return new;
}
/* Func : get_table_from_desc
 * Desc : Get next level table address from table descriptor
 */
static uint64_t *get_table_from_desc(uint64_t desc)
{
	return (uint64_t *)(desc & XLAT_TABLE_MASK);
}
/* Func: get_next_level_table
 * Desc: Check if the table entry is a valid descriptor. If not, allocate a new
 * table, update the entry and return the new table's address. If valid, return
 * the address the descriptor already points to
 */
static uint64_t *get_next_level_table(uint64_t *ptr)
{
	uint64_t desc = *ptr;

	if (!table_desc_valid(desc)) {
		uint64_t *new_table = get_new_table();
		if (new_table == NULL)
			return NULL;
		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}

	return get_table_from_desc(desc);
}
/* Func : init_xlat_table
 * Desc : Given a base address and size, walk the XLAT tables using the indices
 * that map the given base address. This works like an ordinary table walk,
 * except that invalid entries encountered along the way are allocated and
 * filled in. On success, return the size of the block/page addressed by the
 * final table; on table exhaustion, return 0
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
	uint64_t l1_index = get_index_from_addr(base_addr, 1);
	uint64_t l2_index = get_index_from_addr(base_addr, 2);
	uint64_t l3_index = get_index_from_addr(base_addr, 3);
	uint64_t *table = xlat_addr;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

	/* L1 table lookup */
	/* If the VA has bits set above bit 41, the walk starts at L1 */
	if (l1_index) {
		table = get_next_level_table(&table[l1_index]);
		if (!table)
			return 0;
	}

	/* L2 table lookup */
	/* If a lookup was performed at L1, the L2 table address comes from the
	   L1 descriptor; otherwise the walk starts at the ttbr address */
	if (!l3_index && (size >= L2_XLAT_SIZE)) {
		/* If the block address is aligned and the size is at least
		   512MiB, i.e. the size addressed by each L2 entry, we can
		   store a block descriptor directly */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* An L3 lookup is not required */
		return L2_XLAT_SIZE;
	} else {
		/* The L2 entry stores a table descriptor */
		table = get_next_level_table(&table[l2_index]);
		if (!table)
			return 0;
	}

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;
	return L3_XLAT_SIZE;
}
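
/*
 * Worked example (illustrative): for base_addr = 0x80000000 with
 * size >= 512MiB, l1_index and l3_index are both 0, so a single call
 * installs one L2 block descriptor and returns L2_XLAT_SIZE (512MiB,
 * per the comment above). For a 64KiB-aligned address that is not
 * 512MiB-aligned, l3_index is non-zero, so the call walks down to L3
 * and maps exactly one 64KiB page, returning L3_XLAT_SIZE.
 */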
/* Func : sanity_check
 * Desc : Check that the address is granule-aligned and the size is at least
 * one granule
 */
static uint64_t sanity_check(uint64_t addr,
			     uint64_t size)
{
	/* The address must be at least 64 KiB (granule) aligned */
	if (addr & GRANULE_SIZE_MASK)
		return 1;

	/* The size must be at least one granule */
	if (size < GRANULE_SIZE)
		return 1;

	return 0;
}
/* Func : init_mmap_entry
 * Desc : For each mmap entry, call init_xlat_table with the current base
 * address. Based on the size returned from init_xlat_table, advance the base
 * address and repeat until the whole region is mapped.
 */
static void init_mmap_entry(struct range_entry *r)
{
	uint64_t base_addr = range_entry_base(r);
	uint64_t size = range_entry_size(r);
	uint64_t tag = range_entry_tag(r);
	uint64_t temp_size = size;

	while (temp_size) {
		uint64_t ret;

		if (sanity_check(base_addr, temp_size))
			return;

		ret = init_xlat_table(base_addr + (size - temp_size),
				      temp_size, tag);

		if (ret == 0)
			return;

		temp_size -= ret;
	}
}
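
/*
 * For example, a 1GiB region starting at 0x80000000 is consumed as two
 * 512MiB L2 blocks (two loop iterations), while a 64KiB-aligned region
 * smaller than 512MiB is consumed one 64KiB L3 page per iteration.
 */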
/* Func : mmu_init
 * Desc : Initialize the MMU based on the mmap_ranges passed in. ttb_buffer is
 * used as the base address for the xlat tables. ttb_size defines the maximum
 * number of tables that can be used
 */
void mmu_init(struct memranges *mmap_ranges,
	      uint64_t *ttb_buffer,
	      uint64_t ttb_size)
{
	struct range_entry *mmap_entry;

	if (sanity_check((uint64_t)ttb_buffer, ttb_size))
		return;

	memset((void *)ttb_buffer, 0, GRANULE_SIZE);
	max_tables = (ttb_size >> GRANULE_SIZE_SHIFT);
	xlat_addr = ttb_buffer;

	memranges_each_entry(mmap_entry, mmap_ranges) {
		init_mmap_entry(mmap_entry);
	}
}
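
/*
 * Note: mmu_init() only builds the translation tables; translation stays
 * off until mmu_enable() below programs TCR/TTBR0 and sets SCTLR.M.
 */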
void mmu_enable(void)
{
	uint32_t sctlr;

	/* Initialize MAIR indices */
	raw_write_mair_el3(MAIR_ATTRIBUTES);

	/* Invalidate TLBs */
	tlbiall_el3();

	/* Initialize TCR flags: 64KiB granule, inner/outer write-back
	   write-allocate cacheable, inner shareable, 64GiB physical
	   address range, top byte ignored */
	raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
			  TCR_SH0_IS | TCR_TG0_64KB | TCR_PS_64GB |
			  TCR_TBI_USED);

	/* Initialize TTBR */
	raw_write_ttbr0_el3((uintptr_t)xlat_addr);

	/* Ensure all translation table writes are committed before enabling
	   the MMU */
	dsb();
	isb();

	/* Enable the MMU along with the instruction and data caches */
	sctlr = raw_read_sctlr_el3();
	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
	raw_write_sctlr_el3(sctlr);

	isb();
}
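
/*
 * Usage sketch (illustrative only): a caller hands a granule-aligned ttb
 * buffer and an mmap description to mmu_init(), then turns translation on.
 * The memranges construction helpers and the MA_DEV tag are assumed to
 * come from <memrange.h>/<arch/mmu.h>; they are not defined in this file.
 *
 *	static uint64_t ttb[4 * GRANULE_SIZE / sizeof(uint64_t)]
 *			__attribute__((aligned(GRANULE_SIZE)));
 *	struct memranges mmap;
 *
 *	// ... initialize mmap with the memranges helpers, then tag
 *	// cacheable DRAM and device MMIO differently, so that
 *	// get_block_attr() picks the matching MAIR index:
 *	memranges_insert(&mmap, 0x80000000, 0x40000000, MA_MEM);
 *	memranges_insert(&mmap, 0x00000000, 0x10000000, MA_DEV);
 *
 *	mmu_init(&mmap, ttb, sizeof(ttb));
 *	mmu_enable();
 */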