2 * This file is part of the coreboot project.
4 * Copyright 2014 Google Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 * ======================== stage_entry.S =====================================
18 * This file acts as an entry point to the different stages of arm64 as well as
19 * for the secure monitor. They share the same process of setting up stacks and
20 * jumping to c code. It is important to save x25 from corruption as it contains
21 * the argument for secure monitor.
22 * =============================================================================
27 #include <arch/lib_helpers.h>
28 #include <arch/startup.h>
30 #define STACK_SZ CONFIG_STACK_SIZE
31 #define EXCEPTION_STACK_SZ CONFIG_STACK_SIZE
34 * The stacks for each of the armv8 cores grow down from _estack. They are
35 * sized according to MAX_CPUS. Additionally provide exception stacks for each CPU.
/*
 * Zero-initialized (.bss) storage: a system-register save area
 * (_arm64_startup_data) followed by per-CPU main stacks and per-CPU
 * exception stacks.
 * NOTE(review): several labels/directives between the visible lines are
 * elided from this view (e.g. the _stack/_estack labels referenced by the
 * comment above) — confirm against the full file.
 */
37 .section .bss, "aw", @nobits
/* Saved register values, one PER_ELEMENT_SIZE_BYTES slot per *_INDEX
 * constant; consumed by the startup_restore macro below. */
39 .global _arm64_startup_data
42 .space NUM_ELEMENTS*PER_ELEMENT_SIZE_BYTES
/* Main stacks: one STACK_SZ region per CPU, CONFIG_MAX_CPUS total. */
48 .space CONFIG_MAX_CPUS*STACK_SZ
/* Exception stacks: one EXCEPTION_STACK_SZ region per CPU, aligned to
 * the stack size; stacks grow down from _estack_exceptions. */
51 .global _stack_exceptions
52 .global _estack_exceptions
53 .balign EXCEPTION_STACK_SZ
55 .space CONFIG_MAX_CPUS*EXCEPTION_STACK_SZ
67 ENDPROC(cpu_get_stack)
/*
 * cpu_get_exception_stack: compute the exception-stack pointer for the
 * CPU whose index is passed in (presumably x0 — TODO confirm against the
 * elided body).  The visible code loads the per-CPU exception stack size
 * into x1 and embeds the literal address of _estack_exceptions, from
 * which the per-CPU stacks grow down.
 * NOTE(review): original lines 71-76 are elided from this view; the
 * actual index/multiply/subtract sequence is not visible here.
 */
69 ENTRY(cpu_get_exception_stack)
70 mov x1, #EXCEPTION_STACK_SZ
/* Literal pool entry: top of the exception-stack region. */
77 .quad _estack_exceptions
78 ENDPROC(cpu_get_exception_stack)
81 * Boot strap the processor into a C environment. That consists of providing a
82 * 16-byte aligned stack. The programming environment uses SP_EL0 as its main
83 * stack while keeping SP_ELx reserved for exception entry.
86 * IMPORTANT: Ensure x25 is not corrupted because it saves the argument to
/*
 * arm64_c_environment: bootstrap the current CPU into a C environment —
 * install per-CPU exception and main stacks, then tail-jump to the C
 * entry point selected from the c_entry array.  Per the file header,
 * x25 carries the secmon argument and must not be clobbered on this path.
 * NOTE(review): many instructions are elided from this view (original
 * lines 91-93, 96-99, 101-103, 106-108, 110, 113, 115-119); comments
 * below describe only what is visible.
 */
89 ENTRY(arm64_c_environment)
/* x0 = index of the executing CPU, used to select per-CPU stacks. */
90 bl smp_processor_id /* x0 = cpu */
94 /* Set the exception stack for this cpu. */
95 bl cpu_get_exception_stack
100 /* Have stack pointer use SP_EL0. */
104 /* Set stack for this cpu. */
/* x24 presumably preserved the cpu index across the calls above —
 * TODO confirm against the elided lines. */
105 mov x0, x24 /* x0 = cpu */
109 /* Get entry point by dereferencing c_entry. */
111 /* Retrieve entry in c_entry array using x26 as the index. */
/* x1 += x26 * 8: each c_entry element is a 64-bit function pointer. */
112 adds x1, x1, x26, lsl #3
114 /* Move back the arguments from x25 to x0 */
120 ENDPROC(arm64_c_environment)
122 /* The first 2 instructions are for BSP and secondary CPUs,
123 * respectively. x26 holds the index into c_entry array. */
/*
 * split_bsp_path: per the comment above, its first two instructions
 * (elided here) send BSP and secondary CPUs down separate paths, with
 * x26 holding the index into the c_entry array; both paths converge on
 * arm64_c_environment.
 * NOTE(review): the macro body between lines 124 and 137, and the
 * closing .endm, are elided from this view — confirm in the full file.
 */
124 .macro split_bsp_path
137 /* Save the arguments to secmon in x25 */
139 b arm64_c_environment
143 * Set up SCTLR so that:
144 * little-endian mode is selected, XN is not enforced, MMU and caches are
145 * disabled, and alignment and stack-alignment checks are disabled.
/*
 * Early SCTLR configuration for the current exception level.
 * NOTE(review): the enclosing ENTRY/ENDPROC markers (presumably
 * arm64_cpu_early_setup, which is called below) are elided from this
 * view — confirm in the full file.
 * Clears bit 25 (EE), bit 19 (WXN), bit 12 (I) and bits 3:0 (SA/C/A/M):
 * little-endian, no XN enforcement, caches/MMU off, alignment and
 * stack-alignment checks off.  read_current/write_current are helper
 * macros from <arch/lib_helpers.h> that access SCTLR at the current EL;
 * x1 is the write macro's scratch register.
 */
148 read_current x0, sctlr
149 bic x0, x0, #(1 << 25) /* Little Endian */
150 bic x0, x0, #(1 << 19) /* XN not enforced */
151 bic x0, x0, #(1 << 12) /* Disable Instruction Cache */
152 bic x0, x0, #0xf /* Clear SA, C, A and M */
153 write_current sctlr, x0, x1
157 * This macro assumes x2 has base address and returns value read in x0
158 * x1 is used as temporary register.
/*
 * get_element_addr: compute x1 = x2 + \index * PER_ELEMENT_SIZE_BYTES,
 * i.e. the address of saved-data slot \index relative to the region base
 * in x2.  Per the comment above, the macro also returns the value read
 * in x0.
 * NOTE(review): the load into x0 and the closing .endm are elided from
 * this view — confirm in the full file.
 */
160 .macro get_element_addr index
161 add x1, x2, #(\index * PER_ELEMENT_SIZE_BYTES)
166 * Uses following registers:
167 * x0 = reading stored value
169 * x2 = base address of saved data region
/*
 * startup_restore: reload system registers from the _arm64_startup_data
 * save area and re-enable the MMU and caches.  Each get_element_addr
 * invocation leaves the saved value in x0 (x1 is the temporary, x2 the
 * region base); the write_* helper macros then install it at the
 * appropriate exception level.
 * NOTE(review): the closing .endm and a few intervening lines are elided
 * from this view — confirm in the full file.
 */
171 .macro startup_restore
172 adr x2, _arm64_startup_data
/* Translation / memory-attribute state for the current EL. */
174 get_element_addr MAIR_INDEX
175 write_current mair, x0, x1
177 get_element_addr TCR_INDEX
178 write_current tcr, x0, x1
180 get_element_addr TTBR0_INDEX
181 write_current ttbr0, x0, x1
/* Secure configuration register — EL3 only. */
183 get_element_addr SCR_INDEX
184 write_el3 scr, x0, x1
/* Exception vector base for the current EL. */
186 get_element_addr VBAR_INDEX
187 write_current vbar, x0, x1
/* Generic-timer frequency, accessible from EL0. */
189 get_element_addr CNTFRQ_INDEX
190 write_el0 cntfrq, x0, x1
/* FP/SIMD/trace trap controls: CPTR at EL3, CPACR at EL1. */
192 get_element_addr CPTR_INDEX
193 write_el3 cptr, x0, x1
195 get_element_addr CPACR_INDEX
196 write_el1 cpacr, x0, x1
/* With state restored, turn the caches and MMU back on (SCTLR bits
 * I=12, C=2, M=0). */
202 read_current x0, sctlr
203 orr x0, x0, #(1 << 12) /* Enable Instruction Cache */
204 orr x0, x0, #(1 << 2) /* Enable Data/Unified Cache */
205 orr x0, x0, #(1 << 0) /* Enable MMU */
206 write_current sctlr, x0, x1
/*
 * arm64_cpu_startup: cold-boot reset entry point.  Performs early CPU
 * setup, then falls into the common C-environment bootstrap.
 * NOTE(review): original lines 213 and 215 are elided from this view
 * (plausibly split_bsp_path and/or other init) — confirm in the full
 * file before relying on the exact sequence.
 */
212 CPU_RESET_ENTRY(arm64_cpu_startup)
214 bl arm64_cpu_early_setup
216 b arm64_c_environment
217 ENDPROC(arm64_cpu_startup)
/*
 * arm64_cpu_startup_resume: reset entry for the resume-from-suspend
 * path.  Performs the same early setup as arm64_cpu_startup before
 * entering the common C-environment bootstrap.
 * NOTE(review): original lines 220 and 222-223 are elided from this
 * view — presumably this path additionally invokes startup_restore to
 * reload the saved system registers; confirm in the full file.
 */
219 CPU_RESET_ENTRY(arm64_cpu_startup_resume)
221 bl arm64_cpu_early_setup
224 b arm64_c_environment
225 ENDPROC(arm64_cpu_startup_resume)
228 * stage_entry is defined as a weak symbol to allow SoCs/CPUs to define a custom
229 * entry point to perform any fixups that need to be done immediately after
230 * power on reset. In case SoC/CPU does not need any custom-defined entrypoint,
231 * this weak symbol can be used to jump directly to arm64_cpu_startup.
233 ENTRY_WEAK(stage_entry)