target/arm/arm-powerctl.c

/*
 * QEMU support -- ARM Power Control specific functions.
 *
 * Copyright (c) 2016 Jean-Christophe Dubois
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-qom.h"
#include "internals.h"
#include "arm-powerctl.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"

#ifndef DEBUG_ARM_POWERCTL
#define DEBUG_ARM_POWERCTL 0
#endif

#define DPRINTF(fmt, args...) \
    do { \
        if (DEBUG_ARM_POWERCTL) { \
            fprintf(stderr, "[ARM]%s: " fmt , __func__, ##args); \
        } \
    } while (0)
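
/*
 * Usage note (illustrative, not part of the original file): the DPRINTF()
 * calls below are compiled out unless DEBUG_ARM_POWERCTL is non-zero.
 * Because of the #ifndef guard above, one way to enable them is to define
 * the macro on the compiler command line, e.g.
 *
 *     -DDEBUG_ARM_POWERCTL=1
 *
 * This is only a sketch of how the guard is meant to be used; there is no
 * dedicated configure switch for it.
 */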

CPUState *arm_get_cpu_by_id(uint64_t id)
{
    CPUState *cpu;

    DPRINTF("cpu %" PRId64 "\n", id);

    CPU_FOREACH(cpu) {
        ARMCPU *armcpu = ARM_CPU(cpu);

        if (armcpu->mp_affinity == id) {
            return cpu;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR,
                  "[ARM]%s: Requesting unknown CPU %" PRId64 "\n",
                  __func__, id);

    return NULL;
}
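
/*
 * Illustrative example (not part of the original file): looking up a vCPU
 * by its MPIDR affinity value and checking its PSCI power state. The
 * affinity value 0x1 below is hypothetical.
 *
 *     CPUState *cs = arm_get_cpu_by_id(0x1);
 *
 *     if (cs && ARM_CPU(cs)->power_state == PSCI_OFF) {
 *         ... the target core is currently powered off ...
 *     }
 */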

struct CpuOnInfo {
    uint64_t entry;
    uint64_t context_id;
    uint32_t target_el;
    bool target_aa64;
};

static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
                                      run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
    struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr;

    /* Initialize the cpu we are turning on */
    cpu_reset(target_cpu_state);
    target_cpu_state->halted = 0;

    if (info->target_aa64) {
        if ((info->target_el < 3) && arm_feature(&target_cpu->env,
                                                 ARM_FEATURE_EL3)) {
            /*
             * As the target mode is AArch64, we need to set the lower
             * exception level (the requested level 2) to AArch64
             */
            target_cpu->env.cp15.scr_el3 |= SCR_RW;
        }

        if ((info->target_el < 2) && arm_feature(&target_cpu->env,
                                                 ARM_FEATURE_EL2)) {
            /*
             * As the target mode is AArch64, we need to set the lower
             * exception level (the requested level 1) to AArch64
             */
            target_cpu->env.cp15.hcr_el2 |= HCR_RW;
        }

        target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true);
    } else {
        /* We are requested to boot in AArch32 mode */
        static const uint32_t mode_for_el[] = { 0,
                                                ARM_CPU_MODE_SVC,
                                                ARM_CPU_MODE_HYP,
                                                ARM_CPU_MODE_SVC };

        cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M,
                   CPSRWriteRaw);
    }

    if (info->target_el == 3) {
        /* Processor is in secure mode */
        target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
    } else {
        /* Processor is not in secure mode */
        target_cpu->env.cp15.scr_el3 |= SCR_NS;

        /* Set NSACR.{CP11,CP10} so NS can access the FPU */
        target_cpu->env.cp15.nsacr |= 3 << 10;

        /*
         * If QEMU is providing the equivalent of EL3 firmware, then we need
         * to make sure a CPU targeting EL2 comes out of reset with a
         * functional HVC insn.
         */
        if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3)
            && info->target_el == 2) {
            target_cpu->env.cp15.scr_el3 |= SCR_HCE;
        }
    }

    /* We check that the started CPU is now at the correct exception level */
    assert(info->target_el == arm_current_el(&target_cpu->env));

    if (info->target_aa64) {
        target_cpu->env.xregs[0] = info->context_id;
    } else {
        target_cpu->env.regs[0] = info->context_id;
    }

    /* CP15 update requires rebuilding hflags */
    arm_rebuild_hflags(&target_cpu->env);

    /* Start the new CPU at the requested address */
    cpu_set_pc(target_cpu_state, info->entry);

    g_free(info);

    /* Finally set the power status */
    assert(qemu_mutex_iothread_locked());
    target_cpu->power_state = PSCI_ON;
}

int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
                   uint32_t target_el, bool target_aa64)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    struct CpuOnInfo *info;

    assert(qemu_mutex_iothread_locked());

    DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
            "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
            context_id);

    /* The requested EL needs to be in the 1 to 3 range */
    assert((target_el > 0) && (target_el < 4));

    if (target_aa64 && (entry & 3)) {
        /*
         * If we are booting in AArch64 mode then "entry" needs to be
         * 4-byte aligned.
         */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    /* Retrieve the cpu we are powering up */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        /* The cpu was not found */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_ON) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ALREADY_ON;
    }

    /*
     * The newly brought up CPU is requested to enter the exception level
     * "target_el" and to be in the requested mode (AArch64 or AArch32).
     */

    if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) ||
        ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) {
        /* The CPU does not support the requested level */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) {
        /*
         * For now we don't support booting an AArch64 CPU in AArch32 mode
         * TODO: We should add this support later
         */
        qemu_log_mask(LOG_UNIMP,
                      "[ARM]%s: Starting AArch64 CPU %" PRId64
                      " in AArch32 mode is not supported yet\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    /*
     * If another CPU has powered the target on we are in the state
     * ON_PENDING and additional attempts to power on the CPU should
     * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
     * spec)
     */
    if (target_cpu->power_state == PSCI_ON_PENDING) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already powering on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ON_PENDING;
    }

    /*
     * To avoid racing with a CPU we are just kicking off, we do the
     * final bit of preparation for the work in the target CPU's
     * context.
     */
    info = g_new(struct CpuOnInfo, 1);
    info->entry = entry;
    info->context_id = context_id;
    info->target_el = target_el;
    info->target_aa64 = target_aa64;

    async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work,
                     RUN_ON_CPU_HOST_PTR(info));

    /* We are good to go */
    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
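
/*
 * Illustrative sketch (not part of the original file): how PSCI emulation
 * code might forward a CPU_ON request. The variable names (target_mpidr,
 * entry_point, ctx_id) are hypothetical; the last two arguments request
 * entry at EL1 in AArch64 state.
 *
 *     int ret = arm_set_cpu_on(target_mpidr, entry_point, ctx_id,
 *                              1, true);
 *     if (ret != QEMU_ARM_POWERCTL_RET_SUCCESS) {
 *         ... translate ret into the PSCI error code returned to the guest ...
 *     }
 *
 * Note that success only means the power-on work has been queued: the
 * target CPU reaches PSCI_ON later, in its own context.
 */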

static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state,
                                                run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);

    /* Initialize the cpu we are turning on */
    cpu_reset(target_cpu_state);
    target_cpu_state->halted = 0;

    /* Finally set the power status */
    assert(qemu_mutex_iothread_locked());
    target_cpu->power_state = PSCI_ON;
}

int arm_set_cpu_on_and_reset(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    assert(qemu_mutex_iothread_locked());

    /* Retrieve the cpu we are powering up */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        /* The cpu was not found */
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }

    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_ON) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ALREADY_ON;
    }

    /*
     * If another CPU has powered the target on we are in the state
     * ON_PENDING and additional attempts to power on the CPU should
     * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
     * spec)
     */
    if (target_cpu->power_state == PSCI_ON_PENDING) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already powering on\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_ON_PENDING;
    }

    async_run_on_cpu(target_cpu_state, arm_set_cpu_on_and_reset_async_work,
                     RUN_ON_CPU_NULL);

    /* We are good to go */
    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
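
/*
 * Illustrative note (not part of the original file): unlike
 * arm_set_cpu_on(), this variant takes no entry point, context ID or
 * target EL; the CPU is reset and unhalted, so it restarts from its usual
 * reset state. A reset-controller model might use it roughly as sketched
 * below, where "mpidr" is a hypothetical affinity value:
 *
 *     if (arm_set_cpu_on_and_reset(mpidr) != QEMU_ARM_POWERCTL_RET_SUCCESS) {
 *         ... the core was unknown, already on, or already powering on ...
 *     }
 */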

static void arm_set_cpu_off_async_work(CPUState *target_cpu_state,
                                       run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);

    assert(qemu_mutex_iothread_locked());
    target_cpu->power_state = PSCI_OFF;
    target_cpu_state->halted = 1;
    target_cpu_state->exception_index = EXCP_HLT;
}

int arm_set_cpu_off(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    assert(qemu_mutex_iothread_locked());

    DPRINTF("cpu %" PRId64 "\n", cpuid);

    /* Change to the cpu we are powering down */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }
    target_cpu = ARM_CPU(target_cpu_state);
    if (target_cpu->power_state == PSCI_OFF) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is already off\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_IS_OFF;
    }

    /* Queue work to run under the target vCPU's context */
    async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work,
                     RUN_ON_CPU_NULL);

    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
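
/*
 * Illustrative sketch (not part of the original file): a PSCI CPU_OFF
 * handler running on behalf of the vCPU being turned off might simply do
 *
 *     arm_set_cpu_off(cpu->mp_affinity);
 *
 * where "cpu" is the calling ARMCPU. Because the actual halt is queued
 * with async_run_on_cpu(), power_state may still read PSCI_ON for a short
 * while after this call returns.
 */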

static void arm_reset_cpu_async_work(CPUState *target_cpu_state,
                                     run_on_cpu_data data)
{
    /* Reset the cpu */
    cpu_reset(target_cpu_state);
}

int arm_reset_cpu(uint64_t cpuid)
{
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;

    assert(qemu_mutex_iothread_locked());

    DPRINTF("cpu %" PRId64 "\n", cpuid);

    /* Change to the cpu we are resetting */
    target_cpu_state = arm_get_cpu_by_id(cpuid);
    if (!target_cpu_state) {
        return QEMU_ARM_POWERCTL_INVALID_PARAM;
    }
    target_cpu = ARM_CPU(target_cpu_state);

    if (target_cpu->power_state == PSCI_OFF) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "[ARM]%s: CPU %" PRId64 " is off\n",
                      __func__, cpuid);
        return QEMU_ARM_POWERCTL_IS_OFF;
    }

    /* Queue work to run under the target vCPU's context */
    async_run_on_cpu(target_cpu_state, arm_reset_cpu_async_work,
                     RUN_ON_CPU_NULL);

    return QEMU_ARM_POWERCTL_RET_SUCCESS;
}
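
/*
 * Illustrative sketch (not part of the original file): resetting a running
 * secondary core from device-model code, using a hypothetical "mpidr"
 * affinity value. The reset itself happens later, in the target vCPU's
 * own context.
 *
 *     switch (arm_reset_cpu(mpidr)) {
 *     case QEMU_ARM_POWERCTL_RET_SUCCESS:
 *         break;
 *     case QEMU_ARM_POWERCTL_IS_OFF:
 *         ... the core is powered off; use arm_set_cpu_on() instead ...
 *         break;
 *     default:
 *         ... unknown MPIDR ...
 *     }
 */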