[qemu.git] hw/intc/armv7m_nvic.c (blob 1e7ddcb94cbe081ff06ae37714ff4bba48ff7ac0)
/*
 * ARM Nested Vectored Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 *
 * The ARMv7M System controller is fairly tightly tied in with the
 * NVIC. Much of that is also implemented here.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/timer.h"
#include "hw/intc/armv7m_nvic.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "sysemu/runstate.h"
#include "target/arm/cpu.h"
#include "exec/exec-all.h"
#include "exec/memop.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"

/* IRQ number counting:
 *
 * the num-irq property counts the number of external IRQ lines
 *
 * NVICState::num_irq counts the total number of exceptions
 * (external IRQs, the 15 internal exceptions including reset,
 * and one for the unused exception number 0).
 *
 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
 *
 * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
 *
 * Iterating through all exceptions should typically be done with
 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
 *
 * The external qemu_irq lines are the NVIC's external IRQ lines,
 * so line 0 is exception 16.
 *
 * In the terminology of the architecture manual, "interrupts" are
 * a subcategory of exception referring to the external interrupts
 * (which are exception numbers NVIC_FIRST_IRQ and upward).
 * For historical reasons QEMU tends to use "interrupt" and
 * "exception" more or less interchangeably.
 */
#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
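
/* Worked example of the counting above: a board that sets the num-irq
 * property to 64 ends up with NVICState::num_irq == 80 (64 external
 * IRQs plus the 15 internal exceptions and the unused slot 0), and
 * external qemu_irq line 0 pends exception number 16.
 */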
/* Effective running priority of the CPU when no exception is active
 * (higher than the highest possible priority value)
 */
#define NVIC_NOEXC_PRIO 0x100
/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
#define NVIC_NS_PRIO_LIMIT 0x80

static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
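/* (These are the NVIC's identification-register bytes; the read path
 * that returns them falls outside this excerpt.)
 */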
static void signal_sysresetreq(NVICState *s)
{
    if (qemu_irq_is_connected(s->sysresetreq)) {
        qemu_irq_pulse(s->sysresetreq);
    } else {
        /*
         * Default behaviour if the SoC doesn't need to wire up
         * SYSRESETREQ (eg to a system reset controller of some kind):
         * perform a system reset via the usual QEMU API.
         */
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
    }
}
static int nvic_pending_prio(NVICState *s)
{
    /* return the group priority of the current pending interrupt,
     * or NVIC_NOEXC_PRIO if no interrupt is pending
     */
    return s->vectpending_prio;
}
/* Return the value of the ICSR RETTOBASE bit:
 * 1 if there is exactly one active exception
 * 0 if there is more than one active exception
 * UNKNOWN if there are no active exceptions (we choose 1,
 * which matches the choice Cortex-M3 is documented as making).
 *
 * NB: some versions of the documentation talk about this
 * counting "active exceptions other than the one shown by IPSR";
 * this is only different in the obscure corner case where guest
 * code has manually deactivated an exception and is about
 * to fail an exception-return integrity check. The definition
 * above is the one from the v8M ARM ARM and is also in line
 * with the behaviour documented for the Cortex-M3.
 */
static bool nvic_rettobase(NVICState *s)
{
    int irq, nhand = 0;
    bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);

    for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
        if (s->vectors[irq].active ||
            (check_sec && irq < NVIC_INTERNAL_VECTORS &&
             s->sec_vectors[irq].active)) {
            nhand++;
            if (nhand == 2) {
                return 0;
            }
        }
    }

    return 1;
}
/* Return the value of the ICSR ISRPENDING bit:
 * 1 if an external interrupt is pending
 * 0 if no external interrupt is pending
 */
static bool nvic_isrpending(NVICState *s)
{
    int irq;

    /*
     * We can shortcut if the highest priority pending interrupt
     * happens to be external; if not we need to check the whole
     * vectors[] array.
     */
    if (s->vectpending > NVIC_FIRST_IRQ) {
        return true;
    }

    for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
        if (s->vectors[irq].pending) {
            return true;
        }
    }

    return false;
}
static bool exc_is_banked(int exc)
{
    /* Return true if this is one of the limited set of exceptions which
     * are banked (and thus have state in sec_vectors[])
     */
    return exc == ARMV7M_EXCP_HARD ||
        exc == ARMV7M_EXCP_MEM ||
        exc == ARMV7M_EXCP_USAGE ||
        exc == ARMV7M_EXCP_SVC ||
        exc == ARMV7M_EXCP_PENDSV ||
        exc == ARMV7M_EXCP_SYSTICK;
}
/* Return a mask word which clears the subpriority bits from
 * a priority value for an M-profile exception, leaving only
 * the group priority.
 */
static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
{
    return ~0U << (s->prigroup[secure] + 1);
}
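
/* As a worked example: with PRIGROUP == 1 the mask above is
 * ~0U << 2 == 0xfffffffc, so a raw priority of 0x47 has group
 * priority 0x44 and subpriority 0x3.
 */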
static bool exc_targets_secure(NVICState *s, int exc)
{
    /* Return true if this non-banked exception targets Secure state. */
    if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        return false;
    }

    if (exc >= NVIC_FIRST_IRQ) {
        return !s->itns[exc];
    }

    /* Function shouldn't be called for banked exceptions. */
    assert(!exc_is_banked(exc));

    switch (exc) {
    case ARMV7M_EXCP_NMI:
    case ARMV7M_EXCP_BUS:
        return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    case ARMV7M_EXCP_SECURE:
        return true;
    case ARMV7M_EXCP_DEBUG:
        /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
        return false;
    default:
        /* reset, and reserved (unused) low exception numbers.
         * We'll get called by code that loops through all the exception
         * numbers, but it doesn't matter what we return here as these
         * non-existent exceptions will never be pended or active.
         */
        return true;
    }
}
static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
{
    /* Return the group priority for this exception, given its raw
     * (group-and-subgroup) priority value and whether it is targeting
     * secure state or not.
     */
    if (rawprio < 0) {
        return rawprio;
    }

    rawprio &= nvic_gprio_mask(s, targets_secure);
    /* AIRCR.PRIS causes us to squash all NS priorities into the
     * lower half of the total range
     */
    if (!targets_secure &&
        (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
        rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
    }
    return rawprio;
}
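
/* Worked example: with AIRCR.PRIS set, a Non-secure group priority of
 * 0x40 becomes (0x40 >> 1) + 0x80 == 0xa0. Every Non-secure priority
 * is thus squashed into the range [0x80, 0xff] and can never preempt
 * a Secure exception configured below 0x80.
 */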
/* Recompute vectpending and exception_prio for a CPU which implements
 * the Security extension
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;
    int pend_subprio = 0;

    /* R_CQRV: precedence is by:
     *  - lowest group priority; if both the same then
     *  - lowest subpriority; if both the same then
     *  - lowest exception number; if both the same (ie banked) then
     *  - secure exception takes precedence
     * Compare pseudocode RawExecutionPriority.
     * Annoyingly, now we have two prigroup values (for S and NS)
     * we can't do the loop comparison on raw priority values.
     */
    for (i = 1; i < s->num_irq; i++) {
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio, subprio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            prio = exc_group_prio(s, vec->prio, targets_secure);
            subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
            if (vec->enabled && vec->pending &&
                ((prio < pend_prio) ||
                 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
                pend_prio = prio;
                pend_subprio = subprio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}
/* Recompute vectpending and exception_prio */
static void nvic_recompute_state(NVICState *s)
{
    int i;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;

    /* In theory we could write one function that handled both
     * the "security extension present" and "not present" cases; however
     * the security related changes significantly complicate the
     * recomputation just by themselves and mixing both cases together
     * would be even worse, so we retain a separate non-secure-only
     * version for CPUs which don't implement the security extension.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        nvic_recompute_state_secure(s);
        return;
    }

    for (i = 1; i < s->num_irq; i++) {
        VecInfo *vec = &s->vectors[i];

        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
            pend_prio = vec->prio;
            pend_irq = i;
        }
        if (vec->active && vec->prio < active_prio) {
            active_prio = vec->prio;
        }
    }

    if (active_prio > 0) {
        active_prio &= nvic_gprio_mask(s, false);
    }

    if (pend_prio > 0) {
        pend_prio &= nvic_gprio_mask(s, false);
    }

    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state(s->vectpending,
                               s->vectpending_prio,
                               s->exception_prio);
}
/* Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This is a value between -3 (Secure FAULTMASK with AIRCR.BFHFNMINS
 * set) and NVIC_NOEXC_PRIO.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}
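
/* Note the AIRCR.PRIS interaction above: with PRIS set, PRIMASK_NS
 * (and FAULTMASK_NS when BFHFNMINS is clear) only lower the execution
 * priority to NVIC_NS_PRIO_LIMIT (0x80) rather than to 0, so Secure
 * exceptions configured below 0x80 can still preempt.
 */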
bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
{
    /* Return true if the requested execution priority is negative
     * for the specified security state, ie that security state
     * has an active NMI or HardFault or has set its FAULTMASK.
     * Note that this is not the same as whether the execution
     * priority is actually negative (for instance AIRCR.PRIS may
     * mean we don't allow FAULTMASK_NS to actually make the execution
     * priority negative). Compare pseudocode IsReqExcPriNeg().
     */
    NVICState *s = opaque;

    if (s->cpu->env.v7m.faultmask[secure]) {
        return true;
    }

    if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
        s->vectors[ARMV7M_EXCP_HARD].active) {
        return true;
    }

    if (s->vectors[ARMV7M_EXCP_NMI].active &&
        exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
        return true;
    }

    return false;
}
bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    NVICState *s = opaque;

    return nvic_exec_prio(s) > nvic_pending_prio(s);
}

int armv7m_nvic_raw_execution_priority(void *opaque)
{
    NVICState *s = opaque;

    return s->exception_prio;
}
/* caller must call nvic_irq_update() after this.
 * secure indicates the bank to use for banked exceptions (we assert if
 * we are passed secure=true for a non-banked exception).
 */
static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);

    if (secure) {
        assert(exc_is_banked(irq));
        s->sec_vectors[irq].prio = prio;
    } else {
        s->vectors[irq].prio = prio;
    }

    trace_nvic_set_prio(irq, secure, prio);
}
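
/* For example, with num_prio_bits == 3 the masking in set_prio() is
 * MAKE_64BIT_MASK(5, 3) == 0xe0: only the top three bits of the 8-bit
 * priority byte are writable, matching M-profile cores that implement
 * fewer than 8 priority bits.
 */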
/* Return the current raw priority register value.
 * secure indicates the bank to use for banked exceptions (we assert if
 * we are passed secure=true for a non-banked exception).
 */
static int get_prio(NVICState *s, unsigned irq, bool secure)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        return s->sec_vectors[irq].prio;
    } else {
        return s->vectors[irq].prio;
    }
}
/* Recompute state and assert irq line accordingly.
 * Must be called after changes to:
 *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
 *  prigroup
 */
static void nvic_irq_update(NVICState *s)
{
    int lvl;
    int pend_prio;

    nvic_recompute_state(s);
    pend_prio = nvic_pending_prio(s);

    /* Raise NVIC output if this IRQ would be taken, except that we
     * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
     * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
     * to those CPU registers don't cause us to recalculate the NVIC
     * pending info.
     */
    lvl = (pend_prio < s->exception_prio);
    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
    qemu_set_irq(s->excpout, lvl);
}
/**
 * armv7m_nvic_clear_pending: mark the specified exception as not pending
 * @opaque: the NVIC
 * @irq: the exception number to mark as not pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Marks the specified exception as not pending. Note that we will assert()
 * if @secure is true and @irq does not specify one of the fixed set
 * of architecturally banked exceptions.
 */
static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    if (secure) {
        assert(exc_is_banked(irq));
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }
    trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
    if (vec->pending) {
        vec->pending = 0;
        nvic_irq_update(s);
    }
}
static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    /* Pend an exception, including possibly escalating it to HardFault.
     *
     * This function handles both "normal" pending of interrupts and
     * exceptions, and also derived exceptions (ones which occur as
     * a result of trying to take some other exception).
     *
     * If derived == true, the caller guarantees that we are part way through
     * trying to take an exception (but have not yet called
     * armv7m_nvic_acknowledge_irq() to make it active), and so:
     *  - s->vectpending is the "original exception" we were trying to take
     *  - irq is the "derived exception"
     *  - nvic_exec_prio(s) gives the priority before exception entry
     * Here we handle the prioritization logic which the pseudocode puts
     * in the DerivedLateArrival() function.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /* DebugMonitorFault, but its priority is lower than the
             * preempted exception priority: just ignore it.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /* If this is a terminal exception (one which means we cannot
             * take the original exception, like a failure to read its
             * vector table entry), then we must take the derived exception.
             * If the derived exception can't take priority over the
             * original exception, then we go into Lockup.
             *
             * For QEMU, we rely on the fact that a derived exception is
             * terminal if and only if it's reported to us as HardFault,
             * which saves having to have an extra argument is_terminal
             * that we'd only use in one place.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }
        /* We now continue with the same code as for a normal pending
         * exception, which will cause us to pend the derived exception.
         * We'll then take either the original or the derived exception
         * based on which is higher priority by the usual mechanism
         * for selecting the highest priority pending interrupt.
         */
    }

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {
            /* We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HF for
             * the target security state of the original exception; otherwise
             * we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /* We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}

void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}

void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}
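
/* Escalation example: an SVC executed while SVCall is disabled, or
 * while its group priority is numerically >= the current execution
 * priority, cannot be taken immediately, so the logic above pends a
 * (possibly Secure) HardFault instead and records HFSR.FORCED.
 */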
void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
{
    /*
     * Pend an exception during lazy FP stacking. This differs
     * from the usual exception pending because the logic for
     * whether we should escalate depends on the saved context
     * in the FPCCR register, not on the current state of the CPU/NVIC.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;
    bool escalate = false;
    /*
     * We will only look at bits in fpccr if this is a banked exception
     * (in which case 'secure' tells us whether it is the S or NS version).
     * All the bits for the non-banked exceptions are in fpccr_s.
     */
    uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
    uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    switch (irq) {
    case ARMV7M_EXCP_DEBUG:
        if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
            /* Ignore DebugMonitor exception */
            return;
        }
        break;
    case ARMV7M_EXCP_MEM:
        escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
        break;
    case ARMV7M_EXCP_USAGE:
        escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
        break;
    case ARMV7M_EXCP_BUS:
        escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
        break;
    case ARMV7M_EXCP_SECURE:
        escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
        break;
    default:
        g_assert_not_reached();
    }

    if (escalate) {
        /*
         * Escalate to HardFault: faults that initially targeted Secure
         * continue to do so, even if HF normally targets NonSecure.
         */
        irq = ARMV7M_EXCP_HARD;
        if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
            (targets_secure ||
             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
            vec = &s->sec_vectors[irq];
        } else {
            vec = &s->vectors[irq];
        }
    }

    if (!vec->enabled ||
        nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
        if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
            /*
             * We want to escalate to HardFault but the context the
             * FP state belongs to prevents the exception pre-empting.
             */
            cpu_abort(&s->cpu->parent_obj,
                      "Lockup: can't escalate to HardFault during "
                      "lazy FP register stacking\n");
        }
    }

    if (escalate) {
        s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    if (!vec->pending) {
        vec->pending = 1;
        /*
         * We do not call nvic_irq_update(), because we know our caller
         * is going to handle causing us to take the exception by
         * raising EXCP_LAZYFP, so raising the IRQ line would be
         * pointless extra work. We just need to recompute the
         * priorities so that armv7m_nvic_can_take_pending_exception()
         * returns the right answer.
         */
        nvic_recompute_state(s);
    }
}
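
/* For instance, a MemManage fault raised during lazy FP state
 * preservation escalates to HardFault when the MMRDY bit recorded in
 * the relevant FPCCR bank is clear, since the saved context cannot
 * take the fault directly.
 */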
/* Make pending IRQ active.  */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    if (s->vectpending_is_s_banked) {
        vec = &s->sec_vectors[pending];
    } else {
        vec = &s->vectors[pending];
    }

    assert(vec->enabled);
    assert(vec->pending);

    assert(s->vectpending_prio < running);

    trace_nvic_acknowledge_irq(pending, s->vectpending_prio);

    vec->active = 1;
    vec->pending = 0;

    write_v7m_exception(env, s->vectpending);

    nvic_irq_update(s);
}
static bool vectpending_targets_secure(NVICState *s)
{
    /* Return true if s->vectpending targets Secure state */
    if (s->vectpending_is_s_banked) {
        return true;
    }
    return !exc_is_banked(s->vectpending) &&
        exc_targets_secure(s, s->vectpending);
}

void armv7m_nvic_get_pending_irq_info(void *opaque,
                                      int *pirq, bool *ptargets_secure)
{
    NVICState *s = (NVICState *)opaque;
    const int pending = s->vectpending;
    bool targets_secure;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    targets_secure = vectpending_targets_secure(s);

    trace_nvic_get_pending_irq_info(pending, targets_secure);

    *ptargets_secure = targets_secure;
    *pirq = pending;
}
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec = NULL;
    int ret = 0;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    trace_nvic_complete_irq(irq, secure);

    if (secure && exc_is_banked(irq)) {
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }

    /*
     * Identify illegal exception return cases. We can't immediately
     * return at this point because we still need to deactivate
     * (either this exception or NMI/HardFault) first.
     */
    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
        /*
         * Return from a configurable exception targeting the opposite
         * security state from the one we're trying to complete it for.
         * Clear vec because it's not really the VecInfo for this
         * (irq, secstate) so we mustn't deactivate it.
         */
        ret = -1;
        vec = NULL;
    } else if (!vec->active) {
        /* Return from an inactive interrupt */
        ret = -1;
    } else {
        /* Legal return, we will return the RETTOBASE bit value to the caller */
        ret = nvic_rettobase(s);
    }

    /*
     * For negative priorities, v8M will forcibly deactivate the appropriate
     * NMI or HardFault regardless of what interrupt we're being asked to
     * deactivate (compare the DeActivate() pseudocode). This is a guard
     * against software returning from NMI or HardFault with a corrupted
     * IPSR and leaving the CPU in a negative-priority state.
     * v7M does not do this, but simply deactivates the requested interrupt.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        switch (armv7m_nvic_raw_execution_priority(s)) {
        case -1:
            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                vec = &s->vectors[ARMV7M_EXCP_HARD];
            } else {
                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
            }
            break;
        case -2:
            vec = &s->vectors[ARMV7M_EXCP_NMI];
            break;
        case -3:
            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
            break;
        default:
            break;
        }
    }

    if (!vec) {
        return ret;
    }

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}
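
/* A consequence of the v8M forced deactivation above: software
 * returning from NMI with a corrupted IPSR (raw execution priority
 * -2) still deactivates the NMI vector, whatever exception number it
 * passed in.
 */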
bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
{
    /*
     * Return whether an exception is "ready", i.e. it is enabled and is
     * configured at a priority which would allow it to interrupt the
     * current execution priority.
     *
     * irq and secure have the same semantics as for armv7m_nvic_set_pending():
     * for non-banked exceptions secure is always false; for banked exceptions
     * it indicates which of the exceptions is required.
     */
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    int running = nvic_exec_prio(s);

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    /*
     * HardFault is an odd special case: we always check against -1,
     * even if we're secure and HardFault has priority -3; we never
     * need to check for enabled state.
     */
    if (irq == ARMV7M_EXCP_HARD) {
        return running > -1;
    }

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    return vec->enabled &&
        exc_group_prio(s, vec->prio, secure) < running;
}
/* callback when external interrupt line is changed */
static void set_irq_level(void *opaque, int n, int level)
{
    NVICState *s = opaque;
    VecInfo *vec;

    n += NVIC_FIRST_IRQ;

    assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);

    trace_nvic_set_irq_level(n, level);

    /* The pending status of an external interrupt is
     * latched on rising edge and exception handler return.
     *
     * Pulsing the IRQ will always run the handler
     * once, and the handler will re-run until the
     * level is low when the handler completes.
     */
    vec = &s->vectors[n];
    if (level != vec->level) {
        vec->level = level;
        if (level) {
            armv7m_nvic_set_pending(s, n, false);
        }
    }
}
/* callback when external NMI line is changed */
static void nvic_nmi_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    trace_nvic_set_nmi_level(level);

    /*
     * The architecture doesn't specify whether NMI should share
     * the normal-interrupt behaviour of being resampled on
     * exception handler return. We choose not to, so just
     * set NMI pending here and don't track the current level.
     */
    if (level) {
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
    }
}
static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
{
    ARMCPU *cpu = s->cpu;
    uint32_t val;

    switch (offset) {
    case 4: /* Interrupt Control Type.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
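        /* (The ICTR value encodes 32 * (value + 1) supported interrupt
         * lines, so e.g. a configuration with 64 external IRQ lines
         * reads back as 1.)
         */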
    case 0xc: /* CPPWR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        /* We make the IMPDEF choice that nothing can ever go into a
         * non-retentive power state, which allows us to RAZ/WI this.
         */
        return 0;
    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
    {
        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        val = 0;
        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
            if (s->itns[startvec + i]) {
                val |= (1 << i);
            }
        }
        return val;
    }
    case 0xcfc:
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) {
            goto bad_offset;
        }
        return cpu->revidr;
    case 0xd00: /* CPUID Base.  */
        return cpu->midr;
    case 0xd04: /* Interrupt Control State (ICSR) */
        /* VECTACTIVE */
        val = cpu->env.v7m.exception;
        /* VECTPENDING */
        if (s->vectpending) {
            /*
             * From v8.1M VECTPENDING must read as 1 if accessed as
             * NonSecure and the highest priority pending and enabled
             * exception targets Secure.
             */
            int vp = s->vectpending;
            if (!attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_V8_1M) &&
                vectpending_targets_secure(s)) {
                vp = 1;
            }
            val |= (vp & 0x1ff) << 12;
        }
        /* ISRPENDING - set if any external IRQ is pending */
        if (nvic_isrpending(s)) {
            val |= (1 << 22);
        }
        /* RETTOBASE - set if only one handler is active */
        if (nvic_rettobase(s)) {
            val |= (1 << 11);
        }
        if (attrs.secure) {
            /* PENDSTSET */
            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
                val |= (1 << 26);
            }
            /* PENDSVSET */
            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
                val |= (1 << 28);
            }
        } else {
            /* PENDSTSET */
            if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
                val |= (1 << 26);
            }
            /* PENDSVSET */
            if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
                val |= (1 << 28);
            }
        }
        /* NMIPENDSET */
        if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
            && s->vectors[ARMV7M_EXCP_NMI].pending) {
            val |= (1 << 31);
        }
        /* ISRPREEMPT: RES0 when halting debug not implemented */
        /* STTNS: RES0 for the Main Extension */
        return val;
    case 0xd08: /* Vector Table Offset.  */
        return cpu->env.v7m.vecbase[attrs.secure];
    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
        val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
        if (attrs.secure) {
            /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
            val |= cpu->env.v7m.aircr;
        } else {
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
                 * security isn't supported then BFHFNMINS is RAO (and
                 * the bit in env.v7m.aircr is always set).
                 */
                val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
            }
        }
        return val;
    case 0xd10: /* System Control.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        return cpu->env.v7m.scr[attrs.secure];
    case 0xd14: /* Configuration Control.  */
        /*
         * Non-banked bits: BFHFNMIGN (stored in the NS copy of the register)
         * and TRD (stored in the S copy of the register)
         */
        val = cpu->env.v7m.ccr[attrs.secure];
        val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
        /* BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0 */
        if (!attrs.secure) {
            if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
                val &= ~R_V7M_CCR_BFHFNMIGN_MASK;
            }
        }
        return val;
    case 0xd24: /* System Handler Control and State (SHCSR) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        val = 0;
        if (attrs.secure) {
            if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
                val |= (1 << 0);
            }
            if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
                val |= (1 << 2);
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
                val |= (1 << 3);
            }
            if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
                val |= (1 << 7);
            }
            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
                val |= (1 << 10);
            }
            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
                val |= (1 << 11);
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
                val |= (1 << 12);
            }
            if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
                val |= (1 << 13);
            }
            if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
                val |= (1 << 15);
            }
            if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
                val |= (1 << 16);
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
                val |= (1 << 18);
            }
            if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
                val |= (1 << 21);
            }
            /* SecureFault is not banked but is always RAZ/WI to NS */
            if (s->vectors[ARMV7M_EXCP_SECURE].active) {
                val |= (1 << 4);
            }
            if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
                val |= (1 << 19);
            }
            if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
                val |= (1 << 20);
            }
        } else {
            if (s->vectors[ARMV7M_EXCP_MEM].active) {
                val |= (1 << 0);
            }
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
                if (s->vectors[ARMV7M_EXCP_HARD].active) {
                    val |= (1 << 2);
                }
                if (s->vectors[ARMV7M_EXCP_HARD].pending) {
                    val |= (1 << 21);
                }
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].active) {
                val |= (1 << 3);
            }
            if (s->vectors[ARMV7M_EXCP_SVC].active) {
                val |= (1 << 7);
            }
            if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
                val |= (1 << 10);
            }
            if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
                val |= (1 << 11);
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
                val |= (1 << 12);
            }
            if (s->vectors[ARMV7M_EXCP_MEM].pending) {
                val |= (1 << 13);
            }
            if (s->vectors[ARMV7M_EXCP_SVC].pending) {
                val |= (1 << 15);
            }
            if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
                val |= (1 << 16);
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
                val |= (1 << 18);
            }
        }
        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            if (s->vectors[ARMV7M_EXCP_BUS].active) {
                val |= (1 << 1);
            }
            if (s->vectors[ARMV7M_EXCP_BUS].pending) {
                val |= (1 << 14);
            }
            if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
                val |= (1 << 17);
            }
            if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
                s->vectors[ARMV7M_EXCP_NMI].active) {
                /* NMIACT is not present in v7M */
                val |= (1 << 5);
            }
        }
        /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
        if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
            val |= (1 << 8);
        }
        return val;
    case 0xd2c: /* Hard Fault Status.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->env.v7m.hfsr;
    case 0xd30: /* Debug Fault Status.  */
        return cpu->env.v7m.dfsr;
    case 0xd34: /* MMFAR MemManage Fault Address */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->env.v7m.mmfar[attrs.secure];
    case 0xd38: /* Bus Fault Address.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            return 0;
        }
        return cpu->env.v7m.bfar;
    case 0xd3c: /* Aux Fault Status.  */
        /* TODO: Implement fault status registers.  */
        qemu_log_mask(LOG_UNIMP,
                      "Aux Fault status registers unimplemented\n");
        return 0;
    case 0xd40: /* PFR0.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_pfr0;
    case 0xd44: /* PFR1.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_pfr1;
    case 0xd48: /* DFR0.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_dfr0;
    case 0xd4c: /* AFR0.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->id_afr0;
    case 0xd50: /* MMFR0.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr0;
    case 0xd54: /* MMFR1.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr1;
    case 0xd58: /* MMFR2.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr2;
    case 0xd5c: /* MMFR3.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr3;
    case 0xd60: /* ISAR0.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar0;
    case 0xd64: /* ISAR1.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar1;
    case 0xd68: /* ISAR2.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar2;
    case 0xd6c: /* ISAR3.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar3;
    case 0xd70: /* ISAR4.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar4;
    case 0xd74: /* ISAR5.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar5;
    case 0xd78: /* CLIDR */
        return cpu->clidr;
    case 0xd7c: /* CTR */
        return cpu->ctr;
    case 0xd80: /* CCSIDR */
    {
        int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
        return cpu->ccsidr[idx];
    }
    case 0xd84: /* CSSELR */
        return cpu->env.v7m.csselr[attrs.secure];
    case 0xd88: /* CPACR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.cpacr[attrs.secure];
    case 0xd8c: /* NSACR */
        if (!attrs.secure || !cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.nsacr;
    /* TODO: Implement debug registers.  */
    case 0xd90: /* MPU_TYPE */
        /* Unified MPU; if the MPU is not present this value is zero */
        return cpu->pmsav7_dregion << 8;
    case 0xd94: /* MPU_CTRL */
        return cpu->env.v7m.mpu_ctrl[attrs.secure];
    case 0xd98: /* MPU_RNR */
        return cpu->env.pmsav7.rnr[attrs.secure];
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rbar[attrs.secure][region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
    }
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xda0) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rlar[attrs.secure][region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
            (cpu->env.pmsav7.drsr[region] & 0xffff);
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair0[attrs.secure];
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair1[attrs.secure];
    case 0xdd0: /* SAU_CTRL */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.sau.ctrl;
    case 0xdd4: /* SAU_TYPE */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->sau_sregion;
    case 0xdd8: /* SAU_RNR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.sau.rnr;
    case 0xddc: /* SAU_RBAR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        if (region >= cpu->sau_sregion) {
            return 0;
        }
        return cpu->env.sau.rbar[region];
    }
    case 0xde0: /* SAU_RLAR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        if (region >= cpu->sau_sregion) {
            return 0;
        }
        return cpu->env.sau.rlar[region];
    }
    case 0xde4: /* SFSR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.v7m.sfsr;
    case 0xde8: /* SFAR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.v7m.sfar;
    case 0xf04: /* RFSR */
        if (!cpu_isar_feature(aa32_ras, cpu)) {
            goto bad_offset;
        }
        /* We provide minimal-RAS only: RFSR is RAZ/WI */
        return 0;
    case 0xf34: /* FPCCR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        if (attrs.secure) {
            return cpu->env.v7m.fpccr[M_REG_S];
        } else {
            /*
             * NS can read LSPEN, CLRONRET and MONRDY. It can read
             * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0;
             * other non-banked bits RAZ.
             * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set.
             */
            uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
            uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
                R_V7M_FPCCR_CLRONRET_MASK |
                R_V7M_FPCCR_MONRDY_MASK;

            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
            }

            value &= mask;

            value |= cpu->env.v7m.fpccr[M_REG_NS];
            return value;
        }
    case 0xf38: /* FPCAR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.fpcar[attrs.secure];
    case 0xf3c: /* FPDSCR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.fpdscr[attrs.secure];
    case 0xf40: /* MVFR0 */
        return cpu->isar.mvfr0;
    case 0xf44: /* MVFR1 */
        return cpu->isar.mvfr1;
    case 0xf48: /* MVFR2 */
        return cpu->isar.mvfr2;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
        return 0;
    }
}
static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
                        MemTxAttrs attrs)
{
    ARMCPU *cpu = s->cpu;

    switch (offset) {
    case 0xc: /* CPPWR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        /* Make the IMPDEF choice to RAZ/WI this. */
        break;
    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
    {
        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            break;
        }
        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
            s->itns[startvec + i] = (value >> i) & 1;
        }
        nvic_irq_update(s);
        break;
    }
    case 0xd04: /* Interrupt Control State (ICSR) */
        if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            if (value & (1 << 31)) {
                armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
            } else if (value & (1 << 30) &&
                       arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* PENDNMICLR didn't exist in v7M */
                armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
            }
        }
        if (value & (1 << 28)) {
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
        } else if (value & (1 << 27)) {
            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
        }
        if (value & (1 << 26)) {
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
        } else if (value & (1 << 25)) {
            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
        }
        break;
    case 0xd08: /* Vector Table Offset.  */
        cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
        break;
    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
        if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
            if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
                if (attrs.secure ||
                    !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
                    signal_sysresetreq(s);
                }
            }
            if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Setting VECTCLRACTIVE when not in DEBUG mode "
                              "is UNPREDICTABLE\n");
            }
            if (value & R_V7M_AIRCR_VECTRESET_MASK) {
                /* NB: this bit is RES0 in v8M */
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Setting VECTRESET when not in DEBUG mode "
                              "is UNPREDICTABLE\n");
            }
            if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
                s->prigroup[attrs.secure] =
                    extract32(value,
                              R_V7M_AIRCR_PRIGROUP_SHIFT,
                              R_V7M_AIRCR_PRIGROUP_LENGTH);
            }
            /* AIRCR.IESB is RAZ/WI because we implement only minimal RAS */
            if (attrs.secure) {
                /* These bits are only writable by secure */
                cpu->env.v7m.aircr = value &
                    (R_V7M_AIRCR_SYSRESETREQS_MASK |
                     R_V7M_AIRCR_BFHFNMINS_MASK |
                     R_V7M_AIRCR_PRIS_MASK);
                /* BFHFNMINS changes the priority of Secure HardFault, and
                 * allows a pending Non-secure HardFault to preempt (which
                 * we implement by marking it enabled).
                 */
                if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                    s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
                    s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
                } else {
                    s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
                    s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
                }
            }
            nvic_irq_update(s);
        }
        break;
    case 0xd10: /* System Control.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        /* We don't implement deep-sleep so these bits are RAZ/WI.
         * The other bits in the register are banked.
         * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
         * is architecturally permitted.
         */
        value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
        cpu->env.v7m.scr[attrs.secure] = value;
        break;
    case 0xd14: /* Configuration Control.  */
    {
        uint32_t mask;

        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }

        /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
        mask = R_V7M_CCR_STKALIGN_MASK |
            R_V7M_CCR_BFHFNMIGN_MASK |
            R_V7M_CCR_DIV_0_TRP_MASK |
            R_V7M_CCR_UNALIGN_TRP_MASK |
            R_V7M_CCR_USERSETMPEND_MASK |
            R_V7M_CCR_NONBASETHRDENA_MASK;
        if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) {
            /* TRD is always RAZ/WI from NS */
            mask |= R_V7M_CCR_TRD_MASK;
        }
        value &= mask;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
            value |= R_V7M_CCR_NONBASETHRDENA_MASK
                | R_V7M_CCR_STKALIGN_MASK;
        }
        if (attrs.secure) {
            /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
            cpu->env.v7m.ccr[M_REG_NS] =
                (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
                | (value & R_V7M_CCR_BFHFNMIGN_MASK);
            value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
        } else {
            /*
             * BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0, so
             * preserve the state currently in the NS element of the array
             */
            if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
                value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
                value |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
            }
        }

        cpu->env.v7m.ccr[attrs.secure] = value;
        break;
    }
    case 0xd24: /* System Handler Control and State (SHCSR) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        if (attrs.secure) {
            s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
            /* Secure HardFault active bit cannot be written */
            s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
            s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
            s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
                (value & (1 << 10)) != 0;
            s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
                (value & (1 << 11)) != 0;
            s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
                (value & (1 << 12)) != 0;
            s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
            s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
            s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
            s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
            s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
                (value & (1 << 18)) != 0;
            s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
            /* SecureFault not banked, but RAZ/WI to NS */
            s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
            s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
            s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
        } else {
            s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* HARDFAULTPENDED is not present in v7M */
                s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
            }
            s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
            s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
            s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
            s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
            s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
            s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
            s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
            s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
            s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
        }
        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
            s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
            s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
        }
        /* NMIACT can only be written if the write is of a zero, with
         * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
         */
        if (!attrs.secure && cpu->env.v7m.secure &&
            (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
            (value & (1 << 5)) == 0) {
            s->vectors[ARMV7M_EXCP_NMI].active = 0;
        }
        /* HARDFAULTACT can only be written if the write is of a zero
         * to the non-secure HardFault state by the CPU in secure state.
         * The only case where we can be targeting the non-secure HF state
         * when in secure state is if this is a write via the NS alias
         * and BFHFNMINS is 1.
         */
        if (!attrs.secure && cpu->env.v7m.secure &&
            (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
            (value & (1 << 2)) == 0) {
            s->vectors[ARMV7M_EXCP_HARD].active = 0;
        }

        /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
        s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
        nvic_irq_update(s);
        break;
    case 0xd2c: /* Hard Fault Status.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        cpu->env.v7m.hfsr &= ~value; /* W1C */
        break;
    case 0xd30: /* Debug Fault Status.  */
        cpu->env.v7m.dfsr &= ~value; /* W1C */
        break;
    case 0xd34: /* Mem Manage Address.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        cpu->env.v7m.mmfar[attrs.secure] = value;
        return;
    case 0xd38: /* Bus Fault Address.  */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            return;
        }
        cpu->env.v7m.bfar = value;
        return;
    case 0xd3c: /* Aux Fault Status.  */
        qemu_log_mask(LOG_UNIMP,
                      "NVIC: Aux fault status registers unimplemented\n");
        break;
    case 0xd84: /* CSSELR */
        if (!arm_v7m_csselr_razwi(cpu)) {
            cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
        }
        break;
    case 0xd88: /* CPACR */
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            /* We implement only the Floating Point extension's CP10/CP11 */
            cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
        }
        break;
    case 0xd8c: /* NSACR */
        if (attrs.secure && cpu_isar_feature(aa32_vfp_simd, cpu)) {
            /* We implement only the Floating Point extension's CP10/CP11 */
            cpu->env.v7m.nsacr = value & (3 << 10);
        }
        break;
    case 0xd90: /* MPU_TYPE */
        return; /* RO */
    case 0xd94: /* MPU_CTRL */
        if ((value &
             (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
            == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
            qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
                          "UNPREDICTABLE\n");
        }
        cpu->env.v7m.mpu_ctrl[attrs.secure]
            = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
                       R_V7M_MPU_CTRL_HFNMIENA_MASK |
                       R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
        tlb_flush(CPU(cpu));
        break;
    case 0xd98: /* MPU_RNR */
        if (value >= cpu->pmsav7_dregion) {
            qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
                          PRIu32 "/%" PRIu32 "\n",
                          value, cpu->pmsav7_dregion);
        } else {
            cpu->env.pmsav7.rnr[attrs.secure] = value;
        }
        break;
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */

            region = cpu->env.pmsav7.rnr[attrs.secure];
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return;
            }
            cpu->env.pmsav8.rbar[attrs.secure][region] = value;
            tlb_flush(CPU(cpu));
            return;
        }

        if (value & (1 << 4)) {
            /* VALID bit means use the region number specified in this
             * value and also update MPU_RNR.REGION with that value.
             */
            region = extract32(value, 0, 4);
            if (region >= cpu->pmsav7_dregion) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "MPU region out of range %u/%" PRIu32 "\n",
                              region, cpu->pmsav7_dregion);
                return;
            }
            cpu->env.pmsav7.rnr[attrs.secure] = region;
        } else {
            region = cpu->env.pmsav7.rnr[attrs.secure];
        }

        if (region >= cpu->pmsav7_dregion) {
            return;
        }

        cpu->env.pmsav7.drbar[region] = value & ~0x1f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */

            region = cpu->env.pmsav7.rnr[attrs.secure];
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return;
            }
            cpu->env.pmsav8.rlar[attrs.secure][region] = value;
            tlb_flush(CPU(cpu));
            return;
        }

        if (region >= cpu->pmsav7_dregion) {
            return;
        }

        cpu->env.pmsav7.drsr[region] = value & 0xff3f;
        cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (cpu->pmsav7_dregion) {
            /* Register is RES0 if no MPU regions are implemented */
            cpu->env.pmsav8.mair0[attrs.secure] = value;
        }
        /* We don't need to do anything else because memory attributes
         * only affect cacheability, and we don't implement caching.
         */
        break;
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (cpu->pmsav7_dregion) {
            /* Register is RES0 if no MPU regions are implemented */
            cpu->env.pmsav8.mair1[attrs.secure] = value;
        }
        /* We don't need to do anything else because memory attributes
         * only affect cacheability, and we don't implement caching.
         */
        break;
    case 0xdd0: /* SAU_CTRL */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        cpu->env.sau.ctrl = value & 3;
        break;
    case 0xdd4: /* SAU_TYPE */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        break;
    case 0xdd8: /* SAU_RNR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        if (value >= cpu->sau_sregion) {
            qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
                          PRIu32 "/%" PRIu32 "\n",
                          value, cpu->sau_sregion);
        } else {
            cpu->env.sau.rnr = value;
        }
        break;
    case 0xddc: /* SAU_RBAR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        if (region >= cpu->sau_sregion) {
            return;
        }
        cpu->env.sau.rbar[region] = value & ~0x1f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xde0: /* SAU_RLAR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        if (region >= cpu->sau_sregion) {
            return;
        }
        cpu->env.sau.rlar[region] = value & ~0x1c;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xde4: /* SFSR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        cpu->env.v7m.sfsr &= ~value; /* W1C */
        break;
    case 0xde8: /* SFAR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return;
        }
        cpu->env.v7m.sfar = value;
        break;
    case 0xf00: /* Software Triggered Interrupt Register */
    {
        int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;

        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }

        if (excnum < s->num_irq) {
            armv7m_nvic_set_pending(s, excnum, false);
        }
        break;
    }
    case 0xf04: /* RFSR */
        if (!cpu_isar_feature(aa32_ras, cpu)) {
            goto bad_offset;
        }
        /* We provide minimal-RAS only: RFSR is RAZ/WI */
        break;
    case 0xf34: /* FPCCR */
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            /* Not all bits here are banked. */
            uint32_t fpccr_s;

            if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* Don't allow setting of bits not present in v7M */
                value &= (R_V7M_FPCCR_LSPACT_MASK |
                          R_V7M_FPCCR_USER_MASK |
                          R_V7M_FPCCR_THREAD_MASK |
                          R_V7M_FPCCR_HFRDY_MASK |
                          R_V7M_FPCCR_MMRDY_MASK |
                          R_V7M_FPCCR_BFRDY_MASK |
                          R_V7M_FPCCR_MONRDY_MASK |
                          R_V7M_FPCCR_LSPEN_MASK |
                          R_V7M_FPCCR_ASPEN_MASK);
            }

            value &= ~R_V7M_FPCCR_RES0_MASK;

            if (!attrs.secure) {
                /* Some non-banked bits are configurably writable by NS */
                fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
                if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
                    uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
                }
                if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
                    uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
                }
                if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
                    uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
                    uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
                }
                /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */
                {
                    uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
                    fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
                }

                /*
                 * All other non-banked bits are RAZ/WI from NS; write
                 * just the banked bits to fpccr[M_REG_NS].
                 */
                value &= R_V7M_FPCCR_BANKED_MASK;
                cpu->env.v7m.fpccr[M_REG_NS] = value;
            } else {
                fpccr_s = value;
            }
            cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
        }
        break;
    case 0xf38: /* FPCAR */
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            value &= ~7;
            cpu->env.v7m.fpcar[attrs.secure] = value;
        }
        break;
    case 0xf3c: /* FPDSCR */
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;
            if (cpu_isar_feature(any_fp16, cpu)) {
                mask |= FPCR_FZ16;
            }
            value &= mask;
            if (cpu_isar_feature(aa32_lob, cpu)) {
                value |= 4 << FPCR_LTPSIZE_SHIFT;
            }
            cpu->env.v7m.fpdscr[attrs.secure] = value;
        }
        break;
    case 0xf50: /* ICIALLU */
    case 0xf58: /* ICIMVAU */
    case 0xf5c: /* DCIMVAC */
    case 0xf60: /* DCISW */
    case 0xf64: /* DCCMVAU */
    case 0xf68: /* DCCMVAC */
    case 0xf6c: /* DCCSW */
    case 0xf70: /* DCCIMVAC */
    case 0xf74: /* DCCISW */
    case 0xf78: /* BPIALL */
        /* Cache and branch predictor maintenance: for QEMU these always NOP */
        break;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "NVIC: Bad write offset 0x%x\n", offset);
    }
}
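
/*
 * Illustrative guest-side sketch (not part of the device model): the
 * MPU_RNR/MPU_RBAR/MPU_RASR cases above implement the v7M register
 * interface that bare-metal guest code programs roughly like this.
 * The SCS addresses are architectural; the region size and attribute
 * values are arbitrary example choices.
 *
 *     volatile uint32_t *mpu_rnr  = (volatile uint32_t *)0xe000ed98;
 *     volatile uint32_t *mpu_rbar = (volatile uint32_t *)0xe000ed9c;
 *     volatile uint32_t *mpu_rasr = (volatile uint32_t *)0xe000eda0;
 *
 *     *mpu_rnr  = 0;                // select region 0
 *     *mpu_rbar = 0x20000000;       // base address (VALID bit clear)
 *     *mpu_rasr = (0x3 << 24)       // AP = 0b011: full access
 *               | (17 << 1)         // SIZE = 17: 2^18 bytes = 256KB
 *               | 1;                // ENABLE
 */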
static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
{
    /* Return true if unprivileged access to this register is permitted. */
    switch (offset) {
    case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
        /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
         * controls access even though the CPU is in Secure state (I_QDKX).
         */
        return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
    default:
        /* All other user accesses cause a BusFault unconditionally */
        return false;
    }
}
static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
{
    /* Behaviour for the SHPR register field for this exception:
     * return M_REG_NS to use the nonsecure vector (including for
     * non-banked exceptions), M_REG_S for the secure version of
     * a banked exception, and -1 if this field should RAZ/WI.
     */
    switch (exc) {
    case ARMV7M_EXCP_MEM:
    case ARMV7M_EXCP_USAGE:
    case ARMV7M_EXCP_SVC:
    case ARMV7M_EXCP_PENDSV:
    case ARMV7M_EXCP_SYSTICK:
        /* Banked exceptions */
        return attrs.secure;
    case ARMV7M_EXCP_BUS:
        /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            return -1;
        }
        return M_REG_NS;
    case ARMV7M_EXCP_SECURE:
        /* Not banked, RAZ/WI from nonsecure */
        if (!attrs.secure) {
            return -1;
        }
        return M_REG_NS;
    case ARMV7M_EXCP_DEBUG:
        /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
        return M_REG_NS;
    case 8 ... 10:
    case 13:
        /* RES0 */
        return -1;
    default:
        /* Not reachable due to decode of SHPR register addresses */
        g_assert_not_reached();
    }
}
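
/*
 * Worked example of the SHPR indexing used by the accessors below
 * (illustrative): the SHPR1..SHPR3 byte array starts at SCS offset
 * 0xd18 and byte n holds the priority of exception n + 4, hence
 * hdlidx = (offset - 0xd14) + i. A guest byte write to 0xe000ed22
 * (offset 0xd22) therefore targets handler 14 (PendSV), so dropping
 * PendSV to the lowest priority is:
 *
 *     *(volatile uint8_t *)0xe000ed22 = 0xff;
 */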
static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
                                    uint64_t *data, unsigned size,
                                    MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    uint32_t val;

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    /* reads of set and clear both return the status */
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        val = 0;
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].enabled &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        offset += 0x80;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        val = 0;
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].pending &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x300 ... 0x33f: /* NVIC Active */
        val = 0;

        if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
            break;
        }

        startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].active &&
                (attrs.secure || s->itns[startvec + i])) {
                val |= (1 << i);
            }
        }
        break;
    case 0x400 ... 0x5ef: /* NVIC Priority */
        val = 0;
        startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                val |= s->vectors[startvec + i].prio << (8 * i);
            }
        }
        break;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            val = 0;
            break;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        val = 0;
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                continue;
            }
            val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
        }
        break;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            val = 0;
            break;
        }
        /*
         * The BFSR bits [15:8] are shared between security states
         * and we store them in the NS copy. They are RAZ/WI for
         * NS code if AIRCR.BFHFNMINS is 0.
         */
        val = s->cpu->env.v7m.cfsr[attrs.secure];
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            val &= ~R_V7M_CFSR_BFSR_MASK;
        } else {
            val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
        }
        val = extract32(val, (offset - 0xd28) * 8, size * 8);
        break;
    case 0xfe0 ... 0xfff: /* ID. */
        if (offset & 3) {
            val = 0;
        } else {
            val = nvic_id[(offset - 0xfe0) >> 2];
        }
        break;
    default:
        if (size == 4) {
            val = nvic_readl(s, offset, attrs);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "NVIC: Bad read of size %d at offset 0x%x\n",
                          size, offset);
            val = 0;
        }
        break;
    }

    trace_nvic_sysreg_read(addr, val, size);
    *data = val;
    return MEMTX_OK;
}
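
/*
 * Illustrative note on the CFSR byte-lane handling above: the
 * architectural sub-registers MMFSR/BFSR/UFSR are just byte and
 * halfword views of CFSR, which the extract32() provides. E.g. a
 * guest reading BFSR as a single byte:
 *
 *     uint8_t bfsr = *(volatile uint8_t *)0xe000ed29;
 *     // offset 0xd29, size 1 -> CFSR bits [15:8]
 */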
static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    unsigned setval = 0;

    trace_nvic_sysreg_write(addr, value, size);

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0x300 ... 0x33f: /* NVIC Active */
        goto exit_ok; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            goto exit_ok;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int newprio = extract32(value, i * 8, 8);
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                continue;
            }
            set_prio(s, hdlidx, sbank, newprio);
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            goto exit_ok;
        }
        /* All bits are W1C, so construct 32 bit value with 0s in
         * the parts not written by the access size
         */
        value <<= ((offset - 0xd28) * 8);

        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            /* BFSR bits are RAZ/WI for NS if AIRCR.BFHFNMINS is 0 */
            value &= ~R_V7M_CFSR_BFSR_MASK;
        }

        s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
        if (attrs.secure) {
            /* The BFSR bits [15:8] are shared between security states
             * and we store them in the NS copy.
             */
            s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
        }
        goto exit_ok;
    }

    if (size == 4) {
        nvic_writel(s, offset, value, attrs);
        goto exit_ok;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; treat as RAZ/WI */

 exit_ok:
    /* Ensure any changes made are reflected in the cached hflags. */
    arm_rebuild_hflags(&s->cpu->env);
    return MEMTX_OK;
}
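
/*
 * From the guest's point of view the CFSR handling above is the usual
 * write-one-to-clear idiom (illustrative sketch):
 *
 *     volatile uint32_t *cfsr = (volatile uint32_t *)0xe000ed28;
 *     uint32_t faults = *cfsr;   // read the accumulated fault bits
 *     *cfsr = faults;            // writing them back clears them
 */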
static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
                                        uint64_t value, unsigned size,
                                        MemTxAttrs attrs)
{
    MemoryRegion *mr = opaque;

    if (attrs.secure) {
        /* S accesses to the alias act like NS accesses to the real region */
        attrs.secure = 0;
        return memory_region_dispatch_write(mr, addr, value,
                                            size_memop(size) | MO_TE, attrs);
    } else {
        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
        if (attrs.user) {
            return MEMTX_ERROR;
        }
        return MEMTX_OK;
    }
}

static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
                                       uint64_t *data, unsigned size,
                                       MemTxAttrs attrs)
{
    MemoryRegion *mr = opaque;

    if (attrs.secure) {
        /* S accesses to the alias act like NS accesses to the real region */
        attrs.secure = 0;
        return memory_region_dispatch_read(mr, addr, data,
                                           size_memop(size) | MO_TE, attrs);
    } else {
        /* NS attrs are RAZ/WI for privileged, and BusFault for user */
        if (attrs.user) {
            return MEMTX_ERROR;
        }
        *data = 0;
        return MEMTX_OK;
    }
}

static const MemoryRegionOps nvic_sysreg_ns_ops = {
    .read_with_attrs = nvic_sysreg_ns_read,
    .write_with_attrs = nvic_sysreg_ns_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
                                      uint64_t value, unsigned size,
                                      MemTxAttrs attrs)
{
    NVICState *s = opaque;
    MemoryRegion *mr;

    /* Direct the access to the correct systick */
    mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
    return memory_region_dispatch_write(mr, addr, value,
                                        size_memop(size) | MO_TE, attrs);
}

static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
                                     uint64_t *data, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = opaque;
    MemoryRegion *mr;

    /* Direct the access to the correct systick */
    mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
    return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE,
                                       attrs);
}

static const MemoryRegionOps nvic_systick_ops = {
    .read_with_attrs = nvic_systick_read,
    .write_with_attrs = nvic_systick_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult ras_read(void *opaque, hwaddr addr,
                            uint64_t *data, unsigned size,
                            MemTxAttrs attrs)
{
    if (attrs.user) {
        return MEMTX_ERROR;
    }

    switch (addr) {
    case 0xe10: /* ERRIIDR */
        /* architect field = Arm; product/variant/revision 0 */
        *data = 0x43b;
        break;
    case 0xfc8: /* ERRDEVID */
        /* Minimal RAS: we implement 0 error record indexes */
        *data = 0;
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n",
                      (uint32_t)addr);
        *data = 0;
        break;
    }
    return MEMTX_OK;
}

static MemTxResult ras_write(void *opaque, hwaddr addr,
                             uint64_t value, unsigned size,
                             MemTxAttrs attrs)
{
    if (attrs.user) {
        return MEMTX_ERROR;
    }

    switch (addr) {
    default:
        qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n",
                      (uint32_t)addr);
        break;
    }
    return MEMTX_OK;
}

static const MemoryRegionOps ras_ops = {
    .read_with_attrs = ras_read,
    .write_with_attrs = ras_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
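
/*
 * For reference (illustrative): with this minimal-RAS block mapped at
 * container offset 0x5000 (see armv7m_nvic_realize() below), a
 * privileged guest read of ERRIIDR comes out as:
 *
 *     uint32_t id = *(volatile uint32_t *)0xe0005e10;   // -> 0x43b (Arm)
 */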
/*
 * Unassigned portions of the PPB space are RAZ/WI for privileged
 * accesses, and fault for non-privileged accesses.
 */
static MemTxResult ppb_default_read(void *opaque, hwaddr addr,
                                    uint64_t *data, unsigned size,
                                    MemTxAttrs attrs)
{
    qemu_log_mask(LOG_UNIMP, "Read of unassigned area of PPB: offset 0x%x\n",
                  (uint32_t)addr);
    if (attrs.user) {
        return MEMTX_ERROR;
    }
    *data = 0;
    return MEMTX_OK;
}

static MemTxResult ppb_default_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    qemu_log_mask(LOG_UNIMP, "Write of unassigned area of PPB: offset 0x%x\n",
                  (uint32_t)addr);
    if (attrs.user) {
        return MEMTX_ERROR;
    }
    return MEMTX_OK;
}

static const MemoryRegionOps ppb_default_ops = {
    .read_with_attrs = ppb_default_read,
    .write_with_attrs = ppb_default_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
};
static int nvic_post_load(void *opaque, int version_id)
{
    NVICState *s = opaque;
    unsigned i;
    int resetprio;

    /* Check for out of range priority settings */
    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;

    if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
        s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
        s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
        return 1;
    }
    for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
        if (s->vectors[i].prio & ~0xff) {
            return 1;
        }
    }

    nvic_recompute_state(s);

    return 0;
}

static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};
static bool nvic_security_needed(void *opaque)
{
    NVICState *s = opaque;

    return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
}

static int nvic_security_post_load(void *opaque, int version_id)
{
    NVICState *s = opaque;
    int i;

    /* Check for out of range priority settings */
    if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
        && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
        /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
         * if the CPU state has been migrated yet; a mismatch won't
         * cause the emulation to blow up, though.
         */
        return 1;
    }
    for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
        if (s->sec_vectors[i].prio & ~0xff) {
            return 1;
        }
    }
    return 0;
}

static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_nvic_security,
        NULL
    }
};
static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};
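
/*
 * A board or SoC model sets this property before realize; a minimal
 * sketch (the count 96 is an arbitrary SoC-specific value, and
 * 'soc->nvic' is assumed to be an unrealized NVICState child):
 *
 *     qdev_prop_set_uint32(DEVICE(&soc->nvic), "num-irq", 96);
 */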
static void armv7m_nvic_reset(DeviceState *dev)
{
    int resetprio;
    NVICState *s = NVIC(dev);

    memset(s->vectors, 0, sizeof(s->vectors));
    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
    s->prigroup[M_REG_NS] = 0;
    s->prigroup[M_REG_S] = 0;

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    /* MEM, BUS, and USAGE are enabled through
     * the System Handler Control register
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    /* DebugMonitor is enabled via DEMCR.MON_EN */
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0;

    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
    s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

        /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
        s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
        /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
        s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
    } else {
        s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    }

    /* Strictly speaking the reset handler should be enabled.
     * However, we don't simulate soft resets through the NVIC,
     * and the reset vector should never be pended.
     * So we leave it disabled to catch logic errors.
     */

    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
    s->vectpending_is_s_banked = false;
    s->vectpending_prio = NVIC_NOEXC_PRIO;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        memset(s->itns, 0, sizeof(s->itns));
    } else {
        /* This state is constant and not guest accessible in a non-security
         * NVIC; we set the bits to true to avoid having to do a feature
         * bit check in the NVIC enable/pend/etc register accessors.
         */
        int i;

        for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
            s->itns[i] = true;
        }
    }

    /*
     * We updated state that affects the CPU's MMUidx and thus its hflags;
     * and we can't guarantee that we run before the CPU reset function.
     */
    arm_rebuild_hflags(&s->cpu->env);
}
static void nvic_systick_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    if (level) {
        /* SysTick just asked us to pend its exception.
         * (This is different from an external interrupt line's
         * behaviour.)
         * n == 0 : NonSecure systick
         * n == 1 : Secure systick
         */
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
    }
}
static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);

    /* The armv7m container object will have set our CPU pointer */
    if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
        error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
        return;
    }

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;

    if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), errp)) {
        return;
    }
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
                       qdev_get_gpio_in_named(dev, "systick-trigger",
                                              M_REG_NS));

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        /* We couldn't init the secure systick device in instance_init
         * as we didn't know then if the CPU had the security extensions;
         * so we have to do it here.
         */
        object_initialize_child(OBJECT(dev), "systick-reg-s",
                                &s->systick[M_REG_S], TYPE_SYSTICK);

        if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_S]), errp)) {
            return;
        }
        sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
                           qdev_get_gpio_in_named(dev, "systick-trigger",
                                                  M_REG_S));
    }

    /*
     * This device provides a single sysbus memory region which
     * represents the whole of the "System PPB" space. This is the
     * range from 0xe0000000 to 0xe00fffff and includes the NVIC,
     * the System Control Space (system registers), the systick timer,
     * and for CPUs with the Security extension an NS banked version
     * of all of these.
     *
     * The default behaviour for unimplemented registers/ranges
     * (for instance the Data Watchpoint and Trace unit at 0xe0001000)
     * is to RAZ/WI for privileged access and BusFault for non-privileged
     * access.
     *
     * The NVIC and System Control Space (SCS) starts at 0xe000e000
     * and looks like this:
     *  0x004 - ICTR
     *  0x010 - 0xff - systick
     *  0x100..0x7ec - NVIC
     *  0x7f0..0xcff - Reserved
     *  0xd00..0xd3c - SCS registers
     *  0xd40..0xeff - Reserved or Not implemented
     *  0xf00 - STIR
     *
     * Some registers within this space are banked between security states.
     * In v8M there is a second range 0xe002e000..0xe002efff which is the
     * NonSecure alias SCS; secure accesses to this behave like NS accesses
     * to the main SCS range, and non-secure accesses (including when
     * the security extension is not implemented) are RAZ/WI.
     * Note that both the main SCS range and the alias range are defined
     * to be exempt from memory attribution (R_BLJT) and so the memory
     * transaction attribute always matches the current CPU security
     * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops
     * wrappers we change attrs.secure to indicate the NS access; so
     * generally code determining which banked register to use should
     * use attrs.secure; code determining actual behaviour of the system
     * should use env->v7m.secure.
     *
     * The container covers the whole PPB space. Within it the priority
     * of overlapping regions is:
     *  - default region (for RAZ/WI and BusFault) : -1
     *  - system register regions : 0
     *  - systick : 1
     * This is because the systick device is a small block of registers
     * in the middle of the other system control registers.
     */
    memory_region_init(&s->container, OBJECT(s), "nvic", 0x100000);
    memory_region_init_io(&s->defaultmem, OBJECT(s), &ppb_default_ops, s,
                          "nvic-default", 0x100000);
    memory_region_add_subregion_overlap(&s->container, 0, &s->defaultmem, -1);
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    memory_region_add_subregion(&s->container, 0xe000, &s->sysregmem);

    memory_region_init_io(&s->systickmem, OBJECT(s),
                          &nvic_systick_ops, s,
                          "nvic_systick", 0xe0);

    memory_region_add_subregion_overlap(&s->container, 0xe010,
                                        &s->systickmem, 1);

    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->sysregmem,
                              "nvic_sysregs_ns", 0x1000);
        memory_region_add_subregion(&s->container, 0x2e000, &s->sysreg_ns_mem);
        memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
                              &nvic_sysreg_ns_ops, &s->systickmem,
                              "nvic_systick_ns", 0xe0);
        memory_region_add_subregion_overlap(&s->container, 0x2e010,
                                            &s->systick_ns_mem, 1);
    }

    if (cpu_isar_feature(aa32_ras, s->cpu)) {
        memory_region_init_io(&s->ras_mem, OBJECT(s),
                              &ras_ops, s, "nvic_ras", 0x1000);
        memory_region_add_subregion(&s->container, 0x5000, &s->ras_mem);
    }

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
}
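
/*
 * The armv7m container is expected to map this single MMIO region at
 * the architectural PPB base; a minimal sketch (assuming 'dev' is the
 * realized NVIC):
 *
 *     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xe0000000);
 */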
static void armv7m_nvic_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize_child(obj, "systick-reg-ns", &nvic->systick[M_REG_NS],
                            TYPE_SYSTICK);
    /* We can't initialize the secure systick here, as we don't know
     * yet if we need it.
     */

    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
                            M_REG_NUM_BANKS);
    qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
}

static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_nvic;
    device_class_set_props(dc, props_nvic);
    dc->reset = armv7m_nvic_reset;
    dc->realize = armv7m_nvic_realize;
}

static const TypeInfo armv7m_nvic_info = {
    .name = TYPE_NVIC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init = armv7m_nvic_class_init,
    .class_size = sizeof(SysBusDeviceClass),
};

static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)