2 * ARM Nested Vectored Interrupt Controller
4 * Copyright (c) 2006-2007 CodeSourcery.
5 * Written by Paul Brook
7 * This code is licensed under the GPL.
9 * The ARMv7M System controller is fairly tightly tied in with the
10 * NVIC. Much of that is also implemented here.
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "hw/sysbus.h"
16 #include "migration/vmstate.h"
17 #include "qemu/timer.h"
18 #include "hw/intc/armv7m_nvic.h"
20 #include "hw/qdev-properties.h"
21 #include "sysemu/runstate.h"
22 #include "target/arm/cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/memop.h"
26 #include "qemu/module.h"
29 /* IRQ number counting:
31 * the num-irq property counts the number of external IRQ lines
33 * NVICState::num_irq counts the total number of exceptions
34 * (external IRQs, the 15 internal exceptions including reset,
35 * and one for the unused exception number 0).
37 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
39 * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
41 * Iterating through all exceptions should typically be done with
42 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
44 * The external qemu_irq lines are the NVIC's external IRQ lines,
45 * so line 0 is exception 16.
47 * In the terminology of the architecture manual, "interrupts" are
48 * a subcategory of exception referring to the external interrupts
49 * (which are exception numbers NVIC_FIRST_IRQ and upward).
50 * For historical reasons QEMU tends to use "interrupt" and
51 * "exception" more or less interchangeably.
/* First external-interrupt exception number (exceptions below this
 * are the fixed internal/system exceptions).
 */
#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
/* Maximum number of external IRQ lines (total vectors minus internals) */
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)

/* Effective running priority of the CPU when no exception is active
 * (higher than the highest possible priority value)
 */
#define NVIC_NOEXC_PRIO 0x100
/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
#define NVIC_NS_PRIO_LIMIT 0x80
/* Peripheral ID registers exposed at the top of the NVIC region */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
67 static void signal_sysresetreq(NVICState
*s
)
69 if (qemu_irq_is_connected(s
->sysresetreq
)) {
70 qemu_irq_pulse(s
->sysresetreq
);
73 * Default behaviour if the SoC doesn't need to wire up
74 * SYSRESETREQ (eg to a system reset controller of some kind):
75 * perform a system reset via the usual QEMU API.
77 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET
);
81 static int nvic_pending_prio(NVICState
*s
)
83 /* return the group priority of the current pending interrupt,
84 * or NVIC_NOEXC_PRIO if no interrupt is pending
86 return s
->vectpending_prio
;
89 /* Return the value of the ISCR RETTOBASE bit:
90 * 1 if there is exactly one active exception
91 * 0 if there is more than one active exception
92 * UNKNOWN if there are no active exceptions (we choose 1,
93 * which matches the choice Cortex-M3 is documented as making).
95 * NB: some versions of the documentation talk about this
96 * counting "active exceptions other than the one shown by IPSR";
97 * this is only different in the obscure corner case where guest
98 * code has manually deactivated an exception and is about
99 * to fail an exception-return integrity check. The definition
100 * above is the one from the v8M ARM ARM and is also in line
101 * with the behaviour documented for the Cortex-M3.
103 static bool nvic_rettobase(NVICState
*s
)
106 bool check_sec
= arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_SECURITY
);
108 for (irq
= ARMV7M_EXCP_RESET
; irq
< s
->num_irq
; irq
++) {
109 if (s
->vectors
[irq
].active
||
110 (check_sec
&& irq
< NVIC_INTERNAL_VECTORS
&&
111 s
->sec_vectors
[irq
].active
)) {
122 /* Return the value of the ISCR ISRPENDING bit:
123 * 1 if an external interrupt is pending
124 * 0 if no external interrupt is pending
126 static bool nvic_isrpending(NVICState
*s
)
131 * We can shortcut if the highest priority pending interrupt
132 * happens to be external; if not we need to check the whole
135 if (s
->vectpending
> NVIC_FIRST_IRQ
) {
139 for (irq
= NVIC_FIRST_IRQ
; irq
< s
->num_irq
; irq
++) {
140 if (s
->vectors
[irq
].pending
) {
147 static bool exc_is_banked(int exc
)
149 /* Return true if this is one of the limited set of exceptions which
150 * are banked (and thus have state in sec_vectors[])
152 return exc
== ARMV7M_EXCP_HARD
||
153 exc
== ARMV7M_EXCP_MEM
||
154 exc
== ARMV7M_EXCP_USAGE
||
155 exc
== ARMV7M_EXCP_SVC
||
156 exc
== ARMV7M_EXCP_PENDSV
||
157 exc
== ARMV7M_EXCP_SYSTICK
;
160 /* Return a mask word which clears the subpriority bits from
161 * a priority value for an M-profile exception, leaving only
162 * the group priority.
164 static inline uint32_t nvic_gprio_mask(NVICState
*s
, bool secure
)
166 return ~0U << (s
->prigroup
[secure
] + 1);
169 static bool exc_targets_secure(NVICState
*s
, int exc
)
171 /* Return true if this non-banked exception targets Secure state. */
172 if (!arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_SECURITY
)) {
176 if (exc
>= NVIC_FIRST_IRQ
) {
177 return !s
->itns
[exc
];
180 /* Function shouldn't be called for banked exceptions. */
181 assert(!exc_is_banked(exc
));
184 case ARMV7M_EXCP_NMI
:
185 case ARMV7M_EXCP_BUS
:
186 return !(s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
);
187 case ARMV7M_EXCP_SECURE
:
189 case ARMV7M_EXCP_DEBUG
:
190 /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
193 /* reset, and reserved (unused) low exception numbers.
194 * We'll get called by code that loops through all the exception
195 * numbers, but it doesn't matter what we return here as these
196 * non-existent exceptions will never be pended or active.
202 static int exc_group_prio(NVICState
*s
, int rawprio
, bool targets_secure
)
204 /* Return the group priority for this exception, given its raw
205 * (group-and-subgroup) priority value and whether it is targeting
206 * secure state or not.
211 rawprio
&= nvic_gprio_mask(s
, targets_secure
);
212 /* AIRCR.PRIS causes us to squash all NS priorities into the
213 * lower half of the total range
215 if (!targets_secure
&&
216 (s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_PRIS_MASK
)) {
217 rawprio
= (rawprio
>> 1) + NVIC_NS_PRIO_LIMIT
;
222 /* Recompute vectpending and exception_prio for a CPU which implements
223 * the Security extension
225 static void nvic_recompute_state_secure(NVICState
*s
)
228 int pend_prio
= NVIC_NOEXC_PRIO
;
229 int active_prio
= NVIC_NOEXC_PRIO
;
231 bool pending_is_s_banked
= false;
232 int pend_subprio
= 0;
234 /* R_CQRV: precedence is by:
235 * - lowest group priority; if both the same then
236 * - lowest subpriority; if both the same then
237 * - lowest exception number; if both the same (ie banked) then
238 * - secure exception takes precedence
239 * Compare pseudocode RawExecutionPriority.
240 * Annoyingly, now we have two prigroup values (for S and NS)
241 * we can't do the loop comparison on raw priority values.
243 for (i
= 1; i
< s
->num_irq
; i
++) {
244 for (bank
= M_REG_S
; bank
>= M_REG_NS
; bank
--) {
249 if (bank
== M_REG_S
) {
250 if (!exc_is_banked(i
)) {
253 vec
= &s
->sec_vectors
[i
];
254 targets_secure
= true;
256 vec
= &s
->vectors
[i
];
257 targets_secure
= !exc_is_banked(i
) && exc_targets_secure(s
, i
);
260 prio
= exc_group_prio(s
, vec
->prio
, targets_secure
);
261 subprio
= vec
->prio
& ~nvic_gprio_mask(s
, targets_secure
);
262 if (vec
->enabled
&& vec
->pending
&&
263 ((prio
< pend_prio
) ||
264 (prio
== pend_prio
&& prio
>= 0 && subprio
< pend_subprio
))) {
266 pend_subprio
= subprio
;
268 pending_is_s_banked
= (bank
== M_REG_S
);
270 if (vec
->active
&& prio
< active_prio
) {
276 s
->vectpending_is_s_banked
= pending_is_s_banked
;
277 s
->vectpending
= pend_irq
;
278 s
->vectpending_prio
= pend_prio
;
279 s
->exception_prio
= active_prio
;
281 trace_nvic_recompute_state_secure(s
->vectpending
,
282 s
->vectpending_is_s_banked
,
287 /* Recompute vectpending and exception_prio */
288 static void nvic_recompute_state(NVICState
*s
)
291 int pend_prio
= NVIC_NOEXC_PRIO
;
292 int active_prio
= NVIC_NOEXC_PRIO
;
295 /* In theory we could write one function that handled both
296 * the "security extension present" and "not present"; however
297 * the security related changes significantly complicate the
298 * recomputation just by themselves and mixing both cases together
299 * would be even worse, so we retain a separate non-secure-only
300 * version for CPUs which don't implement the security extension.
302 if (arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_SECURITY
)) {
303 nvic_recompute_state_secure(s
);
307 for (i
= 1; i
< s
->num_irq
; i
++) {
308 VecInfo
*vec
= &s
->vectors
[i
];
310 if (vec
->enabled
&& vec
->pending
&& vec
->prio
< pend_prio
) {
311 pend_prio
= vec
->prio
;
314 if (vec
->active
&& vec
->prio
< active_prio
) {
315 active_prio
= vec
->prio
;
319 if (active_prio
> 0) {
320 active_prio
&= nvic_gprio_mask(s
, false);
324 pend_prio
&= nvic_gprio_mask(s
, false);
327 s
->vectpending
= pend_irq
;
328 s
->vectpending_prio
= pend_prio
;
329 s
->exception_prio
= active_prio
;
331 trace_nvic_recompute_state(s
->vectpending
,
336 /* Return the current execution priority of the CPU
337 * (equivalent to the pseudocode ExecutionPriority function).
338 * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO.
340 static inline int nvic_exec_prio(NVICState
*s
)
342 CPUARMState
*env
= &s
->cpu
->env
;
343 int running
= NVIC_NOEXC_PRIO
;
345 if (env
->v7m
.basepri
[M_REG_NS
] > 0) {
346 running
= exc_group_prio(s
, env
->v7m
.basepri
[M_REG_NS
], M_REG_NS
);
349 if (env
->v7m
.basepri
[M_REG_S
] > 0) {
350 int basepri
= exc_group_prio(s
, env
->v7m
.basepri
[M_REG_S
], M_REG_S
);
351 if (running
> basepri
) {
356 if (env
->v7m
.primask
[M_REG_NS
]) {
357 if (env
->v7m
.aircr
& R_V7M_AIRCR_PRIS_MASK
) {
358 if (running
> NVIC_NS_PRIO_LIMIT
) {
359 running
= NVIC_NS_PRIO_LIMIT
;
366 if (env
->v7m
.primask
[M_REG_S
]) {
370 if (env
->v7m
.faultmask
[M_REG_NS
]) {
371 if (env
->v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
) {
374 if (env
->v7m
.aircr
& R_V7M_AIRCR_PRIS_MASK
) {
375 if (running
> NVIC_NS_PRIO_LIMIT
) {
376 running
= NVIC_NS_PRIO_LIMIT
;
384 if (env
->v7m
.faultmask
[M_REG_S
]) {
385 running
= (env
->v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
) ? -3 : -1;
388 /* consider priority of active handler */
389 return MIN(running
, s
->exception_prio
);
392 bool armv7m_nvic_neg_prio_requested(void *opaque
, bool secure
)
394 /* Return true if the requested execution priority is negative
395 * for the specified security state, ie that security state
396 * has an active NMI or HardFault or has set its FAULTMASK.
397 * Note that this is not the same as whether the execution
398 * priority is actually negative (for instance AIRCR.PRIS may
399 * mean we don't allow FAULTMASK_NS to actually make the execution
400 * priority negative). Compare pseudocode IsReqExcPriNeg().
402 NVICState
*s
= opaque
;
404 if (s
->cpu
->env
.v7m
.faultmask
[secure
]) {
408 if (secure
? s
->sec_vectors
[ARMV7M_EXCP_HARD
].active
:
409 s
->vectors
[ARMV7M_EXCP_HARD
].active
) {
413 if (s
->vectors
[ARMV7M_EXCP_NMI
].active
&&
414 exc_targets_secure(s
, ARMV7M_EXCP_NMI
) == secure
) {
421 bool armv7m_nvic_can_take_pending_exception(void *opaque
)
423 NVICState
*s
= opaque
;
425 return nvic_exec_prio(s
) > nvic_pending_prio(s
);
428 int armv7m_nvic_raw_execution_priority(void *opaque
)
430 NVICState
*s
= opaque
;
432 return s
->exception_prio
;
435 /* caller must call nvic_irq_update() after this.
436 * secure indicates the bank to use for banked exceptions (we assert if
437 * we are passed secure=true for a non-banked exception).
439 static void set_prio(NVICState
*s
, unsigned irq
, bool secure
, uint8_t prio
)
441 assert(irq
> ARMV7M_EXCP_NMI
); /* only use for configurable prios */
442 assert(irq
< s
->num_irq
);
444 prio
&= MAKE_64BIT_MASK(8 - s
->num_prio_bits
, s
->num_prio_bits
);
447 assert(exc_is_banked(irq
));
448 s
->sec_vectors
[irq
].prio
= prio
;
450 s
->vectors
[irq
].prio
= prio
;
453 trace_nvic_set_prio(irq
, secure
, prio
);
456 /* Return the current raw priority register value.
457 * secure indicates the bank to use for banked exceptions (we assert if
458 * we are passed secure=true for a non-banked exception).
460 static int get_prio(NVICState
*s
, unsigned irq
, bool secure
)
462 assert(irq
> ARMV7M_EXCP_NMI
); /* only use for configurable prios */
463 assert(irq
< s
->num_irq
);
466 assert(exc_is_banked(irq
));
467 return s
->sec_vectors
[irq
].prio
;
469 return s
->vectors
[irq
].prio
;
473 /* Recompute state and assert irq line accordingly.
474 * Must be called after changes to:
475 * vec->active, vec->enabled, vec->pending or vec->prio for any vector
478 static void nvic_irq_update(NVICState
*s
)
483 nvic_recompute_state(s
);
484 pend_prio
= nvic_pending_prio(s
);
486 /* Raise NVIC output if this IRQ would be taken, except that we
487 * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
488 * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
489 * to those CPU registers don't cause us to recalculate the NVIC
492 lvl
= (pend_prio
< s
->exception_prio
);
493 trace_nvic_irq_update(s
->vectpending
, pend_prio
, s
->exception_prio
, lvl
);
494 qemu_set_irq(s
->excpout
, lvl
);
498 * armv7m_nvic_clear_pending: mark the specified exception as not pending
500 * @irq: the exception number to mark as not pending
501 * @secure: false for non-banked exceptions or for the nonsecure
502 * version of a banked exception, true for the secure version of a banked
505 * Marks the specified exception as not pending. Note that we will assert()
506 * if @secure is true and @irq does not specify one of the fixed set
507 * of architecturally banked exceptions.
509 static void armv7m_nvic_clear_pending(void *opaque
, int irq
, bool secure
)
511 NVICState
*s
= (NVICState
*)opaque
;
514 assert(irq
> ARMV7M_EXCP_RESET
&& irq
< s
->num_irq
);
517 assert(exc_is_banked(irq
));
518 vec
= &s
->sec_vectors
[irq
];
520 vec
= &s
->vectors
[irq
];
522 trace_nvic_clear_pending(irq
, secure
, vec
->enabled
, vec
->prio
);
529 static void do_armv7m_nvic_set_pending(void *opaque
, int irq
, bool secure
,
532 /* Pend an exception, including possibly escalating it to HardFault.
534 * This function handles both "normal" pending of interrupts and
535 * exceptions, and also derived exceptions (ones which occur as
536 * a result of trying to take some other exception).
538 * If derived == true, the caller guarantees that we are part way through
539 * trying to take an exception (but have not yet called
540 * armv7m_nvic_acknowledge_irq() to make it active), and so:
541 * - s->vectpending is the "original exception" we were trying to take
542 * - irq is the "derived exception"
543 * - nvic_exec_prio(s) gives the priority before exception entry
544 * Here we handle the prioritization logic which the pseudocode puts
545 * in the DerivedLateArrival() function.
548 NVICState
*s
= (NVICState
*)opaque
;
549 bool banked
= exc_is_banked(irq
);
553 assert(irq
> ARMV7M_EXCP_RESET
&& irq
< s
->num_irq
);
554 assert(!secure
|| banked
);
556 vec
= (banked
&& secure
) ? &s
->sec_vectors
[irq
] : &s
->vectors
[irq
];
558 targets_secure
= banked
? secure
: exc_targets_secure(s
, irq
);
560 trace_nvic_set_pending(irq
, secure
, targets_secure
,
561 derived
, vec
->enabled
, vec
->prio
);
564 /* Derived exceptions are always synchronous. */
565 assert(irq
>= ARMV7M_EXCP_HARD
&& irq
< ARMV7M_EXCP_PENDSV
);
567 if (irq
== ARMV7M_EXCP_DEBUG
&&
568 exc_group_prio(s
, vec
->prio
, secure
) >= nvic_exec_prio(s
)) {
569 /* DebugMonitorFault, but its priority is lower than the
570 * preempted exception priority: just ignore it.
575 if (irq
== ARMV7M_EXCP_HARD
&& vec
->prio
>= s
->vectpending_prio
) {
576 /* If this is a terminal exception (one which means we cannot
577 * take the original exception, like a failure to read its
578 * vector table entry), then we must take the derived exception.
579 * If the derived exception can't take priority over the
580 * original exception, then we go into Lockup.
582 * For QEMU, we rely on the fact that a derived exception is
583 * terminal if and only if it's reported to us as HardFault,
584 * which saves having to have an extra argument is_terminal
585 * that we'd only use in one place.
587 cpu_abort(&s
->cpu
->parent_obj
,
588 "Lockup: can't take terminal derived exception "
589 "(original exception priority %d)\n",
590 s
->vectpending_prio
);
592 /* We now continue with the same code as for a normal pending
593 * exception, which will cause us to pend the derived exception.
594 * We'll then take either the original or the derived exception
595 * based on which is higher priority by the usual mechanism
596 * for selecting the highest priority pending interrupt.
600 if (irq
>= ARMV7M_EXCP_HARD
&& irq
< ARMV7M_EXCP_PENDSV
) {
601 /* If a synchronous exception is pending then it may be
602 * escalated to HardFault if:
603 * * it is equal or lower priority to current execution
605 * (ie we need to take it immediately but we can't do so).
606 * Asynchronous exceptions (and interrupts) simply remain pending.
608 * For QEMU, we don't have any imprecise (asynchronous) faults,
609 * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
611 * Debug exceptions are awkward because only Debug exceptions
612 * resulting from the BKPT instruction should be escalated,
613 * but we don't currently implement any Debug exceptions other
614 * than those that result from BKPT, so we treat all debug exceptions
615 * as needing escalation.
617 * This all means we can identify whether to escalate based only on
618 * the exception number and don't (yet) need the caller to explicitly
619 * tell us whether this exception is synchronous or not.
621 int running
= nvic_exec_prio(s
);
622 bool escalate
= false;
624 if (exc_group_prio(s
, vec
->prio
, secure
) >= running
) {
625 trace_nvic_escalate_prio(irq
, vec
->prio
, running
);
627 } else if (!vec
->enabled
) {
628 trace_nvic_escalate_disabled(irq
);
634 /* We need to escalate this exception to a synchronous HardFault.
635 * If BFHFNMINS is set then we escalate to the banked HF for
636 * the target security state of the original exception; otherwise
637 * we take a Secure HardFault.
639 irq
= ARMV7M_EXCP_HARD
;
640 if (arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_SECURITY
) &&
642 !(s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
))) {
643 vec
= &s
->sec_vectors
[irq
];
645 vec
= &s
->vectors
[irq
];
647 if (running
<= vec
->prio
) {
648 /* We want to escalate to HardFault but we can't take the
649 * synchronous HardFault at this point either. This is a
650 * Lockup condition due to a guest bug. We don't model
651 * Lockup, so report via cpu_abort() instead.
653 cpu_abort(&s
->cpu
->parent_obj
,
654 "Lockup: can't escalate %d to HardFault "
655 "(current priority %d)\n", irq
, running
);
658 /* HF may be banked but there is only one shared HFSR */
659 s
->cpu
->env
.v7m
.hfsr
|= R_V7M_HFSR_FORCED_MASK
;
/* Pend an exception via the normal (non-derived) path. */
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
}
/* Pend a derived exception (one raised while taking another exception). */
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}
679 void armv7m_nvic_set_pending_lazyfp(void *opaque
, int irq
, bool secure
)
682 * Pend an exception during lazy FP stacking. This differs
683 * from the usual exception pending because the logic for
684 * whether we should escalate depends on the saved context
685 * in the FPCCR register, not on the current state of the CPU/NVIC.
687 NVICState
*s
= (NVICState
*)opaque
;
688 bool banked
= exc_is_banked(irq
);
691 bool escalate
= false;
693 * We will only look at bits in fpccr if this is a banked exception
694 * (in which case 'secure' tells us whether it is the S or NS version).
695 * All the bits for the non-banked exceptions are in fpccr_s.
697 uint32_t fpccr_s
= s
->cpu
->env
.v7m
.fpccr
[M_REG_S
];
698 uint32_t fpccr
= s
->cpu
->env
.v7m
.fpccr
[secure
];
700 assert(irq
> ARMV7M_EXCP_RESET
&& irq
< s
->num_irq
);
701 assert(!secure
|| banked
);
703 vec
= (banked
&& secure
) ? &s
->sec_vectors
[irq
] : &s
->vectors
[irq
];
705 targets_secure
= banked
? secure
: exc_targets_secure(s
, irq
);
708 case ARMV7M_EXCP_DEBUG
:
709 if (!(fpccr_s
& R_V7M_FPCCR_MONRDY_MASK
)) {
710 /* Ignore DebugMonitor exception */
714 case ARMV7M_EXCP_MEM
:
715 escalate
= !(fpccr
& R_V7M_FPCCR_MMRDY_MASK
);
717 case ARMV7M_EXCP_USAGE
:
718 escalate
= !(fpccr
& R_V7M_FPCCR_UFRDY_MASK
);
720 case ARMV7M_EXCP_BUS
:
721 escalate
= !(fpccr_s
& R_V7M_FPCCR_BFRDY_MASK
);
723 case ARMV7M_EXCP_SECURE
:
724 escalate
= !(fpccr_s
& R_V7M_FPCCR_SFRDY_MASK
);
727 g_assert_not_reached();
732 * Escalate to HardFault: faults that initially targeted Secure
733 * continue to do so, even if HF normally targets NonSecure.
735 irq
= ARMV7M_EXCP_HARD
;
736 if (arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_SECURITY
) &&
738 !(s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
))) {
739 vec
= &s
->sec_vectors
[irq
];
741 vec
= &s
->vectors
[irq
];
746 nvic_exec_prio(s
) <= exc_group_prio(s
, vec
->prio
, secure
)) {
747 if (!(fpccr_s
& R_V7M_FPCCR_HFRDY_MASK
)) {
749 * We want to escalate to HardFault but the context the
750 * FP state belongs to prevents the exception pre-empting.
752 cpu_abort(&s
->cpu
->parent_obj
,
753 "Lockup: can't escalate to HardFault during "
754 "lazy FP register stacking\n");
759 s
->cpu
->env
.v7m
.hfsr
|= R_V7M_HFSR_FORCED_MASK
;
764 * We do not call nvic_irq_update(), because we know our caller
765 * is going to handle causing us to take the exception by
766 * raising EXCP_LAZYFP, so raising the IRQ line would be
767 * pointless extra work. We just need to recompute the
768 * priorities so that armv7m_nvic_can_take_pending_exception()
769 * returns the right answer.
771 nvic_recompute_state(s
);
775 /* Make pending IRQ active. */
776 void armv7m_nvic_acknowledge_irq(void *opaque
)
778 NVICState
*s
= (NVICState
*)opaque
;
779 CPUARMState
*env
= &s
->cpu
->env
;
780 const int pending
= s
->vectpending
;
781 const int running
= nvic_exec_prio(s
);
784 assert(pending
> ARMV7M_EXCP_RESET
&& pending
< s
->num_irq
);
786 if (s
->vectpending_is_s_banked
) {
787 vec
= &s
->sec_vectors
[pending
];
789 vec
= &s
->vectors
[pending
];
792 assert(vec
->enabled
);
793 assert(vec
->pending
);
795 assert(s
->vectpending_prio
< running
);
797 trace_nvic_acknowledge_irq(pending
, s
->vectpending_prio
);
802 write_v7m_exception(env
, s
->vectpending
);
807 static bool vectpending_targets_secure(NVICState
*s
)
809 /* Return true if s->vectpending targets Secure state */
810 if (s
->vectpending_is_s_banked
) {
813 return !exc_is_banked(s
->vectpending
) &&
814 exc_targets_secure(s
, s
->vectpending
);
817 void armv7m_nvic_get_pending_irq_info(void *opaque
,
818 int *pirq
, bool *ptargets_secure
)
820 NVICState
*s
= (NVICState
*)opaque
;
821 const int pending
= s
->vectpending
;
824 assert(pending
> ARMV7M_EXCP_RESET
&& pending
< s
->num_irq
);
826 targets_secure
= vectpending_targets_secure(s
);
828 trace_nvic_get_pending_irq_info(pending
, targets_secure
);
830 *ptargets_secure
= targets_secure
;
834 int armv7m_nvic_complete_irq(void *opaque
, int irq
, bool secure
)
836 NVICState
*s
= (NVICState
*)opaque
;
840 assert(irq
> ARMV7M_EXCP_RESET
&& irq
< s
->num_irq
);
842 trace_nvic_complete_irq(irq
, secure
);
844 if (secure
&& exc_is_banked(irq
)) {
845 vec
= &s
->sec_vectors
[irq
];
847 vec
= &s
->vectors
[irq
];
851 * Identify illegal exception return cases. We can't immediately
852 * return at this point because we still need to deactivate
853 * (either this exception or NMI/HardFault) first.
855 if (!exc_is_banked(irq
) && exc_targets_secure(s
, irq
) != secure
) {
857 * Return from a configurable exception targeting the opposite
858 * security state from the one we're trying to complete it for.
859 * Clear vec because it's not really the VecInfo for this
860 * (irq, secstate) so we mustn't deactivate it.
864 } else if (!vec
->active
) {
865 /* Return from an inactive interrupt */
868 /* Legal return, we will return the RETTOBASE bit value to the caller */
869 ret
= nvic_rettobase(s
);
873 * For negative priorities, v8M will forcibly deactivate the appropriate
874 * NMI or HardFault regardless of what interrupt we're being asked to
875 * deactivate (compare the DeActivate() pseudocode). This is a guard
876 * against software returning from NMI or HardFault with a corrupted
877 * IPSR and leaving the CPU in a negative-priority state.
878 * v7M does not do this, but simply deactivates the requested interrupt.
880 if (arm_feature(&s
->cpu
->env
, ARM_FEATURE_V8
)) {
881 switch (armv7m_nvic_raw_execution_priority(s
)) {
883 if (s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
) {
884 vec
= &s
->vectors
[ARMV7M_EXCP_HARD
];
886 vec
= &s
->sec_vectors
[ARMV7M_EXCP_HARD
];
890 vec
= &s
->vectors
[ARMV7M_EXCP_NMI
];
893 vec
= &s
->sec_vectors
[ARMV7M_EXCP_HARD
];
906 /* Re-pend the exception if it's still held high; only
907 * happens for extenal IRQs
909 assert(irq
>= NVIC_FIRST_IRQ
);
918 bool armv7m_nvic_get_ready_status(void *opaque
, int irq
, bool secure
)
921 * Return whether an exception is "ready", i.e. it is enabled and is
922 * configured at a priority which would allow it to interrupt the
923 * current execution priority.
925 * irq and secure have the same semantics as for armv7m_nvic_set_pending():
926 * for non-banked exceptions secure is always false; for banked exceptions
927 * it indicates which of the exceptions is required.
929 NVICState
*s
= (NVICState
*)opaque
;
930 bool banked
= exc_is_banked(irq
);
932 int running
= nvic_exec_prio(s
);
934 assert(irq
> ARMV7M_EXCP_RESET
&& irq
< s
->num_irq
);
935 assert(!secure
|| banked
);
938 * HardFault is an odd special case: we always check against -1,
939 * even if we're secure and HardFault has priority -3; we never
940 * need to check for enabled state.
942 if (irq
== ARMV7M_EXCP_HARD
) {
946 vec
= (banked
&& secure
) ? &s
->sec_vectors
[irq
] : &s
->vectors
[irq
];
948 return vec
->enabled
&&
949 exc_group_prio(s
, vec
->prio
, secure
) < running
;
952 /* callback when external interrupt line is changed */
953 static void set_irq_level(void *opaque
, int n
, int level
)
955 NVICState
*s
= opaque
;
960 assert(n
>= NVIC_FIRST_IRQ
&& n
< s
->num_irq
);
962 trace_nvic_set_irq_level(n
, level
);
964 /* The pending status of an external interrupt is
965 * latched on rising edge and exception handler return.
967 * Pulsing the IRQ will always run the handler
968 * once, and the handler will re-run until the
969 * level is low when the handler completes.
971 vec
= &s
->vectors
[n
];
972 if (level
!= vec
->level
) {
975 armv7m_nvic_set_pending(s
, n
, false);
980 /* callback when external NMI line is changed */
981 static void nvic_nmi_trigger(void *opaque
, int n
, int level
)
983 NVICState
*s
= opaque
;
985 trace_nvic_set_nmi_level(level
);
988 * The architecture doesn't specify whether NMI should share
989 * the normal-interrupt behaviour of being resampled on
990 * exception handler return. We choose not to, so just
991 * set NMI pending here and don't track the current level.
994 armv7m_nvic_set_pending(s
, ARMV7M_EXCP_NMI
, false);
998 static uint32_t nvic_readl(NVICState
*s
, uint32_t offset
, MemTxAttrs attrs
)
1000 ARMCPU
*cpu
= s
->cpu
;
1004 case 4: /* Interrupt Control Type. */
1005 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V7
)) {
1008 return ((s
->num_irq
- NVIC_FIRST_IRQ
) / 32) - 1;
1009 case 0xc: /* CPPWR */
1010 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1013 /* We make the IMPDEF choice that nothing can ever go into a
1014 * non-retentive power state, which allows us to RAZ/WI this.
1017 case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
1019 int startvec
= 8 * (offset
- 0x380) + NVIC_FIRST_IRQ
;
1022 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1025 if (!attrs
.secure
) {
1029 for (i
= 0; i
< 32 && startvec
+ i
< s
->num_irq
; i
++) {
1030 if (s
->itns
[startvec
+ i
]) {
1037 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8_1M
)) {
1041 case 0xd00: /* CPUID Base. */
1043 case 0xd04: /* Interrupt Control State (ICSR) */
1045 val
= cpu
->env
.v7m
.exception
;
1047 if (s
->vectpending
) {
1049 * From v8.1M VECTPENDING must read as 1 if accessed as
1050 * NonSecure and the highest priority pending and enabled
1051 * exception targets Secure.
1053 int vp
= s
->vectpending
;
1054 if (!attrs
.secure
&& arm_feature(&cpu
->env
, ARM_FEATURE_V8_1M
) &&
1055 vectpending_targets_secure(s
)) {
1058 val
|= (vp
& 0x1ff) << 12;
1060 /* ISRPENDING - set if any external IRQ is pending */
1061 if (nvic_isrpending(s
)) {
1064 /* RETTOBASE - set if only one handler is active */
1065 if (nvic_rettobase(s
)) {
1070 if (s
->sec_vectors
[ARMV7M_EXCP_SYSTICK
].pending
) {
1074 if (s
->sec_vectors
[ARMV7M_EXCP_PENDSV
].pending
) {
1079 if (s
->vectors
[ARMV7M_EXCP_SYSTICK
].pending
) {
1083 if (s
->vectors
[ARMV7M_EXCP_PENDSV
].pending
) {
1088 if ((attrs
.secure
|| (cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
))
1089 && s
->vectors
[ARMV7M_EXCP_NMI
].pending
) {
1092 /* ISRPREEMPT: RES0 when halting debug not implemented */
1093 /* STTNS: RES0 for the Main Extension */
1095 case 0xd08: /* Vector Table Offset. */
1096 return cpu
->env
.v7m
.vecbase
[attrs
.secure
];
1097 case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1098 val
= 0xfa050000 | (s
->prigroup
[attrs
.secure
] << 8);
1100 /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
1101 val
|= cpu
->env
.v7m
.aircr
;
1103 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1104 /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
1105 * security isn't supported then BFHFNMINS is RAO (and
1106 * the bit in env.v7m.aircr is always set).
1108 val
|= cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
;
1112 case 0xd10: /* System Control. */
1113 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V7
)) {
1116 return cpu
->env
.v7m
.scr
[attrs
.secure
];
1117 case 0xd14: /* Configuration Control. */
1119 * Non-banked bits: BFHFNMIGN (stored in the NS copy of the register)
1120 * and TRD (stored in the S copy of the register)
1122 val
= cpu
->env
.v7m
.ccr
[attrs
.secure
];
1123 val
|= cpu
->env
.v7m
.ccr
[M_REG_NS
] & R_V7M_CCR_BFHFNMIGN_MASK
;
1124 /* BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0 */
1125 if (!attrs
.secure
) {
1126 if (!(cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
1127 val
&= ~R_V7M_CCR_BFHFNMIGN_MASK
;
1131 case 0xd24: /* System Handler Control and State (SHCSR) */
1132 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V7
)) {
1137 if (s
->sec_vectors
[ARMV7M_EXCP_MEM
].active
) {
1140 if (s
->sec_vectors
[ARMV7M_EXCP_HARD
].active
) {
1143 if (s
->sec_vectors
[ARMV7M_EXCP_USAGE
].active
) {
1146 if (s
->sec_vectors
[ARMV7M_EXCP_SVC
].active
) {
1149 if (s
->sec_vectors
[ARMV7M_EXCP_PENDSV
].active
) {
1152 if (s
->sec_vectors
[ARMV7M_EXCP_SYSTICK
].active
) {
1155 if (s
->sec_vectors
[ARMV7M_EXCP_USAGE
].pending
) {
1158 if (s
->sec_vectors
[ARMV7M_EXCP_MEM
].pending
) {
1161 if (s
->sec_vectors
[ARMV7M_EXCP_SVC
].pending
) {
1164 if (s
->sec_vectors
[ARMV7M_EXCP_MEM
].enabled
) {
1167 if (s
->sec_vectors
[ARMV7M_EXCP_USAGE
].enabled
) {
1170 if (s
->sec_vectors
[ARMV7M_EXCP_HARD
].pending
) {
1173 /* SecureFault is not banked but is always RAZ/WI to NS */
1174 if (s
->vectors
[ARMV7M_EXCP_SECURE
].active
) {
1177 if (s
->vectors
[ARMV7M_EXCP_SECURE
].enabled
) {
1180 if (s
->vectors
[ARMV7M_EXCP_SECURE
].pending
) {
1184 if (s
->vectors
[ARMV7M_EXCP_MEM
].active
) {
1187 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1188 /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
1189 if (s
->vectors
[ARMV7M_EXCP_HARD
].active
) {
1192 if (s
->vectors
[ARMV7M_EXCP_HARD
].pending
) {
1196 if (s
->vectors
[ARMV7M_EXCP_USAGE
].active
) {
1199 if (s
->vectors
[ARMV7M_EXCP_SVC
].active
) {
1202 if (s
->vectors
[ARMV7M_EXCP_PENDSV
].active
) {
1205 if (s
->vectors
[ARMV7M_EXCP_SYSTICK
].active
) {
1208 if (s
->vectors
[ARMV7M_EXCP_USAGE
].pending
) {
1211 if (s
->vectors
[ARMV7M_EXCP_MEM
].pending
) {
1214 if (s
->vectors
[ARMV7M_EXCP_SVC
].pending
) {
1217 if (s
->vectors
[ARMV7M_EXCP_MEM
].enabled
) {
1220 if (s
->vectors
[ARMV7M_EXCP_USAGE
].enabled
) {
1224 if (attrs
.secure
|| (cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
1225 if (s
->vectors
[ARMV7M_EXCP_BUS
].active
) {
1228 if (s
->vectors
[ARMV7M_EXCP_BUS
].pending
) {
1231 if (s
->vectors
[ARMV7M_EXCP_BUS
].enabled
) {
1234 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
) &&
1235 s
->vectors
[ARMV7M_EXCP_NMI
].active
) {
1236 /* NMIACT is not present in v7M */
1241 /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1242 if (s
->vectors
[ARMV7M_EXCP_DEBUG
].active
) {
1246 case 0xd2c: /* Hard Fault Status. */
1247 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1250 return cpu
->env
.v7m
.hfsr
;
1251 case 0xd30: /* Debug Fault Status. */
1252 return cpu
->env
.v7m
.dfsr
;
1253 case 0xd34: /* MMFAR MemManage Fault Address */
1254 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1257 return cpu
->env
.v7m
.mmfar
[attrs
.secure
];
1258 case 0xd38: /* Bus Fault Address. */
1259 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1262 if (!attrs
.secure
&&
1263 !(s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
1266 return cpu
->env
.v7m
.bfar
;
1267 case 0xd3c: /* Aux Fault Status. */
1268 /* TODO: Implement fault status registers. */
1269 qemu_log_mask(LOG_UNIMP
,
1270 "Aux Fault status registers unimplemented\n");
1272 case 0xd40: /* PFR0. */
1273 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1276 return cpu
->isar
.id_pfr0
;
1277 case 0xd44: /* PFR1. */
1278 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1281 return cpu
->isar
.id_pfr1
;
1282 case 0xd48: /* DFR0. */
1283 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1286 return cpu
->isar
.id_dfr0
;
1287 case 0xd4c: /* AFR0. */
1288 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1291 return cpu
->id_afr0
;
1292 case 0xd50: /* MMFR0. */
1293 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1296 return cpu
->isar
.id_mmfr0
;
1297 case 0xd54: /* MMFR1. */
1298 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1301 return cpu
->isar
.id_mmfr1
;
1302 case 0xd58: /* MMFR2. */
1303 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1306 return cpu
->isar
.id_mmfr2
;
1307 case 0xd5c: /* MMFR3. */
1308 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1311 return cpu
->isar
.id_mmfr3
;
1312 case 0xd60: /* ISAR0. */
1313 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1316 return cpu
->isar
.id_isar0
;
1317 case 0xd64: /* ISAR1. */
1318 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1321 return cpu
->isar
.id_isar1
;
1322 case 0xd68: /* ISAR2. */
1323 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1326 return cpu
->isar
.id_isar2
;
1327 case 0xd6c: /* ISAR3. */
1328 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1331 return cpu
->isar
.id_isar3
;
1332 case 0xd70: /* ISAR4. */
1333 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1336 return cpu
->isar
.id_isar4
;
1337 case 0xd74: /* ISAR5. */
1338 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1341 return cpu
->isar
.id_isar5
;
1342 case 0xd78: /* CLIDR */
1344 case 0xd7c: /* CTR */
1346 case 0xd80: /* CSSIDR */
1348 int idx
= cpu
->env
.v7m
.csselr
[attrs
.secure
] & R_V7M_CSSELR_INDEX_MASK
;
1349 return cpu
->ccsidr
[idx
];
1351 case 0xd84: /* CSSELR */
1352 return cpu
->env
.v7m
.csselr
[attrs
.secure
];
1353 case 0xd88: /* CPACR */
1354 if (!cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1357 return cpu
->env
.v7m
.cpacr
[attrs
.secure
];
1358 case 0xd8c: /* NSACR */
1359 if (!attrs
.secure
|| !cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1362 return cpu
->env
.v7m
.nsacr
;
1363 /* TODO: Implement debug registers. */
1364 case 0xd90: /* MPU_TYPE */
1365 /* Unified MPU; if the MPU is not present this value is zero */
1366 return cpu
->pmsav7_dregion
<< 8;
1367 case 0xd94: /* MPU_CTRL */
1368 return cpu
->env
.v7m
.mpu_ctrl
[attrs
.secure
];
1369 case 0xd98: /* MPU_RNR */
1370 return cpu
->env
.pmsav7
.rnr
[attrs
.secure
];
1371 case 0xd9c: /* MPU_RBAR */
1372 case 0xda4: /* MPU_RBAR_A1 */
1373 case 0xdac: /* MPU_RBAR_A2 */
1374 case 0xdb4: /* MPU_RBAR_A3 */
1376 int region
= cpu
->env
.pmsav7
.rnr
[attrs
.secure
];
1378 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1379 /* PMSAv8M handling of the aliases is different from v7M:
1380 * aliases A1, A2, A3 override the low two bits of the region
1381 * number in MPU_RNR, and there is no 'region' field in the
1384 int aliasno
= (offset
- 0xd9c) / 8; /* 0..3 */
1386 region
= deposit32(region
, 0, 2, aliasno
);
1388 if (region
>= cpu
->pmsav7_dregion
) {
1391 return cpu
->env
.pmsav8
.rbar
[attrs
.secure
][region
];
1394 if (region
>= cpu
->pmsav7_dregion
) {
1397 return (cpu
->env
.pmsav7
.drbar
[region
] & ~0x1f) | (region
& 0xf);
1399 case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1400 case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1401 case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1402 case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1404 int region
= cpu
->env
.pmsav7
.rnr
[attrs
.secure
];
1406 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1407 /* PMSAv8M handling of the aliases is different from v7M:
1408 * aliases A1, A2, A3 override the low two bits of the region
1409 * number in MPU_RNR.
1411 int aliasno
= (offset
- 0xda0) / 8; /* 0..3 */
1413 region
= deposit32(region
, 0, 2, aliasno
);
1415 if (region
>= cpu
->pmsav7_dregion
) {
1418 return cpu
->env
.pmsav8
.rlar
[attrs
.secure
][region
];
1421 if (region
>= cpu
->pmsav7_dregion
) {
1424 return ((cpu
->env
.pmsav7
.dracr
[region
] & 0xffff) << 16) |
1425 (cpu
->env
.pmsav7
.drsr
[region
] & 0xffff);
1427 case 0xdc0: /* MPU_MAIR0 */
1428 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1431 return cpu
->env
.pmsav8
.mair0
[attrs
.secure
];
1432 case 0xdc4: /* MPU_MAIR1 */
1433 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1436 return cpu
->env
.pmsav8
.mair1
[attrs
.secure
];
1437 case 0xdd0: /* SAU_CTRL */
1438 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1441 if (!attrs
.secure
) {
1444 return cpu
->env
.sau
.ctrl
;
1445 case 0xdd4: /* SAU_TYPE */
1446 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1449 if (!attrs
.secure
) {
1452 return cpu
->sau_sregion
;
1453 case 0xdd8: /* SAU_RNR */
1454 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1457 if (!attrs
.secure
) {
1460 return cpu
->env
.sau
.rnr
;
1461 case 0xddc: /* SAU_RBAR */
1463 int region
= cpu
->env
.sau
.rnr
;
1465 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1468 if (!attrs
.secure
) {
1471 if (region
>= cpu
->sau_sregion
) {
1474 return cpu
->env
.sau
.rbar
[region
];
1476 case 0xde0: /* SAU_RLAR */
1478 int region
= cpu
->env
.sau
.rnr
;
1480 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1483 if (!attrs
.secure
) {
1486 if (region
>= cpu
->sau_sregion
) {
1489 return cpu
->env
.sau
.rlar
[region
];
1491 case 0xde4: /* SFSR */
1492 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1495 if (!attrs
.secure
) {
1498 return cpu
->env
.v7m
.sfsr
;
1499 case 0xde8: /* SFAR */
1500 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1503 if (!attrs
.secure
) {
1506 return cpu
->env
.v7m
.sfar
;
1507 case 0xf04: /* RFSR */
1508 if (!cpu_isar_feature(aa32_ras
, cpu
)) {
1511 /* We provide minimal-RAS only: RFSR is RAZ/WI */
1513 case 0xf34: /* FPCCR */
1514 if (!cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1518 return cpu
->env
.v7m
.fpccr
[M_REG_S
];
1521 * NS can read LSPEN, CLRONRET and MONRDY. It can read
1522 * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0;
1523 * other non-banked bits RAZ.
1524 * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set.
1526 uint32_t value
= cpu
->env
.v7m
.fpccr
[M_REG_S
];
1527 uint32_t mask
= R_V7M_FPCCR_LSPEN_MASK
|
1528 R_V7M_FPCCR_CLRONRET_MASK
|
1529 R_V7M_FPCCR_MONRDY_MASK
;
1531 if (s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
) {
1532 mask
|= R_V7M_FPCCR_BFRDY_MASK
| R_V7M_FPCCR_HFRDY_MASK
;
1537 value
|= cpu
->env
.v7m
.fpccr
[M_REG_NS
];
1540 case 0xf38: /* FPCAR */
1541 if (!cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1544 return cpu
->env
.v7m
.fpcar
[attrs
.secure
];
1545 case 0xf3c: /* FPDSCR */
1546 if (!cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1549 return cpu
->env
.v7m
.fpdscr
[attrs
.secure
];
1550 case 0xf40: /* MVFR0 */
1551 return cpu
->isar
.mvfr0
;
1552 case 0xf44: /* MVFR1 */
1553 return cpu
->isar
.mvfr1
;
1554 case 0xf48: /* MVFR2 */
1555 return cpu
->isar
.mvfr2
;
1558 qemu_log_mask(LOG_GUEST_ERROR
, "NVIC: Bad read offset 0x%x\n", offset
);
1563 static void nvic_writel(NVICState
*s
, uint32_t offset
, uint32_t value
,
1566 ARMCPU
*cpu
= s
->cpu
;
1569 case 0xc: /* CPPWR */
1570 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1573 /* Make the IMPDEF choice to RAZ/WI this. */
1575 case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
1577 int startvec
= 8 * (offset
- 0x380) + NVIC_FIRST_IRQ
;
1580 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1583 if (!attrs
.secure
) {
1586 for (i
= 0; i
< 32 && startvec
+ i
< s
->num_irq
; i
++) {
1587 s
->itns
[startvec
+ i
] = (value
>> i
) & 1;
1592 case 0xd04: /* Interrupt Control State (ICSR) */
1593 if (attrs
.secure
|| cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
) {
1594 if (value
& (1 << 31)) {
1595 armv7m_nvic_set_pending(s
, ARMV7M_EXCP_NMI
, false);
1596 } else if (value
& (1 << 30) &&
1597 arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1598 /* PENDNMICLR didn't exist in v7M */
1599 armv7m_nvic_clear_pending(s
, ARMV7M_EXCP_NMI
, false);
1602 if (value
& (1 << 28)) {
1603 armv7m_nvic_set_pending(s
, ARMV7M_EXCP_PENDSV
, attrs
.secure
);
1604 } else if (value
& (1 << 27)) {
1605 armv7m_nvic_clear_pending(s
, ARMV7M_EXCP_PENDSV
, attrs
.secure
);
1607 if (value
& (1 << 26)) {
1608 armv7m_nvic_set_pending(s
, ARMV7M_EXCP_SYSTICK
, attrs
.secure
);
1609 } else if (value
& (1 << 25)) {
1610 armv7m_nvic_clear_pending(s
, ARMV7M_EXCP_SYSTICK
, attrs
.secure
);
1613 case 0xd08: /* Vector Table Offset. */
1614 cpu
->env
.v7m
.vecbase
[attrs
.secure
] = value
& 0xffffff80;
1616 case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
1617 if ((value
>> R_V7M_AIRCR_VECTKEY_SHIFT
) == 0x05fa) {
1618 if (value
& R_V7M_AIRCR_SYSRESETREQ_MASK
) {
1620 !(cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_SYSRESETREQS_MASK
)) {
1621 signal_sysresetreq(s
);
1624 if (value
& R_V7M_AIRCR_VECTCLRACTIVE_MASK
) {
1625 qemu_log_mask(LOG_GUEST_ERROR
,
1626 "Setting VECTCLRACTIVE when not in DEBUG mode "
1627 "is UNPREDICTABLE\n");
1629 if (value
& R_V7M_AIRCR_VECTRESET_MASK
) {
1630 /* NB: this bit is RES0 in v8M */
1631 qemu_log_mask(LOG_GUEST_ERROR
,
1632 "Setting VECTRESET when not in DEBUG mode "
1633 "is UNPREDICTABLE\n");
1635 if (arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1636 s
->prigroup
[attrs
.secure
] =
1638 R_V7M_AIRCR_PRIGROUP_SHIFT
,
1639 R_V7M_AIRCR_PRIGROUP_LENGTH
);
1641 /* AIRCR.IESB is RAZ/WI because we implement only minimal RAS */
1643 /* These bits are only writable by secure */
1644 cpu
->env
.v7m
.aircr
= value
&
1645 (R_V7M_AIRCR_SYSRESETREQS_MASK
|
1646 R_V7M_AIRCR_BFHFNMINS_MASK
|
1647 R_V7M_AIRCR_PRIS_MASK
);
1648 /* BFHFNMINS changes the priority of Secure HardFault, and
1649 * allows a pending Non-secure HardFault to preempt (which
1650 * we implement by marking it enabled).
1652 if (cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
) {
1653 s
->sec_vectors
[ARMV7M_EXCP_HARD
].prio
= -3;
1654 s
->vectors
[ARMV7M_EXCP_HARD
].enabled
= 1;
1656 s
->sec_vectors
[ARMV7M_EXCP_HARD
].prio
= -1;
1657 s
->vectors
[ARMV7M_EXCP_HARD
].enabled
= 0;
1663 case 0xd10: /* System Control. */
1664 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V7
)) {
1667 /* We don't implement deep-sleep so these bits are RAZ/WI.
1668 * The other bits in the register are banked.
1669 * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
1670 * is architecturally permitted.
1672 value
&= ~(R_V7M_SCR_SLEEPDEEP_MASK
| R_V7M_SCR_SLEEPDEEPS_MASK
);
1673 cpu
->env
.v7m
.scr
[attrs
.secure
] = value
;
1675 case 0xd14: /* Configuration Control. */
1679 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1683 /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
1684 mask
= R_V7M_CCR_STKALIGN_MASK
|
1685 R_V7M_CCR_BFHFNMIGN_MASK
|
1686 R_V7M_CCR_DIV_0_TRP_MASK
|
1687 R_V7M_CCR_UNALIGN_TRP_MASK
|
1688 R_V7M_CCR_USERSETMPEND_MASK
|
1689 R_V7M_CCR_NONBASETHRDENA_MASK
;
1690 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8_1M
) && attrs
.secure
) {
1691 /* TRD is always RAZ/WI from NS */
1692 mask
|= R_V7M_CCR_TRD_MASK
;
1696 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1697 /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
1698 value
|= R_V7M_CCR_NONBASETHRDENA_MASK
1699 | R_V7M_CCR_STKALIGN_MASK
;
1702 /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
1703 cpu
->env
.v7m
.ccr
[M_REG_NS
] =
1704 (cpu
->env
.v7m
.ccr
[M_REG_NS
] & ~R_V7M_CCR_BFHFNMIGN_MASK
)
1705 | (value
& R_V7M_CCR_BFHFNMIGN_MASK
);
1706 value
&= ~R_V7M_CCR_BFHFNMIGN_MASK
;
1709 * BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0, so
1710 * preserve the state currently in the NS element of the array
1712 if (!(cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
1713 value
&= ~R_V7M_CCR_BFHFNMIGN_MASK
;
1714 value
|= cpu
->env
.v7m
.ccr
[M_REG_NS
] & R_V7M_CCR_BFHFNMIGN_MASK
;
1718 cpu
->env
.v7m
.ccr
[attrs
.secure
] = value
;
1721 case 0xd24: /* System Handler Control and State (SHCSR) */
1722 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V7
)) {
1726 s
->sec_vectors
[ARMV7M_EXCP_MEM
].active
= (value
& (1 << 0)) != 0;
1727 /* Secure HardFault active bit cannot be written */
1728 s
->sec_vectors
[ARMV7M_EXCP_USAGE
].active
= (value
& (1 << 3)) != 0;
1729 s
->sec_vectors
[ARMV7M_EXCP_SVC
].active
= (value
& (1 << 7)) != 0;
1730 s
->sec_vectors
[ARMV7M_EXCP_PENDSV
].active
=
1731 (value
& (1 << 10)) != 0;
1732 s
->sec_vectors
[ARMV7M_EXCP_SYSTICK
].active
=
1733 (value
& (1 << 11)) != 0;
1734 s
->sec_vectors
[ARMV7M_EXCP_USAGE
].pending
=
1735 (value
& (1 << 12)) != 0;
1736 s
->sec_vectors
[ARMV7M_EXCP_MEM
].pending
= (value
& (1 << 13)) != 0;
1737 s
->sec_vectors
[ARMV7M_EXCP_SVC
].pending
= (value
& (1 << 15)) != 0;
1738 s
->sec_vectors
[ARMV7M_EXCP_MEM
].enabled
= (value
& (1 << 16)) != 0;
1739 s
->sec_vectors
[ARMV7M_EXCP_BUS
].enabled
= (value
& (1 << 17)) != 0;
1740 s
->sec_vectors
[ARMV7M_EXCP_USAGE
].enabled
=
1741 (value
& (1 << 18)) != 0;
1742 s
->sec_vectors
[ARMV7M_EXCP_HARD
].pending
= (value
& (1 << 21)) != 0;
1743 /* SecureFault not banked, but RAZ/WI to NS */
1744 s
->vectors
[ARMV7M_EXCP_SECURE
].active
= (value
& (1 << 4)) != 0;
1745 s
->vectors
[ARMV7M_EXCP_SECURE
].enabled
= (value
& (1 << 19)) != 0;
1746 s
->vectors
[ARMV7M_EXCP_SECURE
].pending
= (value
& (1 << 20)) != 0;
1748 s
->vectors
[ARMV7M_EXCP_MEM
].active
= (value
& (1 << 0)) != 0;
1749 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1750 /* HARDFAULTPENDED is not present in v7M */
1751 s
->vectors
[ARMV7M_EXCP_HARD
].pending
= (value
& (1 << 21)) != 0;
1753 s
->vectors
[ARMV7M_EXCP_USAGE
].active
= (value
& (1 << 3)) != 0;
1754 s
->vectors
[ARMV7M_EXCP_SVC
].active
= (value
& (1 << 7)) != 0;
1755 s
->vectors
[ARMV7M_EXCP_PENDSV
].active
= (value
& (1 << 10)) != 0;
1756 s
->vectors
[ARMV7M_EXCP_SYSTICK
].active
= (value
& (1 << 11)) != 0;
1757 s
->vectors
[ARMV7M_EXCP_USAGE
].pending
= (value
& (1 << 12)) != 0;
1758 s
->vectors
[ARMV7M_EXCP_MEM
].pending
= (value
& (1 << 13)) != 0;
1759 s
->vectors
[ARMV7M_EXCP_SVC
].pending
= (value
& (1 << 15)) != 0;
1760 s
->vectors
[ARMV7M_EXCP_MEM
].enabled
= (value
& (1 << 16)) != 0;
1761 s
->vectors
[ARMV7M_EXCP_USAGE
].enabled
= (value
& (1 << 18)) != 0;
1763 if (attrs
.secure
|| (cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
1764 s
->vectors
[ARMV7M_EXCP_BUS
].active
= (value
& (1 << 1)) != 0;
1765 s
->vectors
[ARMV7M_EXCP_BUS
].pending
= (value
& (1 << 14)) != 0;
1766 s
->vectors
[ARMV7M_EXCP_BUS
].enabled
= (value
& (1 << 17)) != 0;
1768 /* NMIACT can only be written if the write is of a zero, with
1769 * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
1771 if (!attrs
.secure
&& cpu
->env
.v7m
.secure
&&
1772 (cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
) &&
1773 (value
& (1 << 5)) == 0) {
1774 s
->vectors
[ARMV7M_EXCP_NMI
].active
= 0;
1776 /* HARDFAULTACT can only be written if the write is of a zero
1777 * to the non-secure HardFault state by the CPU in secure state.
1778 * The only case where we can be targeting the non-secure HF state
1779 * when in secure state is if this is a write via the NS alias
1780 * and BFHFNMINS is 1.
1782 if (!attrs
.secure
&& cpu
->env
.v7m
.secure
&&
1783 (cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
) &&
1784 (value
& (1 << 2)) == 0) {
1785 s
->vectors
[ARMV7M_EXCP_HARD
].active
= 0;
1788 /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
1789 s
->vectors
[ARMV7M_EXCP_DEBUG
].active
= (value
& (1 << 8)) != 0;
1792 case 0xd2c: /* Hard Fault Status. */
1793 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1796 cpu
->env
.v7m
.hfsr
&= ~value
; /* W1C */
1798 case 0xd30: /* Debug Fault Status. */
1799 cpu
->env
.v7m
.dfsr
&= ~value
; /* W1C */
1801 case 0xd34: /* Mem Manage Address. */
1802 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1805 cpu
->env
.v7m
.mmfar
[attrs
.secure
] = value
;
1807 case 0xd38: /* Bus Fault Address. */
1808 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
1811 if (!attrs
.secure
&&
1812 !(s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
1815 cpu
->env
.v7m
.bfar
= value
;
1817 case 0xd3c: /* Aux Fault Status. */
1818 qemu_log_mask(LOG_UNIMP
,
1819 "NVIC: Aux fault status registers unimplemented\n");
1821 case 0xd84: /* CSSELR */
1822 if (!arm_v7m_csselr_razwi(cpu
)) {
1823 cpu
->env
.v7m
.csselr
[attrs
.secure
] = value
& R_V7M_CSSELR_INDEX_MASK
;
1826 case 0xd88: /* CPACR */
1827 if (cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1828 /* We implement only the Floating Point extension's CP10/CP11 */
1829 cpu
->env
.v7m
.cpacr
[attrs
.secure
] = value
& (0xf << 20);
1832 case 0xd8c: /* NSACR */
1833 if (attrs
.secure
&& cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
1834 /* We implement only the Floating Point extension's CP10/CP11 */
1835 cpu
->env
.v7m
.nsacr
= value
& (3 << 10);
1838 case 0xd90: /* MPU_TYPE */
1840 case 0xd94: /* MPU_CTRL */
1842 (R_V7M_MPU_CTRL_HFNMIENA_MASK
| R_V7M_MPU_CTRL_ENABLE_MASK
))
1843 == R_V7M_MPU_CTRL_HFNMIENA_MASK
) {
1844 qemu_log_mask(LOG_GUEST_ERROR
, "MPU_CTRL: HFNMIENA and !ENABLE is "
1847 cpu
->env
.v7m
.mpu_ctrl
[attrs
.secure
]
1848 = value
& (R_V7M_MPU_CTRL_ENABLE_MASK
|
1849 R_V7M_MPU_CTRL_HFNMIENA_MASK
|
1850 R_V7M_MPU_CTRL_PRIVDEFENA_MASK
);
1851 tlb_flush(CPU(cpu
));
1853 case 0xd98: /* MPU_RNR */
1854 if (value
>= cpu
->pmsav7_dregion
) {
1855 qemu_log_mask(LOG_GUEST_ERROR
, "MPU region out of range %"
1856 PRIu32
"/%" PRIu32
"\n",
1857 value
, cpu
->pmsav7_dregion
);
1859 cpu
->env
.pmsav7
.rnr
[attrs
.secure
] = value
;
1862 case 0xd9c: /* MPU_RBAR */
1863 case 0xda4: /* MPU_RBAR_A1 */
1864 case 0xdac: /* MPU_RBAR_A2 */
1865 case 0xdb4: /* MPU_RBAR_A3 */
1869 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1870 /* PMSAv8M handling of the aliases is different from v7M:
1871 * aliases A1, A2, A3 override the low two bits of the region
1872 * number in MPU_RNR, and there is no 'region' field in the
1875 int aliasno
= (offset
- 0xd9c) / 8; /* 0..3 */
1877 region
= cpu
->env
.pmsav7
.rnr
[attrs
.secure
];
1879 region
= deposit32(region
, 0, 2, aliasno
);
1881 if (region
>= cpu
->pmsav7_dregion
) {
1884 cpu
->env
.pmsav8
.rbar
[attrs
.secure
][region
] = value
;
1885 tlb_flush(CPU(cpu
));
1889 if (value
& (1 << 4)) {
1890 /* VALID bit means use the region number specified in this
1891 * value and also update MPU_RNR.REGION with that value.
1893 region
= extract32(value
, 0, 4);
1894 if (region
>= cpu
->pmsav7_dregion
) {
1895 qemu_log_mask(LOG_GUEST_ERROR
,
1896 "MPU region out of range %u/%" PRIu32
"\n",
1897 region
, cpu
->pmsav7_dregion
);
1900 cpu
->env
.pmsav7
.rnr
[attrs
.secure
] = region
;
1902 region
= cpu
->env
.pmsav7
.rnr
[attrs
.secure
];
1905 if (region
>= cpu
->pmsav7_dregion
) {
1909 cpu
->env
.pmsav7
.drbar
[region
] = value
& ~0x1f;
1910 tlb_flush(CPU(cpu
));
1913 case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1914 case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1915 case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1916 case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
1918 int region
= cpu
->env
.pmsav7
.rnr
[attrs
.secure
];
1920 if (arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1921 /* PMSAv8M handling of the aliases is different from v7M:
1922 * aliases A1, A2, A3 override the low two bits of the region
1923 * number in MPU_RNR.
1925 int aliasno
= (offset
- 0xd9c) / 8; /* 0..3 */
1927 region
= cpu
->env
.pmsav7
.rnr
[attrs
.secure
];
1929 region
= deposit32(region
, 0, 2, aliasno
);
1931 if (region
>= cpu
->pmsav7_dregion
) {
1934 cpu
->env
.pmsav8
.rlar
[attrs
.secure
][region
] = value
;
1935 tlb_flush(CPU(cpu
));
1939 if (region
>= cpu
->pmsav7_dregion
) {
1943 cpu
->env
.pmsav7
.drsr
[region
] = value
& 0xff3f;
1944 cpu
->env
.pmsav7
.dracr
[region
] = (value
>> 16) & 0x173f;
1945 tlb_flush(CPU(cpu
));
1948 case 0xdc0: /* MPU_MAIR0 */
1949 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1952 if (cpu
->pmsav7_dregion
) {
1953 /* Register is RES0 if no MPU regions are implemented */
1954 cpu
->env
.pmsav8
.mair0
[attrs
.secure
] = value
;
1956 /* We don't need to do anything else because memory attributes
1957 * only affect cacheability, and we don't implement caching.
1960 case 0xdc4: /* MPU_MAIR1 */
1961 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1964 if (cpu
->pmsav7_dregion
) {
1965 /* Register is RES0 if no MPU regions are implemented */
1966 cpu
->env
.pmsav8
.mair1
[attrs
.secure
] = value
;
1968 /* We don't need to do anything else because memory attributes
1969 * only affect cacheability, and we don't implement caching.
1972 case 0xdd0: /* SAU_CTRL */
1973 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1976 if (!attrs
.secure
) {
1979 cpu
->env
.sau
.ctrl
= value
& 3;
1981 case 0xdd4: /* SAU_TYPE */
1982 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1986 case 0xdd8: /* SAU_RNR */
1987 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
1990 if (!attrs
.secure
) {
1993 if (value
>= cpu
->sau_sregion
) {
1994 qemu_log_mask(LOG_GUEST_ERROR
, "SAU region out of range %"
1995 PRIu32
"/%" PRIu32
"\n",
1996 value
, cpu
->sau_sregion
);
1998 cpu
->env
.sau
.rnr
= value
;
2001 case 0xddc: /* SAU_RBAR */
2003 int region
= cpu
->env
.sau
.rnr
;
2005 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
2008 if (!attrs
.secure
) {
2011 if (region
>= cpu
->sau_sregion
) {
2014 cpu
->env
.sau
.rbar
[region
] = value
& ~0x1f;
2015 tlb_flush(CPU(cpu
));
2018 case 0xde0: /* SAU_RLAR */
2020 int region
= cpu
->env
.sau
.rnr
;
2022 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
2025 if (!attrs
.secure
) {
2028 if (region
>= cpu
->sau_sregion
) {
2031 cpu
->env
.sau
.rlar
[region
] = value
& ~0x1c;
2032 tlb_flush(CPU(cpu
));
2035 case 0xde4: /* SFSR */
2036 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
2039 if (!attrs
.secure
) {
2042 cpu
->env
.v7m
.sfsr
&= ~value
; /* W1C */
2044 case 0xde8: /* SFAR */
2045 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
2048 if (!attrs
.secure
) {
2051 cpu
->env
.v7m
.sfsr
= value
;
2053 case 0xf00: /* Software Triggered Interrupt Register */
2055 int excnum
= (value
& 0x1ff) + NVIC_FIRST_IRQ
;
2057 if (!arm_feature(&cpu
->env
, ARM_FEATURE_M_MAIN
)) {
2061 if (excnum
< s
->num_irq
) {
2062 armv7m_nvic_set_pending(s
, excnum
, false);
2066 case 0xf04: /* RFSR */
2067 if (!cpu_isar_feature(aa32_ras
, cpu
)) {
2070 /* We provide minimal-RAS only: RFSR is RAZ/WI */
2072 case 0xf34: /* FPCCR */
2073 if (cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
2074 /* Not all bits here are banked. */
2077 if (!arm_feature(&cpu
->env
, ARM_FEATURE_V8
)) {
2078 /* Don't allow setting of bits not present in v7M */
2079 value
&= (R_V7M_FPCCR_LSPACT_MASK
|
2080 R_V7M_FPCCR_USER_MASK
|
2081 R_V7M_FPCCR_THREAD_MASK
|
2082 R_V7M_FPCCR_HFRDY_MASK
|
2083 R_V7M_FPCCR_MMRDY_MASK
|
2084 R_V7M_FPCCR_BFRDY_MASK
|
2085 R_V7M_FPCCR_MONRDY_MASK
|
2086 R_V7M_FPCCR_LSPEN_MASK
|
2087 R_V7M_FPCCR_ASPEN_MASK
);
2089 value
&= ~R_V7M_FPCCR_RES0_MASK
;
2091 if (!attrs
.secure
) {
2092 /* Some non-banked bits are configurably writable by NS */
2093 fpccr_s
= cpu
->env
.v7m
.fpccr
[M_REG_S
];
2094 if (!(fpccr_s
& R_V7M_FPCCR_LSPENS_MASK
)) {
2095 uint32_t lspen
= FIELD_EX32(value
, V7M_FPCCR
, LSPEN
);
2096 fpccr_s
= FIELD_DP32(fpccr_s
, V7M_FPCCR
, LSPEN
, lspen
);
2098 if (!(fpccr_s
& R_V7M_FPCCR_CLRONRETS_MASK
)) {
2099 uint32_t cor
= FIELD_EX32(value
, V7M_FPCCR
, CLRONRET
);
2100 fpccr_s
= FIELD_DP32(fpccr_s
, V7M_FPCCR
, CLRONRET
, cor
);
2102 if ((s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
2103 uint32_t hfrdy
= FIELD_EX32(value
, V7M_FPCCR
, HFRDY
);
2104 uint32_t bfrdy
= FIELD_EX32(value
, V7M_FPCCR
, BFRDY
);
2105 fpccr_s
= FIELD_DP32(fpccr_s
, V7M_FPCCR
, HFRDY
, hfrdy
);
2106 fpccr_s
= FIELD_DP32(fpccr_s
, V7M_FPCCR
, BFRDY
, bfrdy
);
2108 /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */
2110 uint32_t monrdy
= FIELD_EX32(value
, V7M_FPCCR
, MONRDY
);
2111 fpccr_s
= FIELD_DP32(fpccr_s
, V7M_FPCCR
, MONRDY
, monrdy
);
2115 * All other non-banked bits are RAZ/WI from NS; write
2116 * just the banked bits to fpccr[M_REG_NS].
2118 value
&= R_V7M_FPCCR_BANKED_MASK
;
2119 cpu
->env
.v7m
.fpccr
[M_REG_NS
] = value
;
2123 cpu
->env
.v7m
.fpccr
[M_REG_S
] = fpccr_s
;
2126 case 0xf38: /* FPCAR */
2127 if (cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
2129 cpu
->env
.v7m
.fpcar
[attrs
.secure
] = value
;
2132 case 0xf3c: /* FPDSCR */
2133 if (cpu_isar_feature(aa32_vfp_simd
, cpu
)) {
2134 uint32_t mask
= FPCR_AHP
| FPCR_DN
| FPCR_FZ
| FPCR_RMODE_MASK
;
2135 if (cpu_isar_feature(any_fp16
, cpu
)) {
2139 if (cpu_isar_feature(aa32_lob
, cpu
)) {
2140 value
|= 4 << FPCR_LTPSIZE_SHIFT
;
2142 cpu
->env
.v7m
.fpdscr
[attrs
.secure
] = value
;
2145 case 0xf50: /* ICIALLU */
2146 case 0xf58: /* ICIMVAU */
2147 case 0xf5c: /* DCIMVAC */
2148 case 0xf60: /* DCISW */
2149 case 0xf64: /* DCCMVAU */
2150 case 0xf68: /* DCCMVAC */
2151 case 0xf6c: /* DCCSW */
2152 case 0xf70: /* DCCIMVAC */
2153 case 0xf74: /* DCCISW */
2154 case 0xf78: /* BPIALL */
2155 /* Cache and branch predictor maintenance: for QEMU these always NOP */
2159 qemu_log_mask(LOG_GUEST_ERROR
,
2160 "NVIC: Bad write offset 0x%x\n", offset
);
2164 static bool nvic_user_access_ok(NVICState
*s
, hwaddr offset
, MemTxAttrs attrs
)
2166 /* Return true if unprivileged access to this register is permitted. */
2168 case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
2169 /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
2170 * controls access even though the CPU is in Secure state (I_QDKX).
2172 return s
->cpu
->env
.v7m
.ccr
[attrs
.secure
] & R_V7M_CCR_USERSETMPEND_MASK
;
2174 /* All other user accesses cause a BusFault unconditionally */
2179 static int shpr_bank(NVICState
*s
, int exc
, MemTxAttrs attrs
)
2181 /* Behaviour for the SHPR register field for this exception:
2182 * return M_REG_NS to use the nonsecure vector (including for
2183 * non-banked exceptions), M_REG_S for the secure version of
2184 * a banked exception, and -1 if this field should RAZ/WI.
2187 case ARMV7M_EXCP_MEM
:
2188 case ARMV7M_EXCP_USAGE
:
2189 case ARMV7M_EXCP_SVC
:
2190 case ARMV7M_EXCP_PENDSV
:
2191 case ARMV7M_EXCP_SYSTICK
:
2192 /* Banked exceptions */
2193 return attrs
.secure
;
2194 case ARMV7M_EXCP_BUS
:
2195 /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
2196 if (!attrs
.secure
&&
2197 !(s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
2201 case ARMV7M_EXCP_SECURE
:
2202 /* Not banked, RAZ/WI from nonsecure */
2203 if (!attrs
.secure
) {
2207 case ARMV7M_EXCP_DEBUG
:
2208 /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
2215 /* Not reachable due to decode of SHPR register addresses */
2216 g_assert_not_reached();
2220 static MemTxResult
nvic_sysreg_read(void *opaque
, hwaddr addr
,
2221 uint64_t *data
, unsigned size
,
2224 NVICState
*s
= (NVICState
*)opaque
;
2225 uint32_t offset
= addr
;
2226 unsigned i
, startvec
, end
;
2229 if (attrs
.user
&& !nvic_user_access_ok(s
, addr
, attrs
)) {
2230 /* Generate BusFault for unprivileged accesses */
2235 /* reads of set and clear both return the status */
2236 case 0x100 ... 0x13f: /* NVIC Set enable */
2239 case 0x180 ... 0x1bf: /* NVIC Clear enable */
2241 startvec
= 8 * (offset
- 0x180) + NVIC_FIRST_IRQ
; /* vector # */
2243 for (i
= 0, end
= size
* 8; i
< end
&& startvec
+ i
< s
->num_irq
; i
++) {
2244 if (s
->vectors
[startvec
+ i
].enabled
&&
2245 (attrs
.secure
|| s
->itns
[startvec
+ i
])) {
2250 case 0x200 ... 0x23f: /* NVIC Set pend */
2253 case 0x280 ... 0x2bf: /* NVIC Clear pend */
2255 startvec
= 8 * (offset
- 0x280) + NVIC_FIRST_IRQ
; /* vector # */
2256 for (i
= 0, end
= size
* 8; i
< end
&& startvec
+ i
< s
->num_irq
; i
++) {
2257 if (s
->vectors
[startvec
+ i
].pending
&&
2258 (attrs
.secure
|| s
->itns
[startvec
+ i
])) {
2263 case 0x300 ... 0x33f: /* NVIC Active */
2266 if (!arm_feature(&s
->cpu
->env
, ARM_FEATURE_V7
)) {
2270 startvec
= 8 * (offset
- 0x300) + NVIC_FIRST_IRQ
; /* vector # */
2272 for (i
= 0, end
= size
* 8; i
< end
&& startvec
+ i
< s
->num_irq
; i
++) {
2273 if (s
->vectors
[startvec
+ i
].active
&&
2274 (attrs
.secure
|| s
->itns
[startvec
+ i
])) {
2279 case 0x400 ... 0x5ef: /* NVIC Priority */
2281 startvec
= offset
- 0x400 + NVIC_FIRST_IRQ
; /* vector # */
2283 for (i
= 0; i
< size
&& startvec
+ i
< s
->num_irq
; i
++) {
2284 if (attrs
.secure
|| s
->itns
[startvec
+ i
]) {
2285 val
|= s
->vectors
[startvec
+ i
].prio
<< (8 * i
);
2289 case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
2290 if (!arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_MAIN
)) {
2295 case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
2297 for (i
= 0; i
< size
; i
++) {
2298 unsigned hdlidx
= (offset
- 0xd14) + i
;
2299 int sbank
= shpr_bank(s
, hdlidx
, attrs
);
2304 val
= deposit32(val
, i
* 8, 8, get_prio(s
, hdlidx
, sbank
));
2307 case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
2308 if (!arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_MAIN
)) {
2313 * The BFSR bits [15:8] are shared between security states
2314 * and we store them in the NS copy. They are RAZ/WI for
2315 * NS code if AIRCR.BFHFNMINS is 0.
2317 val
= s
->cpu
->env
.v7m
.cfsr
[attrs
.secure
];
2318 if (!attrs
.secure
&&
2319 !(s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
2320 val
&= ~R_V7M_CFSR_BFSR_MASK
;
2322 val
|= s
->cpu
->env
.v7m
.cfsr
[M_REG_NS
] & R_V7M_CFSR_BFSR_MASK
;
2324 val
= extract32(val
, (offset
- 0xd28) * 8, size
* 8);
2326 case 0xfe0 ... 0xfff: /* ID. */
2330 val
= nvic_id
[(offset
- 0xfe0) >> 2];
2335 val
= nvic_readl(s
, offset
, attrs
);
2337 qemu_log_mask(LOG_GUEST_ERROR
,
2338 "NVIC: Bad read of size %d at offset 0x%x\n",
2344 trace_nvic_sysreg_read(addr
, val
, size
);
2349 static MemTxResult
nvic_sysreg_write(void *opaque
, hwaddr addr
,
2350 uint64_t value
, unsigned size
,
2353 NVICState
*s
= (NVICState
*)opaque
;
2354 uint32_t offset
= addr
;
2355 unsigned i
, startvec
, end
;
2356 unsigned setval
= 0;
2358 trace_nvic_sysreg_write(addr
, value
, size
);
2360 if (attrs
.user
&& !nvic_user_access_ok(s
, addr
, attrs
)) {
2361 /* Generate BusFault for unprivileged accesses */
2366 case 0x100 ... 0x13f: /* NVIC Set enable */
2370 case 0x180 ... 0x1bf: /* NVIC Clear enable */
2371 startvec
= 8 * (offset
- 0x180) + NVIC_FIRST_IRQ
;
2373 for (i
= 0, end
= size
* 8; i
< end
&& startvec
+ i
< s
->num_irq
; i
++) {
2374 if (value
& (1 << i
) &&
2375 (attrs
.secure
|| s
->itns
[startvec
+ i
])) {
2376 s
->vectors
[startvec
+ i
].enabled
= setval
;
2381 case 0x200 ... 0x23f: /* NVIC Set pend */
2382 /* the special logic in armv7m_nvic_set_pending()
2383 * is not needed since IRQs are never escalated
2388 case 0x280 ... 0x2bf: /* NVIC Clear pend */
2389 startvec
= 8 * (offset
- 0x280) + NVIC_FIRST_IRQ
; /* vector # */
2391 for (i
= 0, end
= size
* 8; i
< end
&& startvec
+ i
< s
->num_irq
; i
++) {
2392 if (value
& (1 << i
) &&
2393 (attrs
.secure
|| s
->itns
[startvec
+ i
])) {
2394 s
->vectors
[startvec
+ i
].pending
= setval
;
2399 case 0x300 ... 0x33f: /* NVIC Active */
2400 goto exit_ok
; /* R/O */
2401 case 0x400 ... 0x5ef: /* NVIC Priority */
2402 startvec
= (offset
- 0x400) + NVIC_FIRST_IRQ
; /* vector # */
2404 for (i
= 0; i
< size
&& startvec
+ i
< s
->num_irq
; i
++) {
2405 if (attrs
.secure
|| s
->itns
[startvec
+ i
]) {
2406 set_prio(s
, startvec
+ i
, false, (value
>> (i
* 8)) & 0xff);
2411 case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
2412 if (!arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_MAIN
)) {
2416 case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
2417 for (i
= 0; i
< size
; i
++) {
2418 unsigned hdlidx
= (offset
- 0xd14) + i
;
2419 int newprio
= extract32(value
, i
* 8, 8);
2420 int sbank
= shpr_bank(s
, hdlidx
, attrs
);
2425 set_prio(s
, hdlidx
, sbank
, newprio
);
2429 case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
2430 if (!arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_MAIN
)) {
2433 /* All bits are W1C, so construct 32 bit value with 0s in
2434 * the parts not written by the access size
2436 value
<<= ((offset
- 0xd28) * 8);
2438 if (!attrs
.secure
&&
2439 !(s
->cpu
->env
.v7m
.aircr
& R_V7M_AIRCR_BFHFNMINS_MASK
)) {
2440 /* BFSR bits are RAZ/WI for NS if BFHFNMINS is set */
2441 value
&= ~R_V7M_CFSR_BFSR_MASK
;
2444 s
->cpu
->env
.v7m
.cfsr
[attrs
.secure
] &= ~value
;
2446 /* The BFSR bits [15:8] are shared between security states
2447 * and we store them in the NS copy.
2449 s
->cpu
->env
.v7m
.cfsr
[M_REG_NS
] &= ~(value
& R_V7M_CFSR_BFSR_MASK
);
2454 nvic_writel(s
, offset
, value
, attrs
);
2457 qemu_log_mask(LOG_GUEST_ERROR
,
2458 "NVIC: Bad write of size %d at offset 0x%x\n", size
, offset
);
2459 /* This is UNPREDICTABLE; treat as RAZ/WI */
2462 /* Ensure any changes made are reflected in the cached hflags. */
2463 arm_rebuild_hflags(&s
->cpu
->env
);
2467 static const MemoryRegionOps nvic_sysreg_ops
= {
2468 .read_with_attrs
= nvic_sysreg_read
,
2469 .write_with_attrs
= nvic_sysreg_write
,
2470 .endianness
= DEVICE_NATIVE_ENDIAN
,
2473 static int nvic_post_load(void *opaque
, int version_id
)
2475 NVICState
*s
= opaque
;
2479 /* Check for out of range priority settings */
2480 resetprio
= arm_feature(&s
->cpu
->env
, ARM_FEATURE_V8
) ? -4 : -3;
2482 if (s
->vectors
[ARMV7M_EXCP_RESET
].prio
!= resetprio
||
2483 s
->vectors
[ARMV7M_EXCP_NMI
].prio
!= -2 ||
2484 s
->vectors
[ARMV7M_EXCP_HARD
].prio
!= -1) {
2487 for (i
= ARMV7M_EXCP_MEM
; i
< s
->num_irq
; i
++) {
2488 if (s
->vectors
[i
].prio
& ~0xff) {
2493 nvic_recompute_state(s
);
2498 static const VMStateDescription vmstate_VecInfo
= {
2499 .name
= "armv7m_nvic_info",
2501 .minimum_version_id
= 1,
2502 .fields
= (VMStateField
[]) {
2503 VMSTATE_INT16(prio
, VecInfo
),
2504 VMSTATE_UINT8(enabled
, VecInfo
),
2505 VMSTATE_UINT8(pending
, VecInfo
),
2506 VMSTATE_UINT8(active
, VecInfo
),
2507 VMSTATE_UINT8(level
, VecInfo
),
2508 VMSTATE_END_OF_LIST()
2512 static bool nvic_security_needed(void *opaque
)
2514 NVICState
*s
= opaque
;
2516 return arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_SECURITY
);
2519 static int nvic_security_post_load(void *opaque
, int version_id
)
2521 NVICState
*s
= opaque
;
2524 /* Check for out of range priority settings */
2525 if (s
->sec_vectors
[ARMV7M_EXCP_HARD
].prio
!= -1
2526 && s
->sec_vectors
[ARMV7M_EXCP_HARD
].prio
!= -3) {
2527 /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
2528 * if the CPU state has been migrated yet; a mismatch won't
2529 * cause the emulation to blow up, though.
2533 for (i
= ARMV7M_EXCP_MEM
; i
< ARRAY_SIZE(s
->sec_vectors
); i
++) {
2534 if (s
->sec_vectors
[i
].prio
& ~0xff) {
2541 static const VMStateDescription vmstate_nvic_security
= {
2542 .name
= "armv7m_nvic/m-security",
2544 .minimum_version_id
= 1,
2545 .needed
= nvic_security_needed
,
2546 .post_load
= &nvic_security_post_load
,
2547 .fields
= (VMStateField
[]) {
2548 VMSTATE_STRUCT_ARRAY(sec_vectors
, NVICState
, NVIC_INTERNAL_VECTORS
, 1,
2549 vmstate_VecInfo
, VecInfo
),
2550 VMSTATE_UINT32(prigroup
[M_REG_S
], NVICState
),
2551 VMSTATE_BOOL_ARRAY(itns
, NVICState
, NVIC_MAX_VECTORS
),
2552 VMSTATE_END_OF_LIST()
2556 static const VMStateDescription vmstate_nvic
= {
2557 .name
= "armv7m_nvic",
2559 .minimum_version_id
= 4,
2560 .post_load
= &nvic_post_load
,
2561 .fields
= (VMStateField
[]) {
2562 VMSTATE_STRUCT_ARRAY(vectors
, NVICState
, NVIC_MAX_VECTORS
, 1,
2563 vmstate_VecInfo
, VecInfo
),
2564 VMSTATE_UINT32(prigroup
[M_REG_NS
], NVICState
),
2565 VMSTATE_END_OF_LIST()
2567 .subsections
= (const VMStateDescription
*[]) {
2568 &vmstate_nvic_security
,
2573 static Property props_nvic
[] = {
2574 /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
2575 DEFINE_PROP_UINT32("num-irq", NVICState
, num_irq
, 64),
2576 DEFINE_PROP_END_OF_LIST()
2579 static void armv7m_nvic_reset(DeviceState
*dev
)
2582 NVICState
*s
= NVIC(dev
);
2584 memset(s
->vectors
, 0, sizeof(s
->vectors
));
2585 memset(s
->sec_vectors
, 0, sizeof(s
->sec_vectors
));
2586 s
->prigroup
[M_REG_NS
] = 0;
2587 s
->prigroup
[M_REG_S
] = 0;
2589 s
->vectors
[ARMV7M_EXCP_NMI
].enabled
= 1;
2590 /* MEM, BUS, and USAGE are enabled through
2591 * the System Handler Control register
2593 s
->vectors
[ARMV7M_EXCP_SVC
].enabled
= 1;
2594 s
->vectors
[ARMV7M_EXCP_PENDSV
].enabled
= 1;
2595 s
->vectors
[ARMV7M_EXCP_SYSTICK
].enabled
= 1;
2597 /* DebugMonitor is enabled via DEMCR.MON_EN */
2598 s
->vectors
[ARMV7M_EXCP_DEBUG
].enabled
= 0;
2600 resetprio
= arm_feature(&s
->cpu
->env
, ARM_FEATURE_V8
) ? -4 : -3;
2601 s
->vectors
[ARMV7M_EXCP_RESET
].prio
= resetprio
;
2602 s
->vectors
[ARMV7M_EXCP_NMI
].prio
= -2;
2603 s
->vectors
[ARMV7M_EXCP_HARD
].prio
= -1;
2605 if (arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_SECURITY
)) {
2606 s
->sec_vectors
[ARMV7M_EXCP_HARD
].enabled
= 1;
2607 s
->sec_vectors
[ARMV7M_EXCP_SVC
].enabled
= 1;
2608 s
->sec_vectors
[ARMV7M_EXCP_PENDSV
].enabled
= 1;
2609 s
->sec_vectors
[ARMV7M_EXCP_SYSTICK
].enabled
= 1;
2611 /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
2612 s
->sec_vectors
[ARMV7M_EXCP_HARD
].prio
= -1;
2613 /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
2614 s
->vectors
[ARMV7M_EXCP_HARD
].enabled
= 0;
2616 s
->vectors
[ARMV7M_EXCP_HARD
].enabled
= 1;
2619 /* Strictly speaking the reset handler should be enabled.
2620 * However, we don't simulate soft resets through the NVIC,
2621 * and the reset vector should never be pended.
2622 * So we leave it disabled to catch logic errors.
2625 s
->exception_prio
= NVIC_NOEXC_PRIO
;
2627 s
->vectpending_is_s_banked
= false;
2628 s
->vectpending_prio
= NVIC_NOEXC_PRIO
;
2630 if (arm_feature(&s
->cpu
->env
, ARM_FEATURE_M_SECURITY
)) {
2631 memset(s
->itns
, 0, sizeof(s
->itns
));
2633 /* This state is constant and not guest accessible in a non-security
2634 * NVIC; we set the bits to true to avoid having to do a feature
2635 * bit check in the NVIC enable/pend/etc register accessors.
2639 for (i
= NVIC_FIRST_IRQ
; i
< ARRAY_SIZE(s
->itns
); i
++) {
2645 * We updated state that affects the CPU's MMUidx and thus its hflags;
2646 * and we can't guarantee that we run before the CPU reset function.
2648 arm_rebuild_hflags(&s
->cpu
->env
);
2651 static void nvic_systick_trigger(void *opaque
, int n
, int level
)
2653 NVICState
*s
= opaque
;
2656 /* SysTick just asked us to pend its exception.
2657 * (This is different from an external interrupt line's
2659 * n == 0 : NonSecure systick
2660 * n == 1 : Secure systick
2662 armv7m_nvic_set_pending(s
, ARMV7M_EXCP_SYSTICK
, n
);
2666 static void armv7m_nvic_realize(DeviceState
*dev
, Error
**errp
)
2668 NVICState
*s
= NVIC(dev
);
2670 /* The armv7m container object will have set our CPU pointer */
2671 if (!s
->cpu
|| !arm_feature(&s
->cpu
->env
, ARM_FEATURE_M
)) {
2672 error_setg(errp
, "The NVIC can only be used with a Cortex-M CPU");
2676 if (s
->num_irq
> NVIC_MAX_IRQ
) {
2677 error_setg(errp
, "num-irq %d exceeds NVIC maximum", s
->num_irq
);
2681 qdev_init_gpio_in(dev
, set_irq_level
, s
->num_irq
);
2683 /* include space for internal exception vectors */
2684 s
->num_irq
+= NVIC_FIRST_IRQ
;
2686 s
->num_prio_bits
= arm_feature(&s
->cpu
->env
, ARM_FEATURE_V7
) ? 8 : 2;
2689 * This device provides a single memory region which covers the
2690 * sysreg/NVIC registers from 0xE000E000 .. 0xE000EFFF, with the
2691 * exception of the systick timer registers 0xE000E010 .. 0xE000E0FF.
2693 memory_region_init_io(&s
->sysregmem
, OBJECT(s
), &nvic_sysreg_ops
, s
,
2694 "nvic_sysregs", 0x1000);
2695 sysbus_init_mmio(SYS_BUS_DEVICE(dev
), &s
->sysregmem
);
2698 static void armv7m_nvic_instance_init(Object
*obj
)
2700 DeviceState
*dev
= DEVICE(obj
);
2701 NVICState
*nvic
= NVIC(obj
);
2702 SysBusDevice
*sbd
= SYS_BUS_DEVICE(obj
);
2704 sysbus_init_irq(sbd
, &nvic
->excpout
);
2705 qdev_init_gpio_out_named(dev
, &nvic
->sysresetreq
, "SYSRESETREQ", 1);
2706 qdev_init_gpio_in_named(dev
, nvic_systick_trigger
, "systick-trigger",
2708 qdev_init_gpio_in_named(dev
, nvic_nmi_trigger
, "NMI", 1);
2711 static void armv7m_nvic_class_init(ObjectClass
*klass
, void *data
)
2713 DeviceClass
*dc
= DEVICE_CLASS(klass
);
2715 dc
->vmsd
= &vmstate_nvic
;
2716 device_class_set_props(dc
, props_nvic
);
2717 dc
->reset
= armv7m_nvic_reset
;
2718 dc
->realize
= armv7m_nvic_realize
;
2721 static const TypeInfo armv7m_nvic_info
= {
2723 .parent
= TYPE_SYS_BUS_DEVICE
,
2724 .instance_init
= armv7m_nvic_instance_init
,
2725 .instance_size
= sizeof(NVICState
),
2726 .class_init
= armv7m_nvic_class_init
,
2727 .class_size
= sizeof(SysBusDeviceClass
),
2730 static void armv7m_nvic_register_types(void)
2732 type_register_static(&armv7m_nvic_info
);
2735 type_init(armv7m_nvic_register_types
)