/*
 *  s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
25 #include "qemu/timer.h"
26 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
28 #include "hw/s390x/ioinst.h"
29 #include "exec/address-spaces.h"
30 #ifndef CONFIG_USER_ONLY
31 #include "sysemu/sysemu.h"
/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
/* Debug build with stdout: mirror output to stderr and the qemu log. */
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
/* Debug build: send debug output to the qemu log only. */
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
/* Debugging disabled: DPRINTF calls compile away to nothing. */
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
51 #if defined(CONFIG_USER_ONLY)
53 void s390_cpu_do_interrupt(CPUState
*cs
)
55 cs
->exception_index
= -1;
58 int s390_cpu_handle_mmu_fault(CPUState
*cs
, vaddr address
,
61 S390CPU
*cpu
= S390_CPU(cs
);
63 trigger_pgm_exception(&cpu
->env
, PGM_ADDRESSING
, ILEN_AUTO
);
64 /* On real machines this value is dropped into LowMem. Since this
65 is userland, simply put this someplace that cpu_loop can find it. */
66 cpu
->env
.__excp_addr
= address
;
70 #else /* !CONFIG_USER_ONLY */
72 static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx
)
76 return PSW_ASC_PRIMARY
;
77 case MMU_SECONDARY_IDX
:
78 return PSW_ASC_SECONDARY
;
86 int s390_cpu_handle_mmu_fault(CPUState
*cs
, vaddr orig_vaddr
,
89 S390CPU
*cpu
= S390_CPU(cs
);
90 CPUS390XState
*env
= &cpu
->env
;
91 target_ulong vaddr
, raddr
;
95 DPRINTF("%s: address 0x%" VADDR_PRIx
" rw %d mmu_idx %d\n",
96 __func__
, orig_vaddr
, rw
, mmu_idx
);
98 orig_vaddr
&= TARGET_PAGE_MASK
;
101 if (mmu_idx
< MMU_REAL_IDX
) {
102 asc
= cpu_mmu_idx_to_asc(mmu_idx
);
104 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
107 if (mmu_translate(env
, vaddr
, rw
, asc
, &raddr
, &prot
, true)) {
110 } else if (mmu_idx
== MMU_REAL_IDX
) {
111 if (mmu_translate_real(env
, vaddr
, rw
, &raddr
, &prot
)) {
118 /* check out of RAM access */
119 if (!address_space_access_valid(&address_space_memory
, raddr
,
120 TARGET_PAGE_SIZE
, rw
)) {
121 DPRINTF("%s: raddr %" PRIx64
" > ram_size %" PRIx64
"\n", __func__
,
122 (uint64_t)raddr
, (uint64_t)ram_size
);
123 trigger_pgm_exception(env
, PGM_ADDRESSING
, ILEN_AUTO
);
127 qemu_log_mask(CPU_LOG_MMU
, "%s: set tlb %" PRIx64
" -> %" PRIx64
" (%x)\n",
128 __func__
, (uint64_t)vaddr
, (uint64_t)raddr
, prot
);
130 tlb_set_page(cs
, orig_vaddr
, raddr
, prot
,
131 mmu_idx
, TARGET_PAGE_SIZE
);
136 static void do_program_interrupt(CPUS390XState
*env
)
140 int ilen
= env
->int_pgm_ilen
;
142 if (ilen
== ILEN_AUTO
) {
143 ilen
= get_ilen(cpu_ldub_code(env
, env
->psw
.addr
));
145 assert(ilen
== 2 || ilen
== 4 || ilen
== 6);
147 switch (env
->int_pgm_code
) {
149 if (env
->per_perc_atmid
& PER_CODE_EVENT_NULLIFICATION
) {
158 case PGM_SPECIFICATION
:
160 case PGM_FIXPT_OVERFLOW
:
161 case PGM_FIXPT_DIVIDE
:
162 case PGM_DEC_OVERFLOW
:
164 case PGM_HFP_EXP_OVERFLOW
:
165 case PGM_HFP_EXP_UNDERFLOW
:
166 case PGM_HFP_SIGNIFICANCE
:
172 case PGM_PC_TRANS_SPEC
:
175 /* advance the PSW if our exception is not nullifying */
176 env
->psw
.addr
+= ilen
;
180 qemu_log_mask(CPU_LOG_INT
, "%s: code=0x%x ilen=%d\n",
181 __func__
, env
->int_pgm_code
, ilen
);
183 lowcore
= cpu_map_lowcore(env
);
185 /* Signal PER events with the exception. */
186 if (env
->per_perc_atmid
) {
187 env
->int_pgm_code
|= PGM_PER
;
188 lowcore
->per_address
= cpu_to_be64(env
->per_address
);
189 lowcore
->per_perc_atmid
= cpu_to_be16(env
->per_perc_atmid
);
190 env
->per_perc_atmid
= 0;
193 lowcore
->pgm_ilen
= cpu_to_be16(ilen
);
194 lowcore
->pgm_code
= cpu_to_be16(env
->int_pgm_code
);
195 lowcore
->program_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
196 lowcore
->program_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
197 mask
= be64_to_cpu(lowcore
->program_new_psw
.mask
);
198 addr
= be64_to_cpu(lowcore
->program_new_psw
.addr
);
199 lowcore
->per_breaking_event_addr
= cpu_to_be64(env
->gbea
);
201 cpu_unmap_lowcore(lowcore
);
203 DPRINTF("%s: %x %x %" PRIx64
" %" PRIx64
"\n", __func__
,
204 env
->int_pgm_code
, ilen
, env
->psw
.mask
,
207 load_psw(env
, mask
, addr
);
210 static void do_svc_interrupt(CPUS390XState
*env
)
215 lowcore
= cpu_map_lowcore(env
);
217 lowcore
->svc_code
= cpu_to_be16(env
->int_svc_code
);
218 lowcore
->svc_ilen
= cpu_to_be16(env
->int_svc_ilen
);
219 lowcore
->svc_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
220 lowcore
->svc_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
+ env
->int_svc_ilen
);
221 mask
= be64_to_cpu(lowcore
->svc_new_psw
.mask
);
222 addr
= be64_to_cpu(lowcore
->svc_new_psw
.addr
);
224 cpu_unmap_lowcore(lowcore
);
226 load_psw(env
, mask
, addr
);
228 /* When a PER event is pending, the PER exception has to happen
229 immediately after the SERVICE CALL one. */
230 if (env
->per_perc_atmid
) {
231 env
->int_pgm_code
= PGM_PER
;
232 env
->int_pgm_ilen
= env
->int_svc_ilen
;
233 do_program_interrupt(env
);
237 #define VIRTIO_SUBCODE_64 0x0D00
239 static void do_ext_interrupt(CPUS390XState
*env
)
241 S390CPU
*cpu
= s390_env_get_cpu(env
);
246 if (!(env
->psw
.mask
& PSW_MASK_EXT
)) {
247 cpu_abort(CPU(cpu
), "Ext int w/o ext mask\n");
250 lowcore
= cpu_map_lowcore(env
);
252 if ((env
->pending_int
& INTERRUPT_EMERGENCY_SIGNAL
) &&
253 (env
->cregs
[0] & CR0_EMERGENCY_SIGNAL_SC
)) {
254 lowcore
->ext_int_code
= cpu_to_be16(EXT_EMERGENCY
);
255 cpu_addr
= find_first_bit(env
->emergency_signals
, S390_MAX_CPUS
);
256 g_assert(cpu_addr
< S390_MAX_CPUS
);
257 lowcore
->cpu_addr
= cpu_to_be16(cpu_addr
);
258 clear_bit(cpu_addr
, env
->emergency_signals
);
259 if (bitmap_empty(env
->emergency_signals
, max_cpus
)) {
260 env
->pending_int
&= ~INTERRUPT_EMERGENCY_SIGNAL
;
262 } else if ((env
->pending_int
& INTERRUPT_EXTERNAL_CALL
) &&
263 (env
->cregs
[0] & CR0_EXTERNAL_CALL_SC
)) {
264 lowcore
->ext_int_code
= cpu_to_be16(EXT_EXTERNAL_CALL
);
265 lowcore
->cpu_addr
= cpu_to_be16(env
->external_call_addr
);
266 env
->pending_int
&= ~INTERRUPT_EXTERNAL_CALL
;
267 } else if ((env
->pending_int
& INTERRUPT_EXT_CLOCK_COMPARATOR
) &&
268 (env
->cregs
[0] & CR0_CKC_SC
)) {
269 lowcore
->ext_int_code
= cpu_to_be16(EXT_CLOCK_COMP
);
270 lowcore
->cpu_addr
= 0;
271 env
->pending_int
&= ~INTERRUPT_EXT_CLOCK_COMPARATOR
;
272 } else if ((env
->pending_int
& INTERRUPT_EXT_CPU_TIMER
) &&
273 (env
->cregs
[0] & CR0_CPU_TIMER_SC
)) {
274 lowcore
->ext_int_code
= cpu_to_be16(EXT_CPU_TIMER
);
275 lowcore
->cpu_addr
= 0;
276 env
->pending_int
&= ~INTERRUPT_EXT_CPU_TIMER
;
277 } else if ((env
->pending_int
& INTERRUPT_EXT_SERVICE
) &&
278 (env
->cregs
[0] & CR0_SERVICE_SC
)) {
280 * FIXME: floating IRQs should be considered by all CPUs and
281 * shuld not get cleared by CPU reset.
283 lowcore
->ext_int_code
= cpu_to_be16(EXT_SERVICE
);
284 lowcore
->ext_params
= cpu_to_be32(env
->service_param
);
285 lowcore
->cpu_addr
= 0;
286 env
->service_param
= 0;
287 env
->pending_int
&= ~INTERRUPT_EXT_SERVICE
;
289 g_assert_not_reached();
292 mask
= be64_to_cpu(lowcore
->external_new_psw
.mask
);
293 addr
= be64_to_cpu(lowcore
->external_new_psw
.addr
);
294 lowcore
->external_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
295 lowcore
->external_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
297 cpu_unmap_lowcore(lowcore
);
299 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
300 env
->psw
.mask
, env
->psw
.addr
);
302 load_psw(env
, mask
, addr
);
305 static void do_io_interrupt(CPUS390XState
*env
)
307 S390CPU
*cpu
= s390_env_get_cpu(env
);
314 if (!(env
->psw
.mask
& PSW_MASK_IO
)) {
315 cpu_abort(CPU(cpu
), "I/O int w/o I/O mask\n");
318 for (isc
= 0; isc
< ARRAY_SIZE(env
->io_index
); isc
++) {
321 if (env
->io_index
[isc
] < 0) {
324 if (env
->io_index
[isc
] >= MAX_IO_QUEUE
) {
325 cpu_abort(CPU(cpu
), "I/O queue overrun for isc %d: %d\n",
326 isc
, env
->io_index
[isc
]);
329 q
= &env
->io_queue
[env
->io_index
[isc
]][isc
];
330 isc_bits
= ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q
->word
));
331 if (!(env
->cregs
[6] & isc_bits
)) {
339 lowcore
= cpu_map_lowcore(env
);
341 lowcore
->subchannel_id
= cpu_to_be16(q
->id
);
342 lowcore
->subchannel_nr
= cpu_to_be16(q
->nr
);
343 lowcore
->io_int_parm
= cpu_to_be32(q
->parm
);
344 lowcore
->io_int_word
= cpu_to_be32(q
->word
);
345 lowcore
->io_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
346 lowcore
->io_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
347 mask
= be64_to_cpu(lowcore
->io_new_psw
.mask
);
348 addr
= be64_to_cpu(lowcore
->io_new_psw
.addr
);
350 cpu_unmap_lowcore(lowcore
);
352 env
->io_index
[isc
]--;
354 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
355 env
->psw
.mask
, env
->psw
.addr
);
356 load_psw(env
, mask
, addr
);
358 if (env
->io_index
[isc
] >= 0) {
365 env
->pending_int
&= ~INTERRUPT_IO
;
370 static void do_mchk_interrupt(CPUS390XState
*env
)
372 S390CPU
*cpu
= s390_env_get_cpu(env
);
378 if (!(env
->psw
.mask
& PSW_MASK_MCHECK
)) {
379 cpu_abort(CPU(cpu
), "Machine check w/o mchk mask\n");
382 if (env
->mchk_index
< 0 || env
->mchk_index
>= MAX_MCHK_QUEUE
) {
383 cpu_abort(CPU(cpu
), "Mchk queue overrun: %d\n", env
->mchk_index
);
386 q
= &env
->mchk_queue
[env
->mchk_index
];
389 /* Don't know how to handle this... */
390 cpu_abort(CPU(cpu
), "Unknown machine check type %d\n", q
->type
);
392 if (!(env
->cregs
[14] & (1 << 28))) {
393 /* CRW machine checks disabled */
397 lowcore
= cpu_map_lowcore(env
);
399 for (i
= 0; i
< 16; i
++) {
400 lowcore
->floating_pt_save_area
[i
] = cpu_to_be64(get_freg(env
, i
)->ll
);
401 lowcore
->gpregs_save_area
[i
] = cpu_to_be64(env
->regs
[i
]);
402 lowcore
->access_regs_save_area
[i
] = cpu_to_be32(env
->aregs
[i
]);
403 lowcore
->cregs_save_area
[i
] = cpu_to_be64(env
->cregs
[i
]);
405 lowcore
->prefixreg_save_area
= cpu_to_be32(env
->psa
);
406 lowcore
->fpt_creg_save_area
= cpu_to_be32(env
->fpc
);
407 lowcore
->tod_progreg_save_area
= cpu_to_be32(env
->todpr
);
408 lowcore
->cpu_timer_save_area
[0] = cpu_to_be32(env
->cputm
>> 32);
409 lowcore
->cpu_timer_save_area
[1] = cpu_to_be32((uint32_t)env
->cputm
);
410 lowcore
->clock_comp_save_area
[0] = cpu_to_be32(env
->ckc
>> 32);
411 lowcore
->clock_comp_save_area
[1] = cpu_to_be32((uint32_t)env
->ckc
);
413 lowcore
->mcck_interruption_code
[0] = cpu_to_be32(0x00400f1d);
414 lowcore
->mcck_interruption_code
[1] = cpu_to_be32(0x40330000);
415 lowcore
->mcck_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
416 lowcore
->mcck_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
417 mask
= be64_to_cpu(lowcore
->mcck_new_psw
.mask
);
418 addr
= be64_to_cpu(lowcore
->mcck_new_psw
.addr
);
420 cpu_unmap_lowcore(lowcore
);
423 if (env
->mchk_index
== -1) {
424 env
->pending_int
&= ~INTERRUPT_MCHK
;
427 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
428 env
->psw
.mask
, env
->psw
.addr
);
430 load_psw(env
, mask
, addr
);
433 void s390_cpu_do_interrupt(CPUState
*cs
)
435 S390CPU
*cpu
= S390_CPU(cs
);
436 CPUS390XState
*env
= &cpu
->env
;
438 qemu_log_mask(CPU_LOG_INT
, "%s: %d at pc=%" PRIx64
"\n",
439 __func__
, cs
->exception_index
, env
->psw
.addr
);
441 /* handle machine checks */
442 if (cs
->exception_index
== -1 && s390_cpu_has_mcck_int(cpu
)) {
443 cs
->exception_index
= EXCP_MCHK
;
445 /* handle external interrupts */
446 if (cs
->exception_index
== -1 && s390_cpu_has_ext_int(cpu
)) {
447 cs
->exception_index
= EXCP_EXT
;
449 /* handle I/O interrupts */
450 if (cs
->exception_index
== -1 && s390_cpu_has_io_int(cpu
)) {
451 cs
->exception_index
= EXCP_IO
;
454 switch (cs
->exception_index
) {
456 do_program_interrupt(env
);
459 do_svc_interrupt(env
);
462 do_ext_interrupt(env
);
465 do_io_interrupt(env
);
468 do_mchk_interrupt(env
);
471 cs
->exception_index
= -1;
473 /* we might still have pending interrupts, but not deliverable */
474 if (!env
->pending_int
) {
475 cs
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
479 bool s390_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
481 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
482 S390CPU
*cpu
= S390_CPU(cs
);
483 CPUS390XState
*env
= &cpu
->env
;
486 /* Execution of the target insn is indivisible from
487 the parent EXECUTE insn. */
490 if (s390_cpu_has_int(cpu
)) {
491 s390_cpu_do_interrupt(cs
);
498 void s390x_cpu_debug_excp_handler(CPUState
*cs
)
500 S390CPU
*cpu
= S390_CPU(cs
);
501 CPUS390XState
*env
= &cpu
->env
;
502 CPUWatchpoint
*wp_hit
= cs
->watchpoint_hit
;
504 if (wp_hit
&& wp_hit
->flags
& BP_CPU
) {
505 /* FIXME: When the storage-alteration-space control bit is set,
506 the exception should only be triggered if the memory access
507 is done using an address space with the storage-alteration-event
508 bit set. We have no way to detect that with the current
510 cs
->watchpoint_hit
= NULL
;
512 env
->per_address
= env
->psw
.addr
;
513 env
->per_perc_atmid
|= PER_CODE_EVENT_STORE
| get_per_atmid(env
);
514 /* FIXME: We currently no way to detect the address space used
515 to trigger the watchpoint. For now just consider it is the
516 current default ASC. This turn to be true except when MVCP
517 and MVCS instrutions are not used. */
518 env
->per_perc_atmid
|= env
->psw
.mask
& (PSW_MASK_ASC
) >> 46;
520 /* Remove all watchpoints to re-execute the code. A PER exception
521 will be triggered, it will call load_psw which will recompute
523 cpu_watchpoint_remove_all(cs
, BP_CPU
);
524 cpu_loop_exit_noexc(cs
);
528 /* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
529 this is only for the atomic operations, for which we want to raise a
530 specification exception. */
531 void s390x_cpu_do_unaligned_access(CPUState
*cs
, vaddr addr
,
532 MMUAccessType access_type
,
533 int mmu_idx
, uintptr_t retaddr
)
535 S390CPU
*cpu
= S390_CPU(cs
);
536 CPUS390XState
*env
= &cpu
->env
;
539 cpu_restore_state(cs
, retaddr
);
541 program_interrupt(env
, PGM_SPECIFICATION
, ILEN_AUTO
);
544 #endif /* CONFIG_USER_ONLY */